1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_compat.h" 41 42 #include <sys/param.h> 43 #include <sys/limits.h> 44 #include <sys/systm.h> 45 #include <sys/syscallsubr.h> 46 #include <sys/sysproto.h> 47 #include <sys/conf.h> 48 #include <sys/filedesc.h> 49 #include <sys/lock.h> 50 #include <sys/jail.h> 51 #include <sys/kernel.h> 52 #include <sys/limits.h> 53 #include <sys/malloc.h> 54 #include <sys/mutex.h> 55 #include <sys/sysctl.h> 56 #include <sys/vnode.h> 57 #include <sys/mount.h> 58 #include <sys/proc.h> 59 #include <sys/namei.h> 60 #include <sys/file.h> 61 #include <sys/stat.h> 62 #include <sys/filio.h> 63 #include <sys/fcntl.h> 64 #include <sys/unistd.h> 65 #include <sys/resourcevar.h> 66 #include <sys/event.h> 67 #include <sys/sx.h> 68 #include <sys/socketvar.h> 69 #include <sys/signalvar.h> 70 71 #include <vm/vm.h> 72 #include <vm/vm_extern.h> 73 #include <vm/uma.h> 74 75 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table"); 76 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader", 77 "file desc to leader structures"); 78 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 79 80 static uma_zone_t file_zone; 81 82 static d_open_t fdopen; 83 #define NUMFDESC 64 84 85 #define CDEV_MAJOR 22 86 static struct cdevsw fildesc_cdevsw = { 87 .d_version = D_VERSION, 88 .d_flags = D_NEEDGIANT, 89 .d_open = fdopen, 90 .d_name = "FD", 91 .d_maj = CDEV_MAJOR, 92 }; 93 94 /* How to treat 'new' parameter when allocating a fd for do_dup(). 
*/ 95 enum dup_type { DUP_VARIABLE, DUP_FIXED }; 96 97 static int do_dup(struct thread *td, enum dup_type type, int old, int new, 98 register_t *retval); 99 static int fd_first_free(struct filedesc *, int, int); 100 static int fd_last_used(struct filedesc *, int, int); 101 static void fdgrowtable(struct filedesc *, int); 102 103 /* 104 * Descriptor management. 105 */ 106 struct filelist filehead; /* head of list of open files */ 107 int nfiles; /* actual number of open files */ 108 struct sx filelist_lock; /* sx to protect filelist */ 109 struct mtx sigio_lock; /* mtx to protect pointers to sigio */ 110 111 /* 112 * Find the first zero bit in the given bitmap, starting at low and not 113 * exceeding size - 1. 114 */ 115 static int 116 fd_first_free(struct filedesc *fdp, int low, int size) 117 { 118 NDSLOTTYPE *map = fdp->fd_map; 119 NDSLOTTYPE mask; 120 int off, maxoff; 121 122 if (low >= size) 123 return (low); 124 125 off = NDSLOT(low); 126 if (low % NDENTRIES) { 127 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES))); 128 if ((mask &= ~map[off]) != 0UL) 129 return (off * NDENTRIES + ffsl(mask) - 1); 130 ++off; 131 } 132 for (maxoff = NDSLOTS(size); off < maxoff; ++off) 133 if (map[off] != ~0UL) 134 return (off * NDENTRIES + ffsl(~map[off]) - 1); 135 return (size); 136 } 137 138 /* 139 * Find the highest non-zero bit in the given bitmap, starting at low and 140 * not exceeding size - 1. 141 */ 142 static int 143 fd_last_used(struct filedesc *fdp, int low, int size) 144 { 145 NDSLOTTYPE *map = fdp->fd_map; 146 NDSLOTTYPE mask; 147 int off, minoff; 148 149 if (low >= size) 150 return (-1); 151 152 off = NDSLOT(size); 153 if (size % NDENTRIES) { 154 mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES)); 155 if ((mask &= map[off]) != 0) 156 return (off * NDENTRIES + flsl(mask) - 1); 157 --off; 158 } 159 for (minoff = NDSLOT(low); off >= minoff; --off) 160 if (map[off] != 0) 161 return (off * NDENTRIES + flsl(map[off]) - 1); 162 return (size - 1); 163 } 164 165 static int 166 fdisused(struct filedesc *fdp, int fd) 167 { 168 KASSERT(fd >= 0 && fd < fdp->fd_nfiles, 169 ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles)); 170 return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0); 171 } 172 173 /* 174 * Mark a file descriptor as used. 175 */ 176 void 177 fdused(struct filedesc *fdp, int fd) 178 { 179 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 180 KASSERT(!fdisused(fdp, fd), 181 ("fd already used")); 182 fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd); 183 if (fd > fdp->fd_lastfile) 184 fdp->fd_lastfile = fd; 185 if (fd == fdp->fd_freefile) 186 fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles); 187 } 188 189 /* 190 * Mark a file descriptor as unused. 191 */ 192 void 193 fdunused(struct filedesc *fdp, int fd) 194 { 195 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 196 KASSERT(fdisused(fdp, fd), 197 ("fd is already unused")); 198 KASSERT(fdp->fd_ofiles[fd] == NULL, 199 ("fd is still in use")); 200 fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd); 201 if (fd < fdp->fd_freefile) 202 fdp->fd_freefile = fd; 203 if (fd == fdp->fd_lastfile) 204 fdp->fd_lastfile = fd_last_used(fdp, 0, fd); 205 } 206 207 /* 208 * System calls on descriptors. 
209 */ 210 #ifndef _SYS_SYSPROTO_H_ 211 struct getdtablesize_args { 212 int dummy; 213 }; 214 #endif 215 /* 216 * MPSAFE 217 */ 218 /* ARGSUSED */ 219 int 220 getdtablesize(td, uap) 221 struct thread *td; 222 struct getdtablesize_args *uap; 223 { 224 struct proc *p = td->td_proc; 225 226 PROC_LOCK(p); 227 td->td_retval[0] = 228 min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 229 PROC_UNLOCK(p); 230 return (0); 231 } 232 233 /* 234 * Duplicate a file descriptor to a particular value. 235 * 236 * note: keep in mind that a potential race condition exists when closing 237 * descriptors from a shared descriptor table (via rfork). 238 */ 239 #ifndef _SYS_SYSPROTO_H_ 240 struct dup2_args { 241 u_int from; 242 u_int to; 243 }; 244 #endif 245 /* 246 * MPSAFE 247 */ 248 /* ARGSUSED */ 249 int 250 dup2(td, uap) 251 struct thread *td; 252 struct dup2_args *uap; 253 { 254 255 return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to, 256 td->td_retval)); 257 } 258 259 /* 260 * Duplicate a file descriptor. 261 */ 262 #ifndef _SYS_SYSPROTO_H_ 263 struct dup_args { 264 u_int fd; 265 }; 266 #endif 267 /* 268 * MPSAFE 269 */ 270 /* ARGSUSED */ 271 int 272 dup(td, uap) 273 struct thread *td; 274 struct dup_args *uap; 275 { 276 277 return (do_dup(td, DUP_VARIABLE, (int)uap->fd, 0, td->td_retval)); 278 } 279 280 /* 281 * The file control system call. 282 */ 283 #ifndef _SYS_SYSPROTO_H_ 284 struct fcntl_args { 285 int fd; 286 int cmd; 287 long arg; 288 }; 289 #endif 290 /* 291 * MPSAFE 292 */ 293 /* ARGSUSED */ 294 int 295 fcntl(td, uap) 296 struct thread *td; 297 struct fcntl_args *uap; 298 { 299 struct flock fl; 300 intptr_t arg; 301 int error; 302 303 error = 0; 304 switch (uap->cmd) { 305 case F_GETLK: 306 case F_SETLK: 307 case F_SETLKW: 308 error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl)); 309 arg = (intptr_t)&fl; 310 break; 311 default: 312 arg = uap->arg; 313 break; 314 } 315 if (error) 316 return (error); 317 error = kern_fcntl(td, uap->fd, uap->cmd, arg); 318 if (error) 319 return (error); 320 if (uap->cmd == F_GETLK) 321 error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl)); 322 return (error); 323 } 324 325 int 326 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg) 327 { 328 struct filedesc *fdp; 329 struct flock *flp; 330 struct file *fp; 331 struct proc *p; 332 char *pop; 333 struct vnode *vp; 334 u_int newmin; 335 int error, flg, tmp; 336 337 error = 0; 338 flg = F_POSIX; 339 p = td->td_proc; 340 fdp = p->p_fd; 341 mtx_lock(&Giant); 342 FILEDESC_LOCK(fdp); 343 if ((unsigned)fd >= fdp->fd_nfiles || 344 (fp = fdp->fd_ofiles[fd]) == NULL) { 345 FILEDESC_UNLOCK(fdp); 346 error = EBADF; 347 goto done2; 348 } 349 pop = &fdp->fd_ofileflags[fd]; 350 351 switch (cmd) { 352 case F_DUPFD: 353 FILEDESC_UNLOCK(fdp); 354 newmin = arg; 355 PROC_LOCK(p); 356 if (newmin >= lim_cur(p, RLIMIT_NOFILE) || 357 newmin >= maxfilesperproc) { 358 PROC_UNLOCK(p); 359 error = EINVAL; 360 break; 361 } 362 PROC_UNLOCK(p); 363 error = do_dup(td, DUP_VARIABLE, fd, newmin, td->td_retval); 364 break; 365 366 case F_GETFD: 367 td->td_retval[0] = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0; 368 FILEDESC_UNLOCK(fdp); 369 break; 370 371 case F_SETFD: 372 *pop = (*pop &~ UF_EXCLOSE) | 373 (arg & FD_CLOEXEC ? 
UF_EXCLOSE : 0); 374 FILEDESC_UNLOCK(fdp); 375 break; 376 377 case F_GETFL: 378 FILE_LOCK(fp); 379 FILEDESC_UNLOCK(fdp); 380 td->td_retval[0] = OFLAGS(fp->f_flag); 381 FILE_UNLOCK(fp); 382 break; 383 384 case F_SETFL: 385 FILE_LOCK(fp); 386 FILEDESC_UNLOCK(fdp); 387 fhold_locked(fp); 388 fp->f_flag &= ~FCNTLFLAGS; 389 fp->f_flag |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS; 390 FILE_UNLOCK(fp); 391 tmp = fp->f_flag & FNONBLOCK; 392 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 393 if (error) { 394 fdrop(fp, td); 395 break; 396 } 397 tmp = fp->f_flag & FASYNC; 398 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td); 399 if (error == 0) { 400 fdrop(fp, td); 401 break; 402 } 403 FILE_LOCK(fp); 404 fp->f_flag &= ~FNONBLOCK; 405 FILE_UNLOCK(fp); 406 tmp = 0; 407 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 408 fdrop(fp, td); 409 break; 410 411 case F_GETOWN: 412 fhold(fp); 413 FILEDESC_UNLOCK(fdp); 414 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td); 415 if (error == 0) 416 td->td_retval[0] = tmp; 417 fdrop(fp, td); 418 break; 419 420 case F_SETOWN: 421 fhold(fp); 422 FILEDESC_UNLOCK(fdp); 423 tmp = arg; 424 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td); 425 fdrop(fp, td); 426 break; 427 428 case F_SETLKW: 429 flg |= F_WAIT; 430 /* FALLTHROUGH F_SETLK */ 431 432 case F_SETLK: 433 if (fp->f_type != DTYPE_VNODE) { 434 FILEDESC_UNLOCK(fdp); 435 error = EBADF; 436 break; 437 } 438 439 flp = (struct flock *)arg; 440 if (flp->l_whence == SEEK_CUR) { 441 if (fp->f_offset < 0 || 442 (flp->l_start > 0 && 443 fp->f_offset > OFF_MAX - flp->l_start)) { 444 FILEDESC_UNLOCK(fdp); 445 error = EOVERFLOW; 446 break; 447 } 448 flp->l_start += fp->f_offset; 449 } 450 451 /* 452 * VOP_ADVLOCK() may block. 453 */ 454 fhold(fp); 455 FILEDESC_UNLOCK(fdp); 456 vp = fp->f_vnode; 457 458 switch (flp->l_type) { 459 case F_RDLCK: 460 if ((fp->f_flag & FREAD) == 0) { 461 error = EBADF; 462 break; 463 } 464 PROC_LOCK(p->p_leader); 465 p->p_leader->p_flag |= P_ADVLOCK; 466 PROC_UNLOCK(p->p_leader); 467 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 468 flp, flg); 469 break; 470 case F_WRLCK: 471 if ((fp->f_flag & FWRITE) == 0) { 472 error = EBADF; 473 break; 474 } 475 PROC_LOCK(p->p_leader); 476 p->p_leader->p_flag |= P_ADVLOCK; 477 PROC_UNLOCK(p->p_leader); 478 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 479 flp, flg); 480 break; 481 case F_UNLCK: 482 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 483 flp, F_POSIX); 484 break; 485 default: 486 error = EINVAL; 487 break; 488 } 489 /* Check for race with close */ 490 FILEDESC_LOCK(fdp); 491 if ((unsigned) fd >= fdp->fd_nfiles || 492 fp != fdp->fd_ofiles[fd]) { 493 FILEDESC_UNLOCK(fdp); 494 flp->l_whence = SEEK_SET; 495 flp->l_start = 0; 496 flp->l_len = 0; 497 flp->l_type = F_UNLCK; 498 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 499 F_UNLCK, flp, F_POSIX); 500 } else 501 FILEDESC_UNLOCK(fdp); 502 fdrop(fp, td); 503 break; 504 505 case F_GETLK: 506 if (fp->f_type != DTYPE_VNODE) { 507 FILEDESC_UNLOCK(fdp); 508 error = EBADF; 509 break; 510 } 511 flp = (struct flock *)arg; 512 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK && 513 flp->l_type != F_UNLCK) { 514 FILEDESC_UNLOCK(fdp); 515 error = EINVAL; 516 break; 517 } 518 if (flp->l_whence == SEEK_CUR) { 519 if ((flp->l_start > 0 && 520 fp->f_offset > OFF_MAX - flp->l_start) || 521 (flp->l_start < 0 && 522 fp->f_offset < OFF_MIN - flp->l_start)) { 523 FILEDESC_UNLOCK(fdp); 524 error = EOVERFLOW; 525 break; 526 } 527 flp->l_start += fp->f_offset; 528 } 529 /* 
530 * VOP_ADVLOCK() may block. 531 */ 532 fhold(fp); 533 FILEDESC_UNLOCK(fdp); 534 vp = fp->f_vnode; 535 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp, 536 F_POSIX); 537 fdrop(fp, td); 538 break; 539 default: 540 FILEDESC_UNLOCK(fdp); 541 error = EINVAL; 542 break; 543 } 544 done2: 545 mtx_unlock(&Giant); 546 return (error); 547 } 548 549 /* 550 * Common code for dup, dup2, and fcntl(F_DUPFD). 551 */ 552 static int 553 do_dup(td, type, old, new, retval) 554 enum dup_type type; 555 int old, new; 556 register_t *retval; 557 struct thread *td; 558 { 559 struct filedesc *fdp; 560 struct proc *p; 561 struct file *fp; 562 struct file *delfp; 563 int error, holdleaders, maxfd; 564 565 KASSERT((type == DUP_VARIABLE || type == DUP_FIXED), 566 ("invalid dup type %d", type)); 567 568 p = td->td_proc; 569 fdp = p->p_fd; 570 571 /* 572 * Verify we have a valid descriptor to dup from and possibly to 573 * dup to. 574 */ 575 if (old < 0 || new < 0) 576 return (EBADF); 577 PROC_LOCK(p); 578 maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 579 PROC_UNLOCK(p); 580 if (new >= maxfd) 581 return (EMFILE); 582 583 FILEDESC_LOCK(fdp); 584 if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) { 585 FILEDESC_UNLOCK(fdp); 586 return (EBADF); 587 } 588 if (type == DUP_FIXED && old == new) { 589 *retval = new; 590 FILEDESC_UNLOCK(fdp); 591 return (0); 592 } 593 fp = fdp->fd_ofiles[old]; 594 fhold(fp); 595 596 /* 597 * If the caller specified a file descriptor, make sure the file 598 * table is large enough to hold it, and grab it. Otherwise, just 599 * allocate a new descriptor the usual way. Since the filedesc 600 * lock may be temporarily dropped in the process, we have to look 601 * out for a race. 602 */ 603 if (type == DUP_FIXED) { 604 if (new >= fdp->fd_nfiles) 605 fdgrowtable(fdp, new + 1); 606 if (fdp->fd_ofiles[new] == NULL) 607 fdused(fdp, new); 608 } else { 609 if ((error = fdalloc(td, new, &new)) != 0) { 610 FILEDESC_UNLOCK(fdp); 611 fdrop(fp, td); 612 return (error); 613 } 614 } 615 616 /* 617 * If the old file changed out from under us then treat it as a 618 * bad file descriptor. Userland should do its own locking to 619 * avoid this case. 620 */ 621 if (fdp->fd_ofiles[old] != fp) { 622 /* we've allocated a descriptor which we won't use */ 623 if (fdp->fd_ofiles[new] == NULL) 624 fdunused(fdp, new); 625 FILEDESC_UNLOCK(fdp); 626 fdrop(fp, td); 627 return (EBADF); 628 } 629 KASSERT(old != new, 630 ("new fd is same as old")); 631 632 /* 633 * Save info on the descriptor being overwritten. We cannot close 634 * it without introducing an ownership race for the slot, since we 635 * need to drop the filedesc lock to call closef(). 636 * 637 * XXX this duplicates parts of close(). 638 */ 639 delfp = fdp->fd_ofiles[new]; 640 holdleaders = 0; 641 if (delfp != NULL) { 642 if (td->td_proc->p_fdtol != NULL) { 643 /* 644 * Ask fdfree() to sleep to ensure that all relevant 645 * process leaders can be traversed in closef(). 646 */ 647 fdp->fd_holdleaderscount++; 648 holdleaders = 1; 649 } 650 } 651 652 /* 653 * Duplicate the source descriptor 654 */ 655 fdp->fd_ofiles[new] = fp; 656 fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE; 657 if (new > fdp->fd_lastfile) 658 fdp->fd_lastfile = new; 659 FILEDESC_UNLOCK(fdp); 660 *retval = new; 661 662 /* 663 * If we dup'd over a valid file, we now own the reference to it 664 * and must dispose of it using closef() semantics (as if a 665 * close() were performed on it). 666 * 667 * XXX this duplicates parts of close(). 
668 */ 669 if (delfp != NULL) { 670 /* XXX need to call knote_fdclose() */ 671 mtx_lock(&Giant); 672 (void) closef(delfp, td); 673 mtx_unlock(&Giant); 674 if (holdleaders) { 675 FILEDESC_LOCK(fdp); 676 fdp->fd_holdleaderscount--; 677 if (fdp->fd_holdleaderscount == 0 && 678 fdp->fd_holdleaderswakeup != 0) { 679 fdp->fd_holdleaderswakeup = 0; 680 wakeup(&fdp->fd_holdleaderscount); 681 } 682 FILEDESC_UNLOCK(fdp); 683 } 684 } 685 return (0); 686 } 687 688 /* 689 * If sigio is on the list associated with a process or process group, 690 * disable signalling from the device, remove sigio from the list and 691 * free sigio. 692 */ 693 void 694 funsetown(sigiop) 695 struct sigio **sigiop; 696 { 697 struct sigio *sigio; 698 699 SIGIO_LOCK(); 700 sigio = *sigiop; 701 if (sigio == NULL) { 702 SIGIO_UNLOCK(); 703 return; 704 } 705 *(sigio->sio_myref) = NULL; 706 if ((sigio)->sio_pgid < 0) { 707 struct pgrp *pg = (sigio)->sio_pgrp; 708 PGRP_LOCK(pg); 709 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio, 710 sigio, sio_pgsigio); 711 PGRP_UNLOCK(pg); 712 } else { 713 struct proc *p = (sigio)->sio_proc; 714 PROC_LOCK(p); 715 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio, 716 sigio, sio_pgsigio); 717 PROC_UNLOCK(p); 718 } 719 SIGIO_UNLOCK(); 720 crfree(sigio->sio_ucred); 721 FREE(sigio, M_SIGIO); 722 } 723 724 /* 725 * Free a list of sigio structures. 726 * We only need to lock the SIGIO_LOCK because we have made ourselves 727 * inaccessable to callers of fsetown and therefore do not need to lock 728 * the proc or pgrp struct for the list manipulation. 729 */ 730 void 731 funsetownlst(sigiolst) 732 struct sigiolst *sigiolst; 733 { 734 struct proc *p; 735 struct pgrp *pg; 736 struct sigio *sigio; 737 738 sigio = SLIST_FIRST(sigiolst); 739 if (sigio == NULL) 740 return; 741 p = NULL; 742 pg = NULL; 743 744 /* 745 * Every entry of the list should belong 746 * to a single proc or pgrp. 747 */ 748 if (sigio->sio_pgid < 0) { 749 pg = sigio->sio_pgrp; 750 PGRP_LOCK_ASSERT(pg, MA_NOTOWNED); 751 } else /* if (sigio->sio_pgid > 0) */ { 752 p = sigio->sio_proc; 753 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 754 } 755 756 SIGIO_LOCK(); 757 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) { 758 *(sigio->sio_myref) = NULL; 759 if (pg != NULL) { 760 KASSERT(sigio->sio_pgid < 0, 761 ("Proc sigio in pgrp sigio list")); 762 KASSERT(sigio->sio_pgrp == pg, 763 ("Bogus pgrp in sigio list")); 764 PGRP_LOCK(pg); 765 SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, 766 sio_pgsigio); 767 PGRP_UNLOCK(pg); 768 } else /* if (p != NULL) */ { 769 KASSERT(sigio->sio_pgid > 0, 770 ("Pgrp sigio in proc sigio list")); 771 KASSERT(sigio->sio_proc == p, 772 ("Bogus proc in sigio list")); 773 PROC_LOCK(p); 774 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, 775 sio_pgsigio); 776 PROC_UNLOCK(p); 777 } 778 SIGIO_UNLOCK(); 779 crfree(sigio->sio_ucred); 780 FREE(sigio, M_SIGIO); 781 SIGIO_LOCK(); 782 } 783 SIGIO_UNLOCK(); 784 } 785 786 /* 787 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg). 788 * 789 * After permission checking, add a sigio structure to the sigio list for 790 * the process or process group. 791 */ 792 int 793 fsetown(pgid, sigiop) 794 pid_t pgid; 795 struct sigio **sigiop; 796 { 797 struct proc *proc; 798 struct pgrp *pgrp; 799 struct sigio *sigio; 800 int ret; 801 802 if (pgid == 0) { 803 funsetown(sigiop); 804 return (0); 805 } 806 807 ret = 0; 808 809 /* Allocate and fill in the new sigio out of locks. 
*/ 810 MALLOC(sigio, struct sigio *, sizeof(struct sigio), M_SIGIO, M_WAITOK); 811 sigio->sio_pgid = pgid; 812 sigio->sio_ucred = crhold(curthread->td_ucred); 813 sigio->sio_myref = sigiop; 814 815 sx_slock(&proctree_lock); 816 if (pgid > 0) { 817 proc = pfind(pgid); 818 if (proc == NULL) { 819 ret = ESRCH; 820 goto fail; 821 } 822 823 /* 824 * Policy - Don't allow a process to FSETOWN a process 825 * in another session. 826 * 827 * Remove this test to allow maximum flexibility or 828 * restrict FSETOWN to the current process or process 829 * group for maximum safety. 830 */ 831 PROC_UNLOCK(proc); 832 if (proc->p_session != curthread->td_proc->p_session) { 833 ret = EPERM; 834 goto fail; 835 } 836 837 pgrp = NULL; 838 } else /* if (pgid < 0) */ { 839 pgrp = pgfind(-pgid); 840 if (pgrp == NULL) { 841 ret = ESRCH; 842 goto fail; 843 } 844 PGRP_UNLOCK(pgrp); 845 846 /* 847 * Policy - Don't allow a process to FSETOWN a process 848 * in another session. 849 * 850 * Remove this test to allow maximum flexibility or 851 * restrict FSETOWN to the current process or process 852 * group for maximum safety. 853 */ 854 if (pgrp->pg_session != curthread->td_proc->p_session) { 855 ret = EPERM; 856 goto fail; 857 } 858 859 proc = NULL; 860 } 861 funsetown(sigiop); 862 if (pgid > 0) { 863 PROC_LOCK(proc); 864 /* 865 * Since funsetownlst() is called without the proctree 866 * locked, we need to check for P_WEXIT. 867 * XXX: is ESRCH correct? 868 */ 869 if ((proc->p_flag & P_WEXIT) != 0) { 870 PROC_UNLOCK(proc); 871 ret = ESRCH; 872 goto fail; 873 } 874 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio); 875 sigio->sio_proc = proc; 876 PROC_UNLOCK(proc); 877 } else { 878 PGRP_LOCK(pgrp); 879 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio); 880 sigio->sio_pgrp = pgrp; 881 PGRP_UNLOCK(pgrp); 882 } 883 sx_sunlock(&proctree_lock); 884 SIGIO_LOCK(); 885 *sigiop = sigio; 886 SIGIO_UNLOCK(); 887 return (0); 888 889 fail: 890 sx_sunlock(&proctree_lock); 891 crfree(sigio->sio_ucred); 892 FREE(sigio, M_SIGIO); 893 return (ret); 894 } 895 896 /* 897 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg). 898 */ 899 pid_t 900 fgetown(sigiop) 901 struct sigio **sigiop; 902 { 903 pid_t pgid; 904 905 SIGIO_LOCK(); 906 pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0; 907 SIGIO_UNLOCK(); 908 return (pgid); 909 } 910 911 /* 912 * Close a file descriptor. 913 */ 914 #ifndef _SYS_SYSPROTO_H_ 915 struct close_args { 916 int fd; 917 }; 918 #endif 919 /* 920 * MPSAFE 921 */ 922 /* ARGSUSED */ 923 int 924 close(td, uap) 925 struct thread *td; 926 struct close_args *uap; 927 { 928 struct filedesc *fdp; 929 struct file *fp; 930 int fd, error; 931 int holdleaders; 932 933 fd = uap->fd; 934 error = 0; 935 holdleaders = 0; 936 fdp = td->td_proc->p_fd; 937 mtx_lock(&Giant); 938 FILEDESC_LOCK(fdp); 939 if ((unsigned)fd >= fdp->fd_nfiles || 940 (fp = fdp->fd_ofiles[fd]) == NULL) { 941 FILEDESC_UNLOCK(fdp); 942 mtx_unlock(&Giant); 943 return (EBADF); 944 } 945 fdp->fd_ofiles[fd] = NULL; 946 fdp->fd_ofileflags[fd] = 0; 947 fdunused(fdp, fd); 948 if (td->td_proc->p_fdtol != NULL) { 949 /* 950 * Ask fdfree() to sleep to ensure that all relevant 951 * process leaders can be traversed in closef(). 952 */ 953 fdp->fd_holdleaderscount++; 954 holdleaders = 1; 955 } 956 957 /* 958 * we now hold the fp reference that used to be owned by the descriptor 959 * array. 
960 */ 961 if (fd < fdp->fd_knlistsize) { 962 FILEDESC_UNLOCK(fdp); 963 knote_fdclose(td, fd); 964 } else 965 FILEDESC_UNLOCK(fdp); 966 967 error = closef(fp, td); 968 mtx_unlock(&Giant); 969 if (holdleaders) { 970 FILEDESC_LOCK(fdp); 971 fdp->fd_holdleaderscount--; 972 if (fdp->fd_holdleaderscount == 0 && 973 fdp->fd_holdleaderswakeup != 0) { 974 fdp->fd_holdleaderswakeup = 0; 975 wakeup(&fdp->fd_holdleaderscount); 976 } 977 FILEDESC_UNLOCK(fdp); 978 } 979 return (error); 980 } 981 982 #if defined(COMPAT_43) 983 /* 984 * Return status information about a file descriptor. 985 */ 986 #ifndef _SYS_SYSPROTO_H_ 987 struct ofstat_args { 988 int fd; 989 struct ostat *sb; 990 }; 991 #endif 992 /* 993 * MPSAFE 994 */ 995 /* ARGSUSED */ 996 int 997 ofstat(td, uap) 998 struct thread *td; 999 struct ofstat_args *uap; 1000 { 1001 struct file *fp; 1002 struct stat ub; 1003 struct ostat oub; 1004 int error; 1005 1006 if ((error = fget(td, uap->fd, &fp)) != 0) 1007 goto done2; 1008 mtx_lock(&Giant); 1009 error = fo_stat(fp, &ub, td->td_ucred, td); 1010 mtx_unlock(&Giant); 1011 if (error == 0) { 1012 cvtstat(&ub, &oub); 1013 error = copyout(&oub, uap->sb, sizeof(oub)); 1014 } 1015 fdrop(fp, td); 1016 done2: 1017 return (error); 1018 } 1019 #endif /* COMPAT_43 */ 1020 1021 /* 1022 * Return status information about a file descriptor. 1023 */ 1024 #ifndef _SYS_SYSPROTO_H_ 1025 struct fstat_args { 1026 int fd; 1027 struct stat *sb; 1028 }; 1029 #endif 1030 /* 1031 * MPSAFE 1032 */ 1033 /* ARGSUSED */ 1034 int 1035 fstat(td, uap) 1036 struct thread *td; 1037 struct fstat_args *uap; 1038 { 1039 struct file *fp; 1040 struct stat ub; 1041 int error; 1042 1043 if ((error = fget(td, uap->fd, &fp)) != 0) 1044 goto done2; 1045 mtx_lock(&Giant); 1046 error = fo_stat(fp, &ub, td->td_ucred, td); 1047 mtx_unlock(&Giant); 1048 if (error == 0) 1049 error = copyout(&ub, uap->sb, sizeof(ub)); 1050 fdrop(fp, td); 1051 done2: 1052 return (error); 1053 } 1054 1055 /* 1056 * Return status information about a file descriptor. 1057 */ 1058 #ifndef _SYS_SYSPROTO_H_ 1059 struct nfstat_args { 1060 int fd; 1061 struct nstat *sb; 1062 }; 1063 #endif 1064 /* 1065 * MPSAFE 1066 */ 1067 /* ARGSUSED */ 1068 int 1069 nfstat(td, uap) 1070 struct thread *td; 1071 struct nfstat_args *uap; 1072 { 1073 struct file *fp; 1074 struct stat ub; 1075 struct nstat nub; 1076 int error; 1077 1078 if ((error = fget(td, uap->fd, &fp)) != 0) 1079 goto done2; 1080 mtx_lock(&Giant); 1081 error = fo_stat(fp, &ub, td->td_ucred, td); 1082 mtx_unlock(&Giant); 1083 if (error == 0) { 1084 cvtnstat(&ub, &nub); 1085 error = copyout(&nub, uap->sb, sizeof(nub)); 1086 } 1087 fdrop(fp, td); 1088 done2: 1089 return (error); 1090 } 1091 1092 /* 1093 * Return pathconf information about a file descriptor. 1094 */ 1095 #ifndef _SYS_SYSPROTO_H_ 1096 struct fpathconf_args { 1097 int fd; 1098 int name; 1099 }; 1100 #endif 1101 /* 1102 * MPSAFE 1103 */ 1104 /* ARGSUSED */ 1105 int 1106 fpathconf(td, uap) 1107 struct thread *td; 1108 struct fpathconf_args *uap; 1109 { 1110 struct file *fp; 1111 struct vnode *vp; 1112 int error; 1113 1114 if ((error = fget(td, uap->fd, &fp)) != 0) 1115 return (error); 1116 1117 /* If asynchronous I/O is available, it works for all descriptors. 
*/ 1118 if (uap->name == _PC_ASYNC_IO) { 1119 td->td_retval[0] = async_io_version; 1120 goto out; 1121 } 1122 vp = fp->f_vnode; 1123 if (vp != NULL) { 1124 mtx_lock(&Giant); 1125 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 1126 error = VOP_PATHCONF(vp, uap->name, td->td_retval); 1127 VOP_UNLOCK(vp, 0, td); 1128 mtx_unlock(&Giant); 1129 } else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) { 1130 if (uap->name != _PC_PIPE_BUF) { 1131 error = EINVAL; 1132 } else { 1133 td->td_retval[0] = PIPE_BUF; 1134 error = 0; 1135 } 1136 } else { 1137 error = EOPNOTSUPP; 1138 } 1139 out: 1140 fdrop(fp, td); 1141 return (error); 1142 } 1143 1144 /* 1145 * Grow the file table to accomodate (at least) nfd descriptors. This may 1146 * block and drop the filedesc lock, but it will reacquire it before 1147 * returing. 1148 */ 1149 static void 1150 fdgrowtable(struct filedesc *fdp, int nfd) 1151 { 1152 struct file **ntable; 1153 char *nfileflags; 1154 int nnfiles, onfiles; 1155 NDSLOTTYPE *nmap; 1156 1157 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1158 1159 KASSERT(fdp->fd_nfiles > 0, 1160 ("zero-length file table")); 1161 1162 /* compute the size of the new table */ 1163 onfiles = fdp->fd_nfiles; 1164 nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */ 1165 if (nnfiles <= onfiles) 1166 /* the table is already large enough */ 1167 return; 1168 1169 /* allocate a new table and (if required) new bitmaps */ 1170 FILEDESC_UNLOCK(fdp); 1171 MALLOC(ntable, struct file **, nnfiles * OFILESIZE, 1172 M_FILEDESC, M_ZERO | M_WAITOK); 1173 nfileflags = (char *)&ntable[nnfiles]; 1174 if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) 1175 MALLOC(nmap, NDSLOTTYPE *, NDSLOTS(nnfiles) * NDSLOTSIZE, 1176 M_FILEDESC, M_ZERO | M_WAITOK); 1177 else 1178 nmap = NULL; 1179 FILEDESC_LOCK(fdp); 1180 1181 /* 1182 * We now have new tables ready to go. Since we dropped the 1183 * filedesc lock to call malloc(), watch out for a race. 1184 */ 1185 onfiles = fdp->fd_nfiles; 1186 if (onfiles >= nnfiles) { 1187 /* we lost the race, but that's OK */ 1188 free(ntable, M_FILEDESC); 1189 if (nmap != NULL) 1190 free(nmap, M_FILEDESC); 1191 return; 1192 } 1193 bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable)); 1194 bcopy(fdp->fd_ofileflags, nfileflags, onfiles); 1195 if (onfiles > NDFILE) 1196 free(fdp->fd_ofiles, M_FILEDESC); 1197 fdp->fd_ofiles = ntable; 1198 fdp->fd_ofileflags = nfileflags; 1199 if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) { 1200 bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap)); 1201 if (NDSLOTS(onfiles) > NDSLOTS(NDFILE)) 1202 free(fdp->fd_map, M_FILEDESC); 1203 fdp->fd_map = nmap; 1204 } 1205 fdp->fd_nfiles = nnfiles; 1206 } 1207 1208 /* 1209 * Allocate a file descriptor for the process. 1210 */ 1211 int 1212 fdalloc(struct thread *td, int minfd, int *result) 1213 { 1214 struct proc *p = td->td_proc; 1215 struct filedesc *fdp = p->p_fd; 1216 int fd = -1, maxfd; 1217 1218 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1219 1220 PROC_LOCK(p); 1221 maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 1222 PROC_UNLOCK(p); 1223 1224 /* 1225 * Search the bitmap for a free descriptor. If none is found, try 1226 * to grow the file table. Keep at it until we either get a file 1227 * descriptor or run into process or system limits; fdgrowtable() 1228 * may drop the filedesc lock, so we're in a race. 
1229 */ 1230 for (;;) { 1231 fd = fd_first_free(fdp, minfd, fdp->fd_nfiles); 1232 if (fd >= maxfd) 1233 return (EMFILE); 1234 if (fd < fdp->fd_nfiles) 1235 break; 1236 fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd)); 1237 } 1238 1239 /* 1240 * Perform some sanity checks, then mark the file descriptor as 1241 * used and return it to the caller. 1242 */ 1243 KASSERT(!fdisused(fdp, fd), 1244 ("fd_first_free() returned non-free descriptor")); 1245 KASSERT(fdp->fd_ofiles[fd] == NULL, 1246 ("free descriptor isn't")); 1247 fdp->fd_ofileflags[fd] = 0; /* XXX needed? */ 1248 fdused(fdp, fd); 1249 fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles); 1250 *result = fd; 1251 return (0); 1252 } 1253 1254 /* 1255 * Check to see whether n user file descriptors 1256 * are available to the process p. 1257 */ 1258 int 1259 fdavail(td, n) 1260 struct thread *td; 1261 int n; 1262 { 1263 struct proc *p = td->td_proc; 1264 struct filedesc *fdp = td->td_proc->p_fd; 1265 struct file **fpp; 1266 int i, lim, last; 1267 1268 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1269 1270 PROC_LOCK(p); 1271 lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 1272 PROC_UNLOCK(p); 1273 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) 1274 return (1); 1275 last = min(fdp->fd_nfiles, lim); 1276 fpp = &fdp->fd_ofiles[fdp->fd_freefile]; 1277 for (i = last - fdp->fd_freefile; --i >= 0; fpp++) { 1278 if (*fpp == NULL && --n <= 0) 1279 return (1); 1280 } 1281 return (0); 1282 } 1283 1284 /* 1285 * Create a new open file structure and allocate 1286 * a file decriptor for the process that refers to it. 1287 * We add one reference to the file for the descriptor table 1288 * and one reference for resultfp. This is to prevent us being 1289 * prempted and the entry in the descriptor table closed after 1290 * we release the FILEDESC lock. 1291 */ 1292 int 1293 falloc(td, resultfp, resultfd) 1294 struct thread *td; 1295 struct file **resultfp; 1296 int *resultfd; 1297 { 1298 struct proc *p = td->td_proc; 1299 struct file *fp, *fq; 1300 int error, i; 1301 int maxuserfiles = maxfiles - (maxfiles / 20); 1302 static struct timeval lastfail; 1303 static int curfail; 1304 1305 fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO); 1306 sx_xlock(&filelist_lock); 1307 if ((nfiles >= maxuserfiles && (td->td_ucred->cr_ruid != 0 || 1308 jailed(td->td_ucred))) || nfiles >= maxfiles) { 1309 if (ppsratecheck(&lastfail, &curfail, 1)) { 1310 printf("kern.maxfiles limit exceeded by uid %i, please see tuning(7).\n", 1311 td->td_ucred->cr_ruid); 1312 } 1313 sx_xunlock(&filelist_lock); 1314 uma_zfree(file_zone, fp); 1315 return (ENFILE); 1316 } 1317 nfiles++; 1318 1319 /* 1320 * If the process has file descriptor zero open, add the new file 1321 * descriptor to the list of open files at that point, otherwise 1322 * put it at the front of the list of open files. 
1323 */ 1324 fp->f_mtxp = mtx_pool_alloc(mtxpool_sleep); 1325 fp->f_count = 1; 1326 if (resultfp) 1327 fp->f_count++; 1328 fp->f_cred = crhold(td->td_ucred); 1329 fp->f_ops = &badfileops; 1330 fp->f_data = NULL; 1331 fp->f_vnode = NULL; 1332 FILEDESC_LOCK(p->p_fd); 1333 if ((fq = p->p_fd->fd_ofiles[0])) { 1334 LIST_INSERT_AFTER(fq, fp, f_list); 1335 } else { 1336 LIST_INSERT_HEAD(&filehead, fp, f_list); 1337 } 1338 sx_xunlock(&filelist_lock); 1339 if ((error = fdalloc(td, 0, &i))) { 1340 FILEDESC_UNLOCK(p->p_fd); 1341 fdrop(fp, td); 1342 if (resultfp) 1343 fdrop(fp, td); 1344 return (error); 1345 } 1346 p->p_fd->fd_ofiles[i] = fp; 1347 FILEDESC_UNLOCK(p->p_fd); 1348 if (resultfp) 1349 *resultfp = fp; 1350 if (resultfd) 1351 *resultfd = i; 1352 return (0); 1353 } 1354 1355 /* 1356 * Free a file descriptor. 1357 */ 1358 void 1359 ffree(fp) 1360 struct file *fp; 1361 { 1362 1363 KASSERT(fp->f_count == 0, ("ffree: fp_fcount not 0!")); 1364 sx_xlock(&filelist_lock); 1365 LIST_REMOVE(fp, f_list); 1366 nfiles--; 1367 sx_xunlock(&filelist_lock); 1368 crfree(fp->f_cred); 1369 uma_zfree(file_zone, fp); 1370 } 1371 1372 /* 1373 * Build a new filedesc structure from another. 1374 * Copy the current, root, and jail root vnode references. 1375 */ 1376 struct filedesc * 1377 fdinit(fdp) 1378 struct filedesc *fdp; 1379 { 1380 struct filedesc0 *newfdp; 1381 1382 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1383 1384 FILEDESC_UNLOCK(fdp); 1385 MALLOC(newfdp, struct filedesc0 *, sizeof(struct filedesc0), 1386 M_FILEDESC, M_WAITOK | M_ZERO); 1387 FILEDESC_LOCK(fdp); 1388 mtx_init(&newfdp->fd_fd.fd_mtx, FILEDESC_LOCK_DESC, NULL, MTX_DEF); 1389 newfdp->fd_fd.fd_cdir = fdp->fd_cdir; 1390 if (newfdp->fd_fd.fd_cdir) 1391 VREF(newfdp->fd_fd.fd_cdir); 1392 newfdp->fd_fd.fd_rdir = fdp->fd_rdir; 1393 if (newfdp->fd_fd.fd_rdir) 1394 VREF(newfdp->fd_fd.fd_rdir); 1395 newfdp->fd_fd.fd_jdir = fdp->fd_jdir; 1396 if (newfdp->fd_fd.fd_jdir) 1397 VREF(newfdp->fd_fd.fd_jdir); 1398 1399 /* Create the file descriptor table. */ 1400 newfdp->fd_fd.fd_refcnt = 1; 1401 newfdp->fd_fd.fd_cmask = CMASK; 1402 newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles; 1403 newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags; 1404 newfdp->fd_fd.fd_nfiles = NDFILE; 1405 newfdp->fd_fd.fd_knlistsize = -1; 1406 newfdp->fd_fd.fd_map = newfdp->fd_dmap; 1407 return (&newfdp->fd_fd); 1408 } 1409 1410 /* 1411 * Share a filedesc structure. 1412 */ 1413 struct filedesc * 1414 fdshare(fdp) 1415 struct filedesc *fdp; 1416 { 1417 FILEDESC_LOCK(fdp); 1418 fdp->fd_refcnt++; 1419 FILEDESC_UNLOCK(fdp); 1420 return (fdp); 1421 } 1422 1423 /* 1424 * Copy a filedesc structure. 1425 * A NULL pointer in returns a NULL reference, this is to ease callers, 1426 * not catch errors. 1427 */ 1428 struct filedesc * 1429 fdcopy(fdp) 1430 struct filedesc *fdp; 1431 { 1432 struct filedesc *newfdp; 1433 int i; 1434 1435 /* Certain daemons might not have file descriptors. 
*/ 1436 if (fdp == NULL) 1437 return (NULL); 1438 1439 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1440 newfdp = fdinit(fdp); 1441 while (fdp->fd_lastfile >= newfdp->fd_nfiles) { 1442 FILEDESC_UNLOCK(fdp); 1443 FILEDESC_LOCK(newfdp); 1444 fdgrowtable(newfdp, fdp->fd_lastfile + 1); 1445 FILEDESC_UNLOCK(newfdp); 1446 FILEDESC_LOCK(fdp); 1447 } 1448 /* copy everything except kqueue descriptors */ 1449 newfdp->fd_freefile = -1; 1450 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1451 if (fdisused(fdp, i) && 1452 fdp->fd_ofiles[i]->f_type != DTYPE_KQUEUE) { 1453 newfdp->fd_ofiles[i] = fdp->fd_ofiles[i]; 1454 newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i]; 1455 fhold(newfdp->fd_ofiles[i]); 1456 newfdp->fd_lastfile = i; 1457 } else { 1458 if (newfdp->fd_freefile == -1) 1459 newfdp->fd_freefile = i; 1460 } 1461 } 1462 FILEDESC_UNLOCK(fdp); 1463 FILEDESC_LOCK(newfdp); 1464 for (i = 0; i <= newfdp->fd_lastfile; ++i) 1465 if (newfdp->fd_ofiles[i] != NULL) 1466 fdused(newfdp, i); 1467 FILEDESC_UNLOCK(newfdp); 1468 FILEDESC_LOCK(fdp); 1469 if (newfdp->fd_freefile == -1) 1470 newfdp->fd_freefile = i; 1471 newfdp->fd_cmask = fdp->fd_cmask; 1472 return (newfdp); 1473 } 1474 1475 /* A mutex to protect the association between a proc and filedesc. */ 1476 struct mtx fdesc_mtx; 1477 MTX_SYSINIT(fdesc, &fdesc_mtx, "fdesc", MTX_DEF); 1478 1479 /* 1480 * Release a filedesc structure. 1481 */ 1482 void 1483 fdfree(td) 1484 struct thread *td; 1485 { 1486 struct filedesc *fdp; 1487 struct file **fpp; 1488 int i; 1489 struct filedesc_to_leader *fdtol; 1490 struct file *fp; 1491 struct vnode *vp; 1492 struct flock lf; 1493 1494 /* Certain daemons might not have file descriptors. */ 1495 fdp = td->td_proc->p_fd; 1496 if (fdp == NULL) 1497 return; 1498 1499 /* Check for special need to clear POSIX style locks */ 1500 fdtol = td->td_proc->p_fdtol; 1501 if (fdtol != NULL) { 1502 FILEDESC_LOCK(fdp); 1503 KASSERT(fdtol->fdl_refcount > 0, 1504 ("filedesc_to_refcount botch: fdl_refcount=%d", 1505 fdtol->fdl_refcount)); 1506 if (fdtol->fdl_refcount == 1 && 1507 (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1508 i = 0; 1509 fpp = fdp->fd_ofiles; 1510 for (i = 0, fpp = fdp->fd_ofiles; 1511 i <= fdp->fd_lastfile; 1512 i++, fpp++) { 1513 if (*fpp == NULL || 1514 (*fpp)->f_type != DTYPE_VNODE) 1515 continue; 1516 fp = *fpp; 1517 fhold(fp); 1518 FILEDESC_UNLOCK(fdp); 1519 lf.l_whence = SEEK_SET; 1520 lf.l_start = 0; 1521 lf.l_len = 0; 1522 lf.l_type = F_UNLCK; 1523 vp = fp->f_vnode; 1524 (void) VOP_ADVLOCK(vp, 1525 (caddr_t)td->td_proc-> 1526 p_leader, 1527 F_UNLCK, 1528 &lf, 1529 F_POSIX); 1530 FILEDESC_LOCK(fdp); 1531 fdrop(fp, td); 1532 fpp = fdp->fd_ofiles + i; 1533 } 1534 } 1535 retry: 1536 if (fdtol->fdl_refcount == 1) { 1537 if (fdp->fd_holdleaderscount > 0 && 1538 (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1539 /* 1540 * close() or do_dup() has cleared a reference 1541 * in a shared file descriptor table. 1542 */ 1543 fdp->fd_holdleaderswakeup = 1; 1544 msleep(&fdp->fd_holdleaderscount, &fdp->fd_mtx, 1545 PLOCK, "fdlhold", 0); 1546 goto retry; 1547 } 1548 if (fdtol->fdl_holdcount > 0) { 1549 /* 1550 * Ensure that fdtol->fdl_leader 1551 * remains valid in closef(). 
1552 */ 1553 fdtol->fdl_wakeup = 1; 1554 msleep(fdtol, &fdp->fd_mtx, 1555 PLOCK, "fdlhold", 0); 1556 goto retry; 1557 } 1558 } 1559 fdtol->fdl_refcount--; 1560 if (fdtol->fdl_refcount == 0 && 1561 fdtol->fdl_holdcount == 0) { 1562 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 1563 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 1564 } else 1565 fdtol = NULL; 1566 td->td_proc->p_fdtol = NULL; 1567 FILEDESC_UNLOCK(fdp); 1568 if (fdtol != NULL) 1569 FREE(fdtol, M_FILEDESC_TO_LEADER); 1570 } 1571 FILEDESC_LOCK(fdp); 1572 if (--fdp->fd_refcnt > 0) { 1573 FILEDESC_UNLOCK(fdp); 1574 return; 1575 } 1576 1577 /* 1578 * We are the last reference to the structure, so we can 1579 * safely assume it will not change out from under us. 1580 */ 1581 FILEDESC_UNLOCK(fdp); 1582 fpp = fdp->fd_ofiles; 1583 for (i = fdp->fd_lastfile; i-- >= 0; fpp++) { 1584 if (*fpp) 1585 (void) closef(*fpp, td); 1586 } 1587 1588 /* XXX This should happen earlier. */ 1589 mtx_lock(&fdesc_mtx); 1590 td->td_proc->p_fd = NULL; 1591 mtx_unlock(&fdesc_mtx); 1592 1593 if (fdp->fd_nfiles > NDFILE) 1594 FREE(fdp->fd_ofiles, M_FILEDESC); 1595 if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE)) 1596 FREE(fdp->fd_map, M_FILEDESC); 1597 if (fdp->fd_cdir) 1598 vrele(fdp->fd_cdir); 1599 if (fdp->fd_rdir) 1600 vrele(fdp->fd_rdir); 1601 if (fdp->fd_jdir) 1602 vrele(fdp->fd_jdir); 1603 if (fdp->fd_knlist) 1604 FREE(fdp->fd_knlist, M_KQUEUE); 1605 if (fdp->fd_knhash) 1606 FREE(fdp->fd_knhash, M_KQUEUE); 1607 mtx_destroy(&fdp->fd_mtx); 1608 FREE(fdp, M_FILEDESC); 1609 } 1610 1611 /* 1612 * For setugid programs, we don't want to people to use that setugidness 1613 * to generate error messages which write to a file which otherwise would 1614 * otherwise be off-limits to the process. We check for filesystems where 1615 * the vnode can change out from under us after execve (like [lin]procfs). 1616 * 1617 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is 1618 * sufficient. We also don't for check setugidness since we know we are. 1619 */ 1620 static int 1621 is_unsafe(struct file *fp) 1622 { 1623 if (fp->f_type == DTYPE_VNODE) { 1624 struct vnode *vp = fp->f_vnode; 1625 1626 if ((vp->v_vflag & VV_PROCDEP) != 0) 1627 return (1); 1628 } 1629 return (0); 1630 } 1631 1632 /* 1633 * Make this setguid thing safe, if at all possible. 1634 */ 1635 void 1636 setugidsafety(td) 1637 struct thread *td; 1638 { 1639 struct filedesc *fdp; 1640 int i; 1641 1642 /* Certain daemons might not have file descriptors. */ 1643 fdp = td->td_proc->p_fd; 1644 if (fdp == NULL) 1645 return; 1646 1647 /* 1648 * Note: fdp->fd_ofiles may be reallocated out from under us while 1649 * we are blocked in a close. Be careful! 1650 */ 1651 FILEDESC_LOCK(fdp); 1652 for (i = 0; i <= fdp->fd_lastfile; i++) { 1653 if (i > 2) 1654 break; 1655 if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) { 1656 struct file *fp; 1657 1658 if (i < fdp->fd_knlistsize) { 1659 FILEDESC_UNLOCK(fdp); 1660 knote_fdclose(td, i); 1661 FILEDESC_LOCK(fdp); 1662 } 1663 /* 1664 * NULL-out descriptor prior to close to avoid 1665 * a race while close blocks. 1666 */ 1667 fp = fdp->fd_ofiles[i]; 1668 fdp->fd_ofiles[i] = NULL; 1669 fdp->fd_ofileflags[i] = 0; 1670 fdunused(fdp, i); 1671 FILEDESC_UNLOCK(fdp); 1672 (void) closef(fp, td); 1673 FILEDESC_LOCK(fdp); 1674 } 1675 } 1676 FILEDESC_UNLOCK(fdp); 1677 } 1678 1679 /* 1680 * Close any files on exec? 
1681 */ 1682 void 1683 fdcloseexec(td) 1684 struct thread *td; 1685 { 1686 struct filedesc *fdp; 1687 int i; 1688 1689 /* Certain daemons might not have file descriptors. */ 1690 fdp = td->td_proc->p_fd; 1691 if (fdp == NULL) 1692 return; 1693 1694 FILEDESC_LOCK(fdp); 1695 1696 /* 1697 * We cannot cache fd_ofiles or fd_ofileflags since operations 1698 * may block and rip them out from under us. 1699 */ 1700 for (i = 0; i <= fdp->fd_lastfile; i++) { 1701 if (fdp->fd_ofiles[i] != NULL && 1702 (fdp->fd_ofileflags[i] & UF_EXCLOSE)) { 1703 struct file *fp; 1704 1705 if (i < fdp->fd_knlistsize) { 1706 FILEDESC_UNLOCK(fdp); 1707 knote_fdclose(td, i); 1708 FILEDESC_LOCK(fdp); 1709 } 1710 /* 1711 * NULL-out descriptor prior to close to avoid 1712 * a race while close blocks. 1713 */ 1714 fp = fdp->fd_ofiles[i]; 1715 fdp->fd_ofiles[i] = NULL; 1716 fdp->fd_ofileflags[i] = 0; 1717 fdunused(fdp, i); 1718 FILEDESC_UNLOCK(fdp); 1719 (void) closef(fp, td); 1720 FILEDESC_LOCK(fdp); 1721 } 1722 } 1723 FILEDESC_UNLOCK(fdp); 1724 } 1725 1726 /* 1727 * It is unsafe for set[ug]id processes to be started with file 1728 * descriptors 0..2 closed, as these descriptors are given implicit 1729 * significance in the Standard C library. fdcheckstd() will create a 1730 * descriptor referencing /dev/null for each of stdin, stdout, and 1731 * stderr that is not already open. 1732 */ 1733 int 1734 fdcheckstd(td) 1735 struct thread *td; 1736 { 1737 struct nameidata nd; 1738 struct filedesc *fdp; 1739 struct file *fp; 1740 register_t retval; 1741 int fd, i, error, flags, devnull; 1742 1743 fdp = td->td_proc->p_fd; 1744 if (fdp == NULL) 1745 return (0); 1746 KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared")); 1747 devnull = -1; 1748 error = 0; 1749 for (i = 0; i < 3; i++) { 1750 if (fdp->fd_ofiles[i] != NULL) 1751 continue; 1752 if (devnull < 0) { 1753 error = falloc(td, &fp, &fd); 1754 if (error != 0) 1755 break; 1756 /* Note extra ref on `fp' held for us by falloc(). */ 1757 KASSERT(fd == i, ("oof, we didn't get our fd")); 1758 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/dev/null", 1759 td); 1760 flags = FREAD | FWRITE; 1761 error = vn_open(&nd, &flags, 0, -1); 1762 if (error != 0) { 1763 /* 1764 * Someone may have closed the entry in the 1765 * file descriptor table, so check it hasn't 1766 * changed before dropping the reference count. 1767 */ 1768 FILEDESC_LOCK(fdp); 1769 KASSERT(fdp->fd_ofiles[fd] == fp, 1770 ("table not shared, how did it change?")); 1771 fdp->fd_ofiles[fd] = NULL; 1772 fdunused(fdp, fd); 1773 FILEDESC_UNLOCK(fdp); 1774 fdrop(fp, td); 1775 fdrop(fp, td); 1776 break; 1777 } 1778 NDFREE(&nd, NDF_ONLY_PNBUF); 1779 fp->f_vnode = nd.ni_vp; 1780 fp->f_data = nd.ni_vp; 1781 fp->f_flag = flags; 1782 fp->f_ops = &vnops; 1783 fp->f_type = DTYPE_VNODE; 1784 VOP_UNLOCK(nd.ni_vp, 0, td); 1785 devnull = fd; 1786 fdrop(fp, td); 1787 } else { 1788 error = do_dup(td, DUP_FIXED, devnull, i, &retval); 1789 if (error != 0) 1790 break; 1791 } 1792 } 1793 return (error); 1794 } 1795 1796 /* 1797 * Internal form of close. 1798 * Decrement reference count on file structure. 1799 * Note: td may be NULL when closing a file 1800 * that was being passed in a message. 1801 */ 1802 int 1803 closef(fp, td) 1804 struct file *fp; 1805 struct thread *td; 1806 { 1807 struct vnode *vp; 1808 struct flock lf; 1809 struct filedesc_to_leader *fdtol; 1810 struct filedesc *fdp; 1811 1812 if (fp == NULL) 1813 return (0); 1814 /* 1815 * POSIX record locking dictates that any close releases ALL 1816 * locks owned by this process. 
This is handled by setting 1817 * a flag in the unlock to free ONLY locks obeying POSIX 1818 * semantics, and not to free BSD-style file locks. 1819 * If the descriptor was in a message, POSIX-style locks 1820 * aren't passed with the descriptor. 1821 */ 1822 if (td != NULL && 1823 fp->f_type == DTYPE_VNODE) { 1824 if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1825 lf.l_whence = SEEK_SET; 1826 lf.l_start = 0; 1827 lf.l_len = 0; 1828 lf.l_type = F_UNLCK; 1829 vp = fp->f_vnode; 1830 (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader, 1831 F_UNLCK, &lf, F_POSIX); 1832 } 1833 fdtol = td->td_proc->p_fdtol; 1834 if (fdtol != NULL) { 1835 /* 1836 * Handle special case where file descriptor table 1837 * is shared between multiple process leaders. 1838 */ 1839 fdp = td->td_proc->p_fd; 1840 FILEDESC_LOCK(fdp); 1841 for (fdtol = fdtol->fdl_next; 1842 fdtol != td->td_proc->p_fdtol; 1843 fdtol = fdtol->fdl_next) { 1844 if ((fdtol->fdl_leader->p_flag & 1845 P_ADVLOCK) == 0) 1846 continue; 1847 fdtol->fdl_holdcount++; 1848 FILEDESC_UNLOCK(fdp); 1849 lf.l_whence = SEEK_SET; 1850 lf.l_start = 0; 1851 lf.l_len = 0; 1852 lf.l_type = F_UNLCK; 1853 vp = fp->f_vnode; 1854 (void) VOP_ADVLOCK(vp, 1855 (caddr_t)fdtol->fdl_leader, 1856 F_UNLCK, &lf, F_POSIX); 1857 FILEDESC_LOCK(fdp); 1858 fdtol->fdl_holdcount--; 1859 if (fdtol->fdl_holdcount == 0 && 1860 fdtol->fdl_wakeup != 0) { 1861 fdtol->fdl_wakeup = 0; 1862 wakeup(fdtol); 1863 } 1864 } 1865 FILEDESC_UNLOCK(fdp); 1866 } 1867 } 1868 return (fdrop(fp, td)); 1869 } 1870 1871 /* 1872 * Drop reference on struct file passed in, may call closef if the 1873 * reference hits zero. 1874 */ 1875 int 1876 fdrop(fp, td) 1877 struct file *fp; 1878 struct thread *td; 1879 { 1880 1881 FILE_LOCK(fp); 1882 return (fdrop_locked(fp, td)); 1883 } 1884 1885 /* 1886 * Extract the file pointer associated with the specified descriptor for 1887 * the current user process. 1888 * 1889 * If the descriptor doesn't exist, EBADF is returned. 1890 * 1891 * If the descriptor exists but doesn't match 'flags' then 1892 * return EBADF for read attempts and EINVAL for write attempts. 1893 * 1894 * If 'hold' is set (non-zero) the file's refcount will be bumped on return. 1895 * It should be droped with fdrop(). 1896 * If it is not set, then the refcount will not be bumped however the 1897 * thread's filedesc struct will be returned locked (for fgetsock). 1898 * 1899 * If an error occured the non-zero error is returned and *fpp is set to NULL. 1900 * Otherwise *fpp is set and zero is returned. 1901 */ 1902 static __inline int 1903 _fget(struct thread *td, int fd, struct file **fpp, int flags, int hold) 1904 { 1905 struct filedesc *fdp; 1906 struct file *fp; 1907 1908 *fpp = NULL; 1909 if (td == NULL || (fdp = td->td_proc->p_fd) == NULL) 1910 return (EBADF); 1911 FILEDESC_LOCK(fdp); 1912 if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) { 1913 FILEDESC_UNLOCK(fdp); 1914 return (EBADF); 1915 } 1916 1917 /* 1918 * Note: FREAD failures returns EBADF to maintain backwards 1919 * compatibility with what routines returned before. 1920 * 1921 * Only one flag, or 0, may be specified. 
1922 */ 1923 if (flags == FREAD && (fp->f_flag & FREAD) == 0) { 1924 FILEDESC_UNLOCK(fdp); 1925 return (EBADF); 1926 } 1927 if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) { 1928 FILEDESC_UNLOCK(fdp); 1929 return (EINVAL); 1930 } 1931 if (hold) { 1932 fhold(fp); 1933 FILEDESC_UNLOCK(fdp); 1934 } 1935 *fpp = fp; 1936 return (0); 1937 } 1938 1939 int 1940 fget(struct thread *td, int fd, struct file **fpp) 1941 { 1942 1943 return(_fget(td, fd, fpp, 0, 1)); 1944 } 1945 1946 int 1947 fget_read(struct thread *td, int fd, struct file **fpp) 1948 { 1949 1950 return(_fget(td, fd, fpp, FREAD, 1)); 1951 } 1952 1953 int 1954 fget_write(struct thread *td, int fd, struct file **fpp) 1955 { 1956 1957 return(_fget(td, fd, fpp, FWRITE, 1)); 1958 } 1959 1960 /* 1961 * Like fget() but loads the underlying vnode, or returns an error if 1962 * the descriptor does not represent a vnode. Note that pipes use vnodes 1963 * but never have VM objects (so VOP_GETVOBJECT() calls will return an 1964 * error). The returned vnode will be vref()d. 1965 */ 1966 static __inline int 1967 _fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags) 1968 { 1969 struct file *fp; 1970 int error; 1971 1972 *vpp = NULL; 1973 if ((error = _fget(td, fd, &fp, 0, 0)) != 0) 1974 return (error); 1975 if (fp->f_vnode == NULL) { 1976 error = EINVAL; 1977 } else { 1978 *vpp = fp->f_vnode; 1979 vref(*vpp); 1980 } 1981 FILEDESC_UNLOCK(td->td_proc->p_fd); 1982 return (error); 1983 } 1984 1985 int 1986 fgetvp(struct thread *td, int fd, struct vnode **vpp) 1987 { 1988 1989 return (_fgetvp(td, fd, vpp, 0)); 1990 } 1991 1992 int 1993 fgetvp_read(struct thread *td, int fd, struct vnode **vpp) 1994 { 1995 1996 return (_fgetvp(td, fd, vpp, FREAD)); 1997 } 1998 1999 int 2000 fgetvp_write(struct thread *td, int fd, struct vnode **vpp) 2001 { 2002 2003 return (_fgetvp(td, fd, vpp, FWRITE)); 2004 } 2005 2006 /* 2007 * Like fget() but loads the underlying socket, or returns an error if 2008 * the descriptor does not represent a socket. 2009 * 2010 * We bump the ref count on the returned socket. XXX Also obtain the SX 2011 * lock in the future. 2012 */ 2013 int 2014 fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp) 2015 { 2016 struct file *fp; 2017 int error; 2018 2019 *spp = NULL; 2020 if (fflagp != NULL) 2021 *fflagp = 0; 2022 if ((error = _fget(td, fd, &fp, 0, 0)) != 0) 2023 return (error); 2024 if (fp->f_type != DTYPE_SOCKET) { 2025 error = ENOTSOCK; 2026 } else { 2027 *spp = fp->f_data; 2028 if (fflagp) 2029 *fflagp = fp->f_flag; 2030 SOCK_LOCK(*spp); 2031 soref(*spp); 2032 SOCK_UNLOCK(*spp); 2033 } 2034 FILEDESC_UNLOCK(td->td_proc->p_fd); 2035 return (error); 2036 } 2037 2038 /* 2039 * Drop the reference count on the the socket and XXX release the SX lock in 2040 * the future. The last reference closes the socket. 2041 */ 2042 void 2043 fputsock(struct socket *so) 2044 { 2045 2046 NET_ASSERT_GIANT(); 2047 SOCK_LOCK(so); 2048 sorele(so); 2049 } 2050 2051 /* 2052 * Drop reference on struct file passed in, may call closef if the 2053 * reference hits zero. 2054 * Expects struct file locked, and will unlock it. 2055 */ 2056 int 2057 fdrop_locked(fp, td) 2058 struct file *fp; 2059 struct thread *td; 2060 { 2061 int error; 2062 2063 FILE_LOCK_ASSERT(fp, MA_OWNED); 2064 2065 if (--fp->f_count > 0) { 2066 FILE_UNLOCK(fp); 2067 return (0); 2068 } 2069 /* We have the last ref so we can proceed without the file lock. 
*/ 2070 FILE_UNLOCK(fp); 2071 if (fp->f_count < 0) 2072 panic("fdrop: count < 0"); 2073 mtx_lock(&Giant); 2074 if (fp->f_ops != &badfileops) 2075 error = fo_close(fp, td); 2076 else 2077 error = 0; 2078 ffree(fp); 2079 mtx_unlock(&Giant); 2080 return (error); 2081 } 2082 2083 /* 2084 * Apply an advisory lock on a file descriptor. 2085 * 2086 * Just attempt to get a record lock of the requested type on 2087 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). 2088 */ 2089 #ifndef _SYS_SYSPROTO_H_ 2090 struct flock_args { 2091 int fd; 2092 int how; 2093 }; 2094 #endif 2095 /* 2096 * MPSAFE 2097 */ 2098 /* ARGSUSED */ 2099 int 2100 flock(td, uap) 2101 struct thread *td; 2102 struct flock_args *uap; 2103 { 2104 struct file *fp; 2105 struct vnode *vp; 2106 struct flock lf; 2107 int error; 2108 2109 if ((error = fget(td, uap->fd, &fp)) != 0) 2110 return (error); 2111 if (fp->f_type != DTYPE_VNODE) { 2112 fdrop(fp, td); 2113 return (EOPNOTSUPP); 2114 } 2115 2116 mtx_lock(&Giant); 2117 vp = fp->f_vnode; 2118 lf.l_whence = SEEK_SET; 2119 lf.l_start = 0; 2120 lf.l_len = 0; 2121 if (uap->how & LOCK_UN) { 2122 lf.l_type = F_UNLCK; 2123 FILE_LOCK(fp); 2124 fp->f_flag &= ~FHASLOCK; 2125 FILE_UNLOCK(fp); 2126 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK); 2127 goto done2; 2128 } 2129 if (uap->how & LOCK_EX) 2130 lf.l_type = F_WRLCK; 2131 else if (uap->how & LOCK_SH) 2132 lf.l_type = F_RDLCK; 2133 else { 2134 error = EBADF; 2135 goto done2; 2136 } 2137 FILE_LOCK(fp); 2138 fp->f_flag |= FHASLOCK; 2139 FILE_UNLOCK(fp); 2140 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 2141 (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT); 2142 done2: 2143 fdrop(fp, td); 2144 mtx_unlock(&Giant); 2145 return (error); 2146 } 2147 2148 /* 2149 * File Descriptor pseudo-device driver (/dev/fd/). 2150 * 2151 * Opening minor device N dup()s the file (if any) connected to file 2152 * descriptor N belonging to the calling process. Note that this driver 2153 * consists of only the ``open()'' routine, because all subsequent 2154 * references to this file will be direct to the other driver. 2155 */ 2156 /* ARGSUSED */ 2157 static int 2158 fdopen(dev, mode, type, td) 2159 struct cdev *dev; 2160 int mode, type; 2161 struct thread *td; 2162 { 2163 2164 /* 2165 * XXX Kludge: set curthread->td_dupfd to contain the value of the 2166 * the file descriptor being sought for duplication. The error 2167 * return ensures that the vnode for this device will be released 2168 * by vn_open. Open will detect this special error and take the 2169 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN 2170 * will simply report the error. 2171 */ 2172 td->td_dupfd = dev2unit(dev); 2173 return (ENODEV); 2174 } 2175 2176 /* 2177 * Duplicate the specified descriptor to a free descriptor. 2178 */ 2179 int 2180 dupfdopen(td, fdp, indx, dfd, mode, error) 2181 struct thread *td; 2182 struct filedesc *fdp; 2183 int indx, dfd; 2184 int mode; 2185 int error; 2186 { 2187 struct file *wfp; 2188 struct file *fp; 2189 2190 /* 2191 * If the to-be-dup'd fd number is greater than the allowed number 2192 * of file descriptors, or the fd to be dup'd has already been 2193 * closed, then reject. 2194 */ 2195 FILEDESC_LOCK(fdp); 2196 if (dfd < 0 || dfd >= fdp->fd_nfiles || 2197 (wfp = fdp->fd_ofiles[dfd]) == NULL) { 2198 FILEDESC_UNLOCK(fdp); 2199 return (EBADF); 2200 } 2201 2202 /* 2203 * There are two cases of interest here. 2204 * 2205 * For ENODEV simply dup (dfd) to file descriptor 2206 * (indx) and return. 
2207 * 2208 * For ENXIO steal away the file structure from (dfd) and 2209 * store it in (indx). (dfd) is effectively closed by 2210 * this operation. 2211 * 2212 * Any other error code is just returned. 2213 */ 2214 switch (error) { 2215 case ENODEV: 2216 /* 2217 * Check that the mode the file is being opened for is a 2218 * subset of the mode of the existing descriptor. 2219 */ 2220 FILE_LOCK(wfp); 2221 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { 2222 FILE_UNLOCK(wfp); 2223 FILEDESC_UNLOCK(fdp); 2224 return (EACCES); 2225 } 2226 fp = fdp->fd_ofiles[indx]; 2227 fdp->fd_ofiles[indx] = wfp; 2228 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2229 if (fp == NULL) 2230 fdused(fdp, indx); 2231 fhold_locked(wfp); 2232 FILE_UNLOCK(wfp); 2233 if (fp != NULL) 2234 FILE_LOCK(fp); 2235 FILEDESC_UNLOCK(fdp); 2236 /* 2237 * We now own the reference to fp that the ofiles[] array 2238 * used to own. Release it. 2239 */ 2240 if (fp != NULL) 2241 fdrop_locked(fp, td); 2242 return (0); 2243 2244 case ENXIO: 2245 /* 2246 * Steal away the file pointer from dfd and stuff it into indx. 2247 */ 2248 fp = fdp->fd_ofiles[indx]; 2249 fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd]; 2250 fdp->fd_ofiles[dfd] = NULL; 2251 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2252 fdp->fd_ofileflags[dfd] = 0; 2253 fdunused(fdp, dfd); 2254 if (fp == NULL) 2255 fdused(fdp, indx); 2256 if (fp != NULL) 2257 FILE_LOCK(fp); 2258 FILEDESC_UNLOCK(fdp); 2259 2260 /* 2261 * we now own the reference to fp that the ofiles[] array 2262 * used to own. Release it. 2263 */ 2264 if (fp != NULL) 2265 fdrop_locked(fp, td); 2266 return (0); 2267 2268 default: 2269 FILEDESC_UNLOCK(fdp); 2270 return (error); 2271 } 2272 /* NOTREACHED */ 2273 } 2274 2275 struct filedesc_to_leader * 2276 filedesc_to_leader_alloc(struct filedesc_to_leader *old, 2277 struct filedesc *fdp, 2278 struct proc *leader) 2279 { 2280 struct filedesc_to_leader *fdtol; 2281 2282 MALLOC(fdtol, struct filedesc_to_leader *, 2283 sizeof(struct filedesc_to_leader), 2284 M_FILEDESC_TO_LEADER, 2285 M_WAITOK); 2286 fdtol->fdl_refcount = 1; 2287 fdtol->fdl_holdcount = 0; 2288 fdtol->fdl_wakeup = 0; 2289 fdtol->fdl_leader = leader; 2290 if (old != NULL) { 2291 FILEDESC_LOCK(fdp); 2292 fdtol->fdl_next = old->fdl_next; 2293 fdtol->fdl_prev = old; 2294 old->fdl_next = fdtol; 2295 fdtol->fdl_next->fdl_prev = fdtol; 2296 FILEDESC_UNLOCK(fdp); 2297 } else { 2298 fdtol->fdl_next = fdtol; 2299 fdtol->fdl_prev = fdtol; 2300 } 2301 return (fdtol); 2302 } 2303 2304 /* 2305 * Get file structures. 2306 */ 2307 static int 2308 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 2309 { 2310 struct xfile xf; 2311 struct filedesc *fdp; 2312 struct file *fp; 2313 struct proc *p; 2314 int error, n; 2315 2316 /* 2317 * Note: because the number of file descriptors is calculated 2318 * in different ways for sizing vs returning the data, 2319 * there is information leakage from the first loop. However, 2320 * it is of a similar order of magnitude to the leakage from 2321 * global system statistics such as kern.openfiles. 2322 */ 2323 error = sysctl_wire_old_buffer(req, 0); 2324 if (error != 0) 2325 return (error); 2326 if (req->oldptr == NULL) { 2327 n = 16; /* A slight overestimate. */ 2328 sx_slock(&filelist_lock); 2329 LIST_FOREACH(fp, &filehead, f_list) { 2330 /* 2331 * We should grab the lock, but this is an 2332 * estimate, so does it really matter? 

struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct filedesc *fdp,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	MALLOC(fdtol, struct filedesc_to_leader *,
	       sizeof(struct filedesc_to_leader),
	       M_FILEDESC_TO_LEADER,
	       M_WAITOK);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		FILEDESC_LOCK(fdp);
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
		FILEDESC_UNLOCK(fdp);
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return (fdtol);
}

/*
 * Get file structures.
 */
static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct xfile xf;
	struct filedesc *fdp;
	struct file *fp;
	struct proc *p;
	int error, n;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 */
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	if (req->oldptr == NULL) {
		n = 16;		/* A slight overestimate. */
		sx_slock(&filelist_lock);
		LIST_FOREACH(fp, &filehead, f_list) {
			/*
			 * We should grab the lock, but this is an
			 * estimate, so does it really matter?
			 */
			/* mtx_lock(fp->f_mtxp); */
			n += fp->f_count;
			/* mtx_unlock(fp->f_mtxp); */
		}
		sx_sunlock(&filelist_lock);
		return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
	}
	error = 0;
	bzero(&xf, sizeof(xf));
	xf.xf_size = sizeof(xf);
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p_cansee(req->td, p) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		xf.xf_pid = p->p_pid;
		xf.xf_uid = p->p_ucred->cr_uid;
		PROC_UNLOCK(p);
		mtx_lock(&fdesc_mtx);
		if ((fdp = p->p_fd) == NULL) {
			mtx_unlock(&fdesc_mtx);
			continue;
		}
		FILEDESC_LOCK(fdp);
		for (n = 0; n < fdp->fd_nfiles; ++n) {
			if ((fp = fdp->fd_ofiles[n]) == NULL)
				continue;
			xf.xf_fd = n;
			xf.xf_file = fp;
			xf.xf_data = fp->f_data;
			xf.xf_vnode = fp->f_vnode;
			xf.xf_type = fp->f_type;
			xf.xf_count = fp->f_count;
			xf.xf_msgcount = fp->f_msgcount;
			xf.xf_offset = fp->f_offset;
			xf.xf_flag = fp->f_flag;
			error = SYSCTL_OUT(req, &xf, sizeof(xf));
			if (error)
				break;
		}
		FILEDESC_UNLOCK(fdp);
		mtx_unlock(&fdesc_mtx);
		if (error)
			break;
	}
	sx_sunlock(&allproc_lock);
	return (error);
}
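
/*
 * Example (illustrative sketch, not part of this file): reading the table
 * exported by sysctl_kern_file() above from a userland program.  The use
 * of <sys/user.h> for struct xfile is an assumption of this sketch; the
 * field names match the assignments in the handler.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/user.h>		// struct xfile
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	size_t len = 0;
 *	sysctlbyname("kern.file", NULL, &len, NULL, 0);	// probe the size
 *	struct xfile *xfiles = malloc(len);
 *	if (sysctlbyname("kern.file", xfiles, &len, NULL, 0) == 0) {
 *		for (size_t i = 0; i < len / sizeof(*xfiles); i++)
 *			printf("pid %d fd %d count %d\n",
 *			    (int)xfiles[i].xf_pid, xfiles[i].xf_fd,
 *			    xfiles[i].xf_count);
 *	}
 *	free(xfiles);
 */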

SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_kern_file, "S,xfile", "Entire file table");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    &nfiles, 0, "System-wide number of open files");

static void
fildesc_drvinit(void *unused)
{
	struct cdev *dev;

	dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0");
	make_dev_alias(dev, "stdin");
	dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1");
	make_dev_alias(dev, "stdout");
	dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2");
	make_dev_alias(dev, "stderr");
}

static fo_rdwr_t	badfo_readwrite;
static fo_ioctl_t	badfo_ioctl;
static fo_poll_t	badfo_poll;
static fo_kqfilter_t	badfo_kqfilter;
static fo_stat_t	badfo_stat;
static fo_close_t	badfo_close;

struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_poll = badfo_poll,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
};

static int
badfo_readwrite(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{

	return (EBADF);
}

static int
badfo_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EBADF);
}

static int
badfo_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{

	return (0);
}

static int
badfo_kqfilter(fp, kn)
	struct file *fp;
	struct knote *kn;
{

	return (0);
}

static int
badfo_stat(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EBADF);
}

static int
badfo_close(fp, td)
	struct file *fp;
	struct thread *td;
{

	return (EBADF);
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
    fildesc_drvinit, NULL)

static void filelistinit(void *);
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL)

/* ARGSUSED */
static void
filelistinit(dummy)
	void *dummy;
{

	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	sx_init(&filelist_lock, "filelist lock");
	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
}