/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/conf.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mqueue.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/tty.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

#include <vm/uma.h>

#include <ddb/ddb.h>

static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
    "file desc to leader structures");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static uma_zone_t file_zone;


/* Flags for do_dup() */
#define	DUP_FIXED	0x1	/* Force fixed allocation */
#define	DUP_FCNTL	0x2	/* fcntl()-style errors */

static int do_dup(struct thread *td, int flags, int old, int new,
    register_t *retval);
static int	fd_first_free(struct filedesc *, int, int);
static int	fd_last_used(struct filedesc *, int, int);
static void	fdgrowtable(struct filedesc *, int);
static void	fdunused(struct filedesc *fdp, int fd);
static void	fdused(struct filedesc *fdp, int fd);

/*
 * A process is initially started out with NDFILE descriptors stored within
 * this structure, selected to be enough for typical applications based on
 * the historical limit of 20 open files (and the usage of descriptors by
 * shells).  If these descriptors are exhausted, a larger descriptor table
 * may be allocated, up to a process' resource limit; the internal arrays
 * are then unused.
 */
#define	NDFILE		20
#define	NDSLOTSIZE	sizeof(NDSLOTTYPE)
#define	NDENTRIES	(NDSLOTSIZE * __CHAR_BIT)
#define	NDSLOT(x)	((x) / NDENTRIES)
#define	NDBIT(x)	((NDSLOTTYPE)1 << ((x) % NDENTRIES))
#define	NDSLOTS(x)	(((x) + NDENTRIES - 1) / NDENTRIES)

/*
 * Storage required per open file descriptor.
 */
#define	OFILESIZE (sizeof(struct file *) + sizeof(char))

/*
 * Basic allocation of descriptors:
 * one of the above, plus arrays for NDFILE descriptors.
 */
struct filedesc0 {
	struct filedesc fd_fd;
	/*
	 * These arrays are used when the number of open files is
	 * <= NDFILE, and are then pointed to by the pointers above.
	 */
	struct file *fd_dfiles[NDFILE];
	char fd_dfileflags[NDFILE];
	NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
};
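
/*
 * Illustrative example (assuming a 64-bit NDSLOTTYPE): descriptor 75 lives
 * in bitmap word NDSLOT(75) = 1 as bit NDBIT(75) = 1 << 11, and a table of
 * 75 descriptors needs NDSLOTS(75) = 2 bitmap words.
 */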
/*
 * Descriptor management.
 */
volatile int openfiles;			/* actual number of open files */
struct mtx sigio_lock;		/* mtx to protect pointers to sigio */
void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);

/* A mutex to protect the association between a proc and filedesc. */
static struct mtx fdesc_mtx;

/*
 * Find the first zero bit in the given bitmap, starting at low and not
 * exceeding size - 1.
 */
static int
fd_first_free(struct filedesc *fdp, int low, int size)
{
	NDSLOTTYPE *map = fdp->fd_map;
	NDSLOTTYPE mask;
	int off, maxoff;

	if (low >= size)
		return (low);

	off = NDSLOT(low);
	if (low % NDENTRIES) {
		mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
		if ((mask &= ~map[off]) != 0UL)
			return (off * NDENTRIES + ffsl(mask) - 1);
		++off;
	}
	for (maxoff = NDSLOTS(size); off < maxoff; ++off)
		if (map[off] != ~0UL)
			return (off * NDENTRIES + ffsl(~map[off]) - 1);
	return (size);
}

/*
 * Find the highest non-zero bit in the given bitmap, starting at low and
 * not exceeding size - 1.
 */
static int
fd_last_used(struct filedesc *fdp, int low, int size)
{
	NDSLOTTYPE *map = fdp->fd_map;
	NDSLOTTYPE mask;
	int off, minoff;

	if (low >= size)
		return (-1);

	off = NDSLOT(size);
	if (size % NDENTRIES) {
		mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES));
		if ((mask &= map[off]) != 0)
			return (off * NDENTRIES + flsl(mask) - 1);
		--off;
	}
	for (minoff = NDSLOT(low); off >= minoff; --off)
		if (map[off] != 0)
			return (off * NDENTRIES + flsl(map[off]) - 1);
	return (low - 1);
}

static int
fdisused(struct filedesc *fdp, int fd)
{
	KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
	    ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
	return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
}

/*
 * Mark a file descriptor as used.
 */
static void
fdused(struct filedesc *fdp, int fd)
{

	FILEDESC_XLOCK_ASSERT(fdp);
	KASSERT(!fdisused(fdp, fd),
	    ("fd already used"));

	fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (fd == fdp->fd_freefile)
		fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
}

/*
 * Mark a file descriptor as unused.
 */
static void
fdunused(struct filedesc *fdp, int fd)
{

	FILEDESC_XLOCK_ASSERT(fdp);
	KASSERT(fdisused(fdp, fd),
	    ("fd is already unused"));
	KASSERT(fdp->fd_ofiles[fd] == NULL,
	    ("fd is still in use"));

	fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
	if (fd < fdp->fd_freefile)
		fdp->fd_freefile = fd;
	if (fd == fdp->fd_lastfile)
		fdp->fd_lastfile = fd_last_used(fdp, 0, fd);
}

/*
 * System calls on descriptors.
 */
#ifndef _SYS_SYSPROTO_H_
struct getdtablesize_args {
	int	dummy;
};
#endif
/* ARGSUSED */
int
getdtablesize(struct thread *td, struct getdtablesize_args *uap)
{
	struct proc *p = td->td_proc;

	PROC_LOCK(p);
	td->td_retval[0] =
	    min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);
	return (0);
}

/*
 * Duplicate a file descriptor to a particular value.
 *
 * Note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
#ifndef _SYS_SYSPROTO_H_
struct dup2_args {
	u_int	from;
	u_int	to;
};
#endif
/* ARGSUSED */
int
dup2(struct thread *td, struct dup2_args *uap)
{

	return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to,
	    td->td_retval));
}

/*
 * Duplicate a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct dup_args {
	u_int	fd;
};
#endif
/* ARGSUSED */
int
dup(struct thread *td, struct dup_args *uap)
{

	return (do_dup(td, 0, (int)uap->fd, 0, td->td_retval));
}

/*
 * The file control system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct fcntl_args {
	int	fd;
	int	cmd;
	long	arg;
};
#endif
/* ARGSUSED */
int
fcntl(struct thread *td, struct fcntl_args *uap)
{
	struct flock fl;
	struct oflock ofl;
	intptr_t arg;
	int error;
	int cmd;

	error = 0;
	cmd = uap->cmd;
	switch (uap->cmd) {
	case F_OGETLK:
	case F_OSETLK:
	case F_OSETLKW:
		/*
		 * Convert old flock structure to new.
		 */
		error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl));
		fl.l_start = ofl.l_start;
		fl.l_len = ofl.l_len;
		fl.l_pid = ofl.l_pid;
		fl.l_type = ofl.l_type;
		fl.l_whence = ofl.l_whence;
		fl.l_sysid = 0;

		switch (uap->cmd) {
		case F_OGETLK:
			cmd = F_GETLK;
			break;
		case F_OSETLK:
			cmd = F_SETLK;
			break;
		case F_OSETLKW:
			cmd = F_SETLKW;
			break;
		}
		arg = (intptr_t)&fl;
		break;
	case F_GETLK:
	case F_SETLK:
	case F_SETLKW:
	case F_SETLK_REMOTE:
		error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl));
		arg = (intptr_t)&fl;
		break;
	default:
		arg = uap->arg;
		break;
	}
	if (error)
		return (error);
	error = kern_fcntl(td, uap->fd, cmd, arg);
	if (error)
		return (error);
	if (uap->cmd == F_OGETLK) {
		ofl.l_start = fl.l_start;
		ofl.l_len = fl.l_len;
		ofl.l_pid = fl.l_pid;
		ofl.l_type = fl.l_type;
		ofl.l_whence = fl.l_whence;
		error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl));
	} else if (uap->cmd == F_GETLK) {
		error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl));
	}
	return (error);
}

static inline struct file *
fdtofp(int fd, struct filedesc *fdp)
{
	struct file *fp;

	FILEDESC_LOCK_ASSERT(fdp);
	if ((unsigned)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL)
		return (NULL);
	return (fp);
}

int
kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
{
	struct filedesc *fdp;
	struct flock *flp;
	struct file *fp;
	struct proc *p;
	char *pop;
	struct vnode *vp;
	int error, flg, tmp;
	int vfslocked;

	vfslocked = 0;
	error = 0;
	flg = F_POSIX;
	p = td->td_proc;
	fdp = p->p_fd;

	switch (cmd) {
	case F_DUPFD:
		tmp = arg;
		error = do_dup(td, DUP_FCNTL, fd, tmp, td->td_retval);
		break;

	case F_DUP2FD:
		tmp = arg;
		error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval);
		break;

	case F_GETFD:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		pop = &fdp->fd_ofileflags[fd];
		td->td_retval[0] = (*pop & UF_EXCLOSE) ?
		    FD_CLOEXEC : 0;
		FILEDESC_SUNLOCK(fdp);
		break;
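
	/*
	 * F_SETFD rewrites the per-descriptor flag byte (close-on-exec)
	 * in place, so it takes the filedesc lock exclusively rather
	 * than shared.
	 */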
	case F_SETFD:
		FILEDESC_XLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_XUNLOCK(fdp);
			error = EBADF;
			break;
		}
		pop = &fdp->fd_ofileflags[fd];
		*pop = (*pop &~ UF_EXCLOSE) |
		    (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
		FILEDESC_XUNLOCK(fdp);
		break;

	case F_GETFL:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		td->td_retval[0] = OFLAGS(fp->f_flag);
		FILEDESC_SUNLOCK(fdp);
		break;

	case F_SETFL:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		do {
			tmp = flg = fp->f_flag;
			tmp &= ~FCNTLFLAGS;
			tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
		} while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
		tmp = fp->f_flag & FNONBLOCK;
		error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
		if (error) {
			fdrop(fp, td);
			break;
		}
		tmp = fp->f_flag & FASYNC;
		error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
		if (error == 0) {
			fdrop(fp, td);
			break;
		}
		atomic_clear_int(&fp->f_flag, FNONBLOCK);
		tmp = 0;
		(void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
		fdrop(fp, td);
		break;

	case F_GETOWN:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
		if (error == 0)
			td->td_retval[0] = tmp;
		fdrop(fp, td);
		break;

	case F_SETOWN:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		tmp = arg;
		error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
		fdrop(fp, td);
		break;

	case F_SETLK_REMOTE:
		error = priv_check(td, PRIV_NFS_LOCKD);
		if (error)
			return (error);
		flg = F_REMOTE;
		goto do_setlk;

	case F_SETLKW:
		flg |= F_WAIT;
		/* FALLTHROUGH F_SETLK */

	case F_SETLK:
	do_setlk:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		if (fp->f_type != DTYPE_VNODE) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		flp = (struct flock *)arg;
		if (flp->l_whence == SEEK_CUR) {
			if (fp->f_offset < 0 ||
			    (flp->l_start > 0 &&
			    fp->f_offset > OFF_MAX - flp->l_start)) {
				FILEDESC_SUNLOCK(fdp);
				error = EOVERFLOW;
				break;
			}
			flp->l_start += fp->f_offset;
		}

		/*
		 * VOP_ADVLOCK() may block.
		 */
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		switch (flp->l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			PROC_LOCK(p->p_leader);
			p->p_leader->p_flag |= P_ADVLOCK;
			PROC_UNLOCK(p->p_leader);
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    flp, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			PROC_LOCK(p->p_leader);
			p->p_leader->p_flag |= P_ADVLOCK;
			PROC_UNLOCK(p->p_leader);
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    flp, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
			    flp, flg);
			break;
		case F_UNLCKSYS:
			/*
			 * Temporary api for testing remote lock
			 * infrastructure.
			 */
			if (flg != F_REMOTE) {
				error = EINVAL;
				break;
			}
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
			    F_UNLCKSYS, flp, flg);
			break;
		default:
			error = EINVAL;
			break;
		}
		VFS_UNLOCK_GIANT(vfslocked);
		vfslocked = 0;
		/* Check for race with close */
		FILEDESC_SLOCK(fdp);
		if ((unsigned) fd >= fdp->fd_nfiles ||
		    fp != fdp->fd_ofiles[fd]) {
			FILEDESC_SUNLOCK(fdp);
			flp->l_whence = SEEK_SET;
			flp->l_start = 0;
			flp->l_len = 0;
			flp->l_type = F_UNLCK;
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
			    F_UNLCK, flp, F_POSIX);
			VFS_UNLOCK_GIANT(vfslocked);
			vfslocked = 0;
		} else
			FILEDESC_SUNLOCK(fdp);
		fdrop(fp, td);
		break;

	case F_GETLK:
		FILEDESC_SLOCK(fdp);
		if ((fp = fdtofp(fd, fdp)) == NULL) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		if (fp->f_type != DTYPE_VNODE) {
			FILEDESC_SUNLOCK(fdp);
			error = EBADF;
			break;
		}
		flp = (struct flock *)arg;
		if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
		    flp->l_type != F_UNLCK) {
			FILEDESC_SUNLOCK(fdp);
			error = EINVAL;
			break;
		}
		if (flp->l_whence == SEEK_CUR) {
			if ((flp->l_start > 0 &&
			    fp->f_offset > OFF_MAX - flp->l_start) ||
			    (flp->l_start < 0 &&
			    fp->f_offset < OFF_MIN - flp->l_start)) {
				FILEDESC_SUNLOCK(fdp);
				error = EOVERFLOW;
				break;
			}
			flp->l_start += fp->f_offset;
		}
		/*
		 * VOP_ADVLOCK() may block.
		 */
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
		    F_POSIX);
		VFS_UNLOCK_GIANT(vfslocked);
		vfslocked = 0;
		fdrop(fp, td);
		break;
	default:
		error = EINVAL;
		break;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
 */
static int
do_dup(struct thread *td, int flags, int old, int new,
    register_t *retval)
{
	struct filedesc *fdp;
	struct proc *p;
	struct file *fp;
	struct file *delfp;
	int error, holdleaders, maxfd;

	p = td->td_proc;
	fdp = p->p_fd;

	/*
	 * Verify we have a valid descriptor to dup from and possibly to
	 * dup to.  Unlike dup() and dup2(), fcntl()'s F_DUPFD should
	 * return EINVAL when the new descriptor is out of bounds.
	 */
	if (old < 0)
		return (EBADF);
	if (new < 0)
		return (flags & DUP_FCNTL ? EINVAL : EBADF);
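
	/*
	 * The target descriptor must also fall below the per-process
	 * RLIMIT_NOFILE limit and the global maxfilesperproc cap checked
	 * below.
	 */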
	PROC_LOCK(p);
	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);
	if (new >= maxfd)
		return (flags & DUP_FCNTL ? EINVAL : EMFILE);

	FILEDESC_XLOCK(fdp);
	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}
	if (flags & DUP_FIXED && old == new) {
		*retval = new;
		FILEDESC_XUNLOCK(fdp);
		return (0);
	}
	fp = fdp->fd_ofiles[old];
	fhold(fp);

	/*
	 * If the caller specified a file descriptor, make sure the file
	 * table is large enough to hold it, and grab it.  Otherwise, just
	 * allocate a new descriptor the usual way.  Since the filedesc
	 * lock may be temporarily dropped in the process, we have to look
	 * out for a race.
	 */
	if (flags & DUP_FIXED) {
		if (new >= fdp->fd_nfiles)
			fdgrowtable(fdp, new + 1);
		if (fdp->fd_ofiles[new] == NULL)
			fdused(fdp, new);
	} else {
		if ((error = fdalloc(td, new, &new)) != 0) {
			FILEDESC_XUNLOCK(fdp);
			fdrop(fp, td);
			return (error);
		}
	}

	/*
	 * If the old file changed out from under us then treat it as a
	 * bad file descriptor.  Userland should do its own locking to
	 * avoid this case.
	 */
	if (fdp->fd_ofiles[old] != fp) {
		/* we've allocated a descriptor which we won't use */
		if (fdp->fd_ofiles[new] == NULL)
			fdunused(fdp, new);
		FILEDESC_XUNLOCK(fdp);
		fdrop(fp, td);
		return (EBADF);
	}
	KASSERT(old != new,
	    ("new fd is same as old"));

	/*
	 * Save info on the descriptor being overwritten.  We cannot close
	 * it without introducing an ownership race for the slot, since we
	 * need to drop the filedesc lock to call closef().
	 *
	 * XXX this duplicates parts of close().
	 */
	delfp = fdp->fd_ofiles[new];
	holdleaders = 0;
	if (delfp != NULL) {
		if (td->td_proc->p_fdtol != NULL) {
			/*
			 * Ask fdfree() to sleep to ensure that all relevant
			 * process leaders can be traversed in closef().
			 */
			fdp->fd_holdleaderscount++;
			holdleaders = 1;
		}
	}

	/*
	 * Duplicate the source descriptor
	 */
	fdp->fd_ofiles[new] = fp;
	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
	if (new > fdp->fd_lastfile)
		fdp->fd_lastfile = new;
	*retval = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 *
	 * XXX this duplicates parts of close().
	 */
	if (delfp != NULL) {
		knote_fdclose(td, new);
		if (delfp->f_type == DTYPE_MQUEUE)
			mq_fdclose(td, new, delfp);
		FILEDESC_XUNLOCK(fdp);
		(void) closef(delfp, td);
		if (holdleaders) {
			FILEDESC_XLOCK(fdp);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				wakeup(&fdp->fd_holdleaderscount);
			}
			FILEDESC_XUNLOCK(fdp);
		}
	} else {
		FILEDESC_XUNLOCK(fdp);
	}
	return (0);
}

/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
	struct sigio *sigio;

	SIGIO_LOCK();
	sigio = *sigiop;
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	*(sigio->sio_myref) = NULL;
	if ((sigio)->sio_pgid < 0) {
		struct pgrp *pg = (sigio)->sio_pgrp;
		PGRP_LOCK(pg);
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
		    sigio, sio_pgsigio);
		PGRP_UNLOCK(pg);
	} else {
		struct proc *p = (sigio)->sio_proc;
		PROC_LOCK(p);
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
		    sigio, sio_pgsigio);
		PROC_UNLOCK(p);
	}
	SIGIO_UNLOCK();
	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO);
}

/*
 * Free a list of sigio structures.
 * We only need to lock the SIGIO_LOCK because we have made ourselves
 * inaccessible to callers of fsetown and therefore do not need to lock
 * the proc or pgrp struct for the list manipulation.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct proc *p;
	struct pgrp *pg;
	struct sigio *sigio;

	sigio = SLIST_FIRST(sigiolst);
	if (sigio == NULL)
		return;
	p = NULL;
	pg = NULL;

	/*
	 * Every entry of the list should belong
	 * to a single proc or pgrp.
	 */
	if (sigio->sio_pgid < 0) {
		pg = sigio->sio_pgrp;
		PGRP_LOCK_ASSERT(pg, MA_NOTOWNED);
	} else /* if (sigio->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		PROC_LOCK_ASSERT(p, MA_NOTOWNED);
	}

	SIGIO_LOCK();
	while ((sigio = SLIST_FIRST(sigiolst)) != NULL) {
		*(sigio->sio_myref) = NULL;
		if (pg != NULL) {
			KASSERT(sigio->sio_pgid < 0,
			    ("Proc sigio in pgrp sigio list"));
			KASSERT(sigio->sio_pgrp == pg,
			    ("Bogus pgrp in sigio list"));
			PGRP_LOCK(pg);
			SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio,
			    sio_pgsigio);
			PGRP_UNLOCK(pg);
		} else /* if (p != NULL) */ {
			KASSERT(sigio->sio_pgid > 0,
			    ("Pgrp sigio in proc sigio list"));
			KASSERT(sigio->sio_proc == p,
			    ("Bogus proc in sigio list"));
			PROC_LOCK(p);
			SLIST_REMOVE(&p->p_sigiolst, sigio, sigio,
			    sio_pgsigio);
			PROC_UNLOCK(p);
		}
		SIGIO_UNLOCK();
		crfree(sigio->sio_ucred);
		free(sigio, M_SIGIO);
		SIGIO_LOCK();
	}
	SIGIO_UNLOCK();
}

/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc;
	struct pgrp *pgrp;
	struct sigio *sigio;
	int ret;

	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	ret = 0;

	/* Allocate and fill in the new sigio out of locks. */
	sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	sigio->sio_myref = sigiop;

	sx_slock(&proctree_lock);
	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL) {
			ret = ESRCH;
			goto fail;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		PROC_UNLOCK(proc);
		if (proc->p_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		pgrp = NULL;
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			ret = ESRCH;
			goto fail;
		}
		PGRP_UNLOCK(pgrp);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curthread->td_proc->p_session) {
			ret = EPERM;
			goto fail;
		}

		proc = NULL;
	}
	funsetown(sigiop);
	if (pgid > 0) {
		PROC_LOCK(proc);
		/*
		 * Since funsetownlst() is called without the proctree
		 * locked, we need to check for P_WEXIT.
		 * XXX: is ESRCH correct?
		 */
		if ((proc->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(proc);
			ret = ESRCH;
			goto fail;
		}
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		PROC_UNLOCK(proc);
	} else {
		PGRP_LOCK(pgrp);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		PGRP_UNLOCK(pgrp);
	}
	sx_sunlock(&proctree_lock);
	SIGIO_LOCK();
	*sigiop = sigio;
	SIGIO_UNLOCK();
	return (0);

fail:
	sx_sunlock(&proctree_lock);
	crfree(sigio->sio_ucred);
	free(sigio, M_SIGIO);
	return (ret);
}

/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(sigiop)
	struct sigio **sigiop;
{
	pid_t pgid;

	SIGIO_LOCK();
	pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
	SIGIO_UNLOCK();
	return (pgid);
}

/*
 * Close a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct close_args {
	int	fd;
};
#endif
/* ARGSUSED */
int
close(td, uap)
	struct thread *td;
	struct close_args *uap;
{

	return (kern_close(td, uap->fd));
}

int
kern_close(td, fd)
	struct thread *td;
	int fd;
{
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	error = 0;
	holdleaders = 0;
	fdp = td->td_proc->p_fd;

	AUDIT_SYSCLOSE(td, fd);

	FILEDESC_XLOCK(fdp);
	if ((unsigned)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL) {
		FILEDESC_XUNLOCK(fdp);
		return (EBADF);
	}
	fdp->fd_ofiles[fd] = NULL;
	fdp->fd_ofileflags[fd] = 0;
	fdunused(fdp, fd);
	if (td->td_proc->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * We now hold the fp reference that used to be owned by the
	 * descriptor array.  We have to unlock the FILEDESC *AFTER*
	 * knote_fdclose to prevent a race of the fd getting opened, a knote
	 * added, and deleting a knote for the new fd.
	 */
	knote_fdclose(td, fd);
	if (fp->f_type == DTYPE_MQUEUE)
		mq_fdclose(td, fd, fp);
	FILEDESC_XUNLOCK(fdp);

	error = closef(fp, td);
	if (holdleaders) {
		FILEDESC_XLOCK(fdp);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			wakeup(&fdp->fd_holdleaderscount);
		}
		FILEDESC_XUNLOCK(fdp);
	}
	return (error);
}

#if defined(COMPAT_43)
/*
 * Return status information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct ofstat_args {
	int	fd;
	struct	ostat *sb;
};
#endif
/* ARGSUSED */
int
ofstat(struct thread *td, struct ofstat_args *uap)
{
	struct ostat oub;
	struct stat ub;
	int error;

	error = kern_fstat(td, uap->fd, &ub);
	if (error == 0) {
		cvtstat(&ub, &oub);
		error = copyout(&oub, uap->sb, sizeof(oub));
	}
	return (error);
}
#endif /* COMPAT_43 */

/*
 * Return status information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct fstat_args {
	int	fd;
	struct	stat *sb;
};
#endif
/* ARGSUSED */
int
fstat(struct thread *td, struct fstat_args *uap)
{
	struct stat ub;
	int error;

	error = kern_fstat(td, uap->fd, &ub);
	if (error == 0)
		error = copyout(&ub, uap->sb, sizeof(ub));
	return (error);
}

int
kern_fstat(struct thread *td, int fd, struct stat *sbp)
{
	struct file *fp;
	int error;

	AUDIT_ARG(fd, fd);

	if ((error = fget(td, fd, &fp)) != 0)
		return (error);

	AUDIT_ARG(file, td->td_proc, fp);

	error = fo_stat(fp, sbp, td->td_ucred, td);
	fdrop(fp, td);
#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT))
		ktrstat(sbp);
#endif
	return (error);
}

/*
 * Return status information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct nfstat_args {
	int	fd;
	struct	nstat *sb;
};
#endif
/* ARGSUSED */
int
nfstat(struct thread *td, struct nfstat_args *uap)
{
	struct nstat nub;
	struct stat ub;
	int error;

	error = kern_fstat(td, uap->fd, &ub);
	if (error == 0) {
		cvtnstat(&ub, &nub);
		error = copyout(&nub, uap->sb, sizeof(nub));
	}
	return (error);
}

/*
 * Return pathconf information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct fpathconf_args {
	int	fd;
	int	name;
};
#endif
/* ARGSUSED */
int
fpathconf(struct thread *td, struct fpathconf_args *uap)
{
	struct file *fp;
	struct vnode *vp;
	int error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);

	/* If asynchronous I/O is available, it works for all descriptors. */
	if (uap->name == _PC_ASYNC_IO) {
		td->td_retval[0] = async_io_version;
		goto out;
	}
	vp = fp->f_vnode;
	if (vp != NULL) {
		int vfslocked;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			td->td_retval[0] = PIPE_BUF;
			error = 0;
		}
	} else {
		error = EOPNOTSUPP;
	}
out:
	fdrop(fp, td);
	return (error);
}

/*
 * Grow the file table to accommodate (at least) nfd descriptors.  This may
 * block and drop the filedesc lock, but it will reacquire it before
 * returning.
 */
static void
fdgrowtable(struct filedesc *fdp, int nfd)
{
	struct file **ntable;
	char *nfileflags;
	int nnfiles, onfiles;
	NDSLOTTYPE *nmap;

	FILEDESC_XLOCK_ASSERT(fdp);

	KASSERT(fdp->fd_nfiles > 0,
	    ("zero-length file table"));

	/* compute the size of the new table */
	onfiles = fdp->fd_nfiles;
	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
	if (nnfiles <= onfiles)
		/* the table is already large enough */
		return;

	/* allocate a new table and (if required) new bitmaps */
	FILEDESC_XUNLOCK(fdp);
	ntable = malloc(nnfiles * OFILESIZE,
	    M_FILEDESC, M_ZERO | M_WAITOK);
	nfileflags = (char *)&ntable[nnfiles];
	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
		nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE,
		    M_FILEDESC, M_ZERO | M_WAITOK);
	else
		nmap = NULL;
	FILEDESC_XLOCK(fdp);

	/*
	 * We now have new tables ready to go.  Since we dropped the
	 * filedesc lock to call malloc(), watch out for a race.
	 */
	onfiles = fdp->fd_nfiles;
	if (onfiles >= nnfiles) {
		/* we lost the race, but that's OK */
		free(ntable, M_FILEDESC);
		if (nmap != NULL)
			free(nmap, M_FILEDESC);
		return;
	}
	bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
	bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
	if (onfiles > NDFILE)
		free(fdp->fd_ofiles, M_FILEDESC);
	fdp->fd_ofiles = ntable;
	fdp->fd_ofileflags = nfileflags;
	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
		bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
		if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
			free(fdp->fd_map, M_FILEDESC);
		fdp->fd_map = nmap;
	}
	fdp->fd_nfiles = nnfiles;
}

/*
 * Allocate a file descriptor for the process.
 */
int
fdalloc(struct thread *td, int minfd, int *result)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	int fd = -1, maxfd;

	FILEDESC_XLOCK_ASSERT(fdp);

	if (fdp->fd_freefile > minfd)
		minfd = fdp->fd_freefile;

	PROC_LOCK(p);
	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);

	/*
	 * Search the bitmap for a free descriptor.  If none is found, try
	 * to grow the file table.  Keep at it until we either get a file
	 * descriptor or run into process or system limits; fdgrowtable()
	 * may drop the filedesc lock, so we're in a race.
	 */
	for (;;) {
		fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
		if (fd >= maxfd)
			return (EMFILE);
		if (fd < fdp->fd_nfiles)
			break;
		fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
	}

	/*
	 * Perform some sanity checks, then mark the file descriptor as
	 * used and return it to the caller.
	 */
	KASSERT(!fdisused(fdp, fd),
	    ("fd_first_free() returned non-free descriptor"));
	KASSERT(fdp->fd_ofiles[fd] == NULL,
	    ("free descriptor isn't"));
	fdp->fd_ofileflags[fd] = 0; /* XXX needed? */
	fdused(fdp, fd);
	*result = fd;
	return (0);
}

/*
 * Check to see whether n user file descriptors are available to the process
 * p.
 */
int
fdavail(struct thread *td, int n)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file **fpp;
	int i, lim, last;

	FILEDESC_LOCK_ASSERT(fdp);

	PROC_LOCK(p);
	lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
		return (1);
	last = min(fdp->fd_nfiles, lim);
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
		if (*fpp == NULL && --n <= 0)
			return (1);
	}
	return (0);
}

/*
 * Create a new open file structure and allocate a file descriptor for the
 * process that refers to it.  We add one reference to the file for the
 * descriptor table and one reference for resultfp.  This is to prevent us
 * being preempted and the entry in the descriptor table closed after we
 * release the FILEDESC lock.
 */
int
falloc(struct thread *td, struct file **resultfp, int *resultfd)
{
	struct proc *p = td->td_proc;
	struct file *fp;
	int error, i;
	int maxuserfiles = maxfiles - (maxfiles / 20);
	static struct timeval lastfail;
	static int curfail;

	fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
	if ((openfiles >= maxuserfiles &&
	    priv_check(td, PRIV_MAXFILES) != 0) ||
	    openfiles >= maxfiles) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("kern.maxfiles limit exceeded by uid %i, please see tuning(7).\n",
			    td->td_ucred->cr_ruid);
		}
		uma_zfree(file_zone, fp);
		return (ENFILE);
	}
	atomic_add_int(&openfiles, 1);

	/*
	 * If the process has file descriptor zero open, add the new file
	 * descriptor to the list of open files at that point, otherwise
	 * put it at the front of the list of open files.
	 */
	refcount_init(&fp->f_count, 1);
	if (resultfp)
		fhold(fp);
	fp->f_cred = crhold(td->td_ucred);
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fp->f_vnode = NULL;
	FILEDESC_XLOCK(p->p_fd);
	if ((error = fdalloc(td, 0, &i))) {
		FILEDESC_XUNLOCK(p->p_fd);
		fdrop(fp, td);
		if (resultfp)
			fdrop(fp, td);
		return (error);
	}
	p->p_fd->fd_ofiles[i] = fp;
	FILEDESC_XUNLOCK(p->p_fd);
	if (resultfp)
		*resultfp = fp;
	if (resultfd)
		*resultfd = i;
	return (0);
}

/*
 * Build a new filedesc structure from another.
 * Copy the current, root, and jail root vnode references.
 */
struct filedesc *
fdinit(struct filedesc *fdp)
{
	struct filedesc0 *newfdp;

	newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO);
	FILEDESC_LOCK_INIT(&newfdp->fd_fd);
	if (fdp != NULL) {
		FILEDESC_XLOCK(fdp);
		newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
		if (newfdp->fd_fd.fd_cdir)
			VREF(newfdp->fd_fd.fd_cdir);
		newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
		if (newfdp->fd_fd.fd_rdir)
			VREF(newfdp->fd_fd.fd_rdir);
		newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
		if (newfdp->fd_fd.fd_jdir)
			VREF(newfdp->fd_fd.fd_jdir);
		FILEDESC_XUNLOCK(fdp);
	}

	/* Create the file descriptor table. */
	newfdp->fd_fd.fd_refcnt = 1;
	newfdp->fd_fd.fd_holdcnt = 1;
	newfdp->fd_fd.fd_cmask = CMASK;
	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
	newfdp->fd_fd.fd_nfiles = NDFILE;
	newfdp->fd_fd.fd_map = newfdp->fd_dmap;
	newfdp->fd_fd.fd_lastfile = -1;
	return (&newfdp->fd_fd);
}

static struct filedesc *
fdhold(struct proc *p)
{
	struct filedesc *fdp;

	mtx_lock(&fdesc_mtx);
	fdp = p->p_fd;
	if (fdp != NULL)
		fdp->fd_holdcnt++;
	mtx_unlock(&fdesc_mtx);
	return (fdp);
}

static void
fddrop(struct filedesc *fdp)
{
	int i;

	mtx_lock(&fdesc_mtx);
	i = --fdp->fd_holdcnt;
	mtx_unlock(&fdesc_mtx);
	if (i > 0)
		return;

	FILEDESC_LOCK_DESTROY(fdp);
	free(fdp, M_FILEDESC);
}

/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct filedesc *fdp)
{

	FILEDESC_XLOCK(fdp);
	fdp->fd_refcnt++;
	FILEDESC_XUNLOCK(fdp);
	return (fdp);
}

/*
 * Unshare a filedesc structure, if necessary by making a copy
 */
void
fdunshare(struct proc *p, struct thread *td)
{

	FILEDESC_XLOCK(p->p_fd);
	if (p->p_fd->fd_refcnt > 1) {
		struct filedesc *tmp;

		FILEDESC_XUNLOCK(p->p_fd);
		tmp = fdcopy(p->p_fd);
		fdfree(td);
		p->p_fd = tmp;
	} else
		FILEDESC_XUNLOCK(p->p_fd);
}

/*
 * Copy a filedesc structure.  A NULL pointer in returns a NULL reference,
 * this is to ease callers, not catch errors.
 */
struct filedesc *
fdcopy(struct filedesc *fdp)
{
	struct filedesc *newfdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return (NULL);

	newfdp = fdinit(fdp);
	FILEDESC_SLOCK(fdp);
	while (fdp->fd_lastfile >= newfdp->fd_nfiles) {
		FILEDESC_SUNLOCK(fdp);
		FILEDESC_XLOCK(newfdp);
		fdgrowtable(newfdp, fdp->fd_lastfile + 1);
		FILEDESC_XUNLOCK(newfdp);
		FILEDESC_SLOCK(fdp);
	}
	/* copy everything except kqueue descriptors */
	newfdp->fd_freefile = -1;
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdisused(fdp, i) &&
		    fdp->fd_ofiles[i]->f_type != DTYPE_KQUEUE) {
			newfdp->fd_ofiles[i] = fdp->fd_ofiles[i];
			newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i];
			fhold(newfdp->fd_ofiles[i]);
			newfdp->fd_lastfile = i;
		} else {
			if (newfdp->fd_freefile == -1)
				newfdp->fd_freefile = i;
		}
	}
	FILEDESC_SUNLOCK(fdp);
	FILEDESC_XLOCK(newfdp);
	for (i = 0; i <= newfdp->fd_lastfile; ++i)
		if (newfdp->fd_ofiles[i] != NULL)
			fdused(newfdp, i);
	FILEDESC_XUNLOCK(newfdp);
	FILEDESC_SLOCK(fdp);
	if (newfdp->fd_freefile == -1)
		newfdp->fd_freefile = i;
	newfdp->fd_cmask = fdp->fd_cmask;
	FILEDESC_SUNLOCK(fdp);
	return (newfdp);
}

/*
 * Release a filedesc structure.
 */
void
fdfree(struct thread *td)
{
	struct filedesc *fdp;
	struct file **fpp;
	int i, locked;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *cdir, *jdir, *rdir, *vp;
	struct flock lf;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/* Check for special need to clear POSIX style locks */
	fdtol = td->td_proc->p_fdtol;
	if (fdtol != NULL) {
		FILEDESC_XLOCK(fdp);
		KASSERT(fdtol->fdl_refcount > 0,
		    ("filedesc_to_refcount botch: fdl_refcount=%d",
		    fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 &&
		    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
			for (i = 0, fpp = fdp->fd_ofiles;
			    i <= fdp->fd_lastfile;
			    i++, fpp++) {
				if (*fpp == NULL ||
				    (*fpp)->f_type != DTYPE_VNODE)
					continue;
				fp = *fpp;
				fhold(fp);
				FILEDESC_XUNLOCK(fdp);
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = fp->f_vnode;
				locked = VFS_LOCK_GIANT(vp->v_mount);
				(void) VOP_ADVLOCK(vp,
				    (caddr_t)td->td_proc->p_leader,
				    F_UNLCK,
				    &lf,
				    F_POSIX);
				VFS_UNLOCK_GIANT(locked);
				FILEDESC_XLOCK(fdp);
				fdrop(fp, td);
				fpp = fdp->fd_ofiles + i;
			}
		}
	retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				sx_sleep(&fdp->fd_holdleaderscount,
				    FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader remains
				 * valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
				    "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else
			fdtol = NULL;
		td->td_proc->p_fdtol = NULL;
		FILEDESC_XUNLOCK(fdp);
		if (fdtol != NULL)
			free(fdtol, M_FILEDESC_TO_LEADER);
	}
	FILEDESC_XLOCK(fdp);
	i = --fdp->fd_refcnt;
	FILEDESC_XUNLOCK(fdp);
	if (i > 0)
		return;
	/*
	 * We are the last reference to the structure, so we can
	 * safely assume it will not change out from under us.
	 */
	fpp = fdp->fd_ofiles;
	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp)
			(void) closef(*fpp, td);
	}
	FILEDESC_XLOCK(fdp);

	/* XXX This should happen earlier. */
	mtx_lock(&fdesc_mtx);
	td->td_proc->p_fd = NULL;
	mtx_unlock(&fdesc_mtx);

	if (fdp->fd_nfiles > NDFILE)
		free(fdp->fd_ofiles, M_FILEDESC);
	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
		free(fdp->fd_map, M_FILEDESC);

	fdp->fd_nfiles = 0;

	cdir = fdp->fd_cdir;
	fdp->fd_cdir = NULL;
	rdir = fdp->fd_rdir;
	fdp->fd_rdir = NULL;
	jdir = fdp->fd_jdir;
	fdp->fd_jdir = NULL;
	FILEDESC_XUNLOCK(fdp);

	if (cdir) {
		locked = VFS_LOCK_GIANT(cdir->v_mount);
		vrele(cdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (rdir) {
		locked = VFS_LOCK_GIANT(rdir->v_mount);
		vrele(rdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (jdir) {
		locked = VFS_LOCK_GIANT(jdir->v_mount);
		vrele(jdir);
		VFS_UNLOCK_GIANT(locked);
	}

	fddrop(fdp);
}

/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would otherwise
 * be off-limits to the process.  We check for filesystems where the vnode
 * can change out from under us after execve (like [lin]procfs).
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE) {
		struct vnode *vp = fp->f_vnode;

		if ((vp->v_vflag & VV_PROCDEP) != 0)
			return (1);
	}
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 */
void
setugidsafety(struct thread *td)
{
	struct filedesc *fdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * Note: fdp->fd_ofiles may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	FILEDESC_XLOCK(fdp);
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
			struct file *fp;

			knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			fdunused(fdp, i);
			FILEDESC_XUNLOCK(fdp);
			(void) closef(fp, td);
			FILEDESC_XLOCK(fdp);
		}
	}
	FILEDESC_XUNLOCK(fdp);
}

/*
 * If a specific file object occupies a specific file descriptor, close the
 * file descriptor entry and drop a reference on the file object.  This is a
 * convenience function to handle a subsequent error in a function that calls
 * falloc() that handles the race that another thread might have closed the
 * file descriptor out from under the thread creating the file object.
 */
void
fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td)
{

	FILEDESC_XLOCK(fdp);
	if (fdp->fd_ofiles[idx] == fp) {
		fdp->fd_ofiles[idx] = NULL;
		fdunused(fdp, idx);
		FILEDESC_XUNLOCK(fdp);
		fdrop(fp, td);
	} else
		FILEDESC_XUNLOCK(fdp);
}

/*
 * Close any files on exec?
 */
void
fdcloseexec(struct thread *td)
{
	struct filedesc *fdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	FILEDESC_XLOCK(fdp);

	/*
	 * We cannot cache fd_ofiles or fd_ofileflags since operations
	 * may block and rip them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_ofiles[i] != NULL &&
		    (fdp->fd_ofiles[i]->f_type == DTYPE_MQUEUE ||
		    (fdp->fd_ofileflags[i] & UF_EXCLOSE))) {
			struct file *fp;

			knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			fdunused(fdp, i);
			if (fp->f_type == DTYPE_MQUEUE)
				mq_fdclose(td, i, fp);
			FILEDESC_XUNLOCK(fdp);
			(void) closef(fp, td);
			FILEDESC_XLOCK(fdp);
		}
	}
	FILEDESC_XUNLOCK(fdp);
}

/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 */
int
fdcheckstd(struct thread *td)
{
	struct filedesc *fdp;
	register_t retval, save;
	int i, error, devnull;

	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return (0);
	KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared"));
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_ofiles[i] != NULL)
			continue;
		if (devnull < 0) {
			save = td->td_retval[0];
			error = kern_open(td, "/dev/null", UIO_SYSSPACE,
			    O_RDWR, 0);
			devnull = td->td_retval[0];
			KASSERT(devnull == i, ("oof, we didn't get our fd"));
			td->td_retval[0] = save;
			if (error)
				break;
		} else {
			error = do_dup(td, DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Internal form of close.  Decrement reference count on file structure.
 * Note: td may be NULL when closing a file that was being passed in a
 * message.
 *
 * XXXRW: Giant is not required for the caller, but often will be held; this
 * makes it moderately likely the Giant will be recursed in the VFS case.
 */
int
closef(struct file *fp, struct thread *td)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;
	struct filedesc *fdp;

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor, and the thread pointer
	 * will be NULL.  Callers should be careful only to pass a
	 * NULL thread pointer when there really is no owning
	 * context that might have locks, or the locks will be
	 * leaked.
	 */
	if (fp->f_type == DTYPE_VNODE && td != NULL) {
		int vfslocked;

		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
			    F_UNLCK, &lf, F_POSIX);
		}
		fdtol = td->td_proc->p_fdtol;
		if (fdtol != NULL) {
			/*
			 * Handle special case where file descriptor table is
			 * shared between multiple process leaders.
			 */
			fdp = td->td_proc->p_fd;
			FILEDESC_XLOCK(fdp);
			for (fdtol = fdtol->fdl_next;
			    fdtol != td->td_proc->p_fdtol;
			    fdtol = fdtol->fdl_next) {
				if ((fdtol->fdl_leader->p_flag &
				    P_ADVLOCK) == 0)
					continue;
				fdtol->fdl_holdcount++;
				FILEDESC_XUNLOCK(fdp);
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = fp->f_vnode;
				(void) VOP_ADVLOCK(vp,
				    (caddr_t)fdtol->fdl_leader,
				    F_UNLCK, &lf, F_POSIX);
				FILEDESC_XLOCK(fdp);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			FILEDESC_XUNLOCK(fdp);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	return (fdrop(fp, td));
}

/*
 * Initialize the file pointer with the specified properties.
 *
 * The ops are set with release semantics to be certain that the flags, type,
 * and data are visible when ops is.  This is to prevent ops methods from being
 * called with bad data.
 */
void
finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
{
	fp->f_data = data;
	fp->f_flag = flag;
	fp->f_type = type;
	atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
}


/*
 * Extract the file pointer associated with the specified descriptor for the
 * current user process.
 *
 * If the descriptor doesn't exist, EBADF is returned.
 *
 * If the descriptor exists but doesn't match 'flags' then return EBADF for
 * read attempts and EINVAL for write attempts.
 *
 * If 'hold' is set (non-zero) the file's refcount will be bumped on return.
 * It should be dropped with fdrop().  If it is not set, then the refcount
 * will not be bumped however the thread's filedesc struct will be returned
 * locked (for fgetsock).
 *
 * If an error occurred the non-zero error is returned and *fpp is set to
 * NULL.  Otherwise *fpp is set and zero is returned.
 */
static __inline int
_fget(struct thread *td, int fd, struct file **fpp, int flags, int hold)
{
	struct filedesc *fdp;
	struct file *fp;

	*fpp = NULL;
	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
		return (EBADF);
	FILEDESC_SLOCK(fdp);
	if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}

	/*
	 * FREAD and FWRITE failure return EBADF as per POSIX.
	 *
	 * Only one flag, or 0, may be specified.
	 */
	if (flags == FREAD && (fp->f_flag & FREAD) == 0) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	if (hold) {
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
	}
	*fpp = fp;
	return (0);
}

int
fget(struct thread *td, int fd, struct file **fpp)
{

	return(_fget(td, fd, fpp, 0, 1));
}

int
fget_read(struct thread *td, int fd, struct file **fpp)
{

	return(_fget(td, fd, fpp, FREAD, 1));
}

int
fget_write(struct thread *td, int fd, struct file **fpp)
{

	return(_fget(td, fd, fpp, FWRITE, 1));
}

/*
 * Like fget() but loads the underlying vnode, or returns an error if the
 * descriptor does not represent a vnode.  Note that pipes use vnodes but
 * never have VM objects.  The returned vnode will be vref()'d.
 *
 * XXX: what about the unused flags ?
 */
static __inline int
_fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags)
{
	struct file *fp;
	int error;

	*vpp = NULL;
	if ((error = _fget(td, fd, &fp, flags, 0)) != 0)
		return (error);
	if (fp->f_vnode == NULL) {
		error = EINVAL;
	} else {
		*vpp = fp->f_vnode;
		vref(*vpp);
	}
	FILEDESC_SUNLOCK(td->td_proc->p_fd);
	return (error);
}

int
fgetvp(struct thread *td, int fd, struct vnode **vpp)
{

	return (_fgetvp(td, fd, vpp, 0));
}

int
fgetvp_read(struct thread *td, int fd, struct vnode **vpp)
{

	return (_fgetvp(td, fd, vpp, FREAD));
}

#ifdef notyet
int
fgetvp_write(struct thread *td, int fd, struct vnode **vpp)
{

	return (_fgetvp(td, fd, vpp, FWRITE));
}
#endif

/*
 * Like fget() but loads the underlying socket, or returns an error if the
 * descriptor does not represent a socket.
 *
 * We bump the ref count on the returned socket.  XXX Also obtain the SX lock
 * in the future.
 *
 * Note: fgetsock() and fputsock() are deprecated, as consumers should rely
 * on their file descriptor reference to prevent the socket from being freed
 * during use.
 */
int
fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp)
{
	struct file *fp;
	int error;

	*spp = NULL;
	if (fflagp != NULL)
		*fflagp = 0;
	if ((error = _fget(td, fd, &fp, 0, 0)) != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		error = ENOTSOCK;
	} else {
		*spp = fp->f_data;
		if (fflagp)
			*fflagp = fp->f_flag;
		SOCK_LOCK(*spp);
		soref(*spp);
		SOCK_UNLOCK(*spp);
	}
	FILEDESC_SUNLOCK(td->td_proc->p_fd);
	return (error);
}

/*
 * Drop the reference count on the socket and XXX release the SX lock in the
 * future.  The last reference closes the socket.
 *
 * Note: fputsock() is deprecated, see comment for fgetsock().
 */
void
fputsock(struct socket *so)
{

	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sorele(so);
}

/*
 * Handle the last reference to a file being closed.
 */
int
_fdrop(struct file *fp, struct thread *td)
{
	int error;

	error = 0;
	if (fp->f_count != 0)
		panic("fdrop: count %d", fp->f_count);
	if (fp->f_ops != &badfileops)
		error = fo_close(fp, td);
	/*
	 * The f_cdevpriv cannot be assigned non-NULL value while we
	 * are destroying the file.
	 */
	if (fp->f_cdevpriv != NULL)
		devfs_fpdrop(fp);
	atomic_subtract_int(&openfiles, 1);
	crfree(fp->f_cred);
	uma_zfree(file_zone, fp);

	return (error);
}

/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on the entire file
 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
#ifndef _SYS_SYSPROTO_H_
struct flock_args {
	int	fd;
	int	how;
};
#endif
/* ARGSUSED */
int
flock(struct thread *td, struct flock_args *uap)
{
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int vfslocked;
	int error;

	if ((error = fget(td, uap->fd, &fp)) != 0)
		return (error);
	if (fp->f_type != DTYPE_VNODE) {
		fdrop(fp, td);
		return (EOPNOTSUPP);
	}

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK);
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
		goto done2;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done2;
	}
	atomic_set_int(&fp->f_flag, FHASLOCK);
	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
	    (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
done2:
	fdrop(fp, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}
/*
 * Duplicate the specified descriptor to a free descriptor.
 */
int
dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd, int mode, int error)
{
	struct file *wfp;
	struct file *fp;

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, then reject.
2292 */ 2293 FILEDESC_XLOCK(fdp); 2294 if (dfd < 0 || dfd >= fdp->fd_nfiles || 2295 (wfp = fdp->fd_ofiles[dfd]) == NULL) { 2296 FILEDESC_XUNLOCK(fdp); 2297 return (EBADF); 2298 } 2299 2300 /* 2301 * There are two cases of interest here. 2302 * 2303 * For ENODEV simply dup (dfd) to file descriptor (indx) and return. 2304 * 2305 * For ENXIO steal away the file structure from (dfd) and store it in 2306 * (indx). (dfd) is effectively closed by this operation. 2307 * 2308 * Any other error code is just returned. 2309 */ 2310 switch (error) { 2311 case ENODEV: 2312 /* 2313 * Check that the mode the file is being opened for is a 2314 * subset of the mode of the existing descriptor. 2315 */ 2316 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { 2317 FILEDESC_XUNLOCK(fdp); 2318 return (EACCES); 2319 } 2320 fp = fdp->fd_ofiles[indx]; 2321 fdp->fd_ofiles[indx] = wfp; 2322 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2323 if (fp == NULL) 2324 fdused(fdp, indx); 2325 fhold(wfp); 2326 FILEDESC_XUNLOCK(fdp); 2327 if (fp != NULL) 2328 /* 2329 * We now own the reference to fp that the ofiles[] 2330 * array used to own. Release it. 2331 */ 2332 fdrop(fp, td); 2333 return (0); 2334 2335 case ENXIO: 2336 /* 2337 * Steal away the file pointer from dfd and stuff it into indx. 2338 */ 2339 fp = fdp->fd_ofiles[indx]; 2340 fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd]; 2341 fdp->fd_ofiles[dfd] = NULL; 2342 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2343 fdp->fd_ofileflags[dfd] = 0; 2344 fdunused(fdp, dfd); 2345 if (fp == NULL) 2346 fdused(fdp, indx); 2347 FILEDESC_XUNLOCK(fdp); 2348 2349 /* 2350 * We now own the reference to fp that the ofiles[] array 2351 * used to own. Release it. 2352 */ 2353 if (fp != NULL) 2354 fdrop(fp, td); 2355 return (0); 2356 2357 default: 2358 FILEDESC_XUNLOCK(fdp); 2359 return (error); 2360 } 2361 /* NOTREACHED */ 2362 } 2363 2364 /* 2365 * Scan all active processes to see if any of them have a current or root 2366 * directory of `olddp'. If so, replace them with the new mount point. 
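 *
 * For example, when a new root file system replaces the old one, the
 * mount code is expected to call something along the lines of
 *
 *	mountcheckdirs(old_rootvnode, new_rootvnode);
 *
 * (names illustrative) so that no process is left with a current or root
 * directory referencing the stale vnode.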
2367 */ 2368 void 2369 mountcheckdirs(struct vnode *olddp, struct vnode *newdp) 2370 { 2371 struct filedesc *fdp; 2372 struct proc *p; 2373 int nrele; 2374 2375 if (vrefcnt(olddp) == 1) 2376 return; 2377 sx_slock(&allproc_lock); 2378 FOREACH_PROC_IN_SYSTEM(p) { 2379 fdp = fdhold(p); 2380 if (fdp == NULL) 2381 continue; 2382 nrele = 0; 2383 FILEDESC_XLOCK(fdp); 2384 if (fdp->fd_cdir == olddp) { 2385 vref(newdp); 2386 fdp->fd_cdir = newdp; 2387 nrele++; 2388 } 2389 if (fdp->fd_rdir == olddp) { 2390 vref(newdp); 2391 fdp->fd_rdir = newdp; 2392 nrele++; 2393 } 2394 FILEDESC_XUNLOCK(fdp); 2395 fddrop(fdp); 2396 while (nrele--) 2397 vrele(olddp); 2398 } 2399 sx_sunlock(&allproc_lock); 2400 if (rootvnode == olddp) { 2401 vrele(rootvnode); 2402 vref(newdp); 2403 rootvnode = newdp; 2404 } 2405 } 2406 2407 struct filedesc_to_leader * 2408 filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader) 2409 { 2410 struct filedesc_to_leader *fdtol; 2411 2412 fdtol = malloc(sizeof(struct filedesc_to_leader), 2413 M_FILEDESC_TO_LEADER, 2414 M_WAITOK); 2415 fdtol->fdl_refcount = 1; 2416 fdtol->fdl_holdcount = 0; 2417 fdtol->fdl_wakeup = 0; 2418 fdtol->fdl_leader = leader; 2419 if (old != NULL) { 2420 FILEDESC_XLOCK(fdp); 2421 fdtol->fdl_next = old->fdl_next; 2422 fdtol->fdl_prev = old; 2423 old->fdl_next = fdtol; 2424 fdtol->fdl_next->fdl_prev = fdtol; 2425 FILEDESC_XUNLOCK(fdp); 2426 } else { 2427 fdtol->fdl_next = fdtol; 2428 fdtol->fdl_prev = fdtol; 2429 } 2430 return (fdtol); 2431 } 2432 2433 /* 2434 * Get file structures globally. 2435 */ 2436 static int 2437 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 2438 { 2439 struct xfile xf; 2440 struct filedesc *fdp; 2441 struct file *fp; 2442 struct proc *p; 2443 int error, n; 2444 2445 error = sysctl_wire_old_buffer(req, 0); 2446 if (error != 0) 2447 return (error); 2448 if (req->oldptr == NULL) { 2449 n = 0; 2450 sx_slock(&allproc_lock); 2451 FOREACH_PROC_IN_SYSTEM(p) { 2452 if (p->p_state == PRS_NEW) 2453 continue; 2454 fdp = fdhold(p); 2455 if (fdp == NULL) 2456 continue; 2457 /* overestimates sparse tables. 
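 * fd_lastfile is the highest descriptor currently in use, so tables with
 * unused slots below that point are counted high; this is harmless, since
 * the value is only returned as a sizing estimate when the caller passes
 * a NULL old pointer.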
*/ 2458 if (fdp->fd_lastfile > 0) 2459 n += fdp->fd_lastfile; 2460 fddrop(fdp); 2461 } 2462 sx_sunlock(&allproc_lock); 2463 return (SYSCTL_OUT(req, 0, n * sizeof(xf))); 2464 } 2465 error = 0; 2466 bzero(&xf, sizeof(xf)); 2467 xf.xf_size = sizeof(xf); 2468 sx_slock(&allproc_lock); 2469 FOREACH_PROC_IN_SYSTEM(p) { 2470 if (p->p_state == PRS_NEW) 2471 continue; 2472 PROC_LOCK(p); 2473 if (p_cansee(req->td, p) != 0) { 2474 PROC_UNLOCK(p); 2475 continue; 2476 } 2477 xf.xf_pid = p->p_pid; 2478 xf.xf_uid = p->p_ucred->cr_uid; 2479 PROC_UNLOCK(p); 2480 fdp = fdhold(p); 2481 if (fdp == NULL) 2482 continue; 2483 FILEDESC_SLOCK(fdp); 2484 for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) { 2485 if ((fp = fdp->fd_ofiles[n]) == NULL) 2486 continue; 2487 xf.xf_fd = n; 2488 xf.xf_file = fp; 2489 xf.xf_data = fp->f_data; 2490 xf.xf_vnode = fp->f_vnode; 2491 xf.xf_type = fp->f_type; 2492 xf.xf_count = fp->f_count; 2493 xf.xf_msgcount = 0; 2494 xf.xf_offset = fp->f_offset; 2495 xf.xf_flag = fp->f_flag; 2496 error = SYSCTL_OUT(req, &xf, sizeof(xf)); 2497 if (error) 2498 break; 2499 } 2500 FILEDESC_SUNLOCK(fdp); 2501 fddrop(fdp); 2502 if (error) 2503 break; 2504 } 2505 sx_sunlock(&allproc_lock); 2506 return (error); 2507 } 2508 2509 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 2510 0, 0, sysctl_kern_file, "S,xfile", "Entire file table"); 2511 2512 #ifdef KINFO_OFILE_SIZE 2513 CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE); 2514 #endif 2515 2516 #ifdef COMPAT_FREEBSD7 2517 static int 2518 export_vnode_for_osysctl(struct vnode *vp, int type, 2519 struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req) 2520 { 2521 int error; 2522 char *fullpath, *freepath; 2523 int vfslocked; 2524 2525 bzero(kif, sizeof(*kif)); 2526 kif->kf_structsize = sizeof(*kif); 2527 2528 vref(vp); 2529 kif->kf_fd = type; 2530 kif->kf_type = KF_TYPE_VNODE; 2531 /* This function only handles directories. */ 2532 KASSERT(vp->v_type == VDIR, ("export_vnode_for_osysctl: vnode not directory")); 2533 kif->kf_vnode_type = KF_VTYPE_VDIR; 2534 2535 /* 2536 * This is not a true file descriptor, so we set a bogus refcount 2537 * and offset to indicate these fields should be ignored. 2538 */ 2539 kif->kf_ref_count = -1; 2540 kif->kf_offset = -1; 2541 2542 freepath = NULL; 2543 fullpath = "-"; 2544 FILEDESC_SUNLOCK(fdp); 2545 vn_fullpath(curthread, vp, &fullpath, &freepath); 2546 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2547 vrele(vp); 2548 VFS_UNLOCK_GIANT(vfslocked); 2549 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); 2550 if (freepath != NULL) 2551 free(freepath, M_TEMP); 2552 error = SYSCTL_OUT(req, kif, sizeof(*kif)); 2553 FILEDESC_SLOCK(fdp); 2554 return (error); 2555 } 2556 2557 /* 2558 * Get per-process file descriptors for use by procstat(1), et al. 
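 *
 * This is the older FreeBSD 7 interface (hence the COMPAT_FREEBSD7
 * conditional): it emits fixed-size struct kinfo_ofile records, one per
 * descriptor plus the cwd/root/jail directories.  The replacement handler
 * further down instead packs each struct kinfo_file record down to the
 * portion of kf_path actually used.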
2559 */ 2560 static int 2561 sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS) 2562 { 2563 char *fullpath, *freepath; 2564 struct kinfo_ofile *kif; 2565 struct filedesc *fdp; 2566 int error, i, *name; 2567 struct socket *so; 2568 struct vnode *vp; 2569 struct file *fp; 2570 struct proc *p; 2571 struct tty *tp; 2572 int vfslocked; 2573 2574 name = (int *)arg1; 2575 if ((p = pfind((pid_t)name[0])) == NULL) 2576 return (ESRCH); 2577 if ((error = p_candebug(curthread, p))) { 2578 PROC_UNLOCK(p); 2579 return (error); 2580 } 2581 fdp = fdhold(p); 2582 PROC_UNLOCK(p); 2583 if (fdp == NULL) 2584 return (ENOENT); 2585 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK); 2586 FILEDESC_SLOCK(fdp); 2587 if (fdp->fd_cdir != NULL) 2588 export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif, 2589 fdp, req); 2590 if (fdp->fd_rdir != NULL) 2591 export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif, 2592 fdp, req); 2593 if (fdp->fd_jdir != NULL) 2594 export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif, 2595 fdp, req); 2596 for (i = 0; i < fdp->fd_nfiles; i++) { 2597 if ((fp = fdp->fd_ofiles[i]) == NULL) 2598 continue; 2599 bzero(kif, sizeof(*kif)); 2600 kif->kf_structsize = sizeof(*kif); 2601 vp = NULL; 2602 so = NULL; 2603 tp = NULL; 2604 kif->kf_fd = i; 2605 switch (fp->f_type) { 2606 case DTYPE_VNODE: 2607 kif->kf_type = KF_TYPE_VNODE; 2608 vp = fp->f_vnode; 2609 break; 2610 2611 case DTYPE_SOCKET: 2612 kif->kf_type = KF_TYPE_SOCKET; 2613 so = fp->f_data; 2614 break; 2615 2616 case DTYPE_PIPE: 2617 kif->kf_type = KF_TYPE_PIPE; 2618 break; 2619 2620 case DTYPE_FIFO: 2621 kif->kf_type = KF_TYPE_FIFO; 2622 vp = fp->f_vnode; 2623 vref(vp); 2624 break; 2625 2626 case DTYPE_KQUEUE: 2627 kif->kf_type = KF_TYPE_KQUEUE; 2628 break; 2629 2630 case DTYPE_CRYPTO: 2631 kif->kf_type = KF_TYPE_CRYPTO; 2632 break; 2633 2634 case DTYPE_MQUEUE: 2635 kif->kf_type = KF_TYPE_MQUEUE; 2636 break; 2637 2638 case DTYPE_SHM: 2639 kif->kf_type = KF_TYPE_SHM; 2640 break; 2641 2642 case DTYPE_SEM: 2643 kif->kf_type = KF_TYPE_SEM; 2644 break; 2645 2646 case DTYPE_PTS: 2647 kif->kf_type = KF_TYPE_PTS; 2648 tp = fp->f_data; 2649 break; 2650 2651 default: 2652 kif->kf_type = KF_TYPE_UNKNOWN; 2653 break; 2654 } 2655 kif->kf_ref_count = fp->f_count; 2656 if (fp->f_flag & FREAD) 2657 kif->kf_flags |= KF_FLAG_READ; 2658 if (fp->f_flag & FWRITE) 2659 kif->kf_flags |= KF_FLAG_WRITE; 2660 if (fp->f_flag & FAPPEND) 2661 kif->kf_flags |= KF_FLAG_APPEND; 2662 if (fp->f_flag & FASYNC) 2663 kif->kf_flags |= KF_FLAG_ASYNC; 2664 if (fp->f_flag & FFSYNC) 2665 kif->kf_flags |= KF_FLAG_FSYNC; 2666 if (fp->f_flag & FNONBLOCK) 2667 kif->kf_flags |= KF_FLAG_NONBLOCK; 2668 if (fp->f_flag & O_DIRECT) 2669 kif->kf_flags |= KF_FLAG_DIRECT; 2670 if (fp->f_flag & FHASLOCK) 2671 kif->kf_flags |= KF_FLAG_HASLOCK; 2672 kif->kf_offset = fp->f_offset; 2673 if (vp != NULL) { 2674 vref(vp); 2675 switch (vp->v_type) { 2676 case VNON: 2677 kif->kf_vnode_type = KF_VTYPE_VNON; 2678 break; 2679 case VREG: 2680 kif->kf_vnode_type = KF_VTYPE_VREG; 2681 break; 2682 case VDIR: 2683 kif->kf_vnode_type = KF_VTYPE_VDIR; 2684 break; 2685 case VBLK: 2686 kif->kf_vnode_type = KF_VTYPE_VBLK; 2687 break; 2688 case VCHR: 2689 kif->kf_vnode_type = KF_VTYPE_VCHR; 2690 break; 2691 case VLNK: 2692 kif->kf_vnode_type = KF_VTYPE_VLNK; 2693 break; 2694 case VSOCK: 2695 kif->kf_vnode_type = KF_VTYPE_VSOCK; 2696 break; 2697 case VFIFO: 2698 kif->kf_vnode_type = KF_VTYPE_VFIFO; 2699 break; 2700 case VBAD: 2701 kif->kf_vnode_type = KF_VTYPE_VBAD; 2702 break; 2703 default: 2704 
kif->kf_vnode_type = KF_VTYPE_UNKNOWN; 2705 break; 2706 } 2707 /* 2708 * It is OK to drop the filedesc lock here as we will 2709 * re-validate and re-evaluate its properties when 2710 * the loop continues. 2711 */ 2712 freepath = NULL; 2713 fullpath = "-"; 2714 FILEDESC_SUNLOCK(fdp); 2715 vn_fullpath(curthread, vp, &fullpath, &freepath); 2716 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2717 vrele(vp); 2718 VFS_UNLOCK_GIANT(vfslocked); 2719 strlcpy(kif->kf_path, fullpath, 2720 sizeof(kif->kf_path)); 2721 if (freepath != NULL) 2722 free(freepath, M_TEMP); 2723 FILEDESC_SLOCK(fdp); 2724 } 2725 if (so != NULL) { 2726 struct sockaddr *sa; 2727 2728 if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa) 2729 == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) { 2730 bcopy(sa, &kif->kf_sa_local, sa->sa_len); 2731 free(sa, M_SONAME); 2732 } 2733 if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa) 2734 == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) { 2735 bcopy(sa, &kif->kf_sa_peer, sa->sa_len); 2736 free(sa, M_SONAME); 2737 } 2738 kif->kf_sock_domain = 2739 so->so_proto->pr_domain->dom_family; 2740 kif->kf_sock_type = so->so_type; 2741 kif->kf_sock_protocol = so->so_proto->pr_protocol; 2742 } 2743 if (tp != NULL) { 2744 strlcpy(kif->kf_path, tty_devname(tp), 2745 sizeof(kif->kf_path)); 2746 } 2747 error = SYSCTL_OUT(req, kif, sizeof(*kif)); 2748 if (error) 2749 break; 2750 } 2751 FILEDESC_SUNLOCK(fdp); 2752 fddrop(fdp); 2753 free(kif, M_TEMP); 2754 return (error); 2755 } 2756 2757 static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc, CTLFLAG_RD, 2758 sysctl_kern_proc_ofiledesc, "Process ofiledesc entries"); 2759 #endif /* COMPAT_FREEBSD7 */ 2760 2761 #ifdef KINFO_FILE_SIZE 2762 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); 2763 #endif 2764 2765 static int 2766 export_vnode_for_sysctl(struct vnode *vp, int type, 2767 struct kinfo_file *kif, struct filedesc *fdp, struct sysctl_req *req) 2768 { 2769 int error; 2770 char *fullpath, *freepath; 2771 int vfslocked; 2772 2773 bzero(kif, sizeof(*kif)); 2774 2775 vref(vp); 2776 kif->kf_fd = type; 2777 kif->kf_type = KF_TYPE_VNODE; 2778 /* This function only handles directories. */ 2779 KASSERT(vp->v_type == VDIR, ("export_vnode_for_sysctl: vnode not directory")); 2780 kif->kf_vnode_type = KF_VTYPE_VDIR; 2781 2782 /* 2783 * This is not a true file descriptor, so we set a bogus refcount 2784 * and offset to indicate these fields should be ignored. 2785 */ 2786 kif->kf_ref_count = -1; 2787 kif->kf_offset = -1; 2788 2789 freepath = NULL; 2790 fullpath = "-"; 2791 FILEDESC_SUNLOCK(fdp); 2792 vn_fullpath(curthread, vp, &fullpath, &freepath); 2793 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2794 vrele(vp); 2795 VFS_UNLOCK_GIANT(vfslocked); 2796 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); 2797 if (freepath != NULL) 2798 free(freepath, M_TEMP); 2799 /* Pack record size down */ 2800 kif->kf_structsize = offsetof(struct kinfo_file, kf_path) + 2801 strlen(kif->kf_path) + 1; 2802 kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t)); 2803 error = SYSCTL_OUT(req, kif, kif->kf_structsize); 2804 FILEDESC_SLOCK(fdp); 2805 return (error); 2806 } 2807 2808 /* 2809 * Get per-process file descriptors for use by procstat(1), et al.
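 *
 * A userland consumer would fetch these records with something along the
 * following lines (a sketch only; procstat(1) is the usual front end):
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid };
 *	size_t len = 0;
 *
 *	sysctl(mib, 4, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctl(mib, 4, buf, &len, NULL, 0);
 *
 * and then walk the buffer, advancing by each record's kf_structsize,
 * since the records are packed to variable length.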
2810 */ 2811 static int 2812 sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS) 2813 { 2814 char *fullpath, *freepath; 2815 struct kinfo_file *kif; 2816 struct filedesc *fdp; 2817 int error, i, *name; 2818 struct socket *so; 2819 struct vnode *vp; 2820 struct file *fp; 2821 struct proc *p; 2822 struct tty *tp; 2823 int vfslocked; 2824 2825 name = (int *)arg1; 2826 if ((p = pfind((pid_t)name[0])) == NULL) 2827 return (ESRCH); 2828 if ((error = p_candebug(curthread, p))) { 2829 PROC_UNLOCK(p); 2830 return (error); 2831 } 2832 fdp = fdhold(p); 2833 PROC_UNLOCK(p); 2834 if (fdp == NULL) 2835 return (ENOENT); 2836 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK); 2837 FILEDESC_SLOCK(fdp); 2838 if (fdp->fd_cdir != NULL) 2839 export_vnode_for_sysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif, 2840 fdp, req); 2841 if (fdp->fd_rdir != NULL) 2842 export_vnode_for_sysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif, 2843 fdp, req); 2844 if (fdp->fd_jdir != NULL) 2845 export_vnode_for_sysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif, 2846 fdp, req); 2847 for (i = 0; i < fdp->fd_nfiles; i++) { 2848 if ((fp = fdp->fd_ofiles[i]) == NULL) 2849 continue; 2850 bzero(kif, sizeof(*kif)); 2851 vp = NULL; 2852 so = NULL; 2853 tp = NULL; 2854 kif->kf_fd = i; 2855 switch (fp->f_type) { 2856 case DTYPE_VNODE: 2857 kif->kf_type = KF_TYPE_VNODE; 2858 vp = fp->f_vnode; 2859 break; 2860 2861 case DTYPE_SOCKET: 2862 kif->kf_type = KF_TYPE_SOCKET; 2863 so = fp->f_data; 2864 break; 2865 2866 case DTYPE_PIPE: 2867 kif->kf_type = KF_TYPE_PIPE; 2868 break; 2869 2870 case DTYPE_FIFO: 2871 kif->kf_type = KF_TYPE_FIFO; 2872 vp = fp->f_vnode; 2873 vref(vp); 2874 break; 2875 2876 case DTYPE_KQUEUE: 2877 kif->kf_type = KF_TYPE_KQUEUE; 2878 break; 2879 2880 case DTYPE_CRYPTO: 2881 kif->kf_type = KF_TYPE_CRYPTO; 2882 break; 2883 2884 case DTYPE_MQUEUE: 2885 kif->kf_type = KF_TYPE_MQUEUE; 2886 break; 2887 2888 case DTYPE_SHM: 2889 kif->kf_type = KF_TYPE_SHM; 2890 break; 2891 2892 case DTYPE_SEM: 2893 kif->kf_type = KF_TYPE_SEM; 2894 break; 2895 2896 case DTYPE_PTS: 2897 kif->kf_type = KF_TYPE_PTS; 2898 tp = fp->f_data; 2899 break; 2900 2901 default: 2902 kif->kf_type = KF_TYPE_UNKNOWN; 2903 break; 2904 } 2905 kif->kf_ref_count = fp->f_count; 2906 if (fp->f_flag & FREAD) 2907 kif->kf_flags |= KF_FLAG_READ; 2908 if (fp->f_flag & FWRITE) 2909 kif->kf_flags |= KF_FLAG_WRITE; 2910 if (fp->f_flag & FAPPEND) 2911 kif->kf_flags |= KF_FLAG_APPEND; 2912 if (fp->f_flag & FASYNC) 2913 kif->kf_flags |= KF_FLAG_ASYNC; 2914 if (fp->f_flag & FFSYNC) 2915 kif->kf_flags |= KF_FLAG_FSYNC; 2916 if (fp->f_flag & FNONBLOCK) 2917 kif->kf_flags |= KF_FLAG_NONBLOCK; 2918 if (fp->f_flag & O_DIRECT) 2919 kif->kf_flags |= KF_FLAG_DIRECT; 2920 if (fp->f_flag & FHASLOCK) 2921 kif->kf_flags |= KF_FLAG_HASLOCK; 2922 kif->kf_offset = fp->f_offset; 2923 if (vp != NULL) { 2924 vref(vp); 2925 switch (vp->v_type) { 2926 case VNON: 2927 kif->kf_vnode_type = KF_VTYPE_VNON; 2928 break; 2929 case VREG: 2930 kif->kf_vnode_type = KF_VTYPE_VREG; 2931 break; 2932 case VDIR: 2933 kif->kf_vnode_type = KF_VTYPE_VDIR; 2934 break; 2935 case VBLK: 2936 kif->kf_vnode_type = KF_VTYPE_VBLK; 2937 break; 2938 case VCHR: 2939 kif->kf_vnode_type = KF_VTYPE_VCHR; 2940 break; 2941 case VLNK: 2942 kif->kf_vnode_type = KF_VTYPE_VLNK; 2943 break; 2944 case VSOCK: 2945 kif->kf_vnode_type = KF_VTYPE_VSOCK; 2946 break; 2947 case VFIFO: 2948 kif->kf_vnode_type = KF_VTYPE_VFIFO; 2949 break; 2950 case VBAD: 2951 kif->kf_vnode_type = KF_VTYPE_VBAD; 2952 break; 2953 default: 2954 kif->kf_vnode_type = KF_VTYPE_UNKNOWN; 2955 break; 2956 
} 2957 /* 2958 * It is OK to drop the filedesc lock here as we will 2959 * re-validate and re-evaluate its properties when 2960 * the loop continues. 2961 */ 2962 freepath = NULL; 2963 fullpath = "-"; 2964 FILEDESC_SUNLOCK(fdp); 2965 vn_fullpath(curthread, vp, &fullpath, &freepath); 2966 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2967 vrele(vp); 2968 VFS_UNLOCK_GIANT(vfslocked); 2969 strlcpy(kif->kf_path, fullpath, 2970 sizeof(kif->kf_path)); 2971 if (freepath != NULL) 2972 free(freepath, M_TEMP); 2973 FILEDESC_SLOCK(fdp); 2974 } 2975 if (so != NULL) { 2976 struct sockaddr *sa; 2977 2978 if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa) 2979 == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) { 2980 bcopy(sa, &kif->kf_sa_local, sa->sa_len); 2981 free(sa, M_SONAME); 2982 } 2983 if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa) 2984 == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) { 2985 bcopy(sa, &kif->kf_sa_peer, sa->sa_len); 2986 free(sa, M_SONAME); 2987 } 2988 kif->kf_sock_domain = 2989 so->so_proto->pr_domain->dom_family; 2990 kif->kf_sock_type = so->so_type; 2991 kif->kf_sock_protocol = so->so_proto->pr_protocol; 2992 } 2993 if (tp != NULL) { 2994 strlcpy(kif->kf_path, tty_devname(tp), 2995 sizeof(kif->kf_path)); 2996 } 2997 /* Pack record size down */ 2998 kif->kf_structsize = offsetof(struct kinfo_file, kf_path) + 2999 strlen(kif->kf_path) + 1; 3000 kif->kf_structsize = roundup(kif->kf_structsize, 3001 sizeof(uint64_t)); 3002 error = SYSCTL_OUT(req, kif, kif->kf_structsize); 3003 if (error) 3004 break; 3005 } 3006 FILEDESC_SUNLOCK(fdp); 3007 fddrop(fdp); 3008 free(kif, M_TEMP); 3009 return (error); 3010 } 3011 3012 static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc, CTLFLAG_RD, 3013 sysctl_kern_proc_filedesc, "Process filedesc entries"); 3014 3015 #ifdef DDB 3016 /* 3017 * For the purposes of debugging, generate a human-readable string for the 3018 * file type. 3019 */ 3020 static const char * 3021 file_type_to_name(short type) 3022 { 3023 3024 switch (type) { 3025 case 0: 3026 return ("zero"); 3027 case DTYPE_VNODE: 3028 return ("vnod"); 3029 case DTYPE_SOCKET: 3030 return ("sock"); 3031 case DTYPE_PIPE: 3032 return ("pipe"); 3033 case DTYPE_FIFO: 3034 return ("fifo"); 3035 case DTYPE_KQUEUE: 3036 return ("kque"); 3037 case DTYPE_CRYPTO: 3038 return ("crpt"); 3039 case DTYPE_MQUEUE: 3040 return ("mque"); 3041 case DTYPE_SHM: 3042 return ("shm"); 3043 case DTYPE_SEM: 3044 return ("ksem"); 3045 default: 3046 return ("unkn"); 3047 } 3048 } 3049 3050 /* 3051 * For the purposes of debugging, identify a process (if any, perhaps one of 3052 * many) that references the passed file in its file descriptor array. Return 3053 * NULL if none.
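 *
 * This helper backs the "show file" and "show files" DDB commands defined
 * below; from the debugger prompt it is exercised with, e.g.:
 *
 *	db> show file <addr>
 *
 * where <addr> is the address of the struct file being examined.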
3054 */ 3055 static struct proc * 3056 file_to_first_proc(struct file *fp) 3057 { 3058 struct filedesc *fdp; 3059 struct proc *p; 3060 int n; 3061 3062 FOREACH_PROC_IN_SYSTEM(p) { 3063 if (p->p_state == PRS_NEW) 3064 continue; 3065 fdp = p->p_fd; 3066 if (fdp == NULL) 3067 continue; 3068 for (n = 0; n < fdp->fd_nfiles; n++) { 3069 if (fp == fdp->fd_ofiles[n]) 3070 return (p); 3071 } 3072 } 3073 return (NULL); 3074 } 3075 3076 static void 3077 db_print_file(struct file *fp, int header) 3078 { 3079 struct proc *p; 3080 3081 if (header) 3082 db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n", 3083 "File", "Type", "Data", "Flag", "GCFl", "Count", 3084 "MCount", "Vnode", "FPID", "FCmd"); 3085 p = file_to_first_proc(fp); 3086 db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp, 3087 file_type_to_name(fp->f_type), fp->f_data, fp->f_flag, 3088 0, fp->f_count, 0, fp->f_vnode, 3089 p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-"); 3090 } 3091 3092 DB_SHOW_COMMAND(file, db_show_file) 3093 { 3094 struct file *fp; 3095 3096 if (!have_addr) { 3097 db_printf("usage: show file <addr>\n"); 3098 return; 3099 } 3100 fp = (struct file *)addr; 3101 db_print_file(fp, 1); 3102 } 3103 3104 DB_SHOW_COMMAND(files, db_show_files) 3105 { 3106 struct filedesc *fdp; 3107 struct file *fp; 3108 struct proc *p; 3109 int header; 3110 int n; 3111 3112 header = 1; 3113 FOREACH_PROC_IN_SYSTEM(p) { 3114 if (p->p_state == PRS_NEW) 3115 continue; 3116 if ((fdp = p->p_fd) == NULL) 3117 continue; 3118 for (n = 0; n < fdp->fd_nfiles; ++n) { 3119 if ((fp = fdp->fd_ofiles[n]) == NULL) 3120 continue; 3121 db_print_file(fp, header); 3122 header = 0; 3123 } 3124 } 3125 } 3126 #endif 3127 3128 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 3129 &maxfilesperproc, 0, "Maximum files allowed open per process"); 3130 3131 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 3132 &maxfiles, 0, "Maximum number of files"); 3133 3134 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 3135 __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files"); 3136 3137 /* ARGSUSED*/ 3138 static void 3139 filelistinit(void *dummy) 3140 { 3141 3142 file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL, 3143 NULL, NULL, UMA_ALIGN_PTR, 0); 3144 mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF); 3145 mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF); 3146 } 3147 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL); 3148 3149 /*-------------------------------------------------------------------*/ 3150 3151 static int 3152 badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) 3153 { 3154 3155 return (EBADF); 3156 } 3157 3158 static int 3159 badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td) 3160 { 3161 3162 return (EINVAL); 3163 } 3164 3165 static int 3166 badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td) 3167 { 3168 3169 return (EBADF); 3170 } 3171 3172 static int 3173 badfo_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td) 3174 { 3175 3176 return (0); 3177 } 3178 3179 static int 3180 badfo_kqfilter(struct file *fp, struct knote *kn) 3181 { 3182 3183 return (EBADF); 3184 } 3185 3186 static int 3187 badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) 3188 { 3189 3190 return (EBADF); 3191 } 3192 3193 static int 3194 badfo_close(struct file *fp, struct thread *td) 3195 { 3196 3197 
return (EBADF); 3198 } 3199 3200 struct fileops badfileops = { 3201 .fo_read = badfo_readwrite, 3202 .fo_write = badfo_readwrite, 3203 .fo_truncate = badfo_truncate, 3204 .fo_ioctl = badfo_ioctl, 3205 .fo_poll = badfo_poll, 3206 .fo_kqfilter = badfo_kqfilter, 3207 .fo_stat = badfo_stat, 3208 .fo_close = badfo_close, 3209 }; 3210 3211 3212 /*-------------------------------------------------------------------*/ 3213 3214 /* 3215 * File Descriptor pseudo-device driver (/dev/fd/). 3216 * 3217 * Opening minor device N dup()s the file (if any) connected to file 3218 * descriptor N belonging to the calling process. Note that this driver 3219 * consists of only the ``open()'' routine, because all subsequent 3220 * references to this file will be direct to the other driver. 3221 * 3222 * XXX: we could give this one a cloning event handler if necessary. 3223 */ 3224 3225 /* ARGSUSED */ 3226 static int 3227 fdopen(struct cdev *dev, int mode, int type, struct thread *td) 3228 { 3229 3230 /* 3231 * XXX Kludge: set curthread->td_dupfd to contain the value of the 3232 * file descriptor being sought for duplication. The error 3233 * return ensures that the vnode for this device will be released 3234 * by vn_open. Open will detect this special error and take the 3235 * actions in dupfdopen() above. Other callers of vn_open or VOP_OPEN 3236 * will simply report the error. 3237 */ 3238 td->td_dupfd = dev2unit(dev); 3239 return (ENODEV); 3240 } 3241 3242 static struct cdevsw fildesc_cdevsw = { 3243 .d_version = D_VERSION, 3244 .d_open = fdopen, 3245 .d_name = "FD", 3246 }; 3247 3248 static void 3249 fildesc_drvinit(void *unused) 3250 { 3251 struct cdev *dev; 3252 3253 dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0"); 3254 make_dev_alias(dev, "stdin"); 3255 dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1"); 3256 make_dev_alias(dev, "stdout"); 3257 dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2"); 3258 make_dev_alias(dev, "stderr"); 3259 } 3260 3261 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL); 3262
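/*
 * To illustrate how the pieces above fit together (a descriptive sketch,
 * not additional code): when a process opens "/dev/fd/N", the open reaches
 * fdopen(), which records N in td->td_dupfd and fails with ENODEV.  The
 * open code recognizes that error and calls dupfdopen(), which duplicates
 * descriptor N into the newly allocated slot, so for the caller the net
 * effect is equivalent to dup(N).
 */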