1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_compat.h" 41 #include "opt_ddb.h" 42 #include "opt_ktrace.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 47 #include <sys/conf.h> 48 #include <sys/domain.h> 49 #include <sys/fcntl.h> 50 #include <sys/file.h> 51 #include <sys/filedesc.h> 52 #include <sys/filio.h> 53 #include <sys/jail.h> 54 #include <sys/kernel.h> 55 #include <sys/limits.h> 56 #include <sys/lock.h> 57 #include <sys/malloc.h> 58 #include <sys/mount.h> 59 #include <sys/mqueue.h> 60 #include <sys/mutex.h> 61 #include <sys/namei.h> 62 #include <sys/priv.h> 63 #include <sys/proc.h> 64 #include <sys/protosw.h> 65 #include <sys/resourcevar.h> 66 #include <sys/signalvar.h> 67 #include <sys/socketvar.h> 68 #include <sys/stat.h> 69 #include <sys/sx.h> 70 #include <sys/syscallsubr.h> 71 #include <sys/sysctl.h> 72 #include <sys/sysproto.h> 73 #include <sys/tty.h> 74 #include <sys/unistd.h> 75 #include <sys/user.h> 76 #include <sys/vnode.h> 77 #ifdef KTRACE 78 #include <sys/ktrace.h> 79 #endif 80 81 #include <security/audit/audit.h> 82 83 #include <vm/uma.h> 84 85 #include <ddb/ddb.h> 86 87 static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table"); 88 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader", 89 "file desc to leader structures"); 90 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 91 92 static uma_zone_t file_zone; 93 94 95 /* Flags for do_dup() */ 96 #define DUP_FIXED 0x1 /* Force fixed allocation */ 97 #define DUP_FCNTL 0x2 /* fcntl()-style errors */ 98 99 static int do_dup(struct thread *td, int flags, int old, int new, 100 register_t *retval); 101 static int fd_first_free(struct filedesc *, int, int); 102 static int fd_last_used(struct filedesc *, int, int); 103 static void fdgrowtable(struct filedesc *, int); 104 static void fdunused(struct filedesc *fdp, int fd); 105 static void fdused(struct filedesc *fdp, int fd); 106 107 /* 108 * A process is initially started out with NDFILE descriptors stored within 109 * this structure, selected to be enough for typical applications based on 110 * the historical limit of 20 open files (and the usage of descriptors by 111 * shells). If these descriptors are exhausted, a larger descriptor table 112 * may be allocated, up to a process' resource limit; the internal arrays 113 * are then unused. 114 */ 115 #define NDFILE 20 116 #define NDSLOTSIZE sizeof(NDSLOTTYPE) 117 #define NDENTRIES (NDSLOTSIZE * __CHAR_BIT) 118 #define NDSLOT(x) ((x) / NDENTRIES) 119 #define NDBIT(x) ((NDSLOTTYPE)1 << ((x) % NDENTRIES)) 120 #define NDSLOTS(x) (((x) + NDENTRIES - 1) / NDENTRIES) 121 122 /* 123 * Storage required per open file descriptor. 124 */ 125 #define OFILESIZE (sizeof(struct file *) + sizeof(char)) 126 127 /* 128 * Basic allocation of descriptors: 129 * one of the above, plus arrays for NDFILE descriptors. 130 */ 131 struct filedesc0 { 132 struct filedesc fd_fd; 133 /* 134 * These arrays are used when the number of open files is 135 * <= NDFILE, and are then pointed to by the pointers above. 136 */ 137 struct file *fd_dfiles[NDFILE]; 138 char fd_dfileflags[NDFILE]; 139 NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)]; 140 }; 141 142 /* 143 * Descriptor management. 
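 *
 * As a worked example of the descriptor bitmap macros above (assuming
 * NDSLOTTYPE is a 64-bit u_long, so NDENTRIES == 64): descriptor 70 lives
 * in map word NDSLOT(70) == 1 and is tested with NDBIT(70) == 1UL << 6,
 * while the initial NDFILE (20) descriptors need only NDSLOTS(20) == 1
 * word, which is why struct filedesc0 embeds a single-element fd_dmap.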
144 */ 145 volatile int openfiles; /* actual number of open files */ 146 struct mtx sigio_lock; /* mtx to protect pointers to sigio */ 147 void (*mq_fdclose)(struct thread *td, int fd, struct file *fp); 148 149 /* A mutex to protect the association between a proc and filedesc. */ 150 static struct mtx fdesc_mtx; 151 152 /* 153 * Find the first zero bit in the given bitmap, starting at low and not 154 * exceeding size - 1. 155 */ 156 static int 157 fd_first_free(struct filedesc *fdp, int low, int size) 158 { 159 NDSLOTTYPE *map = fdp->fd_map; 160 NDSLOTTYPE mask; 161 int off, maxoff; 162 163 if (low >= size) 164 return (low); 165 166 off = NDSLOT(low); 167 if (low % NDENTRIES) { 168 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES))); 169 if ((mask &= ~map[off]) != 0UL) 170 return (off * NDENTRIES + ffsl(mask) - 1); 171 ++off; 172 } 173 for (maxoff = NDSLOTS(size); off < maxoff; ++off) 174 if (map[off] != ~0UL) 175 return (off * NDENTRIES + ffsl(~map[off]) - 1); 176 return (size); 177 } 178 179 /* 180 * Find the highest non-zero bit in the given bitmap, starting at low and 181 * not exceeding size - 1. 182 */ 183 static int 184 fd_last_used(struct filedesc *fdp, int low, int size) 185 { 186 NDSLOTTYPE *map = fdp->fd_map; 187 NDSLOTTYPE mask; 188 int off, minoff; 189 190 if (low >= size) 191 return (-1); 192 193 off = NDSLOT(size); 194 if (size % NDENTRIES) { 195 mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES)); 196 if ((mask &= map[off]) != 0) 197 return (off * NDENTRIES + flsl(mask) - 1); 198 --off; 199 } 200 for (minoff = NDSLOT(low); off >= minoff; --off) 201 if (map[off] != 0) 202 return (off * NDENTRIES + flsl(map[off]) - 1); 203 return (low - 1); 204 } 205 206 static int 207 fdisused(struct filedesc *fdp, int fd) 208 { 209 KASSERT(fd >= 0 && fd < fdp->fd_nfiles, 210 ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles)); 211 return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0); 212 } 213 214 /* 215 * Mark a file descriptor as used. 216 */ 217 static void 218 fdused(struct filedesc *fdp, int fd) 219 { 220 221 FILEDESC_XLOCK_ASSERT(fdp); 222 KASSERT(!fdisused(fdp, fd), 223 ("fd already used")); 224 225 fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd); 226 if (fd > fdp->fd_lastfile) 227 fdp->fd_lastfile = fd; 228 if (fd == fdp->fd_freefile) 229 fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles); 230 } 231 232 /* 233 * Mark a file descriptor as unused. 234 */ 235 static void 236 fdunused(struct filedesc *fdp, int fd) 237 { 238 239 FILEDESC_XLOCK_ASSERT(fdp); 240 KASSERT(fdisused(fdp, fd), 241 ("fd is already unused")); 242 KASSERT(fdp->fd_ofiles[fd] == NULL, 243 ("fd is still in use")); 244 245 fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd); 246 if (fd < fdp->fd_freefile) 247 fdp->fd_freefile = fd; 248 if (fd == fdp->fd_lastfile) 249 fdp->fd_lastfile = fd_last_used(fdp, 0, fd); 250 } 251 252 /* 253 * System calls on descriptors. 254 */ 255 #ifndef _SYS_SYSPROTO_H_ 256 struct getdtablesize_args { 257 int dummy; 258 }; 259 #endif 260 /* ARGSUSED */ 261 int 262 getdtablesize(struct thread *td, struct getdtablesize_args *uap) 263 { 264 struct proc *p = td->td_proc; 265 266 PROC_LOCK(p); 267 td->td_retval[0] = 268 min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 269 PROC_UNLOCK(p); 270 return (0); 271 } 272 273 /* 274 * Duplicate a file descriptor to a particular value. 275 * 276 * Note: keep in mind that a potential race condition exists when closing 277 * descriptors from a shared descriptor table (via rfork). 
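 *
 * For example, two processes sharing a table (e.g. rfork(2) without
 * RFFDG) can race dup2() in one against close() of the source descriptor
 * in the other; do_dup() below re-checks the source slot after any point
 * where it may have dropped the filedesc lock and returns EBADF if it
 * lost that race.  A typical (illustrative, userland) use of dup2() is
 * redirecting a standard descriptor:
 *
 *	int fd = open("/tmp/log", O_WRONLY | O_CREAT | O_APPEND, 0644);
 *	if (fd >= 0 && fd != STDOUT_FILENO) {
 *		(void)dup2(fd, STDOUT_FILENO);
 *		(void)close(fd);
 *	}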
278 */ 279 #ifndef _SYS_SYSPROTO_H_ 280 struct dup2_args { 281 u_int from; 282 u_int to; 283 }; 284 #endif 285 /* ARGSUSED */ 286 int 287 dup2(struct thread *td, struct dup2_args *uap) 288 { 289 290 return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to, 291 td->td_retval)); 292 } 293 294 /* 295 * Duplicate a file descriptor. 296 */ 297 #ifndef _SYS_SYSPROTO_H_ 298 struct dup_args { 299 u_int fd; 300 }; 301 #endif 302 /* ARGSUSED */ 303 int 304 dup(struct thread *td, struct dup_args *uap) 305 { 306 307 return (do_dup(td, 0, (int)uap->fd, 0, td->td_retval)); 308 } 309 310 /* 311 * The file control system call. 312 */ 313 #ifndef _SYS_SYSPROTO_H_ 314 struct fcntl_args { 315 int fd; 316 int cmd; 317 long arg; 318 }; 319 #endif 320 /* ARGSUSED */ 321 int 322 fcntl(struct thread *td, struct fcntl_args *uap) 323 { 324 struct flock fl; 325 struct oflock ofl; 326 intptr_t arg; 327 int error; 328 int cmd; 329 330 error = 0; 331 cmd = uap->cmd; 332 switch (uap->cmd) { 333 case F_OGETLK: 334 case F_OSETLK: 335 case F_OSETLKW: 336 /* 337 * Convert old flock structure to new. 338 */ 339 error = copyin((void *)(intptr_t)uap->arg, &ofl, sizeof(ofl)); 340 fl.l_start = ofl.l_start; 341 fl.l_len = ofl.l_len; 342 fl.l_pid = ofl.l_pid; 343 fl.l_type = ofl.l_type; 344 fl.l_whence = ofl.l_whence; 345 fl.l_sysid = 0; 346 347 switch (uap->cmd) { 348 case F_OGETLK: 349 cmd = F_GETLK; 350 break; 351 case F_OSETLK: 352 cmd = F_SETLK; 353 break; 354 case F_OSETLKW: 355 cmd = F_SETLKW; 356 break; 357 } 358 arg = (intptr_t)&fl; 359 break; 360 case F_GETLK: 361 case F_SETLK: 362 case F_SETLKW: 363 case F_SETLK_REMOTE: 364 error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl)); 365 arg = (intptr_t)&fl; 366 break; 367 default: 368 arg = uap->arg; 369 break; 370 } 371 if (error) 372 return (error); 373 error = kern_fcntl(td, uap->fd, cmd, arg); 374 if (error) 375 return (error); 376 if (uap->cmd == F_OGETLK) { 377 ofl.l_start = fl.l_start; 378 ofl.l_len = fl.l_len; 379 ofl.l_pid = fl.l_pid; 380 ofl.l_type = fl.l_type; 381 ofl.l_whence = fl.l_whence; 382 error = copyout(&ofl, (void *)(intptr_t)uap->arg, sizeof(ofl)); 383 } else if (uap->cmd == F_GETLK) { 384 error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl)); 385 } 386 return (error); 387 } 388 389 static inline struct file * 390 fdtofp(int fd, struct filedesc *fdp) 391 { 392 struct file *fp; 393 394 FILEDESC_LOCK_ASSERT(fdp); 395 if ((unsigned)fd >= fdp->fd_nfiles || 396 (fp = fdp->fd_ofiles[fd]) == NULL) 397 return (NULL); 398 return (fp); 399 } 400 401 int 402 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg) 403 { 404 struct filedesc *fdp; 405 struct flock *flp; 406 struct file *fp; 407 struct proc *p; 408 char *pop; 409 struct vnode *vp; 410 int error, flg, tmp; 411 int vfslocked; 412 413 vfslocked = 0; 414 error = 0; 415 flg = F_POSIX; 416 p = td->td_proc; 417 fdp = p->p_fd; 418 419 switch (cmd) { 420 case F_DUPFD: 421 tmp = arg; 422 error = do_dup(td, DUP_FCNTL, fd, tmp, td->td_retval); 423 break; 424 425 case F_DUP2FD: 426 tmp = arg; 427 error = do_dup(td, DUP_FIXED, fd, tmp, td->td_retval); 428 break; 429 430 case F_GETFD: 431 FILEDESC_SLOCK(fdp); 432 if ((fp = fdtofp(fd, fdp)) == NULL) { 433 FILEDESC_SUNLOCK(fdp); 434 error = EBADF; 435 break; 436 } 437 pop = &fdp->fd_ofileflags[fd]; 438 td->td_retval[0] = (*pop & UF_EXCLOSE) ? 
FD_CLOEXEC : 0; 439 FILEDESC_SUNLOCK(fdp); 440 break; 441 442 case F_SETFD: 443 FILEDESC_XLOCK(fdp); 444 if ((fp = fdtofp(fd, fdp)) == NULL) { 445 FILEDESC_XUNLOCK(fdp); 446 error = EBADF; 447 break; 448 } 449 pop = &fdp->fd_ofileflags[fd]; 450 *pop = (*pop &~ UF_EXCLOSE) | 451 (arg & FD_CLOEXEC ? UF_EXCLOSE : 0); 452 FILEDESC_XUNLOCK(fdp); 453 break; 454 455 case F_GETFL: 456 FILEDESC_SLOCK(fdp); 457 if ((fp = fdtofp(fd, fdp)) == NULL) { 458 FILEDESC_SUNLOCK(fdp); 459 error = EBADF; 460 break; 461 } 462 td->td_retval[0] = OFLAGS(fp->f_flag); 463 FILEDESC_SUNLOCK(fdp); 464 break; 465 466 case F_SETFL: 467 FILEDESC_SLOCK(fdp); 468 if ((fp = fdtofp(fd, fdp)) == NULL) { 469 FILEDESC_SUNLOCK(fdp); 470 error = EBADF; 471 break; 472 } 473 fhold(fp); 474 FILEDESC_SUNLOCK(fdp); 475 do { 476 tmp = flg = fp->f_flag; 477 tmp &= ~FCNTLFLAGS; 478 tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS; 479 } while(atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0); 480 tmp = fp->f_flag & FNONBLOCK; 481 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 482 if (error) { 483 fdrop(fp, td); 484 break; 485 } 486 tmp = fp->f_flag & FASYNC; 487 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td); 488 if (error == 0) { 489 fdrop(fp, td); 490 break; 491 } 492 atomic_clear_int(&fp->f_flag, FNONBLOCK); 493 tmp = 0; 494 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 495 fdrop(fp, td); 496 break; 497 498 case F_GETOWN: 499 FILEDESC_SLOCK(fdp); 500 if ((fp = fdtofp(fd, fdp)) == NULL) { 501 FILEDESC_SUNLOCK(fdp); 502 error = EBADF; 503 break; 504 } 505 fhold(fp); 506 FILEDESC_SUNLOCK(fdp); 507 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td); 508 if (error == 0) 509 td->td_retval[0] = tmp; 510 fdrop(fp, td); 511 break; 512 513 case F_SETOWN: 514 FILEDESC_SLOCK(fdp); 515 if ((fp = fdtofp(fd, fdp)) == NULL) { 516 FILEDESC_SUNLOCK(fdp); 517 error = EBADF; 518 break; 519 } 520 fhold(fp); 521 FILEDESC_SUNLOCK(fdp); 522 tmp = arg; 523 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td); 524 fdrop(fp, td); 525 break; 526 527 case F_SETLK_REMOTE: 528 error = priv_check(td, PRIV_NFS_LOCKD); 529 if (error) 530 return (error); 531 flg = F_REMOTE; 532 goto do_setlk; 533 534 case F_SETLKW: 535 flg |= F_WAIT; 536 /* FALLTHROUGH F_SETLK */ 537 538 case F_SETLK: 539 do_setlk: 540 FILEDESC_SLOCK(fdp); 541 if ((fp = fdtofp(fd, fdp)) == NULL) { 542 FILEDESC_SUNLOCK(fdp); 543 error = EBADF; 544 break; 545 } 546 if (fp->f_type != DTYPE_VNODE) { 547 FILEDESC_SUNLOCK(fdp); 548 error = EBADF; 549 break; 550 } 551 flp = (struct flock *)arg; 552 if (flp->l_whence == SEEK_CUR) { 553 if (fp->f_offset < 0 || 554 (flp->l_start > 0 && 555 fp->f_offset > OFF_MAX - flp->l_start)) { 556 FILEDESC_SUNLOCK(fdp); 557 error = EOVERFLOW; 558 break; 559 } 560 flp->l_start += fp->f_offset; 561 } 562 563 /* 564 * VOP_ADVLOCK() may block. 
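		 * Because of that, the code below takes its own reference
		 * with fhold() and drops the filedesc lock before issuing
		 * the lock request, so a blocking request cannot stall
		 * unrelated descriptor operations; after the call it
		 * re-checks that the descriptor still names the same file
		 * and, if not, undoes the lock it just obtained (see
		 * "Check for race with close" below).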
565 */ 566 fhold(fp); 567 FILEDESC_SUNLOCK(fdp); 568 vp = fp->f_vnode; 569 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 570 switch (flp->l_type) { 571 case F_RDLCK: 572 if ((fp->f_flag & FREAD) == 0) { 573 error = EBADF; 574 break; 575 } 576 PROC_LOCK(p->p_leader); 577 p->p_leader->p_flag |= P_ADVLOCK; 578 PROC_UNLOCK(p->p_leader); 579 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 580 flp, flg); 581 break; 582 case F_WRLCK: 583 if ((fp->f_flag & FWRITE) == 0) { 584 error = EBADF; 585 break; 586 } 587 PROC_LOCK(p->p_leader); 588 p->p_leader->p_flag |= P_ADVLOCK; 589 PROC_UNLOCK(p->p_leader); 590 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 591 flp, flg); 592 break; 593 case F_UNLCK: 594 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 595 flp, flg); 596 break; 597 case F_UNLCKSYS: 598 /* 599 * Temporary api for testing remote lock 600 * infrastructure. 601 */ 602 if (flg != F_REMOTE) { 603 error = EINVAL; 604 break; 605 } 606 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 607 F_UNLCKSYS, flp, flg); 608 break; 609 default: 610 error = EINVAL; 611 break; 612 } 613 VFS_UNLOCK_GIANT(vfslocked); 614 vfslocked = 0; 615 /* Check for race with close */ 616 FILEDESC_SLOCK(fdp); 617 if ((unsigned) fd >= fdp->fd_nfiles || 618 fp != fdp->fd_ofiles[fd]) { 619 FILEDESC_SUNLOCK(fdp); 620 flp->l_whence = SEEK_SET; 621 flp->l_start = 0; 622 flp->l_len = 0; 623 flp->l_type = F_UNLCK; 624 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 625 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 626 F_UNLCK, flp, F_POSIX); 627 VFS_UNLOCK_GIANT(vfslocked); 628 vfslocked = 0; 629 } else 630 FILEDESC_SUNLOCK(fdp); 631 fdrop(fp, td); 632 break; 633 634 case F_GETLK: 635 FILEDESC_SLOCK(fdp); 636 if ((fp = fdtofp(fd, fdp)) == NULL) { 637 FILEDESC_SUNLOCK(fdp); 638 error = EBADF; 639 break; 640 } 641 if (fp->f_type != DTYPE_VNODE) { 642 FILEDESC_SUNLOCK(fdp); 643 error = EBADF; 644 break; 645 } 646 flp = (struct flock *)arg; 647 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK && 648 flp->l_type != F_UNLCK) { 649 FILEDESC_SUNLOCK(fdp); 650 error = EINVAL; 651 break; 652 } 653 if (flp->l_whence == SEEK_CUR) { 654 if ((flp->l_start > 0 && 655 fp->f_offset > OFF_MAX - flp->l_start) || 656 (flp->l_start < 0 && 657 fp->f_offset < OFF_MIN - flp->l_start)) { 658 FILEDESC_SUNLOCK(fdp); 659 error = EOVERFLOW; 660 break; 661 } 662 flp->l_start += fp->f_offset; 663 } 664 /* 665 * VOP_ADVLOCK() may block. 666 */ 667 fhold(fp); 668 FILEDESC_SUNLOCK(fdp); 669 vp = fp->f_vnode; 670 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 671 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp, 672 F_POSIX); 673 VFS_UNLOCK_GIANT(vfslocked); 674 vfslocked = 0; 675 fdrop(fp, td); 676 break; 677 default: 678 error = EINVAL; 679 break; 680 } 681 VFS_UNLOCK_GIANT(vfslocked); 682 return (error); 683 } 684 685 /* 686 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD). 687 */ 688 static int 689 do_dup(struct thread *td, int flags, int old, int new, 690 register_t *retval) 691 { 692 struct filedesc *fdp; 693 struct proc *p; 694 struct file *fp; 695 struct file *delfp; 696 int error, holdleaders, maxfd; 697 698 p = td->td_proc; 699 fdp = p->p_fd; 700 701 /* 702 * Verify we have a valid descriptor to dup from and possibly to 703 * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should 704 * return EINVAL when the new descriptor is out of bounds. 705 */ 706 if (old < 0) 707 return (EBADF); 708 if (new < 0) 709 return (flags & DUP_FCNTL ? 
EINVAL : EBADF); 710 PROC_LOCK(p); 711 maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 712 PROC_UNLOCK(p); 713 if (new >= maxfd) 714 return (flags & DUP_FCNTL ? EINVAL : EMFILE); 715 716 FILEDESC_XLOCK(fdp); 717 if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) { 718 FILEDESC_XUNLOCK(fdp); 719 return (EBADF); 720 } 721 if (flags & DUP_FIXED && old == new) { 722 *retval = new; 723 FILEDESC_XUNLOCK(fdp); 724 return (0); 725 } 726 fp = fdp->fd_ofiles[old]; 727 fhold(fp); 728 729 /* 730 * If the caller specified a file descriptor, make sure the file 731 * table is large enough to hold it, and grab it. Otherwise, just 732 * allocate a new descriptor the usual way. Since the filedesc 733 * lock may be temporarily dropped in the process, we have to look 734 * out for a race. 735 */ 736 if (flags & DUP_FIXED) { 737 if (new >= fdp->fd_nfiles) 738 fdgrowtable(fdp, new + 1); 739 if (fdp->fd_ofiles[new] == NULL) 740 fdused(fdp, new); 741 } else { 742 if ((error = fdalloc(td, new, &new)) != 0) { 743 FILEDESC_XUNLOCK(fdp); 744 fdrop(fp, td); 745 return (error); 746 } 747 } 748 749 /* 750 * If the old file changed out from under us then treat it as a 751 * bad file descriptor. Userland should do its own locking to 752 * avoid this case. 753 */ 754 if (fdp->fd_ofiles[old] != fp) { 755 /* we've allocated a descriptor which we won't use */ 756 if (fdp->fd_ofiles[new] == NULL) 757 fdunused(fdp, new); 758 FILEDESC_XUNLOCK(fdp); 759 fdrop(fp, td); 760 return (EBADF); 761 } 762 KASSERT(old != new, 763 ("new fd is same as old")); 764 765 /* 766 * Save info on the descriptor being overwritten. We cannot close 767 * it without introducing an ownership race for the slot, since we 768 * need to drop the filedesc lock to call closef(). 769 * 770 * XXX this duplicates parts of close(). 771 */ 772 delfp = fdp->fd_ofiles[new]; 773 holdleaders = 0; 774 if (delfp != NULL) { 775 if (td->td_proc->p_fdtol != NULL) { 776 /* 777 * Ask fdfree() to sleep to ensure that all relevant 778 * process leaders can be traversed in closef(). 779 */ 780 fdp->fd_holdleaderscount++; 781 holdleaders = 1; 782 } 783 } 784 785 /* 786 * Duplicate the source descriptor 787 */ 788 fdp->fd_ofiles[new] = fp; 789 fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE; 790 if (new > fdp->fd_lastfile) 791 fdp->fd_lastfile = new; 792 *retval = new; 793 794 /* 795 * If we dup'd over a valid file, we now own the reference to it 796 * and must dispose of it using closef() semantics (as if a 797 * close() were performed on it). 798 * 799 * XXX this duplicates parts of close(). 800 */ 801 if (delfp != NULL) { 802 knote_fdclose(td, new); 803 if (delfp->f_type == DTYPE_MQUEUE) 804 mq_fdclose(td, new, delfp); 805 FILEDESC_XUNLOCK(fdp); 806 (void) closef(delfp, td); 807 if (holdleaders) { 808 FILEDESC_XLOCK(fdp); 809 fdp->fd_holdleaderscount--; 810 if (fdp->fd_holdleaderscount == 0 && 811 fdp->fd_holdleaderswakeup != 0) { 812 fdp->fd_holdleaderswakeup = 0; 813 wakeup(&fdp->fd_holdleaderscount); 814 } 815 FILEDESC_XUNLOCK(fdp); 816 } 817 } else { 818 FILEDESC_XUNLOCK(fdp); 819 } 820 return (0); 821 } 822 823 /* 824 * If sigio is on the list associated with a process or process group, 825 * disable signalling from the device, remove sigio from the list and 826 * free sigio. 
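 *
 * As an illustrative sketch (the softc names are hypothetical, not from
 * this file), a character driver supporting SIGIO typically keeps a
 * "struct sigio *sc_sigio" in its softc and handles the ioctls that
 * fcntl(F_SETOWN/F_GETOWN) is translated into:
 *
 *	case FIOSETOWN:
 *		error = fsetown(*(int *)data, &sc->sc_sigio);
 *		break;
 *	case FIOGETOWN:
 *		*(int *)data = fgetown(&sc->sc_sigio);
 *		break;
 *
 * delivering the signal with pgsigio(&sc->sc_sigio, SIGIO, 0) when I/O is
 * possible and calling funsetown(&sc->sc_sigio) when the device is closed.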
827 */ 828 void 829 funsetown(struct sigio **sigiop) 830 { 831 struct sigio *sigio; 832 833 SIGIO_LOCK(); 834 sigio = *sigiop; 835 if (sigio == NULL) { 836 SIGIO_UNLOCK(); 837 return; 838 } 839 *(sigio->sio_myref) = NULL; 840 if ((sigio)->sio_pgid < 0) { 841 struct pgrp *pg = (sigio)->sio_pgrp; 842 PGRP_LOCK(pg); 843 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio, 844 sigio, sio_pgsigio); 845 PGRP_UNLOCK(pg); 846 } else { 847 struct proc *p = (sigio)->sio_proc; 848 PROC_LOCK(p); 849 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio, 850 sigio, sio_pgsigio); 851 PROC_UNLOCK(p); 852 } 853 SIGIO_UNLOCK(); 854 crfree(sigio->sio_ucred); 855 free(sigio, M_SIGIO); 856 } 857 858 /* 859 * Free a list of sigio structures. 860 * We only need to lock the SIGIO_LOCK because we have made ourselves 861 * inaccessible to callers of fsetown and therefore do not need to lock 862 * the proc or pgrp struct for the list manipulation. 863 */ 864 void 865 funsetownlst(struct sigiolst *sigiolst) 866 { 867 struct proc *p; 868 struct pgrp *pg; 869 struct sigio *sigio; 870 871 sigio = SLIST_FIRST(sigiolst); 872 if (sigio == NULL) 873 return; 874 p = NULL; 875 pg = NULL; 876 877 /* 878 * Every entry of the list should belong 879 * to a single proc or pgrp. 880 */ 881 if (sigio->sio_pgid < 0) { 882 pg = sigio->sio_pgrp; 883 PGRP_LOCK_ASSERT(pg, MA_NOTOWNED); 884 } else /* if (sigio->sio_pgid > 0) */ { 885 p = sigio->sio_proc; 886 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 887 } 888 889 SIGIO_LOCK(); 890 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) { 891 *(sigio->sio_myref) = NULL; 892 if (pg != NULL) { 893 KASSERT(sigio->sio_pgid < 0, 894 ("Proc sigio in pgrp sigio list")); 895 KASSERT(sigio->sio_pgrp == pg, 896 ("Bogus pgrp in sigio list")); 897 PGRP_LOCK(pg); 898 SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, 899 sio_pgsigio); 900 PGRP_UNLOCK(pg); 901 } else /* if (p != NULL) */ { 902 KASSERT(sigio->sio_pgid > 0, 903 ("Pgrp sigio in proc sigio list")); 904 KASSERT(sigio->sio_proc == p, 905 ("Bogus proc in sigio list")); 906 PROC_LOCK(p); 907 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, 908 sio_pgsigio); 909 PROC_UNLOCK(p); 910 } 911 SIGIO_UNLOCK(); 912 crfree(sigio->sio_ucred); 913 free(sigio, M_SIGIO); 914 SIGIO_LOCK(); 915 } 916 SIGIO_UNLOCK(); 917 } 918 919 /* 920 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg). 921 * 922 * After permission checking, add a sigio structure to the sigio list for 923 * the process or process group. 924 */ 925 int 926 fsetown(pid_t pgid, struct sigio **sigiop) 927 { 928 struct proc *proc; 929 struct pgrp *pgrp; 930 struct sigio *sigio; 931 int ret; 932 933 if (pgid == 0) { 934 funsetown(sigiop); 935 return (0); 936 } 937 938 ret = 0; 939 940 /* Allocate and fill in the new sigio out of locks. */ 941 sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK); 942 sigio->sio_pgid = pgid; 943 sigio->sio_ucred = crhold(curthread->td_ucred); 944 sigio->sio_myref = sigiop; 945 946 sx_slock(&proctree_lock); 947 if (pgid > 0) { 948 proc = pfind(pgid); 949 if (proc == NULL) { 950 ret = ESRCH; 951 goto fail; 952 } 953 954 /* 955 * Policy - Don't allow a process to FSETOWN a process 956 * in another session. 957 * 958 * Remove this test to allow maximum flexibility or 959 * restrict FSETOWN to the current process or process 960 * group for maximum safety. 
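		 *
		 * (A positive pgid, e.g. fcntl(fd, F_SETOWN, getpid()),
		 * names a single process and is handled here; the pgid < 0
		 * branch below handles fcntl(fd, F_SETOWN, -pgrp), which
		 * targets a whole process group.)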
961 */ 962 PROC_UNLOCK(proc); 963 if (proc->p_session != curthread->td_proc->p_session) { 964 ret = EPERM; 965 goto fail; 966 } 967 968 pgrp = NULL; 969 } else /* if (pgid < 0) */ { 970 pgrp = pgfind(-pgid); 971 if (pgrp == NULL) { 972 ret = ESRCH; 973 goto fail; 974 } 975 PGRP_UNLOCK(pgrp); 976 977 /* 978 * Policy - Don't allow a process to FSETOWN a process 979 * in another session. 980 * 981 * Remove this test to allow maximum flexibility or 982 * restrict FSETOWN to the current process or process 983 * group for maximum safety. 984 */ 985 if (pgrp->pg_session != curthread->td_proc->p_session) { 986 ret = EPERM; 987 goto fail; 988 } 989 990 proc = NULL; 991 } 992 funsetown(sigiop); 993 if (pgid > 0) { 994 PROC_LOCK(proc); 995 /* 996 * Since funsetownlst() is called without the proctree 997 * locked, we need to check for P_WEXIT. 998 * XXX: is ESRCH correct? 999 */ 1000 if ((proc->p_flag & P_WEXIT) != 0) { 1001 PROC_UNLOCK(proc); 1002 ret = ESRCH; 1003 goto fail; 1004 } 1005 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio); 1006 sigio->sio_proc = proc; 1007 PROC_UNLOCK(proc); 1008 } else { 1009 PGRP_LOCK(pgrp); 1010 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio); 1011 sigio->sio_pgrp = pgrp; 1012 PGRP_UNLOCK(pgrp); 1013 } 1014 sx_sunlock(&proctree_lock); 1015 SIGIO_LOCK(); 1016 *sigiop = sigio; 1017 SIGIO_UNLOCK(); 1018 return (0); 1019 1020 fail: 1021 sx_sunlock(&proctree_lock); 1022 crfree(sigio->sio_ucred); 1023 free(sigio, M_SIGIO); 1024 return (ret); 1025 } 1026 1027 /* 1028 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg). 1029 */ 1030 pid_t 1031 fgetown(sigiop) 1032 struct sigio **sigiop; 1033 { 1034 pid_t pgid; 1035 1036 SIGIO_LOCK(); 1037 pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0; 1038 SIGIO_UNLOCK(); 1039 return (pgid); 1040 } 1041 1042 /* 1043 * Close a file descriptor. 1044 */ 1045 #ifndef _SYS_SYSPROTO_H_ 1046 struct close_args { 1047 int fd; 1048 }; 1049 #endif 1050 /* ARGSUSED */ 1051 int 1052 close(td, uap) 1053 struct thread *td; 1054 struct close_args *uap; 1055 { 1056 1057 return (kern_close(td, uap->fd)); 1058 } 1059 1060 int 1061 kern_close(td, fd) 1062 struct thread *td; 1063 int fd; 1064 { 1065 struct filedesc *fdp; 1066 struct file *fp; 1067 int error; 1068 int holdleaders; 1069 1070 error = 0; 1071 holdleaders = 0; 1072 fdp = td->td_proc->p_fd; 1073 1074 AUDIT_SYSCLOSE(td, fd); 1075 1076 FILEDESC_XLOCK(fdp); 1077 if ((unsigned)fd >= fdp->fd_nfiles || 1078 (fp = fdp->fd_ofiles[fd]) == NULL) { 1079 FILEDESC_XUNLOCK(fdp); 1080 return (EBADF); 1081 } 1082 fdp->fd_ofiles[fd] = NULL; 1083 fdp->fd_ofileflags[fd] = 0; 1084 fdunused(fdp, fd); 1085 if (td->td_proc->p_fdtol != NULL) { 1086 /* 1087 * Ask fdfree() to sleep to ensure that all relevant 1088 * process leaders can be traversed in closef(). 1089 */ 1090 fdp->fd_holdleaderscount++; 1091 holdleaders = 1; 1092 } 1093 1094 /* 1095 * We now hold the fp reference that used to be owned by the 1096 * descriptor array. We have to unlock the FILEDESC *AFTER* 1097 * knote_fdclose to prevent a race of the fd getting opened, a knote 1098 * added, and deleteing a knote for the new fd. 
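	 * Concretely: were the lock dropped first, another thread could
	 * reuse this descriptor number via open(), register a kevent on
	 * it, and then have the knote_fdclose() below tear down the knote
	 * belonging to the new file instead of the one being closed.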
1099 */ 1100 knote_fdclose(td, fd); 1101 if (fp->f_type == DTYPE_MQUEUE) 1102 mq_fdclose(td, fd, fp); 1103 FILEDESC_XUNLOCK(fdp); 1104 1105 error = closef(fp, td); 1106 if (holdleaders) { 1107 FILEDESC_XLOCK(fdp); 1108 fdp->fd_holdleaderscount--; 1109 if (fdp->fd_holdleaderscount == 0 && 1110 fdp->fd_holdleaderswakeup != 0) { 1111 fdp->fd_holdleaderswakeup = 0; 1112 wakeup(&fdp->fd_holdleaderscount); 1113 } 1114 FILEDESC_XUNLOCK(fdp); 1115 } 1116 return (error); 1117 } 1118 1119 #if defined(COMPAT_43) 1120 /* 1121 * Return status information about a file descriptor. 1122 */ 1123 #ifndef _SYS_SYSPROTO_H_ 1124 struct ofstat_args { 1125 int fd; 1126 struct ostat *sb; 1127 }; 1128 #endif 1129 /* ARGSUSED */ 1130 int 1131 ofstat(struct thread *td, struct ofstat_args *uap) 1132 { 1133 struct ostat oub; 1134 struct stat ub; 1135 int error; 1136 1137 error = kern_fstat(td, uap->fd, &ub); 1138 if (error == 0) { 1139 cvtstat(&ub, &oub); 1140 error = copyout(&oub, uap->sb, sizeof(oub)); 1141 } 1142 return (error); 1143 } 1144 #endif /* COMPAT_43 */ 1145 1146 /* 1147 * Return status information about a file descriptor. 1148 */ 1149 #ifndef _SYS_SYSPROTO_H_ 1150 struct fstat_args { 1151 int fd; 1152 struct stat *sb; 1153 }; 1154 #endif 1155 /* ARGSUSED */ 1156 int 1157 fstat(struct thread *td, struct fstat_args *uap) 1158 { 1159 struct stat ub; 1160 int error; 1161 1162 error = kern_fstat(td, uap->fd, &ub); 1163 if (error == 0) 1164 error = copyout(&ub, uap->sb, sizeof(ub)); 1165 return (error); 1166 } 1167 1168 int 1169 kern_fstat(struct thread *td, int fd, struct stat *sbp) 1170 { 1171 struct file *fp; 1172 int error; 1173 1174 AUDIT_ARG(fd, fd); 1175 1176 if ((error = fget(td, fd, &fp)) != 0) 1177 return (error); 1178 1179 AUDIT_ARG(file, td->td_proc, fp); 1180 1181 error = fo_stat(fp, sbp, td->td_ucred, td); 1182 fdrop(fp, td); 1183 #ifdef KTRACE 1184 if (error == 0 && KTRPOINT(td, KTR_STRUCT)) 1185 ktrstat(sbp); 1186 #endif 1187 return (error); 1188 } 1189 1190 /* 1191 * Return status information about a file descriptor. 1192 */ 1193 #ifndef _SYS_SYSPROTO_H_ 1194 struct nfstat_args { 1195 int fd; 1196 struct nstat *sb; 1197 }; 1198 #endif 1199 /* ARGSUSED */ 1200 int 1201 nfstat(struct thread *td, struct nfstat_args *uap) 1202 { 1203 struct nstat nub; 1204 struct stat ub; 1205 int error; 1206 1207 error = kern_fstat(td, uap->fd, &ub); 1208 if (error == 0) { 1209 cvtnstat(&ub, &nub); 1210 error = copyout(&nub, uap->sb, sizeof(nub)); 1211 } 1212 return (error); 1213 } 1214 1215 /* 1216 * Return pathconf information about a file descriptor. 1217 */ 1218 #ifndef _SYS_SYSPROTO_H_ 1219 struct fpathconf_args { 1220 int fd; 1221 int name; 1222 }; 1223 #endif 1224 /* ARGSUSED */ 1225 int 1226 fpathconf(struct thread *td, struct fpathconf_args *uap) 1227 { 1228 struct file *fp; 1229 struct vnode *vp; 1230 int error; 1231 1232 if ((error = fget(td, uap->fd, &fp)) != 0) 1233 return (error); 1234 1235 /* If asynchronous I/O is available, it works for all descriptors. 
 */
	if (uap->name == _PC_ASYNC_IO) {
		td->td_retval[0] = async_io_version;
		goto out;
	}
	vp = fp->f_vnode;
	if (vp != NULL) {
		int vfslocked;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			td->td_retval[0] = PIPE_BUF;
			error = 0;
		}
	} else {
		error = EOPNOTSUPP;
	}
out:
	fdrop(fp, td);
	return (error);
}

/*
 * Grow the file table to accommodate (at least) nfd descriptors.  This may
 * block and drop the filedesc lock, but it will reacquire it before
 * returning.
 */
static void
fdgrowtable(struct filedesc *fdp, int nfd)
{
	struct file **ntable;
	char *nfileflags;
	int nnfiles, onfiles;
	NDSLOTTYPE *nmap;

	FILEDESC_XLOCK_ASSERT(fdp);

	KASSERT(fdp->fd_nfiles > 0,
	    ("zero-length file table"));

	/* compute the size of the new table */
	onfiles = fdp->fd_nfiles;
	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
	if (nnfiles <= onfiles)
		/* the table is already large enough */
		return;

	/* allocate a new table and (if required) new bitmaps */
	FILEDESC_XUNLOCK(fdp);
	ntable = malloc(nnfiles * OFILESIZE,
	    M_FILEDESC, M_ZERO | M_WAITOK);
	nfileflags = (char *)&ntable[nnfiles];
	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
		nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE,
		    M_FILEDESC, M_ZERO | M_WAITOK);
	else
		nmap = NULL;
	FILEDESC_XLOCK(fdp);

	/*
	 * We now have new tables ready to go.  Since we dropped the
	 * filedesc lock to call malloc(), watch out for a race.
	 */
	onfiles = fdp->fd_nfiles;
	if (onfiles >= nnfiles) {
		/* we lost the race, but that's OK */
		free(ntable, M_FILEDESC);
		if (nmap != NULL)
			free(nmap, M_FILEDESC);
		return;
	}
	bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
	bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
	if (onfiles > NDFILE)
		free(fdp->fd_ofiles, M_FILEDESC);
	fdp->fd_ofiles = ntable;
	fdp->fd_ofileflags = nfileflags;
	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
		bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
		if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
			free(fdp->fd_map, M_FILEDESC);
		fdp->fd_map = nmap;
	}
	fdp->fd_nfiles = nnfiles;
}

/*
 * Allocate a file descriptor for the process.
 */
int
fdalloc(struct thread *td, int minfd, int *result)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	int fd = -1, maxfd;

	FILEDESC_XLOCK_ASSERT(fdp);

	if (fdp->fd_freefile > minfd)
		minfd = fdp->fd_freefile;

	PROC_LOCK(p);
	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);

	/*
	 * Search the bitmap for a free descriptor.  If none is found, try
	 * to grow the file table.  Keep at it until we either get a file
	 * descriptor or run into process or system limits; fdgrowtable()
	 * may drop the filedesc lock, so we're in a race.
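	 * As a worked example of the growth policy (assuming a 64-bit
	 * NDSLOTTYPE, so NDENTRIES == 64): a table still at its initial
	 * NDFILE (20) entries asks fdgrowtable() for min(40, maxfd) slots,
	 * which NDSLOTS() rounds up to 64; later doublings then yield 128,
	 * 256, and so on, always capped by the per-process limit computed
	 * above.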
	 */
	for (;;) {
		fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
		if (fd >= maxfd)
			return (EMFILE);
		if (fd < fdp->fd_nfiles)
			break;
		fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
	}

	/*
	 * Perform some sanity checks, then mark the file descriptor as
	 * used and return it to the caller.
	 */
	KASSERT(!fdisused(fdp, fd),
	    ("fd_first_free() returned non-free descriptor"));
	KASSERT(fdp->fd_ofiles[fd] == NULL,
	    ("free descriptor isn't"));
	fdp->fd_ofileflags[fd] = 0; /* XXX needed? */
	fdused(fdp, fd);
	*result = fd;
	return (0);
}

/*
 * Check to see whether n user file descriptors are available to the process
 * p.
 */
int
fdavail(struct thread *td, int n)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file **fpp;
	int i, lim, last;

	FILEDESC_LOCK_ASSERT(fdp);

	PROC_LOCK(p);
	lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
	PROC_UNLOCK(p);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
		return (1);
	last = min(fdp->fd_nfiles, lim);
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
		if (*fpp == NULL && --n <= 0)
			return (1);
	}
	return (0);
}

/*
 * Create a new open file structure and allocate a file descriptor for the
 * process that refers to it.  We add one reference to the file for the
 * descriptor table and one reference for resultfp.  This is to prevent us
 * being preempted and the entry in the descriptor table closed after we
 * release the FILEDESC lock.
 */
int
falloc(struct thread *td, struct file **resultfp, int *resultfd)
{
	struct proc *p = td->td_proc;
	struct file *fp;
	int error, i;
	int maxuserfiles = maxfiles - (maxfiles / 20);
	static struct timeval lastfail;
	static int curfail;

	fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
	if ((openfiles >= maxuserfiles &&
	    priv_check(td, PRIV_MAXFILES) != 0) ||
	    openfiles >= maxfiles) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("kern.maxfiles limit exceeded by uid %i, please see tuning(7).\n",
			    td->td_ucred->cr_ruid);
		}
		uma_zfree(file_zone, fp);
		return (ENFILE);
	}
	atomic_add_int(&openfiles, 1);

	/*
	 * The file starts out with one reference for the descriptor
	 * table, plus one for *resultfp if the caller asked for the file
	 * pointer back (see the comment above falloc()).
	 */
	refcount_init(&fp->f_count, 1);
	if (resultfp)
		fhold(fp);
	fp->f_cred = crhold(td->td_ucred);
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fp->f_vnode = NULL;
	FILEDESC_XLOCK(p->p_fd);
	if ((error = fdalloc(td, 0, &i))) {
		FILEDESC_XUNLOCK(p->p_fd);
		fdrop(fp, td);
		if (resultfp)
			fdrop(fp, td);
		return (error);
	}
	p->p_fd->fd_ofiles[i] = fp;
	FILEDESC_XUNLOCK(p->p_fd);
	if (resultfp)
		*resultfp = fp;
	if (resultfd)
		*resultfd = i;
	return (0);
}

/*
 * Build a new filedesc structure from another.
 * Copy the current, root, and jail root vnode references.
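 * fdcopy() below uses this as the starting point when a full copy of a
 * table is wanted (e.g. for a fork that does not share descriptors),
 * whereas callers that do share a table simply gain a reference through
 * fdshare().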
1465 */ 1466 struct filedesc * 1467 fdinit(struct filedesc *fdp) 1468 { 1469 struct filedesc0 *newfdp; 1470 1471 newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO); 1472 FILEDESC_LOCK_INIT(&newfdp->fd_fd); 1473 if (fdp != NULL) { 1474 FILEDESC_XLOCK(fdp); 1475 newfdp->fd_fd.fd_cdir = fdp->fd_cdir; 1476 if (newfdp->fd_fd.fd_cdir) 1477 VREF(newfdp->fd_fd.fd_cdir); 1478 newfdp->fd_fd.fd_rdir = fdp->fd_rdir; 1479 if (newfdp->fd_fd.fd_rdir) 1480 VREF(newfdp->fd_fd.fd_rdir); 1481 newfdp->fd_fd.fd_jdir = fdp->fd_jdir; 1482 if (newfdp->fd_fd.fd_jdir) 1483 VREF(newfdp->fd_fd.fd_jdir); 1484 FILEDESC_XUNLOCK(fdp); 1485 } 1486 1487 /* Create the file descriptor table. */ 1488 newfdp->fd_fd.fd_refcnt = 1; 1489 newfdp->fd_fd.fd_holdcnt = 1; 1490 newfdp->fd_fd.fd_cmask = CMASK; 1491 newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles; 1492 newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags; 1493 newfdp->fd_fd.fd_nfiles = NDFILE; 1494 newfdp->fd_fd.fd_map = newfdp->fd_dmap; 1495 newfdp->fd_fd.fd_lastfile = -1; 1496 return (&newfdp->fd_fd); 1497 } 1498 1499 static struct filedesc * 1500 fdhold(struct proc *p) 1501 { 1502 struct filedesc *fdp; 1503 1504 mtx_lock(&fdesc_mtx); 1505 fdp = p->p_fd; 1506 if (fdp != NULL) 1507 fdp->fd_holdcnt++; 1508 mtx_unlock(&fdesc_mtx); 1509 return (fdp); 1510 } 1511 1512 static void 1513 fddrop(struct filedesc *fdp) 1514 { 1515 int i; 1516 1517 mtx_lock(&fdesc_mtx); 1518 i = --fdp->fd_holdcnt; 1519 mtx_unlock(&fdesc_mtx); 1520 if (i > 0) 1521 return; 1522 1523 FILEDESC_LOCK_DESTROY(fdp); 1524 free(fdp, M_FILEDESC); 1525 } 1526 1527 /* 1528 * Share a filedesc structure. 1529 */ 1530 struct filedesc * 1531 fdshare(struct filedesc *fdp) 1532 { 1533 1534 FILEDESC_XLOCK(fdp); 1535 fdp->fd_refcnt++; 1536 FILEDESC_XUNLOCK(fdp); 1537 return (fdp); 1538 } 1539 1540 /* 1541 * Unshare a filedesc structure, if necessary by making a copy 1542 */ 1543 void 1544 fdunshare(struct proc *p, struct thread *td) 1545 { 1546 1547 FILEDESC_XLOCK(p->p_fd); 1548 if (p->p_fd->fd_refcnt > 1) { 1549 struct filedesc *tmp; 1550 1551 FILEDESC_XUNLOCK(p->p_fd); 1552 tmp = fdcopy(p->p_fd); 1553 fdfree(td); 1554 p->p_fd = tmp; 1555 } else 1556 FILEDESC_XUNLOCK(p->p_fd); 1557 } 1558 1559 /* 1560 * Copy a filedesc structure. A NULL pointer in returns a NULL reference, 1561 * this is to ease callers, not catch errors. 1562 */ 1563 struct filedesc * 1564 fdcopy(struct filedesc *fdp) 1565 { 1566 struct filedesc *newfdp; 1567 int i; 1568 1569 /* Certain daemons might not have file descriptors. 
*/ 1570 if (fdp == NULL) 1571 return (NULL); 1572 1573 newfdp = fdinit(fdp); 1574 FILEDESC_SLOCK(fdp); 1575 while (fdp->fd_lastfile >= newfdp->fd_nfiles) { 1576 FILEDESC_SUNLOCK(fdp); 1577 FILEDESC_XLOCK(newfdp); 1578 fdgrowtable(newfdp, fdp->fd_lastfile + 1); 1579 FILEDESC_XUNLOCK(newfdp); 1580 FILEDESC_SLOCK(fdp); 1581 } 1582 /* copy everything except kqueue descriptors */ 1583 newfdp->fd_freefile = -1; 1584 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1585 if (fdisused(fdp, i) && 1586 fdp->fd_ofiles[i]->f_type != DTYPE_KQUEUE && 1587 fdp->fd_ofiles[i]->f_ops != &badfileops) { 1588 newfdp->fd_ofiles[i] = fdp->fd_ofiles[i]; 1589 newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i]; 1590 fhold(newfdp->fd_ofiles[i]); 1591 newfdp->fd_lastfile = i; 1592 } else { 1593 if (newfdp->fd_freefile == -1) 1594 newfdp->fd_freefile = i; 1595 } 1596 } 1597 FILEDESC_SUNLOCK(fdp); 1598 FILEDESC_XLOCK(newfdp); 1599 for (i = 0; i <= newfdp->fd_lastfile; ++i) 1600 if (newfdp->fd_ofiles[i] != NULL) 1601 fdused(newfdp, i); 1602 FILEDESC_XUNLOCK(newfdp); 1603 FILEDESC_SLOCK(fdp); 1604 if (newfdp->fd_freefile == -1) 1605 newfdp->fd_freefile = i; 1606 newfdp->fd_cmask = fdp->fd_cmask; 1607 FILEDESC_SUNLOCK(fdp); 1608 return (newfdp); 1609 } 1610 1611 /* 1612 * Release a filedesc structure. 1613 */ 1614 void 1615 fdfree(struct thread *td) 1616 { 1617 struct filedesc *fdp; 1618 struct file **fpp; 1619 int i, locked; 1620 struct filedesc_to_leader *fdtol; 1621 struct file *fp; 1622 struct vnode *cdir, *jdir, *rdir, *vp; 1623 struct flock lf; 1624 1625 /* Certain daemons might not have file descriptors. */ 1626 fdp = td->td_proc->p_fd; 1627 if (fdp == NULL) 1628 return; 1629 1630 /* Check for special need to clear POSIX style locks */ 1631 fdtol = td->td_proc->p_fdtol; 1632 if (fdtol != NULL) { 1633 FILEDESC_XLOCK(fdp); 1634 KASSERT(fdtol->fdl_refcount > 0, 1635 ("filedesc_to_refcount botch: fdl_refcount=%d", 1636 fdtol->fdl_refcount)); 1637 if (fdtol->fdl_refcount == 1 && 1638 (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1639 for (i = 0, fpp = fdp->fd_ofiles; 1640 i <= fdp->fd_lastfile; 1641 i++, fpp++) { 1642 if (*fpp == NULL || 1643 (*fpp)->f_type != DTYPE_VNODE) 1644 continue; 1645 fp = *fpp; 1646 fhold(fp); 1647 FILEDESC_XUNLOCK(fdp); 1648 lf.l_whence = SEEK_SET; 1649 lf.l_start = 0; 1650 lf.l_len = 0; 1651 lf.l_type = F_UNLCK; 1652 vp = fp->f_vnode; 1653 locked = VFS_LOCK_GIANT(vp->v_mount); 1654 (void) VOP_ADVLOCK(vp, 1655 (caddr_t)td->td_proc-> 1656 p_leader, 1657 F_UNLCK, 1658 &lf, 1659 F_POSIX); 1660 VFS_UNLOCK_GIANT(locked); 1661 FILEDESC_XLOCK(fdp); 1662 fdrop(fp, td); 1663 fpp = fdp->fd_ofiles + i; 1664 } 1665 } 1666 retry: 1667 if (fdtol->fdl_refcount == 1) { 1668 if (fdp->fd_holdleaderscount > 0 && 1669 (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1670 /* 1671 * close() or do_dup() has cleared a reference 1672 * in a shared file descriptor table. 1673 */ 1674 fdp->fd_holdleaderswakeup = 1; 1675 sx_sleep(&fdp->fd_holdleaderscount, 1676 FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0); 1677 goto retry; 1678 } 1679 if (fdtol->fdl_holdcount > 0) { 1680 /* 1681 * Ensure that fdtol->fdl_leader remains 1682 * valid in closef(). 
				 */
				fdtol->fdl_wakeup = 1;
				sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
				    "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else
			fdtol = NULL;
		td->td_proc->p_fdtol = NULL;
		FILEDESC_XUNLOCK(fdp);
		if (fdtol != NULL)
			free(fdtol, M_FILEDESC_TO_LEADER);
	}
	FILEDESC_XLOCK(fdp);
	i = --fdp->fd_refcnt;
	FILEDESC_XUNLOCK(fdp);
	if (i > 0)
		return;

	fpp = fdp->fd_ofiles;
	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp) {
			FILEDESC_XLOCK(fdp);
			fp = *fpp;
			*fpp = NULL;
			FILEDESC_XUNLOCK(fdp);
			(void) closef(fp, td);
		}
	}
	FILEDESC_XLOCK(fdp);

	/* XXX This should happen earlier. */
	mtx_lock(&fdesc_mtx);
	td->td_proc->p_fd = NULL;
	mtx_unlock(&fdesc_mtx);

	if (fdp->fd_nfiles > NDFILE)
		free(fdp->fd_ofiles, M_FILEDESC);
	if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
		free(fdp->fd_map, M_FILEDESC);

	fdp->fd_nfiles = 0;

	cdir = fdp->fd_cdir;
	fdp->fd_cdir = NULL;
	rdir = fdp->fd_rdir;
	fdp->fd_rdir = NULL;
	jdir = fdp->fd_jdir;
	fdp->fd_jdir = NULL;
	FILEDESC_XUNLOCK(fdp);

	if (cdir) {
		locked = VFS_LOCK_GIANT(cdir->v_mount);
		vrele(cdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (rdir) {
		locked = VFS_LOCK_GIANT(rdir->v_mount);
		vrele(rdir);
		VFS_UNLOCK_GIANT(locked);
	}
	if (jdir) {
		locked = VFS_LOCK_GIANT(jdir->v_mount);
		vrele(jdir);
		VFS_UNLOCK_GIANT(locked);
	}

	fddrop(fdp);
}

/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would otherwise
 * be off-limits to the process.  We check for filesystems where the vnode
 * can change out from under us after execve (like [lin]procfs).
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE) {
		struct vnode *vp = fp->f_vnode;

		if ((vp->v_vflag & VV_PROCDEP) != 0)
			return (1);
	}
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 */
void
setugidsafety(struct thread *td)
{
	struct filedesc *fdp;
	int i;

	/* Certain daemons might not have file descriptors. */
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * Note: fdp->fd_ofiles may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	FILEDESC_XLOCK(fdp);
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
			struct file *fp;

			knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
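			 * Clearing the slot and marking it unused while
			 * the lock is still held means no other thread can
			 * observe, or re-close, a descriptor that this
			 * thread is about to hand to closef() once it
			 * drops the filedesc lock below.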
1809 */ 1810 fp = fdp->fd_ofiles[i]; 1811 fdp->fd_ofiles[i] = NULL; 1812 fdp->fd_ofileflags[i] = 0; 1813 fdunused(fdp, i); 1814 FILEDESC_XUNLOCK(fdp); 1815 (void) closef(fp, td); 1816 FILEDESC_XLOCK(fdp); 1817 } 1818 } 1819 FILEDESC_XUNLOCK(fdp); 1820 } 1821 1822 /* 1823 * If a specific file object occupies a specific file descriptor, close the 1824 * file descriptor entry and drop a reference on the file object. This is a 1825 * convenience function to handle a subsequent error in a function that calls 1826 * falloc() that handles the race that another thread might have closed the 1827 * file descriptor out from under the thread creating the file object. 1828 */ 1829 void 1830 fdclose(struct filedesc *fdp, struct file *fp, int idx, struct thread *td) 1831 { 1832 1833 FILEDESC_XLOCK(fdp); 1834 if (fdp->fd_ofiles[idx] == fp) { 1835 fdp->fd_ofiles[idx] = NULL; 1836 fdunused(fdp, idx); 1837 FILEDESC_XUNLOCK(fdp); 1838 fdrop(fp, td); 1839 } else 1840 FILEDESC_XUNLOCK(fdp); 1841 } 1842 1843 /* 1844 * Close any files on exec? 1845 */ 1846 void 1847 fdcloseexec(struct thread *td) 1848 { 1849 struct filedesc *fdp; 1850 int i; 1851 1852 /* Certain daemons might not have file descriptors. */ 1853 fdp = td->td_proc->p_fd; 1854 if (fdp == NULL) 1855 return; 1856 1857 FILEDESC_XLOCK(fdp); 1858 1859 /* 1860 * We cannot cache fd_ofiles or fd_ofileflags since operations 1861 * may block and rip them out from under us. 1862 */ 1863 for (i = 0; i <= fdp->fd_lastfile; i++) { 1864 if (fdp->fd_ofiles[i] != NULL && 1865 (fdp->fd_ofiles[i]->f_type == DTYPE_MQUEUE || 1866 (fdp->fd_ofileflags[i] & UF_EXCLOSE))) { 1867 struct file *fp; 1868 1869 knote_fdclose(td, i); 1870 /* 1871 * NULL-out descriptor prior to close to avoid 1872 * a race while close blocks. 1873 */ 1874 fp = fdp->fd_ofiles[i]; 1875 fdp->fd_ofiles[i] = NULL; 1876 fdp->fd_ofileflags[i] = 0; 1877 fdunused(fdp, i); 1878 if (fp->f_type == DTYPE_MQUEUE) 1879 mq_fdclose(td, i, fp); 1880 FILEDESC_XUNLOCK(fdp); 1881 (void) closef(fp, td); 1882 FILEDESC_XLOCK(fdp); 1883 } 1884 } 1885 FILEDESC_XUNLOCK(fdp); 1886 } 1887 1888 /* 1889 * It is unsafe for set[ug]id processes to be started with file 1890 * descriptors 0..2 closed, as these descriptors are given implicit 1891 * significance in the Standard C library. fdcheckstd() will create a 1892 * descriptor referencing /dev/null for each of stdin, stdout, and 1893 * stderr that is not already open. 1894 */ 1895 int 1896 fdcheckstd(struct thread *td) 1897 { 1898 struct filedesc *fdp; 1899 register_t retval, save; 1900 int i, error, devnull; 1901 1902 fdp = td->td_proc->p_fd; 1903 if (fdp == NULL) 1904 return (0); 1905 KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared")); 1906 devnull = -1; 1907 error = 0; 1908 for (i = 0; i < 3; i++) { 1909 if (fdp->fd_ofiles[i] != NULL) 1910 continue; 1911 if (devnull < 0) { 1912 save = td->td_retval[0]; 1913 error = kern_open(td, "/dev/null", UIO_SYSSPACE, 1914 O_RDWR, 0); 1915 devnull = td->td_retval[0]; 1916 KASSERT(devnull == i, ("oof, we didn't get our fd")); 1917 td->td_retval[0] = save; 1918 if (error) 1919 break; 1920 } else { 1921 error = do_dup(td, DUP_FIXED, devnull, i, &retval); 1922 if (error != 0) 1923 break; 1924 } 1925 } 1926 return (error); 1927 } 1928 1929 /* 1930 * Internal form of close. Decrement reference count on file structure. 1931 * Note: td may be NULL when closing a file that was being passed in a 1932 * message. 
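 * (That is, a descriptor in transit as SCM_RIGHTS ancillary data over a
 * unix(4) socket, discarded before any process ever received it.)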
1933 * 1934 * XXXRW: Giant is not required for the caller, but often will be held; this 1935 * makes it moderately likely the Giant will be recursed in the VFS case. 1936 */ 1937 int 1938 closef(struct file *fp, struct thread *td) 1939 { 1940 struct vnode *vp; 1941 struct flock lf; 1942 struct filedesc_to_leader *fdtol; 1943 struct filedesc *fdp; 1944 1945 /* 1946 * POSIX record locking dictates that any close releases ALL 1947 * locks owned by this process. This is handled by setting 1948 * a flag in the unlock to free ONLY locks obeying POSIX 1949 * semantics, and not to free BSD-style file locks. 1950 * If the descriptor was in a message, POSIX-style locks 1951 * aren't passed with the descriptor, and the thread pointer 1952 * will be NULL. Callers should be careful only to pass a 1953 * NULL thread pointer when there really is no owning 1954 * context that might have locks, or the locks will be 1955 * leaked. 1956 */ 1957 if (fp->f_type == DTYPE_VNODE && td != NULL) { 1958 int vfslocked; 1959 1960 vp = fp->f_vnode; 1961 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 1962 if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1963 lf.l_whence = SEEK_SET; 1964 lf.l_start = 0; 1965 lf.l_len = 0; 1966 lf.l_type = F_UNLCK; 1967 (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader, 1968 F_UNLCK, &lf, F_POSIX); 1969 } 1970 fdtol = td->td_proc->p_fdtol; 1971 if (fdtol != NULL) { 1972 /* 1973 * Handle special case where file descriptor table is 1974 * shared between multiple process leaders. 1975 */ 1976 fdp = td->td_proc->p_fd; 1977 FILEDESC_XLOCK(fdp); 1978 for (fdtol = fdtol->fdl_next; 1979 fdtol != td->td_proc->p_fdtol; 1980 fdtol = fdtol->fdl_next) { 1981 if ((fdtol->fdl_leader->p_flag & 1982 P_ADVLOCK) == 0) 1983 continue; 1984 fdtol->fdl_holdcount++; 1985 FILEDESC_XUNLOCK(fdp); 1986 lf.l_whence = SEEK_SET; 1987 lf.l_start = 0; 1988 lf.l_len = 0; 1989 lf.l_type = F_UNLCK; 1990 vp = fp->f_vnode; 1991 (void) VOP_ADVLOCK(vp, 1992 (caddr_t)fdtol->fdl_leader, 1993 F_UNLCK, &lf, F_POSIX); 1994 FILEDESC_XLOCK(fdp); 1995 fdtol->fdl_holdcount--; 1996 if (fdtol->fdl_holdcount == 0 && 1997 fdtol->fdl_wakeup != 0) { 1998 fdtol->fdl_wakeup = 0; 1999 wakeup(fdtol); 2000 } 2001 } 2002 FILEDESC_XUNLOCK(fdp); 2003 } 2004 VFS_UNLOCK_GIANT(vfslocked); 2005 } 2006 return (fdrop(fp, td)); 2007 } 2008 2009 /* 2010 * Initialize the file pointer with the specified properties. 2011 * 2012 * The ops are set with release semantics to be certain that the flags, type, 2013 * and data are visible when ops is. This is to prevent ops methods from being 2014 * called with bad data. 2015 */ 2016 void 2017 finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops) 2018 { 2019 fp->f_data = data; 2020 fp->f_flag = flag; 2021 fp->f_type = type; 2022 atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops); 2023 } 2024 2025 2026 /* 2027 * Extract the file pointer associated with the specified descriptor for the 2028 * current user process. 2029 * 2030 * If the descriptor doesn't exist, EBADF is returned. 2031 * 2032 * If the descriptor exists but doesn't match 'flags' then return EBADF for 2033 * read attempts and EINVAL for write attempts. 2034 * 2035 * If 'hold' is set (non-zero) the file's refcount will be bumped on return. 2036 * It should be dropped with fdrop(). If it is not set, then the refcount 2037 * will not be bumped however the thread's filedesc struct will be returned 2038 * locked (for fgetsock). 
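 *
 * The usual calling pattern, as used by fpathconf() and flock() in this
 * file, is:
 *
 *	if ((error = fget(td, fd, &fp)) != 0)
 *		return (error);
 *	...operate on fp...
 *	fdrop(fp, td);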
 *
 * If an error occurred the non-zero error is returned and *fpp is set to
 * NULL.  Otherwise *fpp is set and zero is returned.
 */
static __inline int
_fget(struct thread *td, int fd, struct file **fpp, int flags, int hold)
{
	struct filedesc *fdp;
	struct file *fp;

	*fpp = NULL;
	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
		return (EBADF);
	FILEDESC_SLOCK(fdp);
	if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}

	/*
	 * FREAD and FWRITE failure return EBADF as per POSIX.
	 *
	 * Only one flag, or 0, may be specified.
	 */
	if (flags == FREAD && (fp->f_flag & FREAD) == 0) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) {
		FILEDESC_SUNLOCK(fdp);
		return (EBADF);
	}
	if (hold) {
		fhold(fp);
		FILEDESC_SUNLOCK(fdp);
	}
	*fpp = fp;
	return (0);
}

int
fget(struct thread *td, int fd, struct file **fpp)
{

	return (_fget(td, fd, fpp, 0, 1));
}

int
fget_read(struct thread *td, int fd, struct file **fpp)
{

	return (_fget(td, fd, fpp, FREAD, 1));
}

int
fget_write(struct thread *td, int fd, struct file **fpp)
{

	return (_fget(td, fd, fpp, FWRITE, 1));
}

/*
 * Like fget() but loads the underlying vnode, or returns an error if the
 * descriptor does not represent a vnode.  Note that pipes use vnodes but
 * never have VM objects.  The returned vnode will be vref()'d.
 *
 * XXX: what about the unused flags ?
 */
static __inline int
_fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags)
{
	struct file *fp;
	int error;

	*vpp = NULL;
	if ((error = _fget(td, fd, &fp, flags, 0)) != 0)
		return (error);
	if (fp->f_vnode == NULL) {
		error = EINVAL;
	} else {
		*vpp = fp->f_vnode;
		vref(*vpp);
	}
	FILEDESC_SUNLOCK(td->td_proc->p_fd);
	return (error);
}

int
fgetvp(struct thread *td, int fd, struct vnode **vpp)
{

	return (_fgetvp(td, fd, vpp, 0));
}

int
fgetvp_read(struct thread *td, int fd, struct vnode **vpp)
{

	return (_fgetvp(td, fd, vpp, FREAD));
}

#ifdef notyet
int
fgetvp_write(struct thread *td, int fd, struct vnode **vpp)
{

	return (_fgetvp(td, fd, vpp, FWRITE));
}
#endif

/*
 * Like fget() but loads the underlying socket, or returns an error if the
 * descriptor does not represent a socket.
 *
 * We bump the ref count on the returned socket.  XXX Also obtain the SX lock
 * in the future.
 *
 * Note: fgetsock() and fputsock() are deprecated, as consumers should rely
 * on their file descriptor reference to prevent the socket from being freed
 * during use.
2159 */ 2160 int 2161 fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp) 2162 { 2163 struct file *fp; 2164 int error; 2165 2166 *spp = NULL; 2167 if (fflagp != NULL) 2168 *fflagp = 0; 2169 if ((error = _fget(td, fd, &fp, 0, 0)) != 0) 2170 return (error); 2171 if (fp->f_type != DTYPE_SOCKET) { 2172 error = ENOTSOCK; 2173 } else { 2174 *spp = fp->f_data; 2175 if (fflagp) 2176 *fflagp = fp->f_flag; 2177 SOCK_LOCK(*spp); 2178 soref(*spp); 2179 SOCK_UNLOCK(*spp); 2180 } 2181 FILEDESC_SUNLOCK(td->td_proc->p_fd); 2182 return (error); 2183 } 2184 2185 /* 2186 * Drop the reference count on the socket and XXX release the SX lock in the 2187 * future. The last reference closes the socket. 2188 * 2189 * Note: fputsock() is deprecated, see comment for fgetsock(). 2190 */ 2191 void 2192 fputsock(struct socket *so) 2193 { 2194 2195 ACCEPT_LOCK(); 2196 SOCK_LOCK(so); 2197 sorele(so); 2198 } 2199 2200 /* 2201 * Handle the last reference to a file being closed. 2202 */ 2203 int 2204 _fdrop(struct file *fp, struct thread *td) 2205 { 2206 int error; 2207 2208 error = 0; 2209 if (fp->f_count != 0) 2210 panic("fdrop: count %d", fp->f_count); 2211 if (fp->f_ops != &badfileops) 2212 error = fo_close(fp, td); 2213 /* 2214 * The f_cdevpriv cannot be assigned non-NULL value while we 2215 * are destroying the file. 2216 */ 2217 if (fp->f_cdevpriv != NULL) 2218 devfs_fpdrop(fp); 2219 atomic_subtract_int(&openfiles, 1); 2220 crfree(fp->f_cred); 2221 uma_zfree(file_zone, fp); 2222 2223 return (error); 2224 } 2225 2226 /* 2227 * Apply an advisory lock on a file descriptor. 2228 * 2229 * Just attempt to get a record lock of the requested type on the entire file 2230 * (l_whence = SEEK_SET, l_start = 0, l_len = 0). 2231 */ 2232 #ifndef _SYS_SYSPROTO_H_ 2233 struct flock_args { 2234 int fd; 2235 int how; 2236 }; 2237 #endif 2238 /* ARGSUSED */ 2239 int 2240 flock(struct thread *td, struct flock_args *uap) 2241 { 2242 struct file *fp; 2243 struct vnode *vp; 2244 struct flock lf; 2245 int vfslocked; 2246 int error; 2247 2248 if ((error = fget(td, uap->fd, &fp)) != 0) 2249 return (error); 2250 if (fp->f_type != DTYPE_VNODE) { 2251 fdrop(fp, td); 2252 return (EOPNOTSUPP); 2253 } 2254 2255 vp = fp->f_vnode; 2256 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2257 lf.l_whence = SEEK_SET; 2258 lf.l_start = 0; 2259 lf.l_len = 0; 2260 if (uap->how & LOCK_UN) { 2261 lf.l_type = F_UNLCK; 2262 atomic_clear_int(&fp->f_flag, FHASLOCK); 2263 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK); 2264 goto done2; 2265 } 2266 if (uap->how & LOCK_EX) 2267 lf.l_type = F_WRLCK; 2268 else if (uap->how & LOCK_SH) 2269 lf.l_type = F_RDLCK; 2270 else { 2271 error = EBADF; 2272 goto done2; 2273 } 2274 atomic_set_int(&fp->f_flag, FHASLOCK); 2275 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 2276 (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT); 2277 done2: 2278 fdrop(fp, td); 2279 VFS_UNLOCK_GIANT(vfslocked); 2280 return (error); 2281 } 2282 /* 2283 * Duplicate the specified descriptor to a free descriptor. 2284 */ 2285 int 2286 dupfdopen(struct thread *td, struct filedesc *fdp, int indx, int dfd, int mode, int error) 2287 { 2288 struct file *wfp; 2289 struct file *fp; 2290 2291 /* 2292 * If the to-be-dup'd fd number is greater than the allowed number 2293 * of file descriptors, or the fd to be dup'd has already been 2294 * closed, then reject. 
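	 *
	 * This is typically reached from the open(2) path: a lookup of
	 * /dev/fd/N (e.g. via fdescfs) reports ENODEV or ENXIO along with
	 * the descriptor to duplicate, and kern_open() passes that error
	 * here, which is what selects between the behaviours below.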
2295 */ 2296 FILEDESC_XLOCK(fdp); 2297 if (dfd < 0 || dfd >= fdp->fd_nfiles || 2298 (wfp = fdp->fd_ofiles[dfd]) == NULL) { 2299 FILEDESC_XUNLOCK(fdp); 2300 return (EBADF); 2301 } 2302 2303 /* 2304 * There are two cases of interest here. 2305 * 2306 * For ENODEV simply dup (dfd) to file descriptor (indx) and return. 2307 * 2308 * For ENXIO steal away the file structure from (dfd) and store it in 2309 * (indx). (dfd) is effectively closed by this operation. 2310 * 2311 * Any other error code is just returned. 2312 */ 2313 switch (error) { 2314 case ENODEV: 2315 /* 2316 * Check that the mode the file is being opened for is a 2317 * subset of the mode of the existing descriptor. 2318 */ 2319 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { 2320 FILEDESC_XUNLOCK(fdp); 2321 return (EACCES); 2322 } 2323 fp = fdp->fd_ofiles[indx]; 2324 fdp->fd_ofiles[indx] = wfp; 2325 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2326 if (fp == NULL) 2327 fdused(fdp, indx); 2328 fhold(wfp); 2329 FILEDESC_XUNLOCK(fdp); 2330 if (fp != NULL) 2331 /* 2332 * We now own the reference to fp that the ofiles[] 2333 * array used to own. Release it. 2334 */ 2335 fdrop(fp, td); 2336 return (0); 2337 2338 case ENXIO: 2339 /* 2340 * Steal away the file pointer from dfd and stuff it into indx. 2341 */ 2342 fp = fdp->fd_ofiles[indx]; 2343 fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd]; 2344 fdp->fd_ofiles[dfd] = NULL; 2345 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2346 fdp->fd_ofileflags[dfd] = 0; 2347 fdunused(fdp, dfd); 2348 if (fp == NULL) 2349 fdused(fdp, indx); 2350 FILEDESC_XUNLOCK(fdp); 2351 2352 /* 2353 * We now own the reference to fp that the ofiles[] array 2354 * used to own. Release it. 2355 */ 2356 if (fp != NULL) 2357 fdrop(fp, td); 2358 return (0); 2359 2360 default: 2361 FILEDESC_XUNLOCK(fdp); 2362 return (error); 2363 } 2364 /* NOTREACHED */ 2365 } 2366 2367 /* 2368 * Scan all active processes to see if any of them have a current or root 2369 * directory of `olddp'. If so, replace them with the new mount point. 
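 *
 * (A typical caller, hedged because the mount code lives outside this
 * file: when a file system is mounted on a directory, the covered vnode
 * is passed as `olddp' and the new file system's root as `newdp', so
 * processes whose cwd or root was the covered directory move into the
 * new mount rather than being left on a now-covered vnode.)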
2370 */ 2371 void 2372 mountcheckdirs(struct vnode *olddp, struct vnode *newdp) 2373 { 2374 struct filedesc *fdp; 2375 struct proc *p; 2376 int nrele; 2377 2378 if (vrefcnt(olddp) == 1) 2379 return; 2380 sx_slock(&allproc_lock); 2381 FOREACH_PROC_IN_SYSTEM(p) { 2382 fdp = fdhold(p); 2383 if (fdp == NULL) 2384 continue; 2385 nrele = 0; 2386 FILEDESC_XLOCK(fdp); 2387 if (fdp->fd_cdir == olddp) { 2388 vref(newdp); 2389 fdp->fd_cdir = newdp; 2390 nrele++; 2391 } 2392 if (fdp->fd_rdir == olddp) { 2393 vref(newdp); 2394 fdp->fd_rdir = newdp; 2395 nrele++; 2396 } 2397 FILEDESC_XUNLOCK(fdp); 2398 fddrop(fdp); 2399 while (nrele--) 2400 vrele(olddp); 2401 } 2402 sx_sunlock(&allproc_lock); 2403 if (rootvnode == olddp) { 2404 vrele(rootvnode); 2405 vref(newdp); 2406 rootvnode = newdp; 2407 } 2408 } 2409 2410 struct filedesc_to_leader * 2411 filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader) 2412 { 2413 struct filedesc_to_leader *fdtol; 2414 2415 fdtol = malloc(sizeof(struct filedesc_to_leader), 2416 M_FILEDESC_TO_LEADER, 2417 M_WAITOK); 2418 fdtol->fdl_refcount = 1; 2419 fdtol->fdl_holdcount = 0; 2420 fdtol->fdl_wakeup = 0; 2421 fdtol->fdl_leader = leader; 2422 if (old != NULL) { 2423 FILEDESC_XLOCK(fdp); 2424 fdtol->fdl_next = old->fdl_next; 2425 fdtol->fdl_prev = old; 2426 old->fdl_next = fdtol; 2427 fdtol->fdl_next->fdl_prev = fdtol; 2428 FILEDESC_XUNLOCK(fdp); 2429 } else { 2430 fdtol->fdl_next = fdtol; 2431 fdtol->fdl_prev = fdtol; 2432 } 2433 return (fdtol); 2434 } 2435 2436 /* 2437 * Get file structures globally. 2438 */ 2439 static int 2440 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 2441 { 2442 struct xfile xf; 2443 struct filedesc *fdp; 2444 struct file *fp; 2445 struct proc *p; 2446 int error, n; 2447 2448 error = sysctl_wire_old_buffer(req, 0); 2449 if (error != 0) 2450 return (error); 2451 if (req->oldptr == NULL) { 2452 n = 0; 2453 sx_slock(&allproc_lock); 2454 FOREACH_PROC_IN_SYSTEM(p) { 2455 if (p->p_state == PRS_NEW) 2456 continue; 2457 fdp = fdhold(p); 2458 if (fdp == NULL) 2459 continue; 2460 /* overestimates sparse tables. 
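 * fd_lastfile tracks the highest descriptor slot in use, so counting
 * that many entries per process gives a cheap size estimate without
 * walking each table; the NULL slots of a sparse table are what make
 * it an overestimate.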
*/ 2461 if (fdp->fd_lastfile > 0) 2462 n += fdp->fd_lastfile; 2463 fddrop(fdp); 2464 } 2465 sx_sunlock(&allproc_lock); 2466 return (SYSCTL_OUT(req, 0, n * sizeof(xf))); 2467 } 2468 error = 0; 2469 bzero(&xf, sizeof(xf)); 2470 xf.xf_size = sizeof(xf); 2471 sx_slock(&allproc_lock); 2472 FOREACH_PROC_IN_SYSTEM(p) { 2473 if (p->p_state == PRS_NEW) 2474 continue; 2475 PROC_LOCK(p); 2476 if (p_cansee(req->td, p) != 0) { 2477 PROC_UNLOCK(p); 2478 continue; 2479 } 2480 xf.xf_pid = p->p_pid; 2481 xf.xf_uid = p->p_ucred->cr_uid; 2482 PROC_UNLOCK(p); 2483 fdp = fdhold(p); 2484 if (fdp == NULL) 2485 continue; 2486 FILEDESC_SLOCK(fdp); 2487 for (n = 0; fdp->fd_refcnt > 0 && n < fdp->fd_nfiles; ++n) { 2488 if ((fp = fdp->fd_ofiles[n]) == NULL) 2489 continue; 2490 xf.xf_fd = n; 2491 xf.xf_file = fp; 2492 xf.xf_data = fp->f_data; 2493 xf.xf_vnode = fp->f_vnode; 2494 xf.xf_type = fp->f_type; 2495 xf.xf_count = fp->f_count; 2496 xf.xf_msgcount = 0; 2497 xf.xf_offset = fp->f_offset; 2498 xf.xf_flag = fp->f_flag; 2499 error = SYSCTL_OUT(req, &xf, sizeof(xf)); 2500 if (error) 2501 break; 2502 } 2503 FILEDESC_SUNLOCK(fdp); 2504 fddrop(fdp); 2505 if (error) 2506 break; 2507 } 2508 sx_sunlock(&allproc_lock); 2509 return (error); 2510 } 2511 2512 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 2513 0, 0, sysctl_kern_file, "S,xfile", "Entire file table"); 2514 2515 #ifdef KINFO_OFILE_SIZE 2516 CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE); 2517 #endif 2518 2519 #ifdef COMPAT_FREEBSD7 2520 static int 2521 export_vnode_for_osysctl(struct vnode *vp, int type, 2522 struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req) 2523 { 2524 int error; 2525 char *fullpath, *freepath; 2526 int vfslocked; 2527 2528 bzero(kif, sizeof(*kif)); 2529 kif->kf_structsize = sizeof(*kif); 2530 2531 vref(vp); 2532 kif->kf_fd = type; 2533 kif->kf_type = KF_TYPE_VNODE; 2534 /* This function only handles directories. */ 2535 if (vp->v_type != VDIR) { 2536 vrele(vp); 2537 return (ENOTDIR); 2538 } 2539 kif->kf_vnode_type = KF_VTYPE_VDIR; 2540 2541 /* 2542 * This is not a true file descriptor, so we set a bogus refcount 2543 * and offset to indicate these fields should be ignored. 2544 */ 2545 kif->kf_ref_count = -1; 2546 kif->kf_offset = -1; 2547 2548 freepath = NULL; 2549 fullpath = "-"; 2550 FILEDESC_SUNLOCK(fdp); 2551 vn_fullpath(curthread, vp, &fullpath, &freepath); 2552 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2553 vrele(vp); 2554 VFS_UNLOCK_GIANT(vfslocked); 2555 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); 2556 if (freepath != NULL) 2557 free(freepath, M_TEMP); 2558 error = SYSCTL_OUT(req, kif, sizeof(*kif)); 2559 FILEDESC_SLOCK(fdp); 2560 return (error); 2561 } 2562 2563 /* 2564 * Get per-process file descriptors for use by procstat(1), et al. 
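 *
 * This is the older, COMPAT_FREEBSD7 interface: every record pushed out
 * below is a fixed-size struct kinfo_ofile, so a consumer simply walks
 * the returned buffer in sizeof(struct kinfo_ofile) steps.  The newer
 * kern.proc.filedesc handler further down emits packed, variable-length
 * records instead.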
2565 */ 2566 static int 2567 sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS) 2568 { 2569 char *fullpath, *freepath; 2570 struct kinfo_ofile *kif; 2571 struct filedesc *fdp; 2572 int error, i, *name; 2573 struct socket *so; 2574 struct vnode *vp; 2575 struct file *fp; 2576 struct proc *p; 2577 struct tty *tp; 2578 int vfslocked; 2579 2580 name = (int *)arg1; 2581 if ((p = pfind((pid_t)name[0])) == NULL) 2582 return (ESRCH); 2583 if ((error = p_candebug(curthread, p))) { 2584 PROC_UNLOCK(p); 2585 return (error); 2586 } 2587 fdp = fdhold(p); 2588 PROC_UNLOCK(p); 2589 if (fdp == NULL) 2590 return (ENOENT); 2591 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK); 2592 FILEDESC_SLOCK(fdp); 2593 if (fdp->fd_cdir != NULL) 2594 export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif, 2595 fdp, req); 2596 if (fdp->fd_rdir != NULL) 2597 export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif, 2598 fdp, req); 2599 if (fdp->fd_jdir != NULL) 2600 export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif, 2601 fdp, req); 2602 for (i = 0; i < fdp->fd_nfiles; i++) { 2603 if ((fp = fdp->fd_ofiles[i]) == NULL) 2604 continue; 2605 bzero(kif, sizeof(*kif)); 2606 kif->kf_structsize = sizeof(*kif); 2607 vp = NULL; 2608 so = NULL; 2609 tp = NULL; 2610 kif->kf_fd = i; 2611 switch (fp->f_type) { 2612 case DTYPE_VNODE: 2613 kif->kf_type = KF_TYPE_VNODE; 2614 vp = fp->f_vnode; 2615 break; 2616 2617 case DTYPE_SOCKET: 2618 kif->kf_type = KF_TYPE_SOCKET; 2619 so = fp->f_data; 2620 break; 2621 2622 case DTYPE_PIPE: 2623 kif->kf_type = KF_TYPE_PIPE; 2624 break; 2625 2626 case DTYPE_FIFO: 2627 kif->kf_type = KF_TYPE_FIFO; 2628 vp = fp->f_vnode; 2629 vref(vp); 2630 break; 2631 2632 case DTYPE_KQUEUE: 2633 kif->kf_type = KF_TYPE_KQUEUE; 2634 break; 2635 2636 case DTYPE_CRYPTO: 2637 kif->kf_type = KF_TYPE_CRYPTO; 2638 break; 2639 2640 case DTYPE_MQUEUE: 2641 kif->kf_type = KF_TYPE_MQUEUE; 2642 break; 2643 2644 case DTYPE_SHM: 2645 kif->kf_type = KF_TYPE_SHM; 2646 break; 2647 2648 case DTYPE_SEM: 2649 kif->kf_type = KF_TYPE_SEM; 2650 break; 2651 2652 case DTYPE_PTS: 2653 kif->kf_type = KF_TYPE_PTS; 2654 tp = fp->f_data; 2655 break; 2656 2657 default: 2658 kif->kf_type = KF_TYPE_UNKNOWN; 2659 break; 2660 } 2661 kif->kf_ref_count = fp->f_count; 2662 if (fp->f_flag & FREAD) 2663 kif->kf_flags |= KF_FLAG_READ; 2664 if (fp->f_flag & FWRITE) 2665 kif->kf_flags |= KF_FLAG_WRITE; 2666 if (fp->f_flag & FAPPEND) 2667 kif->kf_flags |= KF_FLAG_APPEND; 2668 if (fp->f_flag & FASYNC) 2669 kif->kf_flags |= KF_FLAG_ASYNC; 2670 if (fp->f_flag & FFSYNC) 2671 kif->kf_flags |= KF_FLAG_FSYNC; 2672 if (fp->f_flag & FNONBLOCK) 2673 kif->kf_flags |= KF_FLAG_NONBLOCK; 2674 if (fp->f_flag & O_DIRECT) 2675 kif->kf_flags |= KF_FLAG_DIRECT; 2676 if (fp->f_flag & FHASLOCK) 2677 kif->kf_flags |= KF_FLAG_HASLOCK; 2678 kif->kf_offset = fp->f_offset; 2679 if (vp != NULL) { 2680 vref(vp); 2681 switch (vp->v_type) { 2682 case VNON: 2683 kif->kf_vnode_type = KF_VTYPE_VNON; 2684 break; 2685 case VREG: 2686 kif->kf_vnode_type = KF_VTYPE_VREG; 2687 break; 2688 case VDIR: 2689 kif->kf_vnode_type = KF_VTYPE_VDIR; 2690 break; 2691 case VBLK: 2692 kif->kf_vnode_type = KF_VTYPE_VBLK; 2693 break; 2694 case VCHR: 2695 kif->kf_vnode_type = KF_VTYPE_VCHR; 2696 break; 2697 case VLNK: 2698 kif->kf_vnode_type = KF_VTYPE_VLNK; 2699 break; 2700 case VSOCK: 2701 kif->kf_vnode_type = KF_VTYPE_VSOCK; 2702 break; 2703 case VFIFO: 2704 kif->kf_vnode_type = KF_VTYPE_VFIFO; 2705 break; 2706 case VBAD: 2707 kif->kf_vnode_type = KF_VTYPE_VBAD; 2708 break; 2709 default: 2710 
kif->kf_vnode_type = KF_VTYPE_UNKNOWN; 2711 break; 2712 } 2713 /* 2714 * It is OK to drop the filedesc lock here as we will 2715 * re-validate and re-evaluate its properties when 2716 * the loop continues. 2717 */ 2718 freepath = NULL; 2719 fullpath = "-"; 2720 FILEDESC_SUNLOCK(fdp); 2721 vn_fullpath(curthread, vp, &fullpath, &freepath); 2722 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2723 vrele(vp); 2724 VFS_UNLOCK_GIANT(vfslocked); 2725 strlcpy(kif->kf_path, fullpath, 2726 sizeof(kif->kf_path)); 2727 if (freepath != NULL) 2728 free(freepath, M_TEMP); 2729 FILEDESC_SLOCK(fdp); 2730 } 2731 if (so != NULL) { 2732 struct sockaddr *sa; 2733 2734 if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa) 2735 == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) { 2736 bcopy(sa, &kif->kf_sa_local, sa->sa_len); 2737 free(sa, M_SONAME); 2738 } 2739 if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa) 2740 == 00 && sa->sa_len <= sizeof(kif->kf_sa_peer)) { 2741 bcopy(sa, &kif->kf_sa_peer, sa->sa_len); 2742 free(sa, M_SONAME); 2743 } 2744 kif->kf_sock_domain = 2745 so->so_proto->pr_domain->dom_family; 2746 kif->kf_sock_type = so->so_type; 2747 kif->kf_sock_protocol = so->so_proto->pr_protocol; 2748 } 2749 if (tp != NULL) { 2750 strlcpy(kif->kf_path, tty_devname(tp), 2751 sizeof(kif->kf_path)); 2752 } 2753 error = SYSCTL_OUT(req, kif, sizeof(*kif)); 2754 if (error) 2755 break; 2756 } 2757 FILEDESC_SUNLOCK(fdp); 2758 fddrop(fdp); 2759 free(kif, M_TEMP); 2760 return (0); 2761 } 2762 2763 static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc, CTLFLAG_RD, 2764 sysctl_kern_proc_ofiledesc, "Process ofiledesc entries"); 2765 #endif /* COMPAT_FREEBSD7 */ 2766 2767 #ifdef KINFO_FILE_SIZE 2768 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE); 2769 #endif 2770 2771 static int 2772 export_vnode_for_sysctl(struct vnode *vp, int type, 2773 struct kinfo_file *kif, struct filedesc *fdp, struct sysctl_req *req) 2774 { 2775 int error; 2776 char *fullpath, *freepath; 2777 int vfslocked; 2778 2779 bzero(kif, sizeof(*kif)); 2780 2781 vref(vp); 2782 kif->kf_fd = type; 2783 kif->kf_type = KF_TYPE_VNODE; 2784 /* This function only handles directories. */ 2785 if (vp->v_type != VDIR) { 2786 vrele(vp); 2787 return (ENOTDIR); 2788 } 2789 kif->kf_vnode_type = KF_VTYPE_VDIR; 2790 2791 /* 2792 * This is not a true file descriptor, so we set a bogus refcount 2793 * and offset to indicate these fields should be ignored. 2794 */ 2795 kif->kf_ref_count = -1; 2796 kif->kf_offset = -1; 2797 2798 freepath = NULL; 2799 fullpath = "-"; 2800 FILEDESC_SUNLOCK(fdp); 2801 vn_fullpath(curthread, vp, &fullpath, &freepath); 2802 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2803 vrele(vp); 2804 VFS_UNLOCK_GIANT(vfslocked); 2805 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); 2806 if (freepath != NULL) 2807 free(freepath, M_TEMP); 2808 /* Pack record size down */ 2809 kif->kf_structsize = offsetof(struct kinfo_file, kf_path) + 2810 strlen(kif->kf_path) + 1; 2811 kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t)); 2812 error = SYSCTL_OUT(req, kif, kif->kf_structsize); 2813 FILEDESC_SLOCK(fdp); 2814 return (error); 2815 } 2816 2817 /* 2818 * Get per-process file descriptors for use by procstat(1), et al. 
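 *
 * Unlike the COMPAT_FREEBSD7 handler above, the records emitted here are
 * packed: each entry's kf_structsize holds its real length (the record
 * is cut after the NUL of kf_path and rounded up to a multiple of 64
 * bits), so a consumer must advance by kf_structsize rather than by
 * sizeof(struct kinfo_file).  A hedged userland sketch, assuming the
 * usual sysctl(3) size-then-fetch idiom and omitting error handling:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid };
 *	size_t len;
 *	char *buf, *p;
 *	struct kinfo_file *kf;
 *
 *	sysctl(mib, 4, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctl(mib, 4, buf, &len, NULL, 0);
 *	p = buf;
 *	while (p < buf + len) {
 *		kf = (struct kinfo_file *)(void *)p;
 *		printf("%d %s\n", kf->kf_fd, kf->kf_path);
 *		p += kf->kf_structsize;
 *	}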
2819 */ 2820 static int 2821 sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS) 2822 { 2823 char *fullpath, *freepath; 2824 struct kinfo_file *kif; 2825 struct filedesc *fdp; 2826 int error, i, *name; 2827 struct socket *so; 2828 struct vnode *vp; 2829 struct file *fp; 2830 struct proc *p; 2831 struct tty *tp; 2832 int vfslocked; 2833 2834 name = (int *)arg1; 2835 if ((p = pfind((pid_t)name[0])) == NULL) 2836 return (ESRCH); 2837 if ((error = p_candebug(curthread, p))) { 2838 PROC_UNLOCK(p); 2839 return (error); 2840 } 2841 fdp = fdhold(p); 2842 PROC_UNLOCK(p); 2843 if (fdp == NULL) 2844 return (ENOENT); 2845 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK); 2846 FILEDESC_SLOCK(fdp); 2847 if (fdp->fd_cdir != NULL) 2848 export_vnode_for_sysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif, 2849 fdp, req); 2850 if (fdp->fd_rdir != NULL) 2851 export_vnode_for_sysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif, 2852 fdp, req); 2853 if (fdp->fd_jdir != NULL) 2854 export_vnode_for_sysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif, 2855 fdp, req); 2856 for (i = 0; i < fdp->fd_nfiles; i++) { 2857 if ((fp = fdp->fd_ofiles[i]) == NULL) 2858 continue; 2859 bzero(kif, sizeof(*kif)); 2860 vp = NULL; 2861 so = NULL; 2862 tp = NULL; 2863 kif->kf_fd = i; 2864 switch (fp->f_type) { 2865 case DTYPE_VNODE: 2866 kif->kf_type = KF_TYPE_VNODE; 2867 vp = fp->f_vnode; 2868 break; 2869 2870 case DTYPE_SOCKET: 2871 kif->kf_type = KF_TYPE_SOCKET; 2872 so = fp->f_data; 2873 break; 2874 2875 case DTYPE_PIPE: 2876 kif->kf_type = KF_TYPE_PIPE; 2877 break; 2878 2879 case DTYPE_FIFO: 2880 kif->kf_type = KF_TYPE_FIFO; 2881 vp = fp->f_vnode; 2882 vref(vp); 2883 break; 2884 2885 case DTYPE_KQUEUE: 2886 kif->kf_type = KF_TYPE_KQUEUE; 2887 break; 2888 2889 case DTYPE_CRYPTO: 2890 kif->kf_type = KF_TYPE_CRYPTO; 2891 break; 2892 2893 case DTYPE_MQUEUE: 2894 kif->kf_type = KF_TYPE_MQUEUE; 2895 break; 2896 2897 case DTYPE_SHM: 2898 kif->kf_type = KF_TYPE_SHM; 2899 break; 2900 2901 case DTYPE_SEM: 2902 kif->kf_type = KF_TYPE_SEM; 2903 break; 2904 2905 case DTYPE_PTS: 2906 kif->kf_type = KF_TYPE_PTS; 2907 tp = fp->f_data; 2908 break; 2909 2910 default: 2911 kif->kf_type = KF_TYPE_UNKNOWN; 2912 break; 2913 } 2914 kif->kf_ref_count = fp->f_count; 2915 if (fp->f_flag & FREAD) 2916 kif->kf_flags |= KF_FLAG_READ; 2917 if (fp->f_flag & FWRITE) 2918 kif->kf_flags |= KF_FLAG_WRITE; 2919 if (fp->f_flag & FAPPEND) 2920 kif->kf_flags |= KF_FLAG_APPEND; 2921 if (fp->f_flag & FASYNC) 2922 kif->kf_flags |= KF_FLAG_ASYNC; 2923 if (fp->f_flag & FFSYNC) 2924 kif->kf_flags |= KF_FLAG_FSYNC; 2925 if (fp->f_flag & FNONBLOCK) 2926 kif->kf_flags |= KF_FLAG_NONBLOCK; 2927 if (fp->f_flag & O_DIRECT) 2928 kif->kf_flags |= KF_FLAG_DIRECT; 2929 if (fp->f_flag & FHASLOCK) 2930 kif->kf_flags |= KF_FLAG_HASLOCK; 2931 kif->kf_offset = fp->f_offset; 2932 if (vp != NULL) { 2933 vref(vp); 2934 switch (vp->v_type) { 2935 case VNON: 2936 kif->kf_vnode_type = KF_VTYPE_VNON; 2937 break; 2938 case VREG: 2939 kif->kf_vnode_type = KF_VTYPE_VREG; 2940 break; 2941 case VDIR: 2942 kif->kf_vnode_type = KF_VTYPE_VDIR; 2943 break; 2944 case VBLK: 2945 kif->kf_vnode_type = KF_VTYPE_VBLK; 2946 break; 2947 case VCHR: 2948 kif->kf_vnode_type = KF_VTYPE_VCHR; 2949 break; 2950 case VLNK: 2951 kif->kf_vnode_type = KF_VTYPE_VLNK; 2952 break; 2953 case VSOCK: 2954 kif->kf_vnode_type = KF_VTYPE_VSOCK; 2955 break; 2956 case VFIFO: 2957 kif->kf_vnode_type = KF_VTYPE_VFIFO; 2958 break; 2959 case VBAD: 2960 kif->kf_vnode_type = KF_VTYPE_VBAD; 2961 break; 2962 default: 2963 kif->kf_vnode_type = KF_VTYPE_UNKNOWN; 2964 break; 2965 
} 2966 /* 2967 * It is OK to drop the filedesc lock here as we will 2968 * re-validate and re-evaluate its properties when 2969 * the loop continues. 2970 */ 2971 freepath = NULL; 2972 fullpath = "-"; 2973 FILEDESC_SUNLOCK(fdp); 2974 vn_fullpath(curthread, vp, &fullpath, &freepath); 2975 vfslocked = VFS_LOCK_GIANT(vp->v_mount); 2976 vrele(vp); 2977 VFS_UNLOCK_GIANT(vfslocked); 2978 strlcpy(kif->kf_path, fullpath, 2979 sizeof(kif->kf_path)); 2980 if (freepath != NULL) 2981 free(freepath, M_TEMP); 2982 FILEDESC_SLOCK(fdp); 2983 } 2984 if (so != NULL) { 2985 struct sockaddr *sa; 2986 2987 if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa) 2988 == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) { 2989 bcopy(sa, &kif->kf_sa_local, sa->sa_len); 2990 free(sa, M_SONAME); 2991 } 2992 if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa) 2993 == 00 && sa->sa_len <= sizeof(kif->kf_sa_peer)) { 2994 bcopy(sa, &kif->kf_sa_peer, sa->sa_len); 2995 free(sa, M_SONAME); 2996 } 2997 kif->kf_sock_domain = 2998 so->so_proto->pr_domain->dom_family; 2999 kif->kf_sock_type = so->so_type; 3000 kif->kf_sock_protocol = so->so_proto->pr_protocol; 3001 } 3002 if (tp != NULL) { 3003 strlcpy(kif->kf_path, tty_devname(tp), 3004 sizeof(kif->kf_path)); 3005 } 3006 /* Pack record size down */ 3007 kif->kf_structsize = offsetof(struct kinfo_file, kf_path) + 3008 strlen(kif->kf_path) + 1; 3009 kif->kf_structsize = roundup(kif->kf_structsize, 3010 sizeof(uint64_t)); 3011 error = SYSCTL_OUT(req, kif, kif->kf_structsize); 3012 if (error) 3013 break; 3014 } 3015 FILEDESC_SUNLOCK(fdp); 3016 fddrop(fdp); 3017 free(kif, M_TEMP); 3018 return (0); 3019 } 3020 3021 static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc, CTLFLAG_RD, 3022 sysctl_kern_proc_filedesc, "Process filedesc entries"); 3023 3024 #ifdef DDB 3025 /* 3026 * For the purposes of debugging, generate a human-readable string for the 3027 * file type. 3028 */ 3029 static const char * 3030 file_type_to_name(short type) 3031 { 3032 3033 switch (type) { 3034 case 0: 3035 return ("zero"); 3036 case DTYPE_VNODE: 3037 return ("vnod"); 3038 case DTYPE_SOCKET: 3039 return ("sock"); 3040 case DTYPE_PIPE: 3041 return ("pipe"); 3042 case DTYPE_FIFO: 3043 return ("fifo"); 3044 case DTYPE_KQUEUE: 3045 return ("kque"); 3046 case DTYPE_CRYPTO: 3047 return ("crpt"); 3048 case DTYPE_MQUEUE: 3049 return ("mque"); 3050 case DTYPE_SHM: 3051 return ("shm"); 3052 case DTYPE_SEM: 3053 return ("ksem"); 3054 default: 3055 return ("unkn"); 3056 } 3057 } 3058 3059 /* 3060 * For the purposes of debugging, identify a process (if any, perhaps one of 3061 * many) that references the passed file in its file descriptor array. Return 3062 * NULL if none. 
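 *
 * The result is only consumed by the DDB `show file' and `show files'
 * commands below, which use it to tag each struct file with an owning
 * pid and command name, so the first match is good enough.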
3063 */ 3064 static struct proc * 3065 file_to_first_proc(struct file *fp) 3066 { 3067 struct filedesc *fdp; 3068 struct proc *p; 3069 int n; 3070 3071 FOREACH_PROC_IN_SYSTEM(p) { 3072 if (p->p_state == PRS_NEW) 3073 continue; 3074 fdp = p->p_fd; 3075 if (fdp == NULL) 3076 continue; 3077 for (n = 0; n < fdp->fd_nfiles; n++) { 3078 if (fp == fdp->fd_ofiles[n]) 3079 return (p); 3080 } 3081 } 3082 return (NULL); 3083 } 3084 3085 static void 3086 db_print_file(struct file *fp, int header) 3087 { 3088 struct proc *p; 3089 3090 if (header) 3091 db_printf("%8s %4s %8s %8s %4s %5s %6s %8s %5s %12s\n", 3092 "File", "Type", "Data", "Flag", "GCFl", "Count", 3093 "MCount", "Vnode", "FPID", "FCmd"); 3094 p = file_to_first_proc(fp); 3095 db_printf("%8p %4s %8p %08x %04x %5d %6d %8p %5d %12s\n", fp, 3096 file_type_to_name(fp->f_type), fp->f_data, fp->f_flag, 3097 0, fp->f_count, 0, fp->f_vnode, 3098 p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-"); 3099 } 3100 3101 DB_SHOW_COMMAND(file, db_show_file) 3102 { 3103 struct file *fp; 3104 3105 if (!have_addr) { 3106 db_printf("usage: show file <addr>\n"); 3107 return; 3108 } 3109 fp = (struct file *)addr; 3110 db_print_file(fp, 1); 3111 } 3112 3113 DB_SHOW_COMMAND(files, db_show_files) 3114 { 3115 struct filedesc *fdp; 3116 struct file *fp; 3117 struct proc *p; 3118 int header; 3119 int n; 3120 3121 header = 1; 3122 FOREACH_PROC_IN_SYSTEM(p) { 3123 if (p->p_state == PRS_NEW) 3124 continue; 3125 if ((fdp = p->p_fd) == NULL) 3126 continue; 3127 for (n = 0; n < fdp->fd_nfiles; ++n) { 3128 if ((fp = fdp->fd_ofiles[n]) == NULL) 3129 continue; 3130 db_print_file(fp, header); 3131 header = 0; 3132 } 3133 } 3134 } 3135 #endif 3136 3137 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 3138 &maxfilesperproc, 0, "Maximum files allowed open per process"); 3139 3140 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 3141 &maxfiles, 0, "Maximum number of files"); 3142 3143 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 3144 __DEVOLATILE(int *, &openfiles), 0, "System-wide number of open files"); 3145 3146 /* ARGSUSED*/ 3147 static void 3148 filelistinit(void *dummy) 3149 { 3150 3151 file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL, 3152 NULL, NULL, UMA_ALIGN_PTR, 0); 3153 mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF); 3154 mtx_init(&fdesc_mtx, "fdesc", NULL, MTX_DEF); 3155 } 3156 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL); 3157 3158 /*-------------------------------------------------------------------*/ 3159 3160 static int 3161 badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) 3162 { 3163 3164 return (EBADF); 3165 } 3166 3167 static int 3168 badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td) 3169 { 3170 3171 return (EINVAL); 3172 } 3173 3174 static int 3175 badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td) 3176 { 3177 3178 return (EBADF); 3179 } 3180 3181 static int 3182 badfo_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td) 3183 { 3184 3185 return (0); 3186 } 3187 3188 static int 3189 badfo_kqfilter(struct file *fp, struct knote *kn) 3190 { 3191 3192 return (EBADF); 3193 } 3194 3195 static int 3196 badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) 3197 { 3198 3199 return (EBADF); 3200 } 3201 3202 static int 3203 badfo_close(struct file *fp, struct thread *td) 3204 { 3205 3206 
return (EBADF); 3207 } 3208 3209 struct fileops badfileops = { 3210 .fo_read = badfo_readwrite, 3211 .fo_write = badfo_readwrite, 3212 .fo_truncate = badfo_truncate, 3213 .fo_ioctl = badfo_ioctl, 3214 .fo_poll = badfo_poll, 3215 .fo_kqfilter = badfo_kqfilter, 3216 .fo_stat = badfo_stat, 3217 .fo_close = badfo_close, 3218 }; 3219 3220 3221 /*-------------------------------------------------------------------*/ 3222 3223 /* 3224 * File Descriptor pseudo-device driver (/dev/fd/). 3225 * 3226 * Opening minor device N dup()s the file (if any) connected to file 3227 * descriptor N belonging to the calling process. Note that this driver 3228 * consists of only the ``open()'' routine, because all subsequent 3229 * references to this file will be direct to the other driver. 3230 * 3231 * XXX: we could give this one a cloning event handler if necessary. 3232 */ 3233 3234 /* ARGSUSED */ 3235 static int 3236 fdopen(struct cdev *dev, int mode, int type, struct thread *td) 3237 { 3238 3239 /* 3240 * XXX Kludge: set curthread->td_dupfd to contain the value of the 3241 * the file descriptor being sought for duplication. The error 3242 * return ensures that the vnode for this device will be released 3243 * by vn_open. Open will detect this special error and take the 3244 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN 3245 * will simply report the error. 3246 */ 3247 td->td_dupfd = dev2unit(dev); 3248 return (ENODEV); 3249 } 3250 3251 static struct cdevsw fildesc_cdevsw = { 3252 .d_version = D_VERSION, 3253 .d_open = fdopen, 3254 .d_name = "FD", 3255 }; 3256 3257 static void 3258 fildesc_drvinit(void *unused) 3259 { 3260 struct cdev *dev; 3261 3262 dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0"); 3263 make_dev_alias(dev, "stdin"); 3264 dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1"); 3265 make_dev_alias(dev, "stdout"); 3266 dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2"); 3267 make_dev_alias(dev, "stderr"); 3268 } 3269 3270 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL); 3271
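/*
 * For reference, a hedged sketch of the other half of the td_dupfd dance
 * described above fdopen(): the open path (which lives outside this file)
 * is expected to notice an ENODEV or ENXIO failure together with a
 * non-negative td_dupfd and hand the error to dupfdopen(), roughly:
 *
 *	if (error != 0 && td->td_dupfd >= 0 &&
 *	    (error == ENODEV || error == ENXIO) &&
 *	    dupfdopen(td, fdp, indx, td->td_dupfd, flags, error) == 0)
 *		error = 0;
 *
 * Only the dupfdopen() signature used here is authoritative in this file;
 * the exact shape of the caller belongs to the open code.
 */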