/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 * $Id: vfs_subr.c,v 1.36 1995/08/25 20:49:44 bde Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

void	insmntque __P((struct vnode *, struct mount *));

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * Insq/Remq for the vnode usage lists.
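 *
 * bufinsvn() puts a buffer at the head of a vnode's clean or dirty
 * buffer list; bufremvn() removes it again and marks the link NOLIST
 * so that brelvp() and reassignbuf() can tell the buffer is no longer
 * on any vnode list.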
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {  \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
u_long freevnodes = 0;

struct mntlist mountlist;	/* mounted filesystem list */

int desiredvnodes;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{
	desiredvnodes = maxproc + vm_object_cache_max;

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
int
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t) mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
int
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t) &mp->mnt_flag);
	}
}

void
vfs_unmountroot(rootfs)
	struct mount *rootfs;
{
	struct mount *mp = rootfs;
	int error;

	if (vfs_busy(mp)) {
		printf("failed to unmount root\n");
		return;
	}
	mp->mnt_flag |= MNT_UNMOUNT;
	if ((error = vfs_lock(mp))) {
		printf("lock of root filesystem failed (%d)\n", error);
		return;
	}
	vnode_pager_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */

	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
		printf("sync of root filesystem failed (%d)\n", error);

	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
		printf("unmount of root filesystem failed (");
		if (error == EBUSY)
			printf("BUSY)\n");
		else
			printf("%d)\n", error);
	}
	mp->mnt_flag &= ~MNT_UNMOUNT;
	vfs_unbusy(mp);
}

/*
 * Unmount all filesystems.  Should only be called by halt().
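 *
 * Everything except the root filesystem is unmounted first, in reverse
 * mount order; the root is then taken down last via vfs_unmountroot().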
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp, *rootfs = NULL;
	int error;

	/* unmount all but rootfs */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;

		if (mp->mnt_flag & MNT_ROOTFS) {
			rootfs = mp;
			continue;
		}
		error = dounmount(mp, MNT_FORCE, initproc);
		if (error) {
			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}

	/* and finally... */
	if (rootfs) {
		vfs_unmountroot(rootfs);
	} else {
		printf("no root filesystem\n");
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	}
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
	    vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
	    vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) ();
extern void vclean();

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) ();
	struct vnode **vpp;
{
	register struct vnode *vp;

	vp = vnode_free_list.tqh_first;
	/*
	 * we allocate a new vnode if
	 * 1. we don't have any free
	 *    Pretty obvious, we actually used to panic, but that
	 *    is a silly thing to do.
	 * 2. we haven't filled our pool yet
	 *    We don't want to trash the incore (VM-)vnodecache.
	 * 3. if less than 1/4th of our vnodes are free.
	 *    We don't want to trash the namei cache either.
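	 *
	 * (numvnodes counts every vnode allocated here; freevnodes counts
	 * only those currently sitting on the free list.)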
	 */
	if (freevnodes < (numvnodes >> 2) ||
	    numvnodes < desiredvnodes ||
	    vp == NULL) {
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		numvnodes++;
	} else {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;

		if (vp->v_usecount)
			panic("free vnode isn't");

		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
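 *
 * With V_SAVE, dirty buffers are first written out via VOP_FSYNC();
 * with V_SAVEMETA, buffers holding metadata (negative logical block
 * numbers, i.e. indirect blocks) are left attached to the vnode.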
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
			brelse(bp);
		}
	}

	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}
	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, object->size,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	s = splbio();
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	splx(s);

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	if (bp->b_vp)
		panic("pbgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
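 * Unlike brelvp(), a p-buffer is never on the vnode's clean or dirty
 * buffer lists, so only the vnode pointer and the hold count are
 * released here.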
 */
void
pbrelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("pbrelvp: NULL");

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			while (tbp->b_vnbufs.le_next &&
			    (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		listheadp = &newvp->v_cleanblkhd;
		bufinsvn(bp, listheadp);
	}
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device).  If such an alias exists, deallocate
 * the existing contents and return the aliased vnode.  The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
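		 * (vgone() may sleep, which is why the hash chain scan
		 * restarts at `loop' afterwards.)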
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  The vnode lock bit is set if the
 * vnode is being eliminated in vgone.  The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for another
	 * use, we wait for the cleaning to finish and then return failure.
	 * Cleaning is determined either by checking that the VXLOCK flag is
	 * set, or that the use count is zero with the back pointer set to
	 * show that it has been removed from the free list by getnewvnode.
	 * The VXLOCK flag may not have been set yet because vclean is blocked
	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
		vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
	}
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 /* || vp->v_writecount < 0 */ ) {
		vprint("vrele: negative ref count", vp);
		panic("vrele: negative reference cnt");
	}
#endif
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		vp->v_flag &= ~VAGE;
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;

	VOP_INACTIVE(vp);
}

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
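 *
 * vhold() and holdrele() are only compiled in under DIAGNOSTIC; callers
 * in this file go through the VHOLD()/HOLDRELE() macros.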
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
int busyprt = 0;		/* print out busy vnodes */
struct ctldebug debug1 = {"busyprt", &busyprt};

#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.  For block
		 * or character devices, revert to an anonymous device.  For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	int active;

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still have
	 * the object locked while it cleans it out.  The VOP_LOCK ensures that
	 * the VOP_INACTIVE routine is done with its work.  For active vnodes,
	 * it ensures that no other activity can occur while the underlying
	 * object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
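	 * (only when DOCLOSE was requested; V_SAVE makes vinvalbuf() push
	 * any dirty data to disk first).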
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first wait for
	 * VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and deactivated
	 * before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress, wait until
		 * it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we are eliminating
		 * its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will really eliminate
		 * the vnode after which time vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress, wait until it is
	 * done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
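	 * If this leaves only one alias on the hash chain, the VALIASED
	 * flag is cleared on the survivor as well.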
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head, move it to
	 * the head of the list.  The test of the back pointer and the
	 * reference count of zero is because it will be removed from the free
	 * list by getnewvnode, but will not have its reference count
	 * incremented until after calling vgone.  If the reference count were
	 * incremented first, vgone would (incorrectly) try to close the
	 * previous instance of the underlying object.  So, the back pointer is
	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
	 * the freelist to ensure that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

/*
 * Print out a description of a vnode.
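 * The vnode type, reference counts and decoded v_flag bits are printed,
 * followed by the filesystem's own VOP_PRINT() output when v_data is set.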
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %ld,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep)
	char *where;
	size_t *sizep;
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
		savebp = bp;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem.  RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				vfs_unbusy(mp);
				*sizep = bp - where;
				return (ENOMEM);
			}
			if ((error = copyout(&vp, bp, VPTRSZ)) ||
			    (error = copyout(vp, bp + VPTRSZ, VNODESZ))) {
				vfs_unbusy(mp);
				*sizep = bp - where;
				return (error);
			}
			bp += VPTRSZ + VNODESZ;
		}
		vfs_unbusy(mp);
	}

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
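 * Aliases of the device vnode are checked as well, since the mount may
 * have been done through a different vnode for the same device.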
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	caddr_t w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
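 * Each per-address-family radix tree is walked with vfs_free_netcred()
 * to release the individual netcred entries before the tree itself is
 * freed.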
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
					rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp;

loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	    vp != NULL;
	    vp = vp->v_mntvnodes.le_next) {

		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
			continue;
		if (vp->v_object &&
		    (((vm_object_t) vp->v_object)->flags & OBJ_WRITEABLE)) {
			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
		}
	}
}