/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 * $Id: vfs_subr.c,v 1.14 1995/01/09 16:04:54 davidg Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

void	insmntque __P((struct vnode *, struct mount *));

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
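
/*
 * These two tables back the IFTOVT() and VTTOIF() conversion macros in
 * <sys/vnode.h>, mapping between stat-style S_IF* mode bits and vnode
 * types.  For example, IFTOVT(S_IFDIR) yields VDIR and VTTOIF(VREG)
 * yields S_IFREG.
 */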

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {						\
	LIST_REMOVE(bp, b_vnbufs);				\
	(bp)->b_vnbufs.le_next = NOLIST;			\
}

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct mntlist mountlist;			/* mounted filesystem list */

int desiredvnodes;

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{
	extern int vm_object_cache_max;

	desiredvnodes = maxproc + vm_object_cache_max;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
int
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t) mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
int
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t) &mp->mnt_flag);
	}
}

void
vfs_unmountroot(rootfs)
	struct mount *rootfs;
{
	struct mount *mp = rootfs;
	int error;

	if (vfs_busy(mp)) {
		printf("failed to unmount root\n");
		return;
	}
	mp->mnt_flag |= MNT_UNMOUNT;
	if ((error = vfs_lock(mp))) {
		printf("lock of root filesystem failed (%d)\n", error);
		return;
	}
	vnode_pager_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */

	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
		printf("sync of root filesystem failed (%d)\n", error);

	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
		printf("unmount of root filesystem failed (");
		if (error == EBUSY)
			printf("BUSY)\n");
		else
			printf("%d)\n", error);
	}
	mp->mnt_flag &= ~MNT_UNMOUNT;
	vfs_unbusy(mp);
}
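
/*
 * Usage sketch (illustrative, not an interface defined here): code
 * that walks the vnodes of a mount point is expected to bracket the
 * traversal with vfs_busy()/vfs_unbusy(), skipping mounts that are
 * being unmounted:
 *
 *	if (vfs_busy(mp))
 *		continue;		-- unmount in progress
 *	... walk mp->mnt_vnodelist ...
 *	vfs_unbusy(mp);
 *
 * sysctl_vnode() below follows exactly this pattern.
 */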

/*
 * Unmount all filesystems. Should only be called by halt().
 */
void
vfs_unmountall()
{
	struct mount *mp, *mp_next, *rootfs = NULL;
	int error;

	/* unmount all but rootfs */
	for (mp = mountlist.tqh_first; mp != NULL; mp = mp_next) {
		mp_next = mp->mnt_list.tqe_next;

		if (mp->mnt_flag & MNT_ROOTFS) {
			rootfs = mp;
			continue;
		}
		error = dounmount(mp, MNT_FORCE, initproc);
		if (error) {
			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}

	/* and finally... */
	if (rootfs) {
		vfs_unmountroot(rootfs);
	} else {
		printf("no root filesystem\n");
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	}
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.tqh_first != NULL) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
	    vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
	    vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern int (**dead_vnodeop_p) ();
extern void vclean();
long numvnodes;
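
/*
 * Usage sketch (illustrative; the UFS identifiers are examples from
 * other files, not part of this one): a filesystem's inode-allocation
 * path obtains a fresh or recycled vnode from getnewvnode() and then
 * attaches its private data:
 *
 *	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;		-- per-file private data
 *
 * The tag, operations vector, and mount point come from the caller;
 * everything else is (re)initialized by getnewvnode() itself.
 */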

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops) ();
	struct vnode **vpp;
{
	register struct vnode *vp;

	if (vnode_free_list.tqh_first == NULL ||
	    numvnodes < desiredvnodes) {
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		numvnodes++;
	} else {
		if ((vp = vnode_free_list.tqh_first) == NULL) {
			tablefull("vnode");
			*vpp = 0;
			return (ENFILE);
		}
		if (vp->v_usecount)
			panic("free vnode isn't");

		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if (vp->v_flag & VBWAIT) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}
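
/*
 * Sketch (illustrative): v_numoutput counts writes in progress; it is
 * incremented by the code that starts a write and decremented in
 * vwakeup() above on completion, so a thread can drain pending output
 * with:
 *
 *	s = splbio();
 *	while (vp->v_numoutput > 0) {
 *		vp->v_flag |= VBWAIT;
 *		tsleep((caddr_t) &vp->v_numoutput, PVM, "drain", 0);
 *	}
 *	splx(s);
 *
 * vinvalbuf() below uses this pattern before tearing buffers down.
 */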

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_pager_t pager;
	vm_object_t object;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}

	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}
	splx(s);

	pager = NULL;
	object = (vm_object_t) vp->v_vmdata;
	if (object != NULL)
		pager = object->pager;
	if (pager != NULL) {
		object = vm_object_lookup(pager);
		if (object) {
			vm_object_lock(object);
			if (flags & V_SAVE)
				vm_object_page_clean(object, 0, 0, TRUE, FALSE);
			vm_object_page_remove(object, 0, object->size);
			vm_object_unlock(object);
			vm_object_deallocate(object);
		}
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	s = splbio();
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	splx(s);

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}
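
/*
 * Sketch (illustrative): bgetvp() and brelvp() are a strict pair.
 * The buffer cache ties a buffer to a vnode for the life of an I/O
 * and breaks the association afterwards:
 *
 *	bgetvp(vp, bp);		-- VHOLD(vp), bp onto v_cleanblkhd
 *	... perform the I/O; reassignbuf() if the buffer is dirtied ...
 *	brelvp(bp);		-- off the vnode list, HOLDRELE(vp)
 */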

/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	if (bp->b_vp)
		panic("pbgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("pbrelvp: NULL");

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL\n");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			while (tbp->b_vnbufs.le_next &&
			    (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		listheadp = &newvp->v_cleanblkhd;
		bufinsvn(bp, listheadp);
	}
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
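
/*
 * Usage sketch (illustrative): early in bootstrap, before any
 * filesystem is mounted, the kernel wraps the root and swap devices
 * in vnodes this way:
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot get root vnode");
 */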

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for another
	 * use, we wait for the cleaning to finish and then return failure.
	 * Cleaning is determined either by checking that the VXLOCK flag is
	 * set, or that the use count is zero with the back pointer set to
	 * show that it has been removed from the free list by getnewvnode.
	 * The VXLOCK flag may not have been set yet because vclean is blocked
	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0)
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount != 0 /* || vp->v_writecount != 0 */ ) {
		vprint("vrele: bad ref count", vp);
		panic("vrele: ref cnt");
	}
#endif
	/*
	 * insert at tail of LRU list
	 */
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	VOP_INACTIVE(vp);
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
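
/*
 * Reference-count discipline, in brief (summary added for clarity):
 *
 *	vget(vp, lock)	take a vnode that may be on the free list
 *	vref(vp)	add a reference to an already-referenced vnode
 *	vput(vp)	VOP_UNLOCK() followed by vrele()
 *	vrele(vp)	drop a reference; the last one puts the vnode
 *			back on the tail of the LRU free list
 *	vhold/holdrele	secondary count taken by pages and buffers
 *
 * Only v_usecount keeps a vnode's identity stable; a holder of just
 * v_holdcnt must be prepared for the vnode to be recycled by vgone().
 */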

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
int busyprt = 0;	/* print out busy vnodes */
struct ctldebug debug1 = {"busyprt", &busyprt};
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
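
/*
 * Usage sketch (illustrative): a filesystem's unmount path flushes
 * every vnode on the mount except an optional "skip" vnode such as a
 * quota or device vnode, adding FORCECLOSE for forced unmounts:
 *
 *	if ((error = vflush(mp, skipvp, flags)))
 *		return (error);
 */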

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(vp, flags)
	register struct vnode *vp;
	int flags;
{
	int active;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still have
	 * the object locked while it cleans it out. The VOP_LOCK ensures that
	 * the VOP_INACTIVE routine is done with its work. For active vnodes,
	 * it ensures that no other activity can occur while the underlying
	 * object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first wait for
	 * VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and deactivated
	 * before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, IO_NDELAY, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress, wait until
		 * it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we are eliminating
		 * its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will really eliminate
		 * the vnode after which time vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
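
/*
 * Sketch (illustrative): revoke(2)-style callers use vgoneall() so
 * that every alias of a terminal or other device node is eliminated
 * at once:
 *
 *	if (vp->v_usecount > 1 || (vp->v_flag & VALIASED))
 *		vgoneall(vp);
 */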

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress, wait until it is
	 * done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head, move it to
	 * the head of the list. The test of the back pointer and the
	 * reference count of zero is because it will be removed from the free
	 * list by getnewvnode, but will not have its reference count
	 * incremented until after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to close the
	 * previous instance of the underlying object. So, the back pointer is
	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
	 * the freelist to ensure that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %ld,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DEBUG
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes()
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.tqh_first; mp != NULL; mp = mp->mnt_list.tqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10
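
/*
 * Sketch (illustrative): sysctl_vnode() below supports the usual
 * two-call sysctl protocol.  A NULL buffer asks only for a size
 * estimate, padded by KINFO_VNODESLOP entries to allow for growth
 * between the calls:
 *
 *	sysctl_vnode(NULL, &size);	-- returns the space needed
 *	buf = malloc(size, ...);
 *	sysctl_vnode(buf, &size);	-- fills buf with (ptr, vnode) pairs
 */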

/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
int
sysctl_vnode(where, sizep)
	char *where;
	size_t *sizep;
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	register char *bp = where, *savebp;
	char *ewhere;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)
	if (where == NULL) {
		*sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ);
		return (0);
	}
	ewhere = where + *sizep;

	for (mp = mountlist.tqh_first; mp != NULL; mp = nmp) {
		nmp = mp->mnt_list.tqe_next;
		if (vfs_busy(mp))
			continue;
		savebp = bp;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem. RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				bp = savebp;
				goto again;
			}
			if (bp + VPTRSZ + VNODESZ > ewhere) {
				*sizep = bp - where;
				return (ENOMEM);
			}
			if ((error = copyout((caddr_t) &vp, bp, VPTRSZ)) ||
			    (error = copyout((caddr_t) vp, bp + VPTRSZ, VNODESZ)))
				return (error);
			bp += VPTRSZ + VNODESZ;
		}
		vfs_unbusy(mp);
	}

	*sizep = bp - where;
	return (0);
}

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		/* copy the mask from ex_mask, not the address again */
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	caddr_t w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}
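
/*
 * Usage sketch (illustrative): the NFS server checks each incoming
 * request's source address against the export information built by
 * vfs_export() before honoring it:
 *
 *	np = vfs_export_lookup(mp, nep, nam);
 *	if (np == NULL)
 *		return (EACCES);	-- host not exported to
 *	cred = &np->netc_anon;		-- or map according to netc_exflags
 */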