/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $Id: vfs_subr.c,v 1.108 1997/10/11 07:34:27 phk Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_devfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/domain.h>
#include <sys/dirent.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

#ifdef DDB
extern void	printlockedvnodes __P((void));
#endif
static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
static void	vgonel __P((struct vnode *vp, struct proc *p));
unsigned long	numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
static void	vputrele __P((struct vnode *vp, int put));

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
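
/*
 * The two tables above back the IFTOVT() and VTTOIF() macros in
 * <sys/vnode.h>.  A minimal sketch of the conversion they perform
 * (illustrative only; the "example_" helpers are hypothetical):
 */
#if 0
static enum vtype
example_mode_to_vtype(mode_t mode)
{

	/* Index by the file-type bits of st_mode; S_IFDIR >> 12 == 4 -> VDIR. */
	return (iftovt_tab[(mode & S_IFMT) >> 12]);
}

static mode_t
example_vtype_to_mode(enum vtype type)
{

	/* The inverse mapping, e.g. VREG -> S_IFREG. */
	return (vttoif_tab[(int)type]);
}
#endif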

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {						\
	LIST_REMOVE(bp, b_vnbufs);				\
	(bp)->b_vnbufs.le_next = NOLIST;			\
}
TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
static u_long wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
static u_long freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

struct mntlist mountlist;	/* mounted filesystem list */
struct simplelock mountlist_slock;
static struct simplelock mntid_slock;
struct simplelock mntvnode_slock;
struct simplelock vnode_free_list_slock;
static struct simplelock spechash_slock;
struct nfs_public nfs_pub;	/* publicly exported FS */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	desiredvnodes = maxproc + vm_object_cache_max;
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_free_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, p)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int lkflags;

	if (mp->mnt_flag & MNT_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_flag |= MNT_MWAIT;
		if (interlkp) {
			simple_unlock(interlkp);
		}
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		if (interlkp) {
			simple_lock(interlkp);
		}
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, p)
	struct mount *mp;
	struct proc *p;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}
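
/*
 * A minimal sketch of how callers are expected to pair vfs_busy() and
 * vfs_unbusy() while walking the mount list.  The walker below is
 * hypothetical; the real consumers of this pattern live elsewhere
 * (e.g. in vfs_syscalls.c):
 */
#if 0
static void
example_mountlist_walk(struct proc *p)
{
	struct mount *mp, *nmp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		/*
		 * Skip mounts that are being unmounted; on failure the
		 * interlock is still held, on success it has been released.
		 */
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		/* ... operate on the busied mount here ... */
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif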

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) __P((void));
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}
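
/*
 * A minimal sketch of looking a mount point back up from an fsid, as a
 * file-handle based consumer (for instance the NFS server path) would do.
 * The helper shown here is hypothetical:
 */
#if 0
static int
example_fsid_lookup(fsid_t *fsidp, struct mount **mpp)
{
	struct mount *mp;

	if ((mp = vfs_getvfs(fsidp)) == NULL)
		return (ESTALE);	/* fsid is no longer mounted */
	*mpp = mp;
	return (0);
}
#endif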

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
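
/*
 * A minimal sketch of the usual caller pattern: clear a struct vattr with
 * vattr_null() (or the VATTR_NULL() macro) so that only the fields being
 * changed are significant to VOP_SETATTR().  The truncation helper below
 * is hypothetical:
 */
#if 0
static int
example_truncate(struct vnode *vp, struct ucred *cred, struct proc *p)
{
	struct vattr vat;

	vattr_null(&vat);	/* every field starts out as VNOVAL */
	vat.va_size = 0;	/* ...except the one we want to change */
	return (VOP_SETATTR(vp, &vat, cred, p));
}
#endif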

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;

	/*
	 * We take the least recently used vnode from the freelist
	 * if we can get it and it has no cached pages, and no
	 * namecache entries are relative to it.
	 * Otherwise we allocate a new vnode.
	 */

	simple_lock(&vnode_free_list_slock);

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;
	} else {
		TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
			if (!simple_lock_try(&vp->v_interlock))
				continue;
			if (vp->v_usecount)
				panic("free vnode isn't");

			if (vp->v_object && vp->v_object->resident_page_count) {
				/* Don't recycle if it's caching some pages */
				simple_unlock(&vp->v_interlock);
				continue;
			} else if (LIST_FIRST(&vp->v_cache_src)) {
				/* Don't recycle if active in the namecache */
				simple_unlock(&vp->v_interlock);
				continue;
			} else {
				break;
			}
		}
	}

	if (vp) {
		vp->v_flag |= VDOOMED;
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
		simple_unlock(&vnode_free_list_slock);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgonel(vp, p);
		else {
			simple_unlock(&vp->v_interlock);
		}

#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
	} else {
		simple_unlock(&vnode_free_list_slock);
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		vp->v_dd = vp;
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
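
/*
 * A minimal sketch of how a filesystem obtains a fresh vnode and attaches
 * its per-file data.  "VT_EXAMPLE", "example_vnodeop_p" and the node
 * structure are hypothetical stand-ins for a real filesystem's tag,
 * operation vector and inode:
 */
#if 0
static int
example_vget(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	struct example_node *np;
	int error;

	error = getnewvnode(VT_EXAMPLE, mp, example_vnodeop_p, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}
	/* The new vnode comes back with v_usecount == 1 and v_data == 0. */
	MALLOC(np, struct example_node *, sizeof(*np), M_TEMP, M_WAITOK);
	bzero((char *)np, sizeof(*np));
	vp->v_data = np;
	vp->v_type = VREG;
	*vpp = vp;
	return (0);
}
#endif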

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		simple_unlock(&mntvnode_slock);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}

	s = splbio();
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
			brelse(bp);
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, object->size,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
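
/*
 * A minimal sketch of the two common ways vinvalbuf() is called: V_SAVE to
 * write dirty buffers back before tossing the rest (truncate/unmount
 * style), or a flags value of 0 to simply destroy everything (revoke/
 * vclean style).  The wrapper below is hypothetical:
 */
#if 0
static int
example_purge_buffers(struct vnode *vp, struct ucred *cred, struct proc *p,
    int writeback)
{

	if (writeback)
		return (vinvalbuf(vp, V_SAVE, cred, p, 0, 0));
	return (vinvalbuf(vp, 0, cred, p, 0, 0));
}
#endif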

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	s = splbio();
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	splx(s);

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
}

/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
#if defined(DIAGNOSTIC)
	if (bp->b_vp)
		panic("pbgetvp: not free");
#endif
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

#if defined(DIAGNOSTIC)
	if (bp->b_vp == (struct vnode *) 0)
		panic("pbrelvp: NULL");
#endif

	bp->b_vp = (struct vnode *) 0;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST) {
		bufremvn(bp);
		vdrop(bp->b_vp);
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			while (tbp->b_vnbufs.le_next &&
			    (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		bufinsvn(bp, &newvp->v_cleanblkhd);
	}
	bp->b_vp = newvp;
	vhold(bp->b_vp);
	splx(s);
}

#ifndef DEVFS_ROOT
/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
#endif /* !DEVFS_ROOT */
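
/*
 * A minimal sketch of the intended bdevvp() caller: a filesystem's
 * mountroot routine turning the boot device into a vnode it can hand to
 * its mount code.  The wrapper is hypothetical; see the real mountroot
 * routines for the canonical usage:
 */
#if 0
static int
example_mountroot_devvp(struct vnode **devvpp)
{
	int error;

	/* rootdev is the dev_t the bootstrap selected for "/". */
	if ((error = bdevvp(rootdev, devvpp)) != 0) {
		printf("example_mountroot: can't setup bdevvp for root\n");
		return (error);
	}
	return (0);
}
#endif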

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
	}
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}
	vp->v_usecount++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	/*
	 * Create the VM object, if needed
	 */
	if ((vp->v_type == VREG) &&
	    ((vp->v_object == NULL) ||
	    (vp->v_object->flags & OBJ_VFS_REF) == 0 ||
	    (vp->v_object->flags & OBJ_DEAD))) {
		/*
		 * XXX vfs_object_create probably needs the interlock.
		 */
		simple_unlock(&vp->v_interlock);
		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
		simple_lock(&vp->v_interlock);
	}
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0)
			vrele(vp);
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}
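
/*
 * A minimal sketch of the usual reference/lock discipline around vget():
 * take a reference plus an exclusive lock, use the vnode, then drop both
 * with vput() (or vrele() once the lock has already been released).  The
 * helper is hypothetical:
 */
#if 0
static int
example_use_vnode(struct vnode *vp, struct proc *p)
{
	int error;

	if ((error = vget(vp, LK_EXCLUSIVE, p)) != 0)
		return (error);		/* vnode was being cleaned out */
	/* ... the vnode is referenced and locked here ... */
	vput(vp);			/* unlock and release in one call */
	return (0);
}
#endif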

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
		/*
		 * Normally, we use shared locks here, but that confuses
		 * the locking assertions.
		 */
		vnflags = LK_EXCLUSIVE;
		break;
#endif
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
		    M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK) {
		simple_unlock(&ap->a_vp->v_interlock);
	}
	return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL) {
		if (ap->a_flags & LK_INTERLOCK)
			simple_unlock(&ap->a_vp->v_interlock);
		return (0);
	}
	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
	    &ap->a_vp->v_interlock, ap->a_p));
}
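
/*
 * These stubs are meant to be plugged into the vnode operation vector of a
 * filesystem that does no locking of its own.  A minimal sketch of such a
 * vector fragment ("example_vnodeop_entries" is hypothetical; vop_noislocked
 * is defined just below):
 */
#if 0
static struct vnodeopv_entry_desc example_vnodeop_entries[] = {
	{ &vop_lock_desc,	(vop_t *) vop_nolock },
	{ &vop_unlock_desc,	(vop_t *) vop_nounlock },
	{ &vop_islocked_desc,	(vop_t *) vop_noislocked },
	{ NULL, NULL }
};
#endif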

/*
 * Return whether or not the node is locked.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock));
}

/* #ifdef DIAGNOSTIC */
/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{
	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required");

	vp->v_usecount++;

	if ((vp->v_type == VREG) &&
	    ((vp->v_object == NULL) ||
	    ((vp->v_object->flags & OBJ_VFS_REF) == 0) ||
	    (vp->v_object->flags & OBJ_DEAD))) {
		/*
		 * We need to lock the vnode during the time that
		 * the object is created. This is necessary to
		 * keep the system from re-entrantly doing it
		 * multiple times.
		 * XXX vfs_object_create probably needs the interlock?
		 */
		simple_unlock(&vp->v_interlock);
		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
		return;
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
static void
vputrele(vp, put)
	struct vnode *vp;
	int put;
{
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vputrele: null vp");
#endif
	simple_lock(&vp->v_interlock);

	if ((vp->v_usecount == 2) &&
	    vp->v_object &&
	    (vp->v_object->flags & OBJ_VFS_REF)) {
		vp->v_usecount--;
		vp->v_object->flags &= ~OBJ_VFS_REF;
		if (put) {
			VOP_UNLOCK(vp, LK_INTERLOCK, p);
		} else {
			simple_unlock(&vp->v_interlock);
		}
		vm_object_deallocate(vp->v_object);
		return;
	}

	if (vp->v_usecount > 1) {
		vp->v_usecount--;
		if (put) {
			VOP_UNLOCK(vp, LK_INTERLOCK, p);
		} else {
			simple_unlock(&vp->v_interlock);
		}
		return;
	}

	if (vp->v_usecount < 1) {
#ifdef DIAGNOSTIC
		vprint("vputrele: negative ref count", vp);
#endif
		panic("vputrele: negative ref cnt");
	}

	vp->v_usecount--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	/*
	 * If we are doing a vput, the node is already locked, and we must
	 * call VOP_INACTIVE with the node locked. So, in the case of
	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
	 */
	if (put) {
		simple_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, p);
	} else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
		VOP_INACTIVE(vp, p);
	}
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	vputrele(vp, 1);
}

void
vrele(vp)
	struct vnode *vp;
{
	vputrele(vp, 0);
}

/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	simple_unlock(&vp->v_interlock);
}
Grimes /* 1175a051452aSPoul-Henning Kamp * One less who cares about this vnode. 1176df8bae1dSRodney W. Grimes */ 117726f9a767SRodney W. Grimes void 1178a051452aSPoul-Henning Kamp vdrop(vp) 1179df8bae1dSRodney W. Grimes register struct vnode *vp; 1180df8bae1dSRodney W. Grimes { 1181df8bae1dSRodney W. Grimes 1182996c772fSJohn Dyson simple_lock(&vp->v_interlock); 1183df8bae1dSRodney W. Grimes if (vp->v_holdcnt <= 0) 1184df8bae1dSRodney W. Grimes panic("vdrop: holdcnt"); 1185df8bae1dSRodney W. Grimes vp->v_holdcnt--; 1186a051452aSPoul-Henning Kamp if (VSHOULDFREE(vp)) 1187a051452aSPoul-Henning Kamp vfree(vp); 1188996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1189df8bae1dSRodney W. Grimes } 1190df8bae1dSRodney W. Grimes 1191df8bae1dSRodney W. Grimes /* 1192df8bae1dSRodney W. Grimes * Remove any vnodes in the vnode table belonging to mount point mp. 1193df8bae1dSRodney W. Grimes * 1194df8bae1dSRodney W. Grimes * If FORCECLOSE is not specified, there should not be any active vnodes; 1195df8bae1dSRodney W. Grimes * return an error if any are found (nb: this is a user error, not a 1196df8bae1dSRodney W. Grimes * system error). If FORCECLOSE is specified, detach any active vnodes 1197df8bae1dSRodney W. Grimes * that are found. 1198df8bae1dSRodney W. Grimes */ 1199df8bae1dSRodney W. Grimes #ifdef DIAGNOSTIC 120027a0b398SPoul-Henning Kamp static int busyprt = 0; /* print out busy vnodes */ 12010f1adf65SBruce Evans SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1202df8bae1dSRodney W. Grimes #endif 1203df8bae1dSRodney W. Grimes 120426f9a767SRodney W. Grimes int 1205df8bae1dSRodney W. Grimes vflush(mp, skipvp, flags) 1206df8bae1dSRodney W. Grimes struct mount *mp; 1207df8bae1dSRodney W. Grimes struct vnode *skipvp; 1208df8bae1dSRodney W. Grimes int flags; 1209df8bae1dSRodney W. Grimes { 1210996c772fSJohn Dyson struct proc *p = curproc; /* XXX */ 1211996c772fSJohn Dyson struct vnode *vp, *nvp; 1212df8bae1dSRodney W. Grimes int busy = 0; 1213df8bae1dSRodney W. Grimes 1214996c772fSJohn Dyson simple_lock(&mntvnode_slock); 1215df8bae1dSRodney W. Grimes loop: 1216df8bae1dSRodney W. Grimes for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { 12173d2a8cf3SDavid Greenman /* 12183d2a8cf3SDavid Greenman * Make sure this vnode wasn't reclaimed in getnewvnode(). 12193d2a8cf3SDavid Greenman * Start over if it has (it won't be on the list anymore). 12203d2a8cf3SDavid Greenman */ 1221df8bae1dSRodney W. Grimes if (vp->v_mount != mp) 1222df8bae1dSRodney W. Grimes goto loop; 1223df8bae1dSRodney W. Grimes nvp = vp->v_mntvnodes.le_next; 1224df8bae1dSRodney W. Grimes /* 1225df8bae1dSRodney W. Grimes * Skip over a selected vnode. 1226df8bae1dSRodney W. Grimes */ 1227df8bae1dSRodney W. Grimes if (vp == skipvp) 1228df8bae1dSRodney W. Grimes continue; 1229996c772fSJohn Dyson 1230996c772fSJohn Dyson simple_lock(&vp->v_interlock); 1231df8bae1dSRodney W. Grimes /* 1232df8bae1dSRodney W. Grimes * Skip over vnodes marked VSYSTEM. 1233df8bae1dSRodney W. Grimes */ 1234996c772fSJohn Dyson if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1235996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1236df8bae1dSRodney W. Grimes continue; 1237996c772fSJohn Dyson } 1238df8bae1dSRodney W. Grimes /* 12390d94caffSDavid Greenman * If WRITECLOSE is set, only flush out regular file vnodes 12400d94caffSDavid Greenman * open for writing. 1241df8bae1dSRodney W. Grimes */ 1242df8bae1dSRodney W. 
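		/*
		 * Note: the vnode interlock acquired above is still held
		 * here; every path that decides to skip this vnode must
		 * drop it before moving on to the next one.
		 */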
Grimes if ((flags & WRITECLOSE) && 1243996c772fSJohn Dyson (vp->v_writecount == 0 || vp->v_type != VREG)) { 1244996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1245df8bae1dSRodney W. Grimes continue; 1246996c772fSJohn Dyson } 12476476c0d2SJohn Dyson 1248df8bae1dSRodney W. Grimes /* 12490d94caffSDavid Greenman * With v_usecount == 0, all we need to do is clear out the 12500d94caffSDavid Greenman * vnode data structures and we are done. 1251df8bae1dSRodney W. Grimes */ 1252df8bae1dSRodney W. Grimes if (vp->v_usecount == 0) { 1253996c772fSJohn Dyson simple_unlock(&mntvnode_slock); 1254996c772fSJohn Dyson vgonel(vp, p); 1255996c772fSJohn Dyson simple_lock(&mntvnode_slock); 1256df8bae1dSRodney W. Grimes continue; 1257df8bae1dSRodney W. Grimes } 1258ad980522SJohn Dyson 1259df8bae1dSRodney W. Grimes /* 12600d94caffSDavid Greenman * If FORCECLOSE is set, forcibly close the vnode. For block 12610d94caffSDavid Greenman * or character devices, revert to an anonymous device. For 12620d94caffSDavid Greenman * all other files, just kill them. 1263df8bae1dSRodney W. Grimes */ 1264df8bae1dSRodney W. Grimes if (flags & FORCECLOSE) { 1265996c772fSJohn Dyson simple_unlock(&mntvnode_slock); 1266df8bae1dSRodney W. Grimes if (vp->v_type != VBLK && vp->v_type != VCHR) { 1267996c772fSJohn Dyson vgonel(vp, p); 1268df8bae1dSRodney W. Grimes } else { 1269996c772fSJohn Dyson vclean(vp, 0, p); 1270df8bae1dSRodney W. Grimes vp->v_op = spec_vnodeop_p; 1271df8bae1dSRodney W. Grimes insmntque(vp, (struct mount *) 0); 1272df8bae1dSRodney W. Grimes } 1273996c772fSJohn Dyson simple_lock(&mntvnode_slock); 1274df8bae1dSRodney W. Grimes continue; 1275df8bae1dSRodney W. Grimes } 1276df8bae1dSRodney W. Grimes #ifdef DIAGNOSTIC 1277df8bae1dSRodney W. Grimes if (busyprt) 1278df8bae1dSRodney W. Grimes vprint("vflush: busy vnode", vp); 1279df8bae1dSRodney W. Grimes #endif 1280996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1281df8bae1dSRodney W. Grimes busy++; 1282df8bae1dSRodney W. Grimes } 1283996c772fSJohn Dyson simple_unlock(&mntvnode_slock); 1284df8bae1dSRodney W. Grimes if (busy) 1285df8bae1dSRodney W. Grimes return (EBUSY); 1286df8bae1dSRodney W. Grimes return (0); 1287df8bae1dSRodney W. Grimes } 1288df8bae1dSRodney W. Grimes 1289df8bae1dSRodney W. Grimes /* 1290df8bae1dSRodney W. Grimes * Disassociate the underlying file system from a vnode. 1291df8bae1dSRodney W. Grimes */ 1292996c772fSJohn Dyson static void 1293514ede09SBruce Evans vclean(vp, flags, p) 1294514ede09SBruce Evans struct vnode *vp; 1295514ede09SBruce Evans int flags; 1296514ede09SBruce Evans struct proc *p; 1297df8bae1dSRodney W. Grimes { 12983c631446SJohn Dyson int active, irefed; 12993c631446SJohn Dyson vm_object_t object; 1300df8bae1dSRodney W. Grimes 1301df8bae1dSRodney W. Grimes /* 13020d94caffSDavid Greenman * Check to see if the vnode is in use. If so we have to reference it 13030d94caffSDavid Greenman * before we clean it out so that its count cannot fall to zero and 13040d94caffSDavid Greenman * generate a race against ourselves to recycle it. 1305df8bae1dSRodney W. Grimes */ 1306bb56ec4aSPoul-Henning Kamp if ((active = vp->v_usecount)) 1307996c772fSJohn Dyson vp->v_usecount++; 1308df8bae1dSRodney W. Grimes /* 13090d94caffSDavid Greenman * Prevent the vnode from being recycled or brought into use while we 13100d94caffSDavid Greenman * clean it out. 1311df8bae1dSRodney W. Grimes */ 1312df8bae1dSRodney W. Grimes if (vp->v_flag & VXLOCK) 1313df8bae1dSRodney W. Grimes panic("vclean: deadlock"); 1314df8bae1dSRodney W. 
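	/*
	 * Setting VXLOCK announces that this vnode is being cleaned; any
	 * other thread that finds the flag set (e.g. vgonel() or
	 * vop_revoke()) sets VXWANT and sleeps until the wakeup at the end
	 * of this routine.
	 */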
Grimes vp->v_flag |= VXLOCK; 1315df8bae1dSRodney W. Grimes /* 1316996c772fSJohn Dyson * Even if the count is zero, the VOP_INACTIVE routine may still 1317996c772fSJohn Dyson * have the object locked while it cleans it out. The VOP_LOCK 1318996c772fSJohn Dyson * ensures that the VOP_INACTIVE routine is done with its work. 1319996c772fSJohn Dyson * For active vnodes, it ensures that no other activity can 1320996c772fSJohn Dyson * occur while the underlying object is being cleaned out. 1321996c772fSJohn Dyson */ 1322996c772fSJohn Dyson VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p); 13233c631446SJohn Dyson 13243c631446SJohn Dyson object = vp->v_object; 13253c631446SJohn Dyson irefed = 0; 13263c631446SJohn Dyson if (object && ((object->flags & OBJ_DEAD) == 0)) { 13273c631446SJohn Dyson if (object->ref_count == 0) { 13283c631446SJohn Dyson vm_object_reference(object); 13293c631446SJohn Dyson irefed = 1; 13303c631446SJohn Dyson } 13313c631446SJohn Dyson ++object->ref_count; 13323c631446SJohn Dyson pager_cache(object, FALSE); 13333c631446SJohn Dyson } 13343c631446SJohn Dyson 1335996c772fSJohn Dyson /* 1336df8bae1dSRodney W. Grimes * Clean out any buffers associated with the vnode. 1337df8bae1dSRodney W. Grimes */ 1338df8bae1dSRodney W. Grimes if (flags & DOCLOSE) 1339996c772fSJohn Dyson vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); 13403c631446SJohn Dyson 13413c631446SJohn Dyson if (irefed) { 13423c631446SJohn Dyson vm_object_deallocate(object); 13433c631446SJohn Dyson } 13443c631446SJohn Dyson 1345df8bae1dSRodney W. Grimes /* 1346996c772fSJohn Dyson * If purging an active vnode, it must be closed and 1347996c772fSJohn Dyson * deactivated before being reclaimed. Note that the 1348996c772fSJohn Dyson * VOP_INACTIVE will unlock the vnode. 1349df8bae1dSRodney W. Grimes */ 1350df8bae1dSRodney W. Grimes if (active) { 1351df8bae1dSRodney W. Grimes if (flags & DOCLOSE) 1352996c772fSJohn Dyson VOP_CLOSE(vp, IO_NDELAY, NOCRED, p); 1353996c772fSJohn Dyson VOP_INACTIVE(vp, p); 1354996c772fSJohn Dyson } else { 1355996c772fSJohn Dyson /* 1356996c772fSJohn Dyson * Any other processes trying to obtain this lock must first 1357996c772fSJohn Dyson * wait for VXLOCK to clear, then call the new lock operation. 1358996c772fSJohn Dyson */ 1359996c772fSJohn Dyson VOP_UNLOCK(vp, 0, p); 1360df8bae1dSRodney W. Grimes } 1361df8bae1dSRodney W. Grimes /* 1362df8bae1dSRodney W. Grimes * Reclaim the vnode. 1363df8bae1dSRodney W. Grimes */ 1364996c772fSJohn Dyson if (VOP_RECLAIM(vp, p)) 1365df8bae1dSRodney W. Grimes panic("vclean: cannot reclaim"); 1366df8bae1dSRodney W. Grimes if (active) 1367df8bae1dSRodney W. Grimes vrele(vp); 1368996c772fSJohn Dyson cache_purge(vp); 1369996c772fSJohn Dyson if (vp->v_vnlock) { 1370f7891f9aSPoul-Henning Kamp #if 0 /* This is the only place we have LK_DRAINED in the entire kernel ??? */ 1371de15ef6aSDoug Rabson #ifdef DIAGNOSTIC 1372996c772fSJohn Dyson if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0) 1373996c772fSJohn Dyson vprint("vclean: lock not drained", vp); 1374de15ef6aSDoug Rabson #endif 1375f7891f9aSPoul-Henning Kamp #endif 1376996c772fSJohn Dyson FREE(vp->v_vnlock, M_VNODE); 1377996c772fSJohn Dyson vp->v_vnlock = NULL; 1378996c772fSJohn Dyson } 1379df8bae1dSRodney W. Grimes 1380df8bae1dSRodney W. Grimes /* 1381df8bae1dSRodney W. Grimes * Done with purge, notify sleepers of the grim news. 1382df8bae1dSRodney W. Grimes */ 1383df8bae1dSRodney W. Grimes vp->v_op = dead_vnodeop_p; 1384df8bae1dSRodney W. Grimes vp->v_tag = VT_NON; 1385df8bae1dSRodney W. 
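	/*
	 * From this point on every vnode operation on vp is routed to the
	 * dead filesystem vectors.  Clear VXLOCK and wake up anyone who
	 * blocked on the flag while the purge was in progress.
	 */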
Grimes vp->v_flag &= ~VXLOCK; 1386df8bae1dSRodney W. Grimes if (vp->v_flag & VXWANT) { 1387df8bae1dSRodney W. Grimes vp->v_flag &= ~VXWANT; 1388df8bae1dSRodney W. Grimes wakeup((caddr_t) vp); 1389df8bae1dSRodney W. Grimes } 1390df8bae1dSRodney W. Grimes } 1391df8bae1dSRodney W. Grimes 1392df8bae1dSRodney W. Grimes /* 1393df8bae1dSRodney W. Grimes * Eliminate all activity associated with the requested vnode 1394df8bae1dSRodney W. Grimes * and with all vnodes aliased to the requested vnode. 1395df8bae1dSRodney W. Grimes */ 1396996c772fSJohn Dyson int 1397996c772fSJohn Dyson vop_revoke(ap) 1398996c772fSJohn Dyson struct vop_revoke_args /* { 1399996c772fSJohn Dyson struct vnode *a_vp; 1400996c772fSJohn Dyson int a_flags; 1401996c772fSJohn Dyson } */ *ap; 1402df8bae1dSRodney W. Grimes { 1403996c772fSJohn Dyson struct vnode *vp, *vq; 1404996c772fSJohn Dyson struct proc *p = curproc; /* XXX */ 1405996c772fSJohn Dyson 1406996c772fSJohn Dyson #ifdef DIAGNOSTIC 1407996c772fSJohn Dyson if ((ap->a_flags & REVOKEALL) == 0) 1408996c772fSJohn Dyson panic("vop_revoke"); 1409996c772fSJohn Dyson #endif 1410996c772fSJohn Dyson 1411996c772fSJohn Dyson vp = ap->a_vp; 1412996c772fSJohn Dyson simple_lock(&vp->v_interlock); 1413df8bae1dSRodney W. Grimes 1414df8bae1dSRodney W. Grimes if (vp->v_flag & VALIASED) { 1415df8bae1dSRodney W. Grimes /* 1416996c772fSJohn Dyson * If a vgone (or vclean) is already in progress, 1417996c772fSJohn Dyson * wait until it is done and return. 1418df8bae1dSRodney W. Grimes */ 1419df8bae1dSRodney W. Grimes if (vp->v_flag & VXLOCK) { 1420df8bae1dSRodney W. Grimes vp->v_flag |= VXWANT; 1421996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1422996c772fSJohn Dyson tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0); 1423996c772fSJohn Dyson return (0); 1424df8bae1dSRodney W. Grimes } 1425df8bae1dSRodney W. Grimes /* 1426996c772fSJohn Dyson * Ensure that vp will not be vgone'd while we 1427996c772fSJohn Dyson * are eliminating its aliases. 1428df8bae1dSRodney W. Grimes */ 1429df8bae1dSRodney W. Grimes vp->v_flag |= VXLOCK; 1430996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1431df8bae1dSRodney W. Grimes while (vp->v_flag & VALIASED) { 1432996c772fSJohn Dyson simple_lock(&spechash_slock); 1433df8bae1dSRodney W. Grimes for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1434df8bae1dSRodney W. Grimes if (vq->v_rdev != vp->v_rdev || 1435df8bae1dSRodney W. Grimes vq->v_type != vp->v_type || vp == vq) 1436df8bae1dSRodney W. Grimes continue; 1437996c772fSJohn Dyson simple_unlock(&spechash_slock); 1438df8bae1dSRodney W. Grimes vgone(vq); 1439df8bae1dSRodney W. Grimes break; 1440df8bae1dSRodney W. Grimes } 1441996c772fSJohn Dyson if (vq == NULLVP) { 1442996c772fSJohn Dyson simple_unlock(&spechash_slock); 1443996c772fSJohn Dyson } 1444df8bae1dSRodney W. Grimes } 1445df8bae1dSRodney W. Grimes /* 1446996c772fSJohn Dyson * Remove the lock so that vgone below will 1447996c772fSJohn Dyson * really eliminate the vnode after which time 1448996c772fSJohn Dyson * vgone will awaken any sleepers. 1449df8bae1dSRodney W. Grimes */ 1450996c772fSJohn Dyson simple_lock(&vp->v_interlock); 1451df8bae1dSRodney W. Grimes vp->v_flag &= ~VXLOCK; 1452df8bae1dSRodney W. Grimes } 1453996c772fSJohn Dyson vgonel(vp, p); 1454996c772fSJohn Dyson return (0); 1455996c772fSJohn Dyson } 1456996c772fSJohn Dyson 1457996c772fSJohn Dyson /* 1458996c772fSJohn Dyson * Recycle an unused vnode to the front of the free list. 1459996c772fSJohn Dyson * Release the passed interlock if the vnode will be recycled. 
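 * (A filesystem's inactive routine typically calls this when it finds
 * that the underlying file has been removed, so the vnode can be reused
 * at once instead of waiting to age out of the free list.)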
1460996c772fSJohn Dyson */ 1461996c772fSJohn Dyson int 1462996c772fSJohn Dyson vrecycle(vp, inter_lkp, p) 1463996c772fSJohn Dyson struct vnode *vp; 1464996c772fSJohn Dyson struct simplelock *inter_lkp; 1465996c772fSJohn Dyson struct proc *p; 1466996c772fSJohn Dyson { 1467996c772fSJohn Dyson 1468996c772fSJohn Dyson simple_lock(&vp->v_interlock); 1469996c772fSJohn Dyson if (vp->v_usecount == 0) { 1470996c772fSJohn Dyson if (inter_lkp) { 1471996c772fSJohn Dyson simple_unlock(inter_lkp); 1472996c772fSJohn Dyson } 1473996c772fSJohn Dyson vgonel(vp, p); 1474996c772fSJohn Dyson return (1); 1475996c772fSJohn Dyson } 1476996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1477996c772fSJohn Dyson return (0); 1478df8bae1dSRodney W. Grimes } 1479df8bae1dSRodney W. Grimes 1480df8bae1dSRodney W. Grimes /* 1481df8bae1dSRodney W. Grimes * Eliminate all activity associated with a vnode 1482df8bae1dSRodney W. Grimes * in preparation for reuse. 1483df8bae1dSRodney W. Grimes */ 148426f9a767SRodney W. Grimes void 148526f9a767SRodney W. Grimes vgone(vp) 1486df8bae1dSRodney W. Grimes register struct vnode *vp; 1487df8bae1dSRodney W. Grimes { 1488996c772fSJohn Dyson struct proc *p = curproc; /* XXX */ 1489996c772fSJohn Dyson 1490996c772fSJohn Dyson simple_lock(&vp->v_interlock); 1491996c772fSJohn Dyson vgonel(vp, p); 1492996c772fSJohn Dyson } 1493996c772fSJohn Dyson 1494996c772fSJohn Dyson /* 1495996c772fSJohn Dyson * vgone, with the vp interlock held. 1496996c772fSJohn Dyson */ 14970f1adf65SBruce Evans static void 1498996c772fSJohn Dyson vgonel(vp, p) 1499996c772fSJohn Dyson struct vnode *vp; 1500996c772fSJohn Dyson struct proc *p; 1501996c772fSJohn Dyson { 1502996c772fSJohn Dyson struct vnode *vq; 1503df8bae1dSRodney W. Grimes struct vnode *vx; 1504df8bae1dSRodney W. Grimes 1505df8bae1dSRodney W. Grimes /* 1506996c772fSJohn Dyson * If a vgone (or vclean) is already in progress, 1507996c772fSJohn Dyson * wait until it is done and return. 1508df8bae1dSRodney W. Grimes */ 1509df8bae1dSRodney W. Grimes if (vp->v_flag & VXLOCK) { 1510df8bae1dSRodney W. Grimes vp->v_flag |= VXWANT; 1511996c772fSJohn Dyson simple_unlock(&vp->v_interlock); 1512996c772fSJohn Dyson tsleep((caddr_t)vp, PINOD, "vgone", 0); 1513df8bae1dSRodney W. Grimes return; 1514df8bae1dSRodney W. Grimes } 1515ad980522SJohn Dyson 1516ad980522SJohn Dyson if (vp->v_object) { 1517ad980522SJohn Dyson vp->v_object->flags |= OBJ_VNODE_GONE; 1518ad980522SJohn Dyson } 1519ad980522SJohn Dyson 1520df8bae1dSRodney W. Grimes /* 1521df8bae1dSRodney W. Grimes * Clean out the filesystem specific data. 1522df8bae1dSRodney W. Grimes */ 1523996c772fSJohn Dyson vclean(vp, DOCLOSE, p); 1524df8bae1dSRodney W. Grimes /* 1525df8bae1dSRodney W. Grimes * Delete from old mount point vnode list, if on one. 1526df8bae1dSRodney W. Grimes */ 1527996c772fSJohn Dyson if (vp->v_mount != NULL) 1528996c772fSJohn Dyson insmntque(vp, (struct mount *)0); 1529df8bae1dSRodney W. Grimes /* 1530996c772fSJohn Dyson * If special device, remove it from special device alias list 1531996c772fSJohn Dyson * if it is on one. 1532df8bae1dSRodney W. Grimes */ 1533996c772fSJohn Dyson if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) { 1534996c772fSJohn Dyson simple_lock(&spechash_slock); 1535df8bae1dSRodney W. Grimes if (*vp->v_hashchain == vp) { 1536df8bae1dSRodney W. Grimes *vp->v_hashchain = vp->v_specnext; 1537df8bae1dSRodney W. Grimes } else { 1538df8bae1dSRodney W. Grimes for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1539df8bae1dSRodney W. 
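				/*
				 * vp is not at the head of the hash chain;
				 * find its predecessor and unlink vp from
				 * the singly-linked v_specnext list.
				 */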
Grimes if (vq->v_specnext != vp) 1540df8bae1dSRodney W. Grimes continue; 1541df8bae1dSRodney W. Grimes vq->v_specnext = vp->v_specnext; 1542df8bae1dSRodney W. Grimes break; 1543df8bae1dSRodney W. Grimes } 1544df8bae1dSRodney W. Grimes if (vq == NULL) 1545df8bae1dSRodney W. Grimes panic("missing bdev"); 1546df8bae1dSRodney W. Grimes } 1547df8bae1dSRodney W. Grimes if (vp->v_flag & VALIASED) { 1548df8bae1dSRodney W. Grimes vx = NULL; 1549df8bae1dSRodney W. Grimes for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1550df8bae1dSRodney W. Grimes if (vq->v_rdev != vp->v_rdev || 1551df8bae1dSRodney W. Grimes vq->v_type != vp->v_type) 1552df8bae1dSRodney W. Grimes continue; 1553df8bae1dSRodney W. Grimes if (vx) 1554df8bae1dSRodney W. Grimes break; 1555df8bae1dSRodney W. Grimes vx = vq; 1556df8bae1dSRodney W. Grimes } 1557df8bae1dSRodney W. Grimes if (vx == NULL) 1558df8bae1dSRodney W. Grimes panic("missing alias"); 1559df8bae1dSRodney W. Grimes if (vq == NULL) 1560df8bae1dSRodney W. Grimes vx->v_flag &= ~VALIASED; 1561df8bae1dSRodney W. Grimes vp->v_flag &= ~VALIASED; 1562df8bae1dSRodney W. Grimes } 1563996c772fSJohn Dyson simple_unlock(&spechash_slock); 1564df8bae1dSRodney W. Grimes FREE(vp->v_specinfo, M_VNODE); 1565df8bae1dSRodney W. Grimes vp->v_specinfo = NULL; 1566df8bae1dSRodney W. Grimes } 1567996c772fSJohn Dyson 1568df8bae1dSRodney W. Grimes /* 1569996c772fSJohn Dyson * If it is on the freelist and not already at the head, 1570996c772fSJohn Dyson * move it to the head of the list. The test of the back 1571996c772fSJohn Dyson * pointer and the reference count of zero is because 1572996c772fSJohn Dyson * it will be removed from the free list by getnewvnode, 1573996c772fSJohn Dyson * but will not have its reference count incremented until 1574996c772fSJohn Dyson * after calling vgone. If the reference count were 1575996c772fSJohn Dyson * incremented first, vgone would (incorrectly) try to 1576996c772fSJohn Dyson * close the previous instance of the underlying object. 1577df8bae1dSRodney W. Grimes */ 1578a051452aSPoul-Henning Kamp if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 1579996c772fSJohn Dyson simple_lock(&vnode_free_list_slock); 1580df8bae1dSRodney W. Grimes TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 1581df8bae1dSRodney W. Grimes TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 1582996c772fSJohn Dyson simple_unlock(&vnode_free_list_slock); 15830082fb46SJordan K. Hubbard } 1584996c772fSJohn Dyson 1585df8bae1dSRodney W. Grimes vp->v_type = VBAD; 1586df8bae1dSRodney W. Grimes } 1587df8bae1dSRodney W. Grimes 1588df8bae1dSRodney W. Grimes /* 1589df8bae1dSRodney W. Grimes * Lookup a vnode by device number. 1590df8bae1dSRodney W. Grimes */ 159126f9a767SRodney W. Grimes int 1592df8bae1dSRodney W. Grimes vfinddev(dev, type, vpp) 1593df8bae1dSRodney W. Grimes dev_t dev; 1594df8bae1dSRodney W. Grimes enum vtype type; 1595df8bae1dSRodney W. Grimes struct vnode **vpp; 1596df8bae1dSRodney W. Grimes { 1597df8bae1dSRodney W. Grimes register struct vnode *vp; 1598b98afd0dSBruce Evans int rc = 0; 1599df8bae1dSRodney W. Grimes 1600b98afd0dSBruce Evans simple_lock(&spechash_slock); 1601df8bae1dSRodney W. Grimes for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) { 1602df8bae1dSRodney W. Grimes if (dev != vp->v_rdev || type != vp->v_type) 1603df8bae1dSRodney W. Grimes continue; 1604df8bae1dSRodney W. Grimes *vpp = vp; 1605b98afd0dSBruce Evans rc = 1; 1606b98afd0dSBruce Evans break; 1607df8bae1dSRodney W. 
Grimes } 1608b98afd0dSBruce Evans simple_unlock(&spechash_slock); 1609b98afd0dSBruce Evans return (rc); 1610df8bae1dSRodney W. Grimes } 1611df8bae1dSRodney W. Grimes 1612df8bae1dSRodney W. Grimes /* 1613df8bae1dSRodney W. Grimes * Calculate the total number of references to a special device. 1614df8bae1dSRodney W. Grimes */ 161526f9a767SRodney W. Grimes int 1616df8bae1dSRodney W. Grimes vcount(vp) 1617df8bae1dSRodney W. Grimes register struct vnode *vp; 1618df8bae1dSRodney W. Grimes { 1619996c772fSJohn Dyson struct vnode *vq, *vnext; 1620df8bae1dSRodney W. Grimes int count; 1621df8bae1dSRodney W. Grimes 1622df8bae1dSRodney W. Grimes loop: 1623df8bae1dSRodney W. Grimes if ((vp->v_flag & VALIASED) == 0) 1624df8bae1dSRodney W. Grimes return (vp->v_usecount); 1625b98afd0dSBruce Evans simple_lock(&spechash_slock); 1626df8bae1dSRodney W. Grimes for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) { 1627df8bae1dSRodney W. Grimes vnext = vq->v_specnext; 1628df8bae1dSRodney W. Grimes if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type) 1629df8bae1dSRodney W. Grimes continue; 1630df8bae1dSRodney W. Grimes /* 1631df8bae1dSRodney W. Grimes * Alias, but not in use, so flush it out. 1632df8bae1dSRodney W. Grimes */ 1633df8bae1dSRodney W. Grimes if (vq->v_usecount == 0 && vq != vp) { 1634b98afd0dSBruce Evans simple_unlock(&spechash_slock); 1635df8bae1dSRodney W. Grimes vgone(vq); 1636df8bae1dSRodney W. Grimes goto loop; 1637df8bae1dSRodney W. Grimes } 1638df8bae1dSRodney W. Grimes count += vq->v_usecount; 1639df8bae1dSRodney W. Grimes } 1640b98afd0dSBruce Evans simple_unlock(&spechash_slock); 1641df8bae1dSRodney W. Grimes return (count); 1642df8bae1dSRodney W. Grimes } 1643df8bae1dSRodney W. Grimes 1644df8bae1dSRodney W. Grimes /* 16457fab7799SPeter Wemm * Return true for select/poll. 16467fab7799SPeter Wemm */ 16477fab7799SPeter Wemm int 16487fab7799SPeter Wemm vop_nopoll(ap) 16497fab7799SPeter Wemm struct vop_poll_args /* { 16507fab7799SPeter Wemm struct vnode *a_vp; 16517fab7799SPeter Wemm int a_events; 16527fab7799SPeter Wemm struct ucred *a_cred; 16537fab7799SPeter Wemm struct proc *a_p; 16547fab7799SPeter Wemm } */ *ap; 16557fab7799SPeter Wemm { 16567fab7799SPeter Wemm 16577fab7799SPeter Wemm /* 16587fab7799SPeter Wemm * Just return what we were asked for. 16597fab7799SPeter Wemm */ 16607fab7799SPeter Wemm return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 16617fab7799SPeter Wemm } 16627fab7799SPeter Wemm 16637fab7799SPeter Wemm /* 1664df8bae1dSRodney W. Grimes * Print out a description of a vnode. 1665df8bae1dSRodney W. Grimes */ 1666df8bae1dSRodney W. Grimes static char *typename[] = 1667df8bae1dSRodney W. Grimes {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 1668df8bae1dSRodney W. Grimes 166926f9a767SRodney W. Grimes void 1670df8bae1dSRodney W. Grimes vprint(label, vp) 1671df8bae1dSRodney W. Grimes char *label; 1672df8bae1dSRodney W. Grimes register struct vnode *vp; 1673df8bae1dSRodney W. Grimes { 1674df8bae1dSRodney W. Grimes char buf[64]; 1675df8bae1dSRodney W. Grimes 1676df8bae1dSRodney W. Grimes if (label != NULL) 1677de15ef6aSDoug Rabson printf("%s: %x: ", label, vp); 1678de15ef6aSDoug Rabson else 1679de15ef6aSDoug Rabson printf("%x: ", vp); 1680bb56ec4aSPoul-Henning Kamp printf("type %s, usecount %d, writecount %d, refcount %ld,", 1681df8bae1dSRodney W. Grimes typename[vp->v_type], vp->v_usecount, vp->v_writecount, 1682df8bae1dSRodney W. Grimes vp->v_holdcnt); 1683df8bae1dSRodney W. Grimes buf[0] = '\0'; 1684df8bae1dSRodney W. 
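	/*
	 * Decode the interesting v_flag bits into a '|'-separated string;
	 * the leading separator is skipped when the buffer is printed
	 * below.
	 */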
Grimes if (vp->v_flag & VROOT) 1685df8bae1dSRodney W. Grimes strcat(buf, "|VROOT"); 1686df8bae1dSRodney W. Grimes if (vp->v_flag & VTEXT) 1687df8bae1dSRodney W. Grimes strcat(buf, "|VTEXT"); 1688df8bae1dSRodney W. Grimes if (vp->v_flag & VSYSTEM) 1689df8bae1dSRodney W. Grimes strcat(buf, "|VSYSTEM"); 1690df8bae1dSRodney W. Grimes if (vp->v_flag & VXLOCK) 1691df8bae1dSRodney W. Grimes strcat(buf, "|VXLOCK"); 1692df8bae1dSRodney W. Grimes if (vp->v_flag & VXWANT) 1693df8bae1dSRodney W. Grimes strcat(buf, "|VXWANT"); 1694df8bae1dSRodney W. Grimes if (vp->v_flag & VBWAIT) 1695df8bae1dSRodney W. Grimes strcat(buf, "|VBWAIT"); 1696df8bae1dSRodney W. Grimes if (vp->v_flag & VALIASED) 1697df8bae1dSRodney W. Grimes strcat(buf, "|VALIASED"); 1698a051452aSPoul-Henning Kamp if (vp->v_flag & VDOOMED) 1699a051452aSPoul-Henning Kamp strcat(buf, "|VDOOMED"); 1700a051452aSPoul-Henning Kamp if (vp->v_flag & VFREE) 1701a051452aSPoul-Henning Kamp strcat(buf, "|VFREE"); 1702df8bae1dSRodney W. Grimes if (buf[0] != '\0') 1703df8bae1dSRodney W. Grimes printf(" flags (%s)", &buf[1]); 1704df8bae1dSRodney W. Grimes if (vp->v_data == NULL) { 1705df8bae1dSRodney W. Grimes printf("\n"); 1706df8bae1dSRodney W. Grimes } else { 1707df8bae1dSRodney W. Grimes printf("\n\t"); 1708df8bae1dSRodney W. Grimes VOP_PRINT(vp); 1709df8bae1dSRodney W. Grimes } 1710df8bae1dSRodney W. Grimes } 1711df8bae1dSRodney W. Grimes 17121a477b0cSDavid Greenman #ifdef DDB 1713df8bae1dSRodney W. Grimes /* 1714df8bae1dSRodney W. Grimes * List all of the locked vnodes in the system. 1715df8bae1dSRodney W. Grimes * Called when debugging the kernel. 1716df8bae1dSRodney W. Grimes */ 171726f9a767SRodney W. Grimes void 1718c35e283aSBruce Evans printlockedvnodes() 1719df8bae1dSRodney W. Grimes { 1720c35e283aSBruce Evans struct proc *p = curproc; /* XXX */ 1721c35e283aSBruce Evans struct mount *mp, *nmp; 1722c35e283aSBruce Evans struct vnode *vp; 1723df8bae1dSRodney W. Grimes 1724df8bae1dSRodney W. Grimes printf("Locked vnodes\n"); 1725c35e283aSBruce Evans simple_lock(&mountlist_slock); 1726c35e283aSBruce Evans for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { 1727c35e283aSBruce Evans if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { 1728c35e283aSBruce Evans nmp = mp->mnt_list.cqe_next; 1729c35e283aSBruce Evans continue; 1730c35e283aSBruce Evans } 1731df8bae1dSRodney W. Grimes for (vp = mp->mnt_vnodelist.lh_first; 1732df8bae1dSRodney W. Grimes vp != NULL; 1733c35e283aSBruce Evans vp = vp->v_mntvnodes.le_next) { 1734df8bae1dSRodney W. Grimes if (VOP_ISLOCKED(vp)) 1735df8bae1dSRodney W. Grimes vprint((char *)0, vp); 1736df8bae1dSRodney W. Grimes } 1737c35e283aSBruce Evans simple_lock(&mountlist_slock); 1738c35e283aSBruce Evans nmp = mp->mnt_list.cqe_next; 1739c35e283aSBruce Evans vfs_unbusy(mp, p); 1740c35e283aSBruce Evans } 1741c35e283aSBruce Evans simple_unlock(&mountlist_slock); 1742df8bae1dSRodney W. Grimes } 1743df8bae1dSRodney W. Grimes #endif 1744df8bae1dSRodney W. Grimes 17453a76a594SBruce Evans /* 17463a76a594SBruce Evans * Top level filesystem related information gathering. 
17473a76a594SBruce Evans */ 17483a76a594SBruce Evans static int sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS); 17493a76a594SBruce Evans 17504a8b9660SBruce Evans static int 17513a76a594SBruce Evans vfs_sysctl SYSCTL_HANDLER_ARGS 1752a896f025SBruce Evans { 17534a8b9660SBruce Evans int *name = (int *)arg1 - 1; /* XXX */ 17544a8b9660SBruce Evans u_int namelen = arg2 + 1; /* XXX */ 1755a896f025SBruce Evans struct vfsconf *vfsp; 1756a896f025SBruce Evans 17573a76a594SBruce Evans #ifndef NO_COMPAT_PRELITE2 17583a76a594SBruce Evans /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 17594a8b9660SBruce Evans if (namelen == 1) 17603a76a594SBruce Evans return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 1761dc91a89eSBruce Evans #endif 1762a896f025SBruce Evans 17634a8b9660SBruce Evans #ifdef notyet 17643a76a594SBruce Evans /* all sysctl names at this level are at least name and field */ 17653a76a594SBruce Evans if (namelen < 2) 17663a76a594SBruce Evans return (ENOTDIR); /* overloaded */ 17673a76a594SBruce Evans if (name[0] != VFS_GENERIC) { 17683a76a594SBruce Evans for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 17693a76a594SBruce Evans if (vfsp->vfc_typenum == name[0]) 17703a76a594SBruce Evans break; 17713a76a594SBruce Evans if (vfsp == NULL) 17723a76a594SBruce Evans return (EOPNOTSUPP); 17733a76a594SBruce Evans return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 17743a76a594SBruce Evans oldp, oldlenp, newp, newlen, p)); 17753a76a594SBruce Evans } 17764a8b9660SBruce Evans #endif 17773a76a594SBruce Evans switch (name[1]) { 17783a76a594SBruce Evans case VFS_MAXTYPENUM: 17793a76a594SBruce Evans if (namelen != 2) 17803a76a594SBruce Evans return (ENOTDIR); 17813a76a594SBruce Evans return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 17823a76a594SBruce Evans case VFS_CONF: 17833a76a594SBruce Evans if (namelen != 3) 17843a76a594SBruce Evans return (ENOTDIR); /* overloaded */ 17853a76a594SBruce Evans for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 17863a76a594SBruce Evans if (vfsp->vfc_typenum == name[2]) 17873a76a594SBruce Evans break; 17883a76a594SBruce Evans if (vfsp == NULL) 17893a76a594SBruce Evans return (EOPNOTSUPP); 17903a76a594SBruce Evans return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 17913a76a594SBruce Evans } 17923a76a594SBruce Evans return (EOPNOTSUPP); 17933a76a594SBruce Evans } 17943a76a594SBruce Evans 17954a8b9660SBruce Evans SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 17964a8b9660SBruce Evans "Generic filesystem"); 17974a8b9660SBruce Evans 1798a896f025SBruce Evans #ifndef NO_COMPAT_PRELITE2 1799a896f025SBruce Evans 1800a896f025SBruce Evans static int 1801a896f025SBruce Evans sysctl_ovfs_conf SYSCTL_HANDLER_ARGS 1802a896f025SBruce Evans { 1803a896f025SBruce Evans int error; 1804a896f025SBruce Evans struct vfsconf *vfsp; 1805a896f025SBruce Evans struct ovfsconf ovfs; 18063a76a594SBruce Evans 18073a76a594SBruce Evans for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 1808a896f025SBruce Evans ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 1809a896f025SBruce Evans strcpy(ovfs.vfc_name, vfsp->vfc_name); 1810a896f025SBruce Evans ovfs.vfc_index = vfsp->vfc_typenum; 1811a896f025SBruce Evans ovfs.vfc_refcount = vfsp->vfc_refcount; 1812a896f025SBruce Evans ovfs.vfc_flags = vfsp->vfc_flags; 1813a896f025SBruce Evans error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 1814a896f025SBruce Evans if (error) 1815a896f025SBruce Evans return error; 1816a896f025SBruce Evans } 1817a896f025SBruce Evans return 0; 1818a896f025SBruce Evans } 1819a896f025SBruce Evans 
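/*
 * A sketch, for illustration only, of how the vfs.generic sysctl node
 * implemented by vfs_sysctl() above is consumed from userland.  The
 * program below is hypothetical (and obviously not kernel code, hence the
 * "#if 0"); it reads the kernel's maxvfsconf value via VFS_MAXTYPENUM.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	int mib[3], maxtypenum;
	size_t len = sizeof(maxtypenum);

	mib[0] = CTL_VFS;		/* top-level vfs tree */
	mib[1] = VFS_GENERIC;		/* the node declared above */
	mib[2] = VFS_MAXTYPENUM;	/* handled by vfs_sysctl() */
	if (sysctl(mib, 3, &maxtypenum, &len, NULL, 0) == -1)
		return (1);
	printf("vfs.generic.maxtypenum: %d\n", maxtypenum);
	return (0);
}
#endif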
1820a896f025SBruce Evans #endif /* !NO_COMPAT_PRELITE2 */ 1821a896f025SBruce Evans 1822df8bae1dSRodney W. Grimes int kinfo_vdebug = 1; 1823df8bae1dSRodney W. Grimes int kinfo_vgetfailed; 18240d94caffSDavid Greenman 1825df8bae1dSRodney W. Grimes #define KINFO_VNODESLOP 10 1826df8bae1dSRodney W. Grimes /* 1827df8bae1dSRodney W. Grimes * Dump vnode list (via sysctl). 1828df8bae1dSRodney W. Grimes * Copyout address of vnode followed by vnode. 1829df8bae1dSRodney W. Grimes */ 1830df8bae1dSRodney W. Grimes /* ARGSUSED */ 18314b2af45fSPoul-Henning Kamp static int 18324b2af45fSPoul-Henning Kamp sysctl_vnode SYSCTL_HANDLER_ARGS 1833df8bae1dSRodney W. Grimes { 1834996c772fSJohn Dyson struct proc *p = curproc; /* XXX */ 1835c35e283aSBruce Evans struct mount *mp, *nmp; 1836c35e283aSBruce Evans struct vnode *nvp, *vp; 1837df8bae1dSRodney W. Grimes int error; 1838df8bae1dSRodney W. Grimes 1839df8bae1dSRodney W. Grimes #define VPTRSZ sizeof (struct vnode *) 1840df8bae1dSRodney W. Grimes #define VNODESZ sizeof (struct vnode) 18414b2af45fSPoul-Henning Kamp 18424b2af45fSPoul-Henning Kamp req->lock = 0; 18432d0b1d70SPoul-Henning Kamp if (!req->oldptr) /* Make an estimate */ 18444b2af45fSPoul-Henning Kamp return (SYSCTL_OUT(req, 0, 18454b2af45fSPoul-Henning Kamp (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); 1846df8bae1dSRodney W. Grimes 1847c35e283aSBruce Evans simple_lock(&mountlist_slock); 1848628641f8SDavid Greenman for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { 1849c35e283aSBruce Evans if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { 1850628641f8SDavid Greenman nmp = mp->mnt_list.cqe_next; 1851df8bae1dSRodney W. Grimes continue; 1852c35e283aSBruce Evans } 1853df8bae1dSRodney W. Grimes again: 1854c35e283aSBruce Evans simple_lock(&mntvnode_slock); 1855df8bae1dSRodney W. Grimes for (vp = mp->mnt_vnodelist.lh_first; 1856df8bae1dSRodney W. Grimes vp != NULL; 1857c35e283aSBruce Evans vp = nvp) { 1858df8bae1dSRodney W. Grimes /* 1859c35e283aSBruce Evans * Check that the vp is still associated with 1860c35e283aSBruce Evans * this filesystem. RACE: could have been 1861c35e283aSBruce Evans * recycled onto the same filesystem. 1862df8bae1dSRodney W. Grimes */ 1863df8bae1dSRodney W. Grimes if (vp->v_mount != mp) { 1864c35e283aSBruce Evans simple_unlock(&mntvnode_slock); 1865df8bae1dSRodney W. Grimes if (kinfo_vdebug) 1866df8bae1dSRodney W. Grimes printf("kinfo: vp changed\n"); 1867df8bae1dSRodney W. Grimes goto again; 1868df8bae1dSRodney W. Grimes } 1869c35e283aSBruce Evans nvp = vp->v_mntvnodes.le_next; 1870c35e283aSBruce Evans simple_unlock(&mntvnode_slock); 18714b2af45fSPoul-Henning Kamp if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || 1872c35e283aSBruce Evans (error = SYSCTL_OUT(req, vp, VNODESZ))) 1873df8bae1dSRodney W. Grimes return (error); 1874c35e283aSBruce Evans simple_lock(&mntvnode_slock); 1875e887950aSBruce Evans } 1876c35e283aSBruce Evans simple_unlock(&mntvnode_slock); 1877c35e283aSBruce Evans simple_lock(&mountlist_slock); 1878c35e283aSBruce Evans nmp = mp->mnt_list.cqe_next; 1879996c772fSJohn Dyson vfs_unbusy(mp, p); 1880df8bae1dSRodney W. Grimes } 1881c35e283aSBruce Evans simple_unlock(&mountlist_slock); 1882df8bae1dSRodney W. Grimes 1883df8bae1dSRodney W. Grimes return (0); 1884df8bae1dSRodney W. Grimes } 1885df8bae1dSRodney W. Grimes 18862e58c0f8SDavid Greenman /* 18872e58c0f8SDavid Greenman * XXX 18882e58c0f8SDavid Greenman * Exporting the vnode list on large systems causes them to crash. 
18892e58c0f8SDavid Greenman * Exporting the vnode list on medium systems causes sysctl to coredump. 18902e58c0f8SDavid Greenman */ 18912e58c0f8SDavid Greenman #if 0 189265d0bc13SPoul-Henning Kamp SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 189365d0bc13SPoul-Henning Kamp 0, 0, sysctl_vnode, "S,vnode", ""); 18942e58c0f8SDavid Greenman #endif 18954b2af45fSPoul-Henning Kamp 1896df8bae1dSRodney W. Grimes /* 1897df8bae1dSRodney W. Grimes * Check to see if a filesystem is mounted on a block device. 1898df8bae1dSRodney W. Grimes */ 1899df8bae1dSRodney W. Grimes int 1900df8bae1dSRodney W. Grimes vfs_mountedon(vp) 1901996c772fSJohn Dyson struct vnode *vp; 1902df8bae1dSRodney W. Grimes { 1903996c772fSJohn Dyson struct vnode *vq; 1904996c772fSJohn Dyson int error = 0; 1905df8bae1dSRodney W. Grimes 1906df8bae1dSRodney W. Grimes if (vp->v_specflags & SI_MOUNTEDON) 1907df8bae1dSRodney W. Grimes return (EBUSY); 1908df8bae1dSRodney W. Grimes if (vp->v_flag & VALIASED) { 1909996c772fSJohn Dyson simple_lock(&spechash_slock); 1910df8bae1dSRodney W. Grimes for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { 1911df8bae1dSRodney W. Grimes if (vq->v_rdev != vp->v_rdev || 1912df8bae1dSRodney W. Grimes vq->v_type != vp->v_type) 1913df8bae1dSRodney W. Grimes continue; 1914996c772fSJohn Dyson if (vq->v_specflags & SI_MOUNTEDON) { 1915996c772fSJohn Dyson error = EBUSY; 1916996c772fSJohn Dyson break; 1917df8bae1dSRodney W. Grimes } 1918df8bae1dSRodney W. Grimes } 1919996c772fSJohn Dyson simple_unlock(&spechash_slock); 1920996c772fSJohn Dyson } 1921996c772fSJohn Dyson return (error); 1922996c772fSJohn Dyson } 1923996c772fSJohn Dyson 1924996c772fSJohn Dyson /* 1925996c772fSJohn Dyson * Unmount all filesystems. The list is traversed in reverse order 19267c1557c4SBruce Evans * of mounting to avoid dependencies. 1927996c772fSJohn Dyson */ 1928996c772fSJohn Dyson void 1929996c772fSJohn Dyson vfs_unmountall() 1930996c772fSJohn Dyson { 19317c1557c4SBruce Evans struct mount *mp, *nmp; 19327c1557c4SBruce Evans struct proc *p = initproc; /* XXX XXX should this be proc0? */ 1933996c772fSJohn Dyson int error; 1934996c772fSJohn Dyson 19357c1557c4SBruce Evans /* 19367c1557c4SBruce Evans * Since this only runs when rebooting, it is not interlocked. 19377c1557c4SBruce Evans */ 1938996c772fSJohn Dyson for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) { 1939996c772fSJohn Dyson nmp = mp->mnt_list.cqe_prev; 19407c1557c4SBruce Evans error = dounmount(mp, MNT_FORCE, p); 1941996c772fSJohn Dyson if (error) { 19427c1557c4SBruce Evans printf("unmount of %s failed (", 19437c1557c4SBruce Evans mp->mnt_stat.f_mntonname); 1944996c772fSJohn Dyson if (error == EBUSY) 1945996c772fSJohn Dyson printf("BUSY)\n"); 1946996c772fSJohn Dyson else 1947996c772fSJohn Dyson printf("%d)\n", error); 1948996c772fSJohn Dyson } 1949996c772fSJohn Dyson } 1950df8bae1dSRodney W. Grimes } 1951df8bae1dSRodney W. Grimes 1952df8bae1dSRodney W. Grimes /* 1953df8bae1dSRodney W. Grimes * Build hash lists of net addresses and hang them off the mount point. 1954df8bae1dSRodney W. Grimes * Called by ufs_mount() to set up the lists of export addresses. 1955df8bae1dSRodney W. Grimes */ 1956df8bae1dSRodney W. Grimes static int 1957514ede09SBruce Evans vfs_hang_addrlist(mp, nep, argp) 1958514ede09SBruce Evans struct mount *mp; 1959514ede09SBruce Evans struct netexport *nep; 1960514ede09SBruce Evans struct export_args *argp; 1961df8bae1dSRodney W. Grimes { 1962df8bae1dSRodney W. Grimes register struct netcred *np; 1963df8bae1dSRodney W. 
Grimes register struct radix_node_head *rnh; 1964df8bae1dSRodney W. Grimes register int i; 1965df8bae1dSRodney W. Grimes struct radix_node *rn; 1966df8bae1dSRodney W. Grimes struct sockaddr *saddr, *smask = 0; 1967df8bae1dSRodney W. Grimes struct domain *dom; 1968df8bae1dSRodney W. Grimes int error; 1969df8bae1dSRodney W. Grimes 1970df8bae1dSRodney W. Grimes if (argp->ex_addrlen == 0) { 1971df8bae1dSRodney W. Grimes if (mp->mnt_flag & MNT_DEFEXPORTED) 1972df8bae1dSRodney W. Grimes return (EPERM); 1973df8bae1dSRodney W. Grimes np = &nep->ne_defexported; 1974df8bae1dSRodney W. Grimes np->netc_exflags = argp->ex_flags; 1975df8bae1dSRodney W. Grimes np->netc_anon = argp->ex_anon; 1976df8bae1dSRodney W. Grimes np->netc_anon.cr_ref = 1; 1977df8bae1dSRodney W. Grimes mp->mnt_flag |= MNT_DEFEXPORTED; 1978df8bae1dSRodney W. Grimes return (0); 1979df8bae1dSRodney W. Grimes } 1980df8bae1dSRodney W. Grimes i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 1981df8bae1dSRodney W. Grimes np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK); 1982df8bae1dSRodney W. Grimes bzero((caddr_t) np, i); 1983df8bae1dSRodney W. Grimes saddr = (struct sockaddr *) (np + 1); 1984bb56ec4aSPoul-Henning Kamp if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 1985df8bae1dSRodney W. Grimes goto out; 1986df8bae1dSRodney W. Grimes if (saddr->sa_len > argp->ex_addrlen) 1987df8bae1dSRodney W. Grimes saddr->sa_len = argp->ex_addrlen; 1988df8bae1dSRodney W. Grimes if (argp->ex_masklen) { 1989df8bae1dSRodney W. Grimes smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen); 19905f61c81dSPeter Wemm error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen); 1991df8bae1dSRodney W. Grimes if (error) 1992df8bae1dSRodney W. Grimes goto out; 1993df8bae1dSRodney W. Grimes if (smask->sa_len > argp->ex_masklen) 1994df8bae1dSRodney W. Grimes smask->sa_len = argp->ex_masklen; 1995df8bae1dSRodney W. Grimes } 1996df8bae1dSRodney W. Grimes i = saddr->sa_family; 1997df8bae1dSRodney W. Grimes if ((rnh = nep->ne_rtable[i]) == 0) { 1998df8bae1dSRodney W. Grimes /* 19990d94caffSDavid Greenman * Seems silly to initialize every AF when most are not used, 20000d94caffSDavid Greenman * do so on demand here 2001df8bae1dSRodney W. Grimes */ 2002df8bae1dSRodney W. Grimes for (dom = domains; dom; dom = dom->dom_next) 2003df8bae1dSRodney W. Grimes if (dom->dom_family == i && dom->dom_rtattach) { 2004df8bae1dSRodney W. Grimes dom->dom_rtattach((void **) &nep->ne_rtable[i], 2005df8bae1dSRodney W. Grimes dom->dom_rtoffset); 2006df8bae1dSRodney W. Grimes break; 2007df8bae1dSRodney W. Grimes } 2008df8bae1dSRodney W. Grimes if ((rnh = nep->ne_rtable[i]) == 0) { 2009df8bae1dSRodney W. Grimes error = ENOBUFS; 2010df8bae1dSRodney W. Grimes goto out; 2011df8bae1dSRodney W. Grimes } 2012df8bae1dSRodney W. Grimes } 2013df8bae1dSRodney W. Grimes rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh, 2014df8bae1dSRodney W. Grimes np->netc_rnodes); 2015df8bae1dSRodney W. Grimes if (rn == 0 || np != (struct netcred *) rn) { /* already exists */ 2016df8bae1dSRodney W. Grimes error = EPERM; 2017df8bae1dSRodney W. Grimes goto out; 2018df8bae1dSRodney W. Grimes } 2019df8bae1dSRodney W. Grimes np->netc_exflags = argp->ex_flags; 2020df8bae1dSRodney W. Grimes np->netc_anon = argp->ex_anon; 2021df8bae1dSRodney W. Grimes np->netc_anon.cr_ref = 1; 2022df8bae1dSRodney W. Grimes return (0); 2023df8bae1dSRodney W. Grimes out: 2024df8bae1dSRodney W. Grimes free(np, M_NETADDR); 2025df8bae1dSRodney W. 
Grimes return (error); 2026df8bae1dSRodney W. Grimes } 2027df8bae1dSRodney W. Grimes 2028df8bae1dSRodney W. Grimes /* ARGSUSED */ 2029df8bae1dSRodney W. Grimes static int 2030514ede09SBruce Evans vfs_free_netcred(rn, w) 2031514ede09SBruce Evans struct radix_node *rn; 2032514ede09SBruce Evans void *w; 2033df8bae1dSRodney W. Grimes { 2034df8bae1dSRodney W. Grimes register struct radix_node_head *rnh = (struct radix_node_head *) w; 2035df8bae1dSRodney W. Grimes 2036df8bae1dSRodney W. Grimes (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2037df8bae1dSRodney W. Grimes free((caddr_t) rn, M_NETADDR); 2038df8bae1dSRodney W. Grimes return (0); 2039df8bae1dSRodney W. Grimes } 2040df8bae1dSRodney W. Grimes 2041df8bae1dSRodney W. Grimes /* 2042df8bae1dSRodney W. Grimes * Free the net address hash lists that are hanging off the mount points. 2043df8bae1dSRodney W. Grimes */ 2044df8bae1dSRodney W. Grimes static void 2045514ede09SBruce Evans vfs_free_addrlist(nep) 2046514ede09SBruce Evans struct netexport *nep; 2047df8bae1dSRodney W. Grimes { 2048df8bae1dSRodney W. Grimes register int i; 2049df8bae1dSRodney W. Grimes register struct radix_node_head *rnh; 2050df8bae1dSRodney W. Grimes 2051df8bae1dSRodney W. Grimes for (i = 0; i <= AF_MAX; i++) 2052bb56ec4aSPoul-Henning Kamp if ((rnh = nep->ne_rtable[i])) { 2053df8bae1dSRodney W. Grimes (*rnh->rnh_walktree) (rnh, vfs_free_netcred, 2054df8bae1dSRodney W. Grimes (caddr_t) rnh); 2055df8bae1dSRodney W. Grimes free((caddr_t) rnh, M_RTABLE); 2056df8bae1dSRodney W. Grimes nep->ne_rtable[i] = 0; 2057df8bae1dSRodney W. Grimes } 2058df8bae1dSRodney W. Grimes } 2059df8bae1dSRodney W. Grimes 2060df8bae1dSRodney W. Grimes int 2061df8bae1dSRodney W. Grimes vfs_export(mp, nep, argp) 2062df8bae1dSRodney W. Grimes struct mount *mp; 2063df8bae1dSRodney W. Grimes struct netexport *nep; 2064df8bae1dSRodney W. Grimes struct export_args *argp; 2065df8bae1dSRodney W. Grimes { 2066df8bae1dSRodney W. Grimes int error; 2067df8bae1dSRodney W. Grimes 2068df8bae1dSRodney W. Grimes if (argp->ex_flags & MNT_DELEXPORT) { 2069f6b4c285SDoug Rabson if (mp->mnt_flag & MNT_EXPUBLIC) { 2070f6b4c285SDoug Rabson vfs_setpublicfs(NULL, NULL, NULL); 2071f6b4c285SDoug Rabson mp->mnt_flag &= ~MNT_EXPUBLIC; 2072f6b4c285SDoug Rabson } 2073df8bae1dSRodney W. Grimes vfs_free_addrlist(nep); 2074df8bae1dSRodney W. Grimes mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2075df8bae1dSRodney W. Grimes } 2076df8bae1dSRodney W. Grimes if (argp->ex_flags & MNT_EXPORTED) { 2077f6b4c285SDoug Rabson if (argp->ex_flags & MNT_EXPUBLIC) { 2078f6b4c285SDoug Rabson if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2079f6b4c285SDoug Rabson return (error); 2080f6b4c285SDoug Rabson mp->mnt_flag |= MNT_EXPUBLIC; 2081f6b4c285SDoug Rabson } 2082bb56ec4aSPoul-Henning Kamp if ((error = vfs_hang_addrlist(mp, nep, argp))) 2083df8bae1dSRodney W. Grimes return (error); 2084df8bae1dSRodney W. Grimes mp->mnt_flag |= MNT_EXPORTED; 2085df8bae1dSRodney W. Grimes } 2086df8bae1dSRodney W. Grimes return (0); 2087df8bae1dSRodney W. Grimes } 2088df8bae1dSRodney W. Grimes 2089f6b4c285SDoug Rabson 2090f6b4c285SDoug Rabson /* 2091f6b4c285SDoug Rabson * Set the publicly exported filesystem (WebNFS). 
Currently, only 2092f6b4c285SDoug Rabson * one public filesystem is possible in the spec (RFC 2054 and 2055) 2093f6b4c285SDoug Rabson */ 2094f6b4c285SDoug Rabson int 2095f6b4c285SDoug Rabson vfs_setpublicfs(mp, nep, argp) 2096f6b4c285SDoug Rabson struct mount *mp; 2097f6b4c285SDoug Rabson struct netexport *nep; 2098f6b4c285SDoug Rabson struct export_args *argp; 2099f6b4c285SDoug Rabson { 2100f6b4c285SDoug Rabson int error; 2101f6b4c285SDoug Rabson struct vnode *rvp; 2102f6b4c285SDoug Rabson char *cp; 2103f6b4c285SDoug Rabson 2104f6b4c285SDoug Rabson /* 2105f6b4c285SDoug Rabson * mp == NULL -> invalidate the current info, the FS is 2106f6b4c285SDoug Rabson * no longer exported. May be called from either vfs_export 2107f6b4c285SDoug Rabson * or unmount, so check if it hasn't already been done. 2108f6b4c285SDoug Rabson */ 2109f6b4c285SDoug Rabson if (mp == NULL) { 2110f6b4c285SDoug Rabson if (nfs_pub.np_valid) { 2111f6b4c285SDoug Rabson nfs_pub.np_valid = 0; 2112f6b4c285SDoug Rabson if (nfs_pub.np_index != NULL) { 2113f6b4c285SDoug Rabson FREE(nfs_pub.np_index, M_TEMP); 2114f6b4c285SDoug Rabson nfs_pub.np_index = NULL; 2115f6b4c285SDoug Rabson } 2116f6b4c285SDoug Rabson } 2117f6b4c285SDoug Rabson return (0); 2118f6b4c285SDoug Rabson } 2119f6b4c285SDoug Rabson 2120f6b4c285SDoug Rabson /* 2121f6b4c285SDoug Rabson * Only one allowed at a time. 2122f6b4c285SDoug Rabson */ 2123f6b4c285SDoug Rabson if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2124f6b4c285SDoug Rabson return (EBUSY); 2125f6b4c285SDoug Rabson 2126f6b4c285SDoug Rabson /* 2127f6b4c285SDoug Rabson * Get real filehandle for root of exported FS. 2128f6b4c285SDoug Rabson */ 2129f6b4c285SDoug Rabson bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2130f6b4c285SDoug Rabson nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2131f6b4c285SDoug Rabson 2132f6b4c285SDoug Rabson if ((error = VFS_ROOT(mp, &rvp))) 2133f6b4c285SDoug Rabson return (error); 2134f6b4c285SDoug Rabson 2135f6b4c285SDoug Rabson if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) 2136f6b4c285SDoug Rabson return (error); 2137f6b4c285SDoug Rabson 2138f6b4c285SDoug Rabson vput(rvp); 2139f6b4c285SDoug Rabson 2140f6b4c285SDoug Rabson /* 2141f6b4c285SDoug Rabson * If an indexfile was specified, pull it in. 2142f6b4c285SDoug Rabson */ 2143f6b4c285SDoug Rabson if (argp->ex_indexfile != NULL) { 2144f6b4c285SDoug Rabson MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP, 2145f6b4c285SDoug Rabson M_WAITOK); 2146f6b4c285SDoug Rabson error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2147f6b4c285SDoug Rabson MAXNAMLEN, (size_t *)0); 2148f6b4c285SDoug Rabson if (!error) { 2149f6b4c285SDoug Rabson /* 2150f6b4c285SDoug Rabson * Check for illegal filenames. 2151f6b4c285SDoug Rabson */ 2152f6b4c285SDoug Rabson for (cp = nfs_pub.np_index; *cp; cp++) { 2153f6b4c285SDoug Rabson if (*cp == '/') { 2154f6b4c285SDoug Rabson error = EINVAL; 2155f6b4c285SDoug Rabson break; 2156f6b4c285SDoug Rabson } 2157f6b4c285SDoug Rabson } 2158f6b4c285SDoug Rabson } 2159f6b4c285SDoug Rabson if (error) { 2160f6b4c285SDoug Rabson FREE(nfs_pub.np_index, M_TEMP); 2161f6b4c285SDoug Rabson return (error); 2162f6b4c285SDoug Rabson } 2163f6b4c285SDoug Rabson } 2164f6b4c285SDoug Rabson 2165f6b4c285SDoug Rabson nfs_pub.np_mount = mp; 2166f6b4c285SDoug Rabson nfs_pub.np_valid = 1; 2167f6b4c285SDoug Rabson return (0); 2168f6b4c285SDoug Rabson } 2169f6b4c285SDoug Rabson 2170df8bae1dSRodney W. Grimes struct netcred * 2171df8bae1dSRodney W. 
Grimes vfs_export_lookup(mp, nep, nam) 2172df8bae1dSRodney W. Grimes register struct mount *mp; 2173df8bae1dSRodney W. Grimes struct netexport *nep; 217457bf258eSGarrett Wollman struct sockaddr *nam; 2175df8bae1dSRodney W. Grimes { 2176df8bae1dSRodney W. Grimes register struct netcred *np; 2177df8bae1dSRodney W. Grimes register struct radix_node_head *rnh; 2178df8bae1dSRodney W. Grimes struct sockaddr *saddr; 2179df8bae1dSRodney W. Grimes 2180df8bae1dSRodney W. Grimes np = NULL; 2181df8bae1dSRodney W. Grimes if (mp->mnt_flag & MNT_EXPORTED) { 2182df8bae1dSRodney W. Grimes /* 2183df8bae1dSRodney W. Grimes * Lookup in the export list first. 2184df8bae1dSRodney W. Grimes */ 2185df8bae1dSRodney W. Grimes if (nam != NULL) { 218657bf258eSGarrett Wollman saddr = nam; 2187df8bae1dSRodney W. Grimes rnh = nep->ne_rtable[saddr->sa_family]; 2188df8bae1dSRodney W. Grimes if (rnh != NULL) { 2189df8bae1dSRodney W. Grimes np = (struct netcred *) 2190df8bae1dSRodney W. Grimes (*rnh->rnh_matchaddr)((caddr_t)saddr, 2191df8bae1dSRodney W. Grimes rnh); 2192df8bae1dSRodney W. Grimes if (np && np->netc_rnodes->rn_flags & RNF_ROOT) 2193df8bae1dSRodney W. Grimes np = NULL; 2194df8bae1dSRodney W. Grimes } 2195df8bae1dSRodney W. Grimes } 2196df8bae1dSRodney W. Grimes /* 2197df8bae1dSRodney W. Grimes * If no address match, use the default if it exists. 2198df8bae1dSRodney W. Grimes */ 2199df8bae1dSRodney W. Grimes if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 2200df8bae1dSRodney W. Grimes np = &nep->ne_defexported; 2201df8bae1dSRodney W. Grimes } 2202df8bae1dSRodney W. Grimes return (np); 2203df8bae1dSRodney W. Grimes } 220461f5d510SDavid Greenman 220561f5d510SDavid Greenman /* 220661f5d510SDavid Greenman * perform msync on all vnodes under a mount point 220761f5d510SDavid Greenman * the mount point must be locked. 220861f5d510SDavid Greenman */ 220961f5d510SDavid Greenman void 221061f5d510SDavid Greenman vfs_msync(struct mount *mp, int flags) { 2211a316d390SJohn Dyson struct vnode *vp, *nvp; 221261f5d510SDavid Greenman loop: 2213a316d390SJohn Dyson for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { 221461f5d510SDavid Greenman 221561f5d510SDavid Greenman if (vp->v_mount != mp) 221661f5d510SDavid Greenman goto loop; 2217a316d390SJohn Dyson nvp = vp->v_mntvnodes.le_next; 221861f5d510SDavid Greenman if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT)) 221961f5d510SDavid Greenman continue; 2220aa2cabb9SDavid Greenman if (vp->v_object && 22216476c0d2SJohn Dyson (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { 222224a1cce3SDavid Greenman vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE); 222361f5d510SDavid Greenman } 222461f5d510SDavid Greenman } 222561f5d510SDavid Greenman } 22266476c0d2SJohn Dyson 22276476c0d2SJohn Dyson /* 22286476c0d2SJohn Dyson * Create the VM object needed for VMIO and mmap support. This 22296476c0d2SJohn Dyson * is done for all VREG files in the system. Some filesystems might 22306476c0d2SJohn Dyson * afford the additional metadata buffering capability of the 22316476c0d2SJohn Dyson * VMIO code by making the device node be VMIO mode also. 
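 * (As vref() above shows, the object is created on demand the first time
 * a VREG vnode is referenced without one.)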
22326476c0d2SJohn Dyson */ 22336476c0d2SJohn Dyson int 22346476c0d2SJohn Dyson vfs_object_create(vp, p, cred, waslocked) 22356476c0d2SJohn Dyson struct vnode *vp; 22366476c0d2SJohn Dyson struct proc *p; 22376476c0d2SJohn Dyson struct ucred *cred; 22386476c0d2SJohn Dyson int waslocked; 22396476c0d2SJohn Dyson { 22406476c0d2SJohn Dyson struct vattr vat; 22416476c0d2SJohn Dyson vm_object_t object; 22426476c0d2SJohn Dyson int error = 0; 22436476c0d2SJohn Dyson 22446476c0d2SJohn Dyson retry: 22456476c0d2SJohn Dyson if ((object = vp->v_object) == NULL) { 22466476c0d2SJohn Dyson if (vp->v_type == VREG) { 22476476c0d2SJohn Dyson if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0) 22486476c0d2SJohn Dyson goto retn; 22496476c0d2SJohn Dyson (void) vnode_pager_alloc(vp, 22506476c0d2SJohn Dyson OFF_TO_IDX(round_page(vat.va_size)), 0, 0); 22516476c0d2SJohn Dyson } else { 22526476c0d2SJohn Dyson /* 22536476c0d2SJohn Dyson * This simply allocates the biggest object possible 22546476c0d2SJohn Dyson * for a VBLK vnode. This should be fixed, but doesn't 22556476c0d2SJohn Dyson * cause any problems (yet). 22566476c0d2SJohn Dyson */ 22576476c0d2SJohn Dyson (void) vnode_pager_alloc(vp, INT_MAX, 0, 0); 22586476c0d2SJohn Dyson } 22596476c0d2SJohn Dyson vp->v_object->flags |= OBJ_VFS_REF; 22606476c0d2SJohn Dyson } else { 22616476c0d2SJohn Dyson if (object->flags & OBJ_DEAD) { 22626476c0d2SJohn Dyson if (waslocked) 2263996c772fSJohn Dyson VOP_UNLOCK(vp, 0, p); 22646476c0d2SJohn Dyson tsleep(object, PVM, "vodead", 0); 22656476c0d2SJohn Dyson if (waslocked) 2266996c772fSJohn Dyson vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 22676476c0d2SJohn Dyson goto retry; 22686476c0d2SJohn Dyson } 22696476c0d2SJohn Dyson if ((object->flags & OBJ_VFS_REF) == 0) { 22706476c0d2SJohn Dyson object->flags |= OBJ_VFS_REF; 22716476c0d2SJohn Dyson vm_object_reference(object); 22726476c0d2SJohn Dyson } 22736476c0d2SJohn Dyson } 22746476c0d2SJohn Dyson if (vp->v_object) 22756476c0d2SJohn Dyson vp->v_flag |= VVMIO; 22766476c0d2SJohn Dyson 22776476c0d2SJohn Dyson retn: 22786476c0d2SJohn Dyson return error; 22796476c0d2SJohn Dyson } 2280b15a966eSPoul-Henning Kamp 2281b15a966eSPoul-Henning Kamp void 2282a051452aSPoul-Henning Kamp vfree(vp) 2283b15a966eSPoul-Henning Kamp struct vnode *vp; 2284b15a966eSPoul-Henning Kamp { 2285a051452aSPoul-Henning Kamp simple_lock(&vnode_free_list_slock); 2286a051452aSPoul-Henning Kamp if (vp->v_flag & VAGE) { 2287a051452aSPoul-Henning Kamp TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2288a051452aSPoul-Henning Kamp } else { 2289b15a966eSPoul-Henning Kamp TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 22908670684aSPoul-Henning Kamp } 2291a051452aSPoul-Henning Kamp freevnodes++; 2292b15a966eSPoul-Henning Kamp simple_unlock(&vnode_free_list_slock); 2293a051452aSPoul-Henning Kamp vp->v_flag &= ~VAGE; 2294a051452aSPoul-Henning Kamp vp->v_flag |= VFREE; 2295b15a966eSPoul-Henning Kamp } 2296a051452aSPoul-Henning Kamp 2297a051452aSPoul-Henning Kamp void 2298a051452aSPoul-Henning Kamp vbusy(vp) 2299a051452aSPoul-Henning Kamp struct vnode *vp; 2300a051452aSPoul-Henning Kamp { 2301a051452aSPoul-Henning Kamp simple_lock(&vnode_free_list_slock); 2302a051452aSPoul-Henning Kamp TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2303a051452aSPoul-Henning Kamp freevnodes--; 2304a051452aSPoul-Henning Kamp simple_unlock(&vnode_free_list_slock); 2305a051452aSPoul-Henning Kamp vp->v_flag &= ~VFREE; 2306b15a966eSPoul-Henning Kamp } 2307
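/*
 * A minimal sketch (the caller is hypothetical, hence "#if 0") of the
 * hold-count interface defined earlier in this file: vhold() keeps a
 * vnode off the free list even while its use count is zero, and vdrop()
 * lets vfree() return it to the free list once nothing pins it any
 * longer.
 */
#if 0
static void
example_vnode_pin(vp)
	struct vnode *vp;
{
	vhold(vp);		/* pin vp while deferred work refers to it */
	/* ... queue asynchronous I/O or similar work naming vp ... */
	vdrop(vp);		/* unpin; vp may be recycled again */
}
#endif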