/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $Id: vfs_subr.c,v 1.181 1999/01/08 17:31:17 eivind Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/domain.h>
#include <sys/dirent.h>
#include <sys/vmmeter.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	insmntque __P((struct vnode *vp, struct mount *mp));
static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
static void	vfree __P((struct vnode *));
static void	vgonel __P((struct vnode *vp, struct proc *p));
static unsigned long	numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
struct tobefreelist vnode_tobefree_list;	/* vnode free list */

static u_long wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
static u_long freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

int vfs_ioopt = 0;
#ifdef ENABLE_VFS_IOOPT
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

struct mntlist mountlist;	/* mounted filesystem list */
struct simplelock mountlist_slock;
struct simplelock mntvnode_slock;
int nfs_mount_type = -1;
#ifndef NULL_SIMPLELOCKS
static struct simplelock mntid_slock;
static struct simplelock vnode_free_list_slock;
static struct simplelock spechash_slock;
#endif
struct nfs_public nfs_pub;	/* publicly exported FS */
static vm_zone_t vnode_zone;

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;
int rushjob;			/* number of slots to run ASAP */

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_tobefree_list);
	simple_lock_init(&vnode_free_list_slock);
	CIRCLEQ_INIT(&mountlist);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, p)
	struct mount *mp;
	int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		if (interlkp) {
			simple_unlock(interlkp);
		}
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
		if (interlkp) {
			simple_lock(interlkp);
		}
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, p)
	struct mount *mp;
	struct proc *p;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
}
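/*
 * Usage sketch (illustrative only, not compiled): the canonical way to
 * walk the mount list with vfs_busy()/vfs_unbusy(), as the DDB
 * "lockedvnodes" command below does.  On success vfs_busy() releases
 * the interlock passed to it; on failure the interlock is left held.
 */
#if 0
	struct mount *mp, *nmp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		/* ... examine the busied mount point ... */
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
#endif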
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char	*fstypename;
	char	*devname;
	struct mount **mpp;
{
	struct proc *p = curproc;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) __P((void));
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			simple_unlock(&mountlist_slock);
			return (mp);
		}
	}
	simple_unlock(&mountlist_slock);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtype = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (vfs_getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	simple_unlock(&mntid_slock);
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s;
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *tvp, *nvp;
	vm_object_t object;
	TAILQ_HEAD(freelst, vnode) vnode_tmp_list;

	/*
	 * We take the least recently used vnode from the freelist
	 * if we can get it and it has no cached pages, and no
	 * namecache entries are relative to it.
	 * Otherwise we allocate a new vnode.
	 */

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	TAILQ_INIT(&vnode_tmp_list);

	for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_freelist);
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		if (vp->v_flag & VAGE) {
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		} else {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		}
		vp->v_flag &= ~(VTBFREE|VAGE);
		vp->v_flag |= VFREE;
		if (vp->v_usecount)
			panic("tobe free vnode isn't");
		freevnodes++;
	}

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;
	} else {
		for (vp = TAILQ_FIRST(&vnode_free_list); vp; vp = nvp) {
			nvp = TAILQ_NEXT(vp, v_freelist);
			if (!simple_lock_try(&vp->v_interlock))
				continue;
			if (vp->v_usecount)
				panic("free vnode isn't");

			object = vp->v_object;
			if (object && (object->resident_page_count || object->ref_count)) {
				printf("object inconsistent state: RPC: %d, RC: %d\n",
				    object->resident_page_count, object->ref_count);
				/* Don't recycle if it's caching some pages */
				TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
				TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
				continue;
			} else if (LIST_FIRST(&vp->v_cache_src)) {
				/* Don't recycle if active in the namecache */
				simple_unlock(&vp->v_interlock);
				continue;
			} else {
				break;
			}
		}
	}

	for (tvp = TAILQ_FIRST(&vnode_tmp_list); tvp; tvp = nvp) {
		nvp = TAILQ_NEXT(tvp, v_freelist);
		TAILQ_REMOVE(&vnode_tmp_list, tvp, v_freelist);
		TAILQ_INSERT_TAIL(&vnode_free_list, tvp, v_freelist);
		simple_unlock(&tvp->v_interlock);
	}

	if (vp) {
		vp->v_flag |= VDOOMED;
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
		simple_unlock(&vnode_free_list_slock);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
			vgonel(vp, p);
		} else {
			simple_unlock(&vp->v_interlock);
		}

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
		vp->v_maxio = 0;
	} else {
		simple_unlock(&vnode_free_list_slock);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		simple_lock_init(&vp->v_interlock);
		vp->v_dd = vp;
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	splx(s);

	vfs_object_create(vp, p, p->p_ucred);
	return (0);
}
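/*
 * Usage sketch (illustrative only, not compiled): callers hand
 * getnewvnode() a tag, an optional mount point and an op vector, and
 * get back a referenced vnode, e.g. as bdevvp() below does:
 */
#if 0
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
#endif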
/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		simple_unlock(&mntvnode_slock);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (;;) {
		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
		if (!blist)
			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 4), "vinvalbuf",
				    slptimeo);
				if (error) {
					splx(s);
					return (error);
				}
				break;
			}
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.  Note that vfs_bio_awrite expects
			 * buffers to reside on a queue, while VOP_BWRITE and
			 * brelse do not.
			 */
			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
			    (flags & V_SAVE)) {

				if (bp->b_vp == vp) {
					if (bp->b_flags & B_CLUSTEROK) {
						vfs_bio_awrite(bp);
					} else {
						bremfree(bp);
						bp->b_flags |= (B_BUSY | B_ASYNC);
						VOP_BWRITE(bp);
					}
				} else {
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					(void) VOP_BWRITE(bp);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF | B_BUSY);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	simple_lock(&vp->v_interlock);
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	simple_unlock(&vp->v_interlock);

	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
		panic("vinvalbuf: flush failed");
	return (0);
}
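/*
 * Usage sketch (illustrative only, not compiled): vclean() below uses
 * V_SAVE to write dirty buffers back before invalidating; passing 0
 * instead discards the buffers outright.
 */
#if 0
	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);	/* flush, saving dirty data */
	vinvalbuf(vp, 0, NOCRED, p, 0, 0);	/* discard everything */
#endif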
/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, p, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct proc *p;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (bp->b_flags & B_BUSY) {
					bp->b_flags |= B_WANTED;
					tsleep(bp, PRIBIO + 4, "vtrb1", 0);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_BUSY | B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp && (((nbp->b_xflags & B_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (bp->b_flags & B_BUSY) {
					bp->b_flags |= B_WANTED;
					tsleep(bp, PRIBIO + 4, "vtrb2", 0);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_BUSY | B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp && (((nbp->b_xflags & B_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (bp->b_flags & B_BUSY) {
					bp->b_flags |= B_WANTED;
					tsleep(bp, PRIBIO, "vtrb3", 0);
				} else {
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					VOP_BWRITE(bp);
				}
				goto restartsync;
			}

		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	vhold(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bp->b_xflags |= B_VNCLEAN;
	bp->b_xflags &= ~B_VNDIRTY;
	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	struct buflists *listheadp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (B_VNDIRTY|B_VNCLEAN)) {
		if (bp->b_xflags & B_VNDIRTY)
			listheadp = &vp->v_dirtyblkhd;
		else
			listheadp = &vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(B_VNDIRTY|B_VNCLEAN);
	}
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, vnodes for mounted
 * block devices (filesystem metadata) are delayed only about half as
 * long as file data.  Similarly, directory updates are more critical,
 * so they are delayed only about a third as long as file data.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at
 * a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */

/*
 * Add an item to the syncer work queue.
 */
void
vn_syncer_add_to_worklist(vp, delay)
	struct vnode *vp;
	int delay;
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}
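/*
 * Usage sketch (illustrative only, not compiled): reassignbuf() below
 * schedules a newly dirtied vnode on the wheel, shortening the delay
 * for directories and for mounted block devices as described above:
 */
#if 0
	vn_syncer_add_to_worklist(newvp, syncdelay / 3);	/* a directory */
	vn_syncer_add_to_worklist(newvp, syncdelay / 2);	/* a mounted device */
	vn_syncer_add_to_worklist(newvp, syncdelay);		/* ordinary file data */
#endif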
static void sched_sync __P((void));
static struct	proc *updateproc;
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT_KT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;
	struct proc *p = updateproc;

	for (;;) {
		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
			VOP_UNLOCK(vp, 0, p);
			if (LIST_FIRST(slp) == vp) {
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    vp->v_type != VBLK)
					panic("sched_sync: fsync failed");
				/*
				 * Move ourselves to the back of the sync list.
				 */
				LIST_REMOVE(vp, v_synclist);
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
		}

		/*
		 * Do soft update processing.
		 */
		if (bioops.io_sync)
			(*bioops.io_sync)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}
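/*
 * Illustrative sketch (not compiled): a producer of dirty state, such
 * as the soft update code mentioned above, asks the syncer to work off
 * extra queue slots early simply by bumping rushjob; the daemon notices
 * the new value on its next pass around the lbolt sleep.
 */
#if 0
	rushjob += 1;	/* syncer will work off one extra second of queue */
#endif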
/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	bp->b_vp = (struct vnode *) 0;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	struct buflists *listheadp;
	struct vnode *oldvp;
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (B_VNDIRTY|B_VNCLEAN)) {
		oldvp = bp->b_vp;
		if (bp->b_xflags & B_VNDIRTY)
			listheadp = &oldvp->v_dirtyblkhd;
		else
			listheadp = &oldvp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(B_VNDIRTY|B_VNCLEAN);
		vdrop(oldvp);
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = syncdelay / 3;
				break;
			case VBLK:
				if (newvp->v_specmountpoint != NULL) {
					delay = syncdelay / 2;
					break;
				}
				/* fall through */
			default:
				delay = syncdelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		bp->b_xflags |= B_VNDIRTY;
		tbp = TAILQ_FIRST(listheadp);
		if (tbp == NULL ||
		    (bp->b_lblkno >= 0 && tbp->b_lblkno > bp->b_lblkno)) {
			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
		} else {
			if (bp->b_lblkno >= 0) {
				struct buf *ttbp;
				while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
				    (ttbp->b_lblkno < bp->b_lblkno)) {
					tbp = ttbp;
				}
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
			} else {
				TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
			}
		}
	} else {
		bp->b_xflags |= B_VNCLEAN;
		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	bp->b_vp = newvp;
	vhold(bp->b_vp);
	splx(s);
}

/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	/* XXX 255 is for mfs. */
	if (dev == NODEV || (major(dev) != 255 && (major(dev) >= nblkdev ||
	    bdevsw[major(dev)] == NULL))) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *)0)) != NULL) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 * Only alias active device nodes.
		 * Not sure why we don't re-use this like we do below.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			/*
			 * It disappeared, and we may have slept.
			 * Restart from the beginning.
			 */
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	/*
	 * It would be a lot clearer what is going on here if
	 * this had been expressed as:
	 *	if ( vp && (vp->v_tag == VT_NON))
	 * and the clauses had been swapped.
	 */
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * Put the new vnode into the hash chain,
		 * and if there was an alias, connect them.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specmountpoint = NULL;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/*
	 * if ( vp && (vp->v_tag == VT_NON))
	 * We have a vnode alias, but it is trashed.
	 * Make it look like it's newly allocated (by getnewvnode()).
	 * The caller should use this instead.
	 */
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	register struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0) {
		simple_lock(&vp->v_interlock);
	}
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
			/*
			 * Must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			simple_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
vref(struct vnode *vp)
{
	simple_lock(&vp->v_interlock);
	vp->v_usecount++;
	simple_unlock(&vp->v_interlock);
}
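/*
 * Usage sketch (illustrative only, not compiled): the vget()/vput()
 * pairing used when taking a vnode off a hash chain, as checkalias()
 * above does; a failed vget() means the vnode was reclaimed while we
 * slept, so the caller rescans.
 */
#if 0
	if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
		simple_unlock(&spechash_slock);
		goto loop;		/* it was reclaimed; start over */
	}
	/* ... use the locked, referenced vnode ... */
	vput(vp);			/* unlock and drop the reference */
#endif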
/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		simple_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we must
		 * call VOP_INACTIVE with the node locked.  So, in the case of
		 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
			VOP_INACTIVE(vp, p);
		}

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		simple_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	KASSERT(vp != NULL, ("vput: null vp"));

	simple_lock(&vp->v_interlock);

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, p);
		return;

	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we must
		 * call VOP_INACTIVE with the node locked.  So, in the case of
		 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
		 */
		simple_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, p);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}

/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	splx(s);
}

/*
 * One less who cares about this vnode.
 */
void
vdrop(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	if (vp->v_holdcnt <= 0)
		panic("vdrop: holdcnt");
	vp->v_holdcnt--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	splx(s);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;

		simple_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
static void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;
	vm_object_t obj;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	if ((obj = vp->v_object) != NULL) {
		if (obj->ref_count == 0) {
			/*
			 * This is a normal way of shutting down the object/vnode
			 * association.
			 */
			vm_object_terminate(obj);
		} else {
			/*
			 * Woe to the process that tries to page now :-).
			 */
			vm_pager_deallocate(obj);
		}
	}

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");

	if (active)
		vrele(vp);

	cache_purge(vp);
	if (vp->v_vnlock) {
#if 0 /* This is the only place we have LK_DRAINED in the entire kernel ??? */
#ifdef DIAGNOSTIC
		if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
			vprint("vclean: lock not drained", vp);
#endif
#endif
		FREE(vp->v_vnlock, M_VNODE);
		vp->v_vnlock = NULL;
	}

	if (VSHOULDFREE(vp))
		vfree(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP) {
				simple_unlock(&spechash_slock);
			}
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
		if (vp->v_flag & VXWANT) {
			vp->v_flag &= ~VXWANT;
			wakeup(vp);
		}
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct proc *p;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			simple_unlock(inter_lkp);
		}
		vgonel(vp, p);
		return (1);
	}
	simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct proc *p = curproc;	/* XXX */

	simple_lock(&vp->v_interlock);
	vgonel(vp, p);
}

/*
 * vgone, with the vp interlock held.
 */
static void
vgonel(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int s;
	struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, p);
	simple_lock(&vp->v_interlock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
		simple_lock(&spechash_slock);
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		simple_unlock(&spechash_slock);
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the back
	 * pointer and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		if (vp->v_flag & VFREE) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		} else if (vp->v_flag & VTBFREE) {
			TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
			vp->v_flag &= ~VTBFREE;
			freevnodes++;
		} else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		splx(s);
	}

	vp->v_type = VBAD;
	simple_unlock(&vp->v_interlock);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;
	int rc = 0;

	simple_lock(&spechash_slock);
	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		rc = 1;
		break;
	}
	simple_unlock(&spechash_slock);
	return (rc);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	simple_lock(&spechash_slock);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			simple_unlock(&spechash_slock);
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	simple_unlock(&spechash_slock);
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[96];

	if (label != NULL)
		printf("%s: %p: ", label, (void *)vp);
	else
		printf("%p: ", (void *)vp);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (vp->v_flag & VDOOMED)
		strcat(buf, "|VDOOMED");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
#include <ddb/ddb.h>
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = vp->v_mntvnodes.le_next) {
			if (VOP_ISLOCKED(vp))
				vprint((char *)0, vp);
		}
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl SYSCTL_HANDLER_ARGS
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf SYSCTL_HANDLER_ARGS
{
	int error;
	struct vfsconf *vfsp;
	struct ovfsconf ovfs;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
		strcpy(ovfs.vfc_name, vfsp->vfc_name);
		ovfs.vfc_index = vfsp->vfc_typenum;
		ovfs.vfc_refcount = vfsp->vfc_refcount;
		ovfs.vfc_flags = vfsp->vfc_flags;
		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
		if (error)
			return error;
	}
	return 0;
}

#endif /* 1 || COMPAT_PRELITE2 */

#if 0
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	struct proc *p = curproc;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
again:
		simple_lock(&mntvnode_slock);
		for (vp = mp->mnt_vnodelist.lh_first;
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				goto again;
			}
			nvp = vp->v_mntvnodes.le_next;
			simple_unlock(&mntvnode_slock);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
			simple_lock(&mntvnode_slock);
		}
		simple_unlock(&mntvnode_slock);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);

	return (0);
}
#endif

/*
 * XXX
 * Exporting the vnode list on large systems causes them to crash.
 * Exporting the vnode list on medium systems causes sysctl to coredump.
 */
#if 0
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");
#endif

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int error = 0;

	if (vp->v_specmountpoint != NULL)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		simple_lock(&spechash_slock);
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specmountpoint != NULL) {
				error = EBUSY;
				break;
			}
		}
		simple_unlock(&spechash_slock);
	}
	return (error);
}

/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp;
	struct proc *p;
	int error;

	if (curproc != NULL)
		p = curproc;
	else
		p = initproc;	/* XXX XXX should this be proc0? */
	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;
		error = dounmount(mp, MNT_FORCE, p);
		if (error) {
			printf("unmount of %s failed (",
			    mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * It seems silly to initialize every address family
		 * when most are never used; do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(rn, w)
	struct radix_node *rn;
	void *w;
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
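 * Each per-family radix tree is walked with vfs_free_netcred(), which
 * deletes and frees every entry before the tree head itself is freed.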
 */
static void
vfs_free_addrlist(nep)
	struct netexport *nep;
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}


/*
 * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055).
 */
int
vfs_setpublicfs(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info; the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) {
		/* Don't leak the locked root vnode on error. */
		vput(rvp);
		return (error);
	}

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct sockaddr *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
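		 * A match for the specific host or network in the
		 * per-family radix tree takes precedence; RNF_ROOT
		 * entries are the tree's internal root nodes, not real
		 * exports, so a root match is discarded.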
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((caddr_t)saddr,
					    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
	struct vm_object *obj;
	int anyio, tries;

	tries = 5;
loop:
	anyio = 0;
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {

		nvp = vp->v_mntvnodes.le_next;

		if (vp->v_mount != mp) {
			goto loop;
		}

		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
			continue;

		if (flags != MNT_WAIT) {
			obj = vp->v_object;
			if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
				continue;
			if (VOP_ISLOCKED(vp))
				continue;
		}

		simple_lock(&vp->v_interlock);
		if (vp->v_object &&
		    (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
			if (!vget(vp,
			    LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ,
			    curproc)) {
				if (vp->v_object) {
					vm_object_page_clean(vp->v_object,
					    0, 0,
					    flags == MNT_WAIT ? OBJPC_SYNC : 0);
					anyio = 1;
				}
				vput(vp);
			}
		} else {
			simple_unlock(&vp->v_interlock);
		}
	}
	if (anyio && (--tries > 0))
		goto loop;
}

/*
 * Create the VM object needed for VMIO and mmap support. This
 * is done for all VREG files in the system. Some filesystems might
 * afford the additional metadata buffering capability of the
 * VMIO code by making the device node be VMIO mode also.
 *
 * vp must be locked when vfs_object_create is called.
 */
int
vfs_object_create(vp, p, cred)
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
{
	struct vattr vat;
	vm_object_t object;
	int error = 0;

	if ((vp->v_type != VREG) && (vp->v_type != VBLK))
		return 0;

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
		} else if (major(vp->v_rdev) < nblkdev &&
		    bdevsw[major(vp->v_rdev)] != NULL) {
			/*
			 * This simply allocates the biggest object possible
			 * for a VBLK vnode. This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX),
			    0, 0);
		}
		/*
		 * Drop the extra object and vnode references acquired
		 * by vnode_pager_alloc(), but only if an object was
		 * actually allocated (a VBLK vnode with an invalid
		 * major number leaves object NULL).
		 */
		if (object != NULL) {
			object->ref_count--;
			vp->v_usecount--;
		}
	} else {
		if (object->flags & OBJ_DEAD) {
			VOP_UNLOCK(vp, 0, p);
			tsleep(object, PVM, "vodead", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			goto retry;
		}
	}

	if (vp->v_object)
		vp->v_flag |= VOBJBUF;

retn:
	return error;
}

static void
vfree(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	if (vp->v_flag & VTBFREE) {
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		vp->v_flag &= ~VTBFREE;
	}
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
	simple_unlock(&vnode_free_list_slock);
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
	splx(s);
}

void
vbusy(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	if (vp->v_flag & VTBFREE) {
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		vp->v_flag &= ~VTBFREE;
	} else {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
	}
	simple_unlock(&vnode_free_list_slock);
	vp->v_flag &= ~(VFREE|VAGE);
	splx(s);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode. Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions. (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(vp, p, events)
	struct vnode *vp;
	struct proc *p;
	short events;
{
	simple_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		simple_unlock(&vp->v_pollinfo.vpi_lock);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
	simple_unlock(&vp->v_pollinfo.vpi_lock);
	return 0;
}

/*
 * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{
	simple_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened. This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested. However, it does
		 * mean that only one event can be noticed at
		 * a time. (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	simple_unlock(&vp->v_pollinfo.vpi_lock);
}

/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(vp)
	struct vnode *vp;
{
	simple_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	simple_unlock(&vp->v_pollinfo.vpi_lock);
}



/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close ((int (*) __P((struct vop_close_args *)))nullop)
static int	sync_fsync __P((struct vop_fsync_args *));
static int	sync_inactive __P((struct vop_inactive_args *));
static int	sync_reclaim __P((struct vop_reclaim_args *));
#define sync_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define sync_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
static int	sync_print __P((struct vop_print_args *));
#define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)

static vop_t **sync_vnodeop_p;
static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
	{ NULL, NULL }
};
static struct vnodeopv_desc sync_vnodeop_opv_desc =
	{ &sync_vnodeop_p, sync_vnodeop_entries };

VNODEOP_SET(sync_vnodeop_opv_desc);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
	mp->mnt_syncer = vp;
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
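 * The syncer calls this vnode's VOP_FSYNC with MNT_LAZY when its slot
 * in the workitem queue comes due; unlike a normal fsync, it pushes
 * the dirty pages and buffers of the whole mount point.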
 */
static int
sync_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	struct proc *p = ap->a_p;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	simple_lock(&mountlist_slock);
	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
		simple_unlock(&mountlist_slock);
		return (0);
	}
	asyncflag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	vfs_msync(mp, MNT_NOWAIT);
	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
	if (asyncflag)
		mp->mnt_flag |= MNT_ASYNC;
	vfs_unbusy(mp, p);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	vgone(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 */
static int
sync_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	vp->v_mount->mnt_syncer = NULL;
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vp->v_flag &= ~VONWORKLST;
	}

	return (0);
}

/*
 * Print out a syncer vnode.
 */
static int
sync_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}
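
/*
 * Illustrative sketch (not part of the original file): a read-write
 * mount would typically allocate its syncer vnode once the mount has
 * succeeded, along the lines of
 *
 *	if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
 *		(void) vfs_allocate_syncvnode(mp);
 *
 * The syncer vnode is torn down again via vgone(), which reaches
 * sync_reclaim() above, when the filesystem is unmounted.
 */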