1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 39 * $FreeBSD$ 40 */ 41 42 /* 43 * External virtual filesystem routines 44 */ 45 #include "opt_ddb.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/buf.h> 50 #include <sys/conf.h> 51 #include <sys/dirent.h> 52 #include <sys/domain.h> 53 #include <sys/eventhandler.h> 54 #include <sys/fcntl.h> 55 #include <sys/kernel.h> 56 #include <sys/kthread.h> 57 #include <sys/malloc.h> 58 #include <sys/mount.h> 59 #include <sys/namei.h> 60 #include <sys/proc.h> 61 #include <sys/reboot.h> 62 #include <sys/socket.h> 63 #include <sys/stat.h> 64 #include <sys/sysctl.h> 65 #include <sys/vmmeter.h> 66 #include <sys/vnode.h> 67 68 #include <machine/limits.h> 69 70 #include <vm/vm.h> 71 #include <vm/vm_object.h> 72 #include <vm/vm_extern.h> 73 #include <vm/pmap.h> 74 #include <vm/vm_map.h> 75 #include <vm/vm_page.h> 76 #include <vm/vm_pager.h> 77 #include <vm/vnode_pager.h> 78 #include <vm/vm_zone.h> 79 80 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); 81 82 static void insmntque __P((struct vnode *vp, struct mount *mp)); 83 static void vclean __P((struct vnode *vp, int flags, struct proc *p)); 84 static void vfree __P((struct vnode *)); 85 static void vgonel __P((struct vnode *vp, struct proc *p)); 86 static unsigned long numvnodes; 87 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, ""); 88 89 enum vtype iftovt_tab[16] = { 90 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 91 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 92 }; 93 int vttoif_tab[9] = { 94 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 95 S_IFSOCK, S_IFIFO, S_IFMT, 96 }; 97 98 static TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */ 99 struct tobefreelist vnode_tobefree_list; /* vnode free list */ 100 101 static u_long wantfreevnodes = 25; 102 SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, ""); 103 static u_long freevnodes = 0; 104 SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, ""); 105 106 static int reassignbufcalls; 107 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, ""); 108 static int reassignbufloops; 109 SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, ""); 110 static int reassignbufsortgood; 111 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, ""); 112 static int reassignbufsortbad; 113 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, ""); 114 static int reassignbufmethod = 1; 115 SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, ""); 116 117 #ifdef ENABLE_VFS_IOOPT 118 int vfs_ioopt = 0; 119 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, ""); 120 #endif 121 122 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */ 123 struct simplelock mountlist_slock; 124 struct simplelock mntvnode_slock; 125 int nfs_mount_type = -1; 126 #ifndef NULL_SIMPLELOCKS 127 static struct simplelock mntid_slock; 128 static struct simplelock vnode_free_list_slock; 129 static struct simplelock spechash_slock; 130 #endif 131 struct nfs_public nfs_pub; /* publicly exported FS */ 132 static vm_zone_t vnode_zone; 133 134 /* 135 * The workitem queue. 136 */ 137 #define SYNCER_MAXDELAY 32 138 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 139 time_t syncdelay = 30; /* max time to delay syncing data */ 140 time_t filedelay = 30; /* time to delay syncing files */ 141 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, ""); 142 time_t dirdelay = 29; /* time to delay syncing directories */ 143 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, ""); 144 time_t metadelay = 28; /* time to delay syncing metadata */ 145 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, ""); 146 static int rushjob; /* number of slots to run ASAP */ 147 static int stat_rush_requests; /* number of times I/O speeded up */ 148 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, ""); 149 150 static int syncer_delayno = 0; 151 static long syncer_mask; 152 LIST_HEAD(synclist, vnode); 153 static struct synclist *syncer_workitem_pending; 154 155 int desiredvnodes; 156 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, 157 &desiredvnodes, 0, "Maximum number of vnodes"); 158 159 static void vfs_free_addrlist __P((struct netexport *nep)); 160 static int vfs_free_netcred __P((struct radix_node *rn, void *w)); 161 static int vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep, 162 struct export_args *argp)); 163 164 /* 165 * Initialize the vnode management data structures. 166 */ 167 void 168 vntblinit() 169 { 170 171 desiredvnodes = maxproc + cnt.v_page_count / 4; 172 simple_lock_init(&mntvnode_slock); 173 simple_lock_init(&mntid_slock); 174 simple_lock_init(&spechash_slock); 175 TAILQ_INIT(&vnode_free_list); 176 TAILQ_INIT(&vnode_tobefree_list); 177 simple_lock_init(&vnode_free_list_slock); 178 vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5); 179 /* 180 * Initialize the filesystem syncer. 181 */ 182 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 183 &syncer_mask); 184 syncer_maxdelay = syncer_mask + 1; 185 } 186 187 /* 188 * Mark a mount point as busy. Used to synchronize access and to delay 189 * unmounting. Interlock is not released on failure. 190 */ 191 int 192 vfs_busy(mp, flags, interlkp, p) 193 struct mount *mp; 194 int flags; 195 struct simplelock *interlkp; 196 struct proc *p; 197 { 198 int lkflags; 199 200 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 201 if (flags & LK_NOWAIT) 202 return (ENOENT); 203 mp->mnt_kern_flag |= MNTK_MWAIT; 204 if (interlkp) { 205 simple_unlock(interlkp); 206 } 207 /* 208 * Since all busy locks are shared except the exclusive 209 * lock granted when unmounting, the only place that a 210 * wakeup needs to be done is at the release of the 211 * exclusive lock at the end of dounmount. 212 */ 213 tsleep((caddr_t)mp, PVFS, "vfs_busy", 0); 214 if (interlkp) { 215 simple_lock(interlkp); 216 } 217 return (ENOENT); 218 } 219 lkflags = LK_SHARED | LK_NOPAUSE; 220 if (interlkp) 221 lkflags |= LK_INTERLOCK; 222 if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p)) 223 panic("vfs_busy: unexpected lock failure"); 224 return (0); 225 } 226 227 /* 228 * Free a busy filesystem. 229 */ 230 void 231 vfs_unbusy(mp, p) 232 struct mount *mp; 233 struct proc *p; 234 { 235 236 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p); 237 } 238 239 /* 240 * Lookup a filesystem type, and if found allocate and initialize 241 * a mount structure for it. 242 * 243 * Devname is usually updated by mount(8) after booting. 244 */ 245 int 246 vfs_rootmountalloc(fstypename, devname, mpp) 247 char *fstypename; 248 char *devname; 249 struct mount **mpp; 250 { 251 struct proc *p = curproc; /* XXX */ 252 struct vfsconf *vfsp; 253 struct mount *mp; 254 255 if (fstypename == NULL) 256 return (ENODEV); 257 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 258 if (!strcmp(vfsp->vfc_name, fstypename)) 259 break; 260 if (vfsp == NULL) 261 return (ENODEV); 262 mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); 263 bzero((char *)mp, (u_long)sizeof(struct mount)); 264 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE); 265 (void)vfs_busy(mp, LK_NOWAIT, 0, p); 266 LIST_INIT(&mp->mnt_vnodelist); 267 mp->mnt_vfc = vfsp; 268 mp->mnt_op = vfsp->vfc_vfsops; 269 mp->mnt_flag = MNT_RDONLY; 270 mp->mnt_vnodecovered = NULLVP; 271 vfsp->vfc_refcount++; 272 mp->mnt_iosize_max = DFLTPHYS; 273 mp->mnt_stat.f_type = vfsp->vfc_typenum; 274 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 275 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 276 mp->mnt_stat.f_mntonname[0] = '/'; 277 mp->mnt_stat.f_mntonname[1] = 0; 278 (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); 279 *mpp = mp; 280 return (0); 281 } 282 283 /* 284 * Find an appropriate filesystem to use for the root. If a filesystem 285 * has not been preselected, walk through the list of known filesystems 286 * trying those that have mountroot routines, and try them until one 287 * works or we have tried them all. 288 */ 289 #ifdef notdef /* XXX JH */ 290 int 291 lite2_vfs_mountroot() 292 { 293 struct vfsconf *vfsp; 294 extern int (*lite2_mountroot) __P((void)); 295 int error; 296 297 if (lite2_mountroot != NULL) 298 return ((*lite2_mountroot)()); 299 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 300 if (vfsp->vfc_mountroot == NULL) 301 continue; 302 if ((error = (*vfsp->vfc_mountroot)()) == 0) 303 return (0); 304 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error); 305 } 306 return (ENODEV); 307 } 308 #endif 309 310 /* 311 * Lookup a mount point by filesystem identifier. 312 */ 313 struct mount * 314 vfs_getvfs(fsid) 315 fsid_t *fsid; 316 { 317 register struct mount *mp; 318 319 simple_lock(&mountlist_slock); 320 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 321 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 322 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 323 simple_unlock(&mountlist_slock); 324 return (mp); 325 } 326 } 327 simple_unlock(&mountlist_slock); 328 return ((struct mount *) 0); 329 } 330 331 /* 332 * Get a new unique fsid 333 * 334 * Keep in mind that several mounts may be running in parallel, 335 * so always increment mntid_base even if lower numbers are available. 336 */ 337 338 static u_short mntid_base; 339 340 void 341 vfs_getnewfsid(mp) 342 struct mount *mp; 343 { 344 fsid_t tfsid; 345 int mtype; 346 347 simple_lock(&mntid_slock); 348 349 mtype = mp->mnt_vfc->vfc_typenum; 350 for (;;) { 351 tfsid.val[0] = makeudev(255, mtype + (mntid_base << 16)); 352 tfsid.val[1] = mtype; 353 ++mntid_base; 354 if (vfs_getvfs(&tfsid) == NULL) 355 break; 356 } 357 358 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 359 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 360 361 simple_unlock(&mntid_slock); 362 } 363 364 /* 365 * Knob to control the precision of file timestamps: 366 * 367 * 0 = seconds only; nanoseconds zeroed. 368 * 1 = seconds and nanoseconds, accurate within 1/HZ. 369 * 2 = seconds and nanoseconds, truncated to microseconds. 370 * >=3 = seconds and nanoseconds, maximum precision. 371 */ 372 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 373 374 static int timestamp_precision = TSP_SEC; 375 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 376 ×tamp_precision, 0, ""); 377 378 /* 379 * Get a current timestamp. 380 */ 381 void 382 vfs_timestamp(tsp) 383 struct timespec *tsp; 384 { 385 struct timeval tv; 386 387 switch (timestamp_precision) { 388 case TSP_SEC: 389 tsp->tv_sec = time_second; 390 tsp->tv_nsec = 0; 391 break; 392 case TSP_HZ: 393 getnanotime(tsp); 394 break; 395 case TSP_USEC: 396 microtime(&tv); 397 TIMEVAL_TO_TIMESPEC(&tv, tsp); 398 break; 399 case TSP_NSEC: 400 default: 401 nanotime(tsp); 402 break; 403 } 404 } 405 406 /* 407 * Set vnode attributes to VNOVAL 408 */ 409 void 410 vattr_null(vap) 411 register struct vattr *vap; 412 { 413 414 vap->va_type = VNON; 415 vap->va_size = VNOVAL; 416 vap->va_bytes = VNOVAL; 417 vap->va_mode = VNOVAL; 418 vap->va_nlink = VNOVAL; 419 vap->va_uid = VNOVAL; 420 vap->va_gid = VNOVAL; 421 vap->va_fsid = VNOVAL; 422 vap->va_fileid = VNOVAL; 423 vap->va_blocksize = VNOVAL; 424 vap->va_rdev = VNOVAL; 425 vap->va_atime.tv_sec = VNOVAL; 426 vap->va_atime.tv_nsec = VNOVAL; 427 vap->va_mtime.tv_sec = VNOVAL; 428 vap->va_mtime.tv_nsec = VNOVAL; 429 vap->va_ctime.tv_sec = VNOVAL; 430 vap->va_ctime.tv_nsec = VNOVAL; 431 vap->va_flags = VNOVAL; 432 vap->va_gen = VNOVAL; 433 vap->va_vaflags = 0; 434 } 435 436 /* 437 * Routines having to do with the management of the vnode table. 438 */ 439 extern vop_t **dead_vnodeop_p; 440 441 /* 442 * Return the next vnode from the free list. 443 */ 444 int 445 getnewvnode(tag, mp, vops, vpp) 446 enum vtagtype tag; 447 struct mount *mp; 448 vop_t **vops; 449 struct vnode **vpp; 450 { 451 int s; 452 struct proc *p = curproc; /* XXX */ 453 struct vnode *vp, *tvp, *nvp; 454 vm_object_t object; 455 TAILQ_HEAD(freelst, vnode) vnode_tmp_list; 456 457 /* 458 * We take the least recently used vnode from the freelist 459 * if we can get it and it has no cached pages, and no 460 * namecache entries are relative to it. 461 * Otherwise we allocate a new vnode 462 */ 463 464 s = splbio(); 465 simple_lock(&vnode_free_list_slock); 466 TAILQ_INIT(&vnode_tmp_list); 467 468 for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) { 469 nvp = TAILQ_NEXT(vp, v_freelist); 470 TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist); 471 if (vp->v_flag & VAGE) { 472 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 473 } else { 474 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 475 } 476 vp->v_flag &= ~(VTBFREE|VAGE); 477 vp->v_flag |= VFREE; 478 if (vp->v_usecount) 479 panic("tobe free vnode isn't"); 480 freevnodes++; 481 } 482 483 if (wantfreevnodes && freevnodes < wantfreevnodes) { 484 vp = NULL; 485 } else if (!wantfreevnodes && freevnodes <= desiredvnodes) { 486 /* 487 * XXX: this is only here to be backwards compatible 488 */ 489 vp = NULL; 490 } else { 491 for (vp = TAILQ_FIRST(&vnode_free_list); vp; vp = nvp) { 492 nvp = TAILQ_NEXT(vp, v_freelist); 493 if (!simple_lock_try(&vp->v_interlock)) 494 continue; 495 if (vp->v_usecount) 496 panic("free vnode isn't"); 497 498 object = vp->v_object; 499 if (object && (object->resident_page_count || object->ref_count)) { 500 printf("object inconsistant state: RPC: %d, RC: %d\n", 501 object->resident_page_count, object->ref_count); 502 /* Don't recycle if it's caching some pages */ 503 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 504 TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist); 505 continue; 506 } else if (LIST_FIRST(&vp->v_cache_src)) { 507 /* Don't recycle if active in the namecache */ 508 simple_unlock(&vp->v_interlock); 509 continue; 510 } else { 511 break; 512 } 513 } 514 } 515 516 for (tvp = TAILQ_FIRST(&vnode_tmp_list); tvp; tvp = nvp) { 517 nvp = TAILQ_NEXT(tvp, v_freelist); 518 TAILQ_REMOVE(&vnode_tmp_list, tvp, v_freelist); 519 TAILQ_INSERT_TAIL(&vnode_free_list, tvp, v_freelist); 520 simple_unlock(&tvp->v_interlock); 521 } 522 523 if (vp) { 524 vp->v_flag |= VDOOMED; 525 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 526 freevnodes--; 527 simple_unlock(&vnode_free_list_slock); 528 cache_purge(vp); 529 vp->v_lease = NULL; 530 if (vp->v_type != VBAD) { 531 vgonel(vp, p); 532 } else { 533 simple_unlock(&vp->v_interlock); 534 } 535 536 #ifdef INVARIANTS 537 { 538 int s; 539 540 if (vp->v_data) 541 panic("cleaned vnode isn't"); 542 s = splbio(); 543 if (vp->v_numoutput) 544 panic("Clean vnode has pending I/O's"); 545 splx(s); 546 } 547 #endif 548 vp->v_flag = 0; 549 vp->v_lastw = 0; 550 vp->v_lasta = 0; 551 vp->v_cstart = 0; 552 vp->v_clen = 0; 553 vp->v_socket = 0; 554 vp->v_writecount = 0; /* XXX */ 555 } else { 556 simple_unlock(&vnode_free_list_slock); 557 vp = (struct vnode *) zalloc(vnode_zone); 558 bzero((char *) vp, sizeof *vp); 559 simple_lock_init(&vp->v_interlock); 560 vp->v_dd = vp; 561 cache_purge(vp); 562 LIST_INIT(&vp->v_cache_src); 563 TAILQ_INIT(&vp->v_cache_dst); 564 numvnodes++; 565 } 566 567 TAILQ_INIT(&vp->v_cleanblkhd); 568 TAILQ_INIT(&vp->v_dirtyblkhd); 569 vp->v_type = VNON; 570 vp->v_tag = tag; 571 vp->v_op = vops; 572 insmntque(vp, mp); 573 *vpp = vp; 574 vp->v_usecount = 1; 575 vp->v_data = 0; 576 splx(s); 577 578 vfs_object_create(vp, p, p->p_ucred); 579 return (0); 580 } 581 582 /* 583 * Move a vnode from one mount queue to another. 584 */ 585 static void 586 insmntque(vp, mp) 587 register struct vnode *vp; 588 register struct mount *mp; 589 { 590 591 simple_lock(&mntvnode_slock); 592 /* 593 * Delete from old mount point vnode list, if on one. 594 */ 595 if (vp->v_mount != NULL) 596 LIST_REMOVE(vp, v_mntvnodes); 597 /* 598 * Insert into list of vnodes for the new mount point, if available. 599 */ 600 if ((vp->v_mount = mp) == NULL) { 601 simple_unlock(&mntvnode_slock); 602 return; 603 } 604 LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes); 605 simple_unlock(&mntvnode_slock); 606 } 607 608 /* 609 * Update outstanding I/O count and do wakeup if requested. 610 */ 611 void 612 vwakeup(bp) 613 register struct buf *bp; 614 { 615 register struct vnode *vp; 616 617 bp->b_flags &= ~B_WRITEINPROG; 618 if ((vp = bp->b_vp)) { 619 vp->v_numoutput--; 620 if (vp->v_numoutput < 0) 621 panic("vwakeup: neg numoutput"); 622 if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) { 623 vp->v_flag &= ~VBWAIT; 624 wakeup((caddr_t) &vp->v_numoutput); 625 } 626 } 627 } 628 629 /* 630 * Flush out and invalidate all buffers associated with a vnode. 631 * Called with the underlying object locked. 632 */ 633 int 634 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo) 635 register struct vnode *vp; 636 int flags; 637 struct ucred *cred; 638 struct proc *p; 639 int slpflag, slptimeo; 640 { 641 register struct buf *bp; 642 struct buf *nbp, *blist; 643 int s, error; 644 vm_object_t object; 645 646 if (flags & V_SAVE) { 647 s = splbio(); 648 while (vp->v_numoutput) { 649 vp->v_flag |= VBWAIT; 650 error = tsleep((caddr_t)&vp->v_numoutput, 651 slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo); 652 if (error) { 653 splx(s); 654 return (error); 655 } 656 } 657 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 658 splx(s); 659 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0) 660 return (error); 661 s = splbio(); 662 if (vp->v_numoutput > 0 || 663 !TAILQ_EMPTY(&vp->v_dirtyblkhd)) 664 panic("vinvalbuf: dirty bufs"); 665 } 666 splx(s); 667 } 668 s = splbio(); 669 for (;;) { 670 blist = TAILQ_FIRST(&vp->v_cleanblkhd); 671 if (!blist) 672 blist = TAILQ_FIRST(&vp->v_dirtyblkhd); 673 if (!blist) 674 break; 675 676 for (bp = blist; bp; bp = nbp) { 677 nbp = TAILQ_NEXT(bp, b_vnbufs); 678 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 679 error = BUF_TIMELOCK(bp, 680 LK_EXCLUSIVE | LK_SLEEPFAIL, 681 "vinvalbuf", slpflag, slptimeo); 682 if (error == ENOLCK) 683 break; 684 splx(s); 685 return (error); 686 } 687 /* 688 * XXX Since there are no node locks for NFS, I 689 * believe there is a slight chance that a delayed 690 * write will occur while sleeping just above, so 691 * check for it. Note that vfs_bio_awrite expects 692 * buffers to reside on a queue, while VOP_BWRITE and 693 * brelse do not. 694 */ 695 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 696 (flags & V_SAVE)) { 697 698 if (bp->b_vp == vp) { 699 if (bp->b_flags & B_CLUSTEROK) { 700 BUF_UNLOCK(bp); 701 vfs_bio_awrite(bp); 702 } else { 703 bremfree(bp); 704 bp->b_flags |= B_ASYNC; 705 VOP_BWRITE(bp->b_vp, bp); 706 } 707 } else { 708 bremfree(bp); 709 (void) VOP_BWRITE(bp->b_vp, bp); 710 } 711 break; 712 } 713 bremfree(bp); 714 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 715 bp->b_flags &= ~B_ASYNC; 716 brelse(bp); 717 } 718 } 719 720 while (vp->v_numoutput > 0) { 721 vp->v_flag |= VBWAIT; 722 tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0); 723 } 724 725 splx(s); 726 727 /* 728 * Destroy the copy in the VM cache, too. 729 */ 730 simple_lock(&vp->v_interlock); 731 object = vp->v_object; 732 if (object != NULL) { 733 vm_object_page_remove(object, 0, 0, 734 (flags & V_SAVE) ? TRUE : FALSE); 735 } 736 simple_unlock(&vp->v_interlock); 737 738 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd)) 739 panic("vinvalbuf: flush failed"); 740 return (0); 741 } 742 743 /* 744 * Truncate a file's buffer and pages to a specified length. This 745 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 746 * sync activity. 747 */ 748 int 749 vtruncbuf(vp, cred, p, length, blksize) 750 register struct vnode *vp; 751 struct ucred *cred; 752 struct proc *p; 753 off_t length; 754 int blksize; 755 { 756 register struct buf *bp; 757 struct buf *nbp; 758 int s, anyfreed; 759 int trunclbn; 760 761 /* 762 * Round up to the *next* lbn. 763 */ 764 trunclbn = (length + blksize - 1) / blksize; 765 766 s = splbio(); 767 restart: 768 anyfreed = 1; 769 for (;anyfreed;) { 770 anyfreed = 0; 771 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) { 772 nbp = TAILQ_NEXT(bp, b_vnbufs); 773 if (bp->b_lblkno >= trunclbn) { 774 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 775 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 776 goto restart; 777 } else { 778 bremfree(bp); 779 bp->b_flags |= (B_INVAL | B_RELBUF); 780 bp->b_flags &= ~B_ASYNC; 781 brelse(bp); 782 anyfreed = 1; 783 } 784 if (nbp && 785 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 786 (nbp->b_vp != vp) || 787 (nbp->b_flags & B_DELWRI))) { 788 goto restart; 789 } 790 } 791 } 792 793 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 794 nbp = TAILQ_NEXT(bp, b_vnbufs); 795 if (bp->b_lblkno >= trunclbn) { 796 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 797 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 798 goto restart; 799 } else { 800 bremfree(bp); 801 bp->b_flags |= (B_INVAL | B_RELBUF); 802 bp->b_flags &= ~B_ASYNC; 803 brelse(bp); 804 anyfreed = 1; 805 } 806 if (nbp && 807 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 808 (nbp->b_vp != vp) || 809 (nbp->b_flags & B_DELWRI) == 0)) { 810 goto restart; 811 } 812 } 813 } 814 } 815 816 if (length > 0) { 817 restartsync: 818 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 819 nbp = TAILQ_NEXT(bp, b_vnbufs); 820 if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) { 821 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 822 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 823 goto restart; 824 } else { 825 bremfree(bp); 826 if (bp->b_vp == vp) { 827 bp->b_flags |= B_ASYNC; 828 } else { 829 bp->b_flags &= ~B_ASYNC; 830 } 831 VOP_BWRITE(bp->b_vp, bp); 832 } 833 goto restartsync; 834 } 835 836 } 837 } 838 839 while (vp->v_numoutput > 0) { 840 vp->v_flag |= VBWAIT; 841 tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0); 842 } 843 844 splx(s); 845 846 vnode_pager_setsize(vp, length); 847 848 return (0); 849 } 850 851 /* 852 * Associate a buffer with a vnode. 853 */ 854 void 855 bgetvp(vp, bp) 856 register struct vnode *vp; 857 register struct buf *bp; 858 { 859 int s; 860 861 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 862 863 vhold(vp); 864 bp->b_vp = vp; 865 bp->b_dev = vn_todev(vp); 866 /* 867 * Insert onto list for new vnode. 868 */ 869 s = splbio(); 870 bp->b_xflags |= BX_VNCLEAN; 871 bp->b_xflags &= ~BX_VNDIRTY; 872 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs); 873 splx(s); 874 } 875 876 /* 877 * Disassociate a buffer from a vnode. 878 */ 879 void 880 brelvp(bp) 881 register struct buf *bp; 882 { 883 struct vnode *vp; 884 struct buflists *listheadp; 885 int s; 886 887 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 888 889 /* 890 * Delete from old vnode list, if on one. 891 */ 892 vp = bp->b_vp; 893 s = splbio(); 894 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 895 if (bp->b_xflags & BX_VNDIRTY) 896 listheadp = &vp->v_dirtyblkhd; 897 else 898 listheadp = &vp->v_cleanblkhd; 899 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 900 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 901 } 902 if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 903 vp->v_flag &= ~VONWORKLST; 904 LIST_REMOVE(vp, v_synclist); 905 } 906 splx(s); 907 bp->b_vp = (struct vnode *) 0; 908 vdrop(vp); 909 } 910 911 /* 912 * The workitem queue. 913 * 914 * It is useful to delay writes of file data and filesystem metadata 915 * for tens of seconds so that quickly created and deleted files need 916 * not waste disk bandwidth being created and removed. To realize this, 917 * we append vnodes to a "workitem" queue. When running with a soft 918 * updates implementation, most pending metadata dependencies should 919 * not wait for more than a few seconds. Thus, mounted on block devices 920 * are delayed only about a half the time that file data is delayed. 921 * Similarly, directory updates are more critical, so are only delayed 922 * about a third the time that file data is delayed. Thus, there are 923 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 924 * one each second (driven off the filesystem syncer process). The 925 * syncer_delayno variable indicates the next queue that is to be processed. 926 * Items that need to be processed soon are placed in this queue: 927 * 928 * syncer_workitem_pending[syncer_delayno] 929 * 930 * A delay of fifteen seconds is done by placing the request fifteen 931 * entries later in the queue: 932 * 933 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 934 * 935 */ 936 937 /* 938 * Add an item to the syncer work queue. 939 */ 940 static void 941 vn_syncer_add_to_worklist(struct vnode *vp, int delay) 942 { 943 int s, slot; 944 945 s = splbio(); 946 947 if (vp->v_flag & VONWORKLST) { 948 LIST_REMOVE(vp, v_synclist); 949 } 950 951 if (delay > syncer_maxdelay - 2) 952 delay = syncer_maxdelay - 2; 953 slot = (syncer_delayno + delay) & syncer_mask; 954 955 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist); 956 vp->v_flag |= VONWORKLST; 957 splx(s); 958 } 959 960 struct proc *updateproc; 961 static void sched_sync __P((void)); 962 static struct kproc_desc up_kp = { 963 "syncer", 964 sched_sync, 965 &updateproc 966 }; 967 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 968 969 /* 970 * System filesystem synchronizer daemon. 971 */ 972 void 973 sched_sync(void) 974 { 975 struct synclist *slp; 976 struct vnode *vp; 977 long starttime; 978 int s; 979 struct proc *p = updateproc; 980 981 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, p, 982 SHUTDOWN_PRI_LAST); 983 984 for (;;) { 985 kproc_suspend_loop(p); 986 987 starttime = time_second; 988 989 /* 990 * Push files whose dirty time has expired. Be careful 991 * of interrupt race on slp queue. 992 */ 993 s = splbio(); 994 slp = &syncer_workitem_pending[syncer_delayno]; 995 syncer_delayno += 1; 996 if (syncer_delayno == syncer_maxdelay) 997 syncer_delayno = 0; 998 splx(s); 999 1000 while ((vp = LIST_FIRST(slp)) != NULL) { 1001 if (VOP_ISLOCKED(vp, NULL) == 0) { 1002 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 1003 (void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p); 1004 VOP_UNLOCK(vp, 0, p); 1005 } 1006 s = splbio(); 1007 if (LIST_FIRST(slp) == vp) { 1008 /* 1009 * Note: v_tag VT_VFS vps can remain on the 1010 * worklist too with no dirty blocks, but 1011 * since sync_fsync() moves it to a different 1012 * slot we are safe. 1013 */ 1014 if (TAILQ_EMPTY(&vp->v_dirtyblkhd) && 1015 !vn_isdisk(vp, NULL)) 1016 panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag); 1017 /* 1018 * Put us back on the worklist. The worklist 1019 * routine will remove us from our current 1020 * position and then add us back in at a later 1021 * position. 1022 */ 1023 vn_syncer_add_to_worklist(vp, syncdelay); 1024 } 1025 splx(s); 1026 } 1027 1028 /* 1029 * Do soft update processing. 1030 */ 1031 if (bioops.io_sync) 1032 (*bioops.io_sync)(NULL); 1033 1034 /* 1035 * The variable rushjob allows the kernel to speed up the 1036 * processing of the filesystem syncer process. A rushjob 1037 * value of N tells the filesystem syncer to process the next 1038 * N seconds worth of work on its queue ASAP. Currently rushjob 1039 * is used by the soft update code to speed up the filesystem 1040 * syncer process when the incore state is getting so far 1041 * ahead of the disk that the kernel memory pool is being 1042 * threatened with exhaustion. 1043 */ 1044 if (rushjob > 0) { 1045 rushjob -= 1; 1046 continue; 1047 } 1048 /* 1049 * If it has taken us less than a second to process the 1050 * current work, then wait. Otherwise start right over 1051 * again. We can still lose time if any single round 1052 * takes more than two seconds, but it does not really 1053 * matter as we are just trying to generally pace the 1054 * filesystem activity. 1055 */ 1056 if (time_second == starttime) 1057 tsleep(&lbolt, PPAUSE, "syncer", 0); 1058 } 1059 } 1060 1061 /* 1062 * Request the syncer daemon to speed up its work. 1063 * We never push it to speed up more than half of its 1064 * normal turn time, otherwise it could take over the cpu. 1065 */ 1066 int 1067 speedup_syncer() 1068 { 1069 int s; 1070 1071 s = splhigh(); 1072 if (updateproc->p_wchan == &lbolt) 1073 setrunnable(updateproc); 1074 splx(s); 1075 if (rushjob < syncdelay / 2) { 1076 rushjob += 1; 1077 stat_rush_requests += 1; 1078 return (1); 1079 } 1080 return(0); 1081 } 1082 1083 /* 1084 * Associate a p-buffer with a vnode. 1085 * 1086 * Also sets B_PAGING flag to indicate that vnode is not fully associated 1087 * with the buffer. i.e. the bp has not been linked into the vnode or 1088 * ref-counted. 1089 */ 1090 void 1091 pbgetvp(vp, bp) 1092 register struct vnode *vp; 1093 register struct buf *bp; 1094 { 1095 1096 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free")); 1097 1098 bp->b_vp = vp; 1099 bp->b_flags |= B_PAGING; 1100 bp->b_dev = vn_todev(vp); 1101 } 1102 1103 /* 1104 * Disassociate a p-buffer from a vnode. 1105 */ 1106 void 1107 pbrelvp(bp) 1108 register struct buf *bp; 1109 { 1110 1111 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL")); 1112 1113 #if !defined(MAX_PERF) 1114 /* XXX REMOVE ME */ 1115 if (bp->b_vnbufs.tqe_next != NULL) { 1116 panic( 1117 "relpbuf(): b_vp was probably reassignbuf()d %p %x", 1118 bp, 1119 (int)bp->b_flags 1120 ); 1121 } 1122 #endif 1123 bp->b_vp = (struct vnode *) 0; 1124 bp->b_flags &= ~B_PAGING; 1125 } 1126 1127 void 1128 pbreassignbuf(bp, newvp) 1129 struct buf *bp; 1130 struct vnode *newvp; 1131 { 1132 #if !defined(MAX_PERF) 1133 if ((bp->b_flags & B_PAGING) == 0) { 1134 panic( 1135 "pbreassignbuf() on non phys bp %p", 1136 bp 1137 ); 1138 } 1139 #endif 1140 bp->b_vp = newvp; 1141 } 1142 1143 /* 1144 * Reassign a buffer from one vnode to another. 1145 * Used to assign file specific control information 1146 * (indirect blocks) to the vnode to which they belong. 1147 */ 1148 void 1149 reassignbuf(bp, newvp) 1150 register struct buf *bp; 1151 register struct vnode *newvp; 1152 { 1153 struct buflists *listheadp; 1154 int delay; 1155 int s; 1156 1157 if (newvp == NULL) { 1158 printf("reassignbuf: NULL"); 1159 return; 1160 } 1161 ++reassignbufcalls; 1162 1163 #if !defined(MAX_PERF) 1164 /* 1165 * B_PAGING flagged buffers cannot be reassigned because their vp 1166 * is not fully linked in. 1167 */ 1168 if (bp->b_flags & B_PAGING) 1169 panic("cannot reassign paging buffer"); 1170 #endif 1171 1172 s = splbio(); 1173 /* 1174 * Delete from old vnode list, if on one. 1175 */ 1176 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1177 if (bp->b_xflags & BX_VNDIRTY) 1178 listheadp = &bp->b_vp->v_dirtyblkhd; 1179 else 1180 listheadp = &bp->b_vp->v_cleanblkhd; 1181 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1182 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1183 if (bp->b_vp != newvp) { 1184 vdrop(bp->b_vp); 1185 bp->b_vp = NULL; /* for clarification */ 1186 } 1187 } 1188 /* 1189 * If dirty, put on list of dirty buffers; otherwise insert onto list 1190 * of clean buffers. 1191 */ 1192 if (bp->b_flags & B_DELWRI) { 1193 struct buf *tbp; 1194 1195 listheadp = &newvp->v_dirtyblkhd; 1196 if ((newvp->v_flag & VONWORKLST) == 0) { 1197 switch (newvp->v_type) { 1198 case VDIR: 1199 delay = dirdelay; 1200 break; 1201 case VCHR: 1202 case VBLK: 1203 if (newvp->v_specmountpoint != NULL) { 1204 delay = metadelay; 1205 break; 1206 } 1207 /* fall through */ 1208 default: 1209 delay = filedelay; 1210 } 1211 vn_syncer_add_to_worklist(newvp, delay); 1212 } 1213 bp->b_xflags |= BX_VNDIRTY; 1214 tbp = TAILQ_FIRST(listheadp); 1215 if (tbp == NULL || 1216 bp->b_lblkno == 0 || 1217 (bp->b_lblkno > 0 && tbp->b_lblkno < 0) || 1218 (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) { 1219 TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs); 1220 ++reassignbufsortgood; 1221 } else if (bp->b_lblkno < 0) { 1222 TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs); 1223 ++reassignbufsortgood; 1224 } else if (reassignbufmethod == 1) { 1225 /* 1226 * New sorting algorithm, only handle sequential case, 1227 * otherwise append to end (but before metadata) 1228 */ 1229 if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL && 1230 (tbp->b_xflags & BX_VNDIRTY)) { 1231 /* 1232 * Found the best place to insert the buffer 1233 */ 1234 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1235 ++reassignbufsortgood; 1236 } else { 1237 /* 1238 * Missed, append to end, but before meta-data. 1239 * We know that the head buffer in the list is 1240 * not meta-data due to prior conditionals. 1241 * 1242 * Indirect effects: NFS second stage write 1243 * tends to wind up here, giving maximum 1244 * distance between the unstable write and the 1245 * commit rpc. 1246 */ 1247 tbp = TAILQ_LAST(listheadp, buflists); 1248 while (tbp && tbp->b_lblkno < 0) 1249 tbp = TAILQ_PREV(tbp, buflists, b_vnbufs); 1250 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1251 ++reassignbufsortbad; 1252 } 1253 } else { 1254 /* 1255 * Old sorting algorithm, scan queue and insert 1256 */ 1257 struct buf *ttbp; 1258 while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) && 1259 (ttbp->b_lblkno < bp->b_lblkno)) { 1260 ++reassignbufloops; 1261 tbp = ttbp; 1262 } 1263 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1264 } 1265 } else { 1266 bp->b_xflags |= BX_VNCLEAN; 1267 TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs); 1268 if ((newvp->v_flag & VONWORKLST) && 1269 TAILQ_EMPTY(&newvp->v_dirtyblkhd)) { 1270 newvp->v_flag &= ~VONWORKLST; 1271 LIST_REMOVE(newvp, v_synclist); 1272 } 1273 } 1274 if (bp->b_vp != newvp) { 1275 bp->b_vp = newvp; 1276 vhold(bp->b_vp); 1277 } 1278 splx(s); 1279 } 1280 1281 /* 1282 * Create a vnode for a block device. 1283 * Used for mounting the root file system. 1284 */ 1285 int 1286 bdevvp(dev, vpp) 1287 dev_t dev; 1288 struct vnode **vpp; 1289 { 1290 register struct vnode *vp; 1291 struct vnode *nvp; 1292 int error; 1293 1294 if (dev == NODEV) { 1295 *vpp = NULLVP; 1296 return (ENXIO); 1297 } 1298 error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp); 1299 if (error) { 1300 *vpp = NULLVP; 1301 return (error); 1302 } 1303 vp = nvp; 1304 vp->v_type = VBLK; 1305 addalias(vp, dev); 1306 *vpp = vp; 1307 return (0); 1308 } 1309 1310 /* 1311 * Add vnode to the alias list hung off the dev_t. 1312 * 1313 * The reason for this gunk is that multiple vnodes can reference 1314 * the same physical device, so checking vp->v_usecount to see 1315 * how many users there are is inadequate; the v_usecount for 1316 * the vnodes need to be accumulated. vcount() does that. 1317 */ 1318 void 1319 addaliasu(nvp, nvp_rdev) 1320 struct vnode *nvp; 1321 udev_t nvp_rdev; 1322 { 1323 1324 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1325 panic("addaliasu on non-special vnode"); 1326 addalias(nvp, udev2dev(nvp_rdev, nvp->v_type == VBLK ? 1 : 0)); 1327 } 1328 1329 void 1330 addalias(nvp, dev) 1331 struct vnode *nvp; 1332 dev_t dev; 1333 { 1334 1335 if (nvp->v_type != VBLK && nvp->v_type != VCHR) 1336 panic("addalias on non-special vnode"); 1337 1338 nvp->v_rdev = dev; 1339 simple_lock(&spechash_slock); 1340 SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext); 1341 simple_unlock(&spechash_slock); 1342 } 1343 1344 /* 1345 * Grab a particular vnode from the free list, increment its 1346 * reference count and lock it. The vnode lock bit is set if the 1347 * vnode is being eliminated in vgone. The process is awakened 1348 * when the transition is completed, and an error returned to 1349 * indicate that the vnode is no longer usable (possibly having 1350 * been changed to a new file system type). 1351 */ 1352 int 1353 vget(vp, flags, p) 1354 register struct vnode *vp; 1355 int flags; 1356 struct proc *p; 1357 { 1358 int error; 1359 1360 /* 1361 * If the vnode is in the process of being cleaned out for 1362 * another use, we wait for the cleaning to finish and then 1363 * return failure. Cleaning is determined by checking that 1364 * the VXLOCK flag is set. 1365 */ 1366 if ((flags & LK_INTERLOCK) == 0) { 1367 simple_lock(&vp->v_interlock); 1368 } 1369 if (vp->v_flag & VXLOCK) { 1370 vp->v_flag |= VXWANT; 1371 simple_unlock(&vp->v_interlock); 1372 tsleep((caddr_t)vp, PINOD, "vget", 0); 1373 return (ENOENT); 1374 } 1375 1376 vp->v_usecount++; 1377 1378 if (VSHOULDBUSY(vp)) 1379 vbusy(vp); 1380 if (flags & LK_TYPE_MASK) { 1381 if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) { 1382 /* 1383 * must expand vrele here because we do not want 1384 * to call VOP_INACTIVE if the reference count 1385 * drops back to zero since it was never really 1386 * active. We must remove it from the free list 1387 * before sleeping so that multiple processes do 1388 * not try to recycle it. 1389 */ 1390 simple_lock(&vp->v_interlock); 1391 vp->v_usecount--; 1392 if (VSHOULDFREE(vp)) 1393 vfree(vp); 1394 simple_unlock(&vp->v_interlock); 1395 } 1396 return (error); 1397 } 1398 simple_unlock(&vp->v_interlock); 1399 return (0); 1400 } 1401 1402 void 1403 vref(struct vnode *vp) 1404 { 1405 simple_lock(&vp->v_interlock); 1406 vp->v_usecount++; 1407 simple_unlock(&vp->v_interlock); 1408 } 1409 1410 /* 1411 * Vnode put/release. 1412 * If count drops to zero, call inactive routine and return to freelist. 1413 */ 1414 void 1415 vrele(vp) 1416 struct vnode *vp; 1417 { 1418 struct proc *p = curproc; /* XXX */ 1419 1420 KASSERT(vp != NULL, ("vrele: null vp")); 1421 1422 simple_lock(&vp->v_interlock); 1423 1424 if (vp->v_usecount > 1) { 1425 1426 vp->v_usecount--; 1427 simple_unlock(&vp->v_interlock); 1428 1429 return; 1430 } 1431 1432 if (vp->v_usecount == 1) { 1433 1434 vp->v_usecount--; 1435 if (VSHOULDFREE(vp)) 1436 vfree(vp); 1437 /* 1438 * If we are doing a vput, the node is already locked, and we must 1439 * call VOP_INACTIVE with the node locked. So, in the case of 1440 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. 1441 */ 1442 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { 1443 VOP_INACTIVE(vp, p); 1444 } 1445 1446 } else { 1447 #ifdef DIAGNOSTIC 1448 vprint("vrele: negative ref count", vp); 1449 simple_unlock(&vp->v_interlock); 1450 #endif 1451 panic("vrele: negative ref cnt"); 1452 } 1453 } 1454 1455 void 1456 vput(vp) 1457 struct vnode *vp; 1458 { 1459 struct proc *p = curproc; /* XXX */ 1460 1461 KASSERT(vp != NULL, ("vput: null vp")); 1462 1463 simple_lock(&vp->v_interlock); 1464 1465 if (vp->v_usecount > 1) { 1466 1467 vp->v_usecount--; 1468 VOP_UNLOCK(vp, LK_INTERLOCK, p); 1469 return; 1470 1471 } 1472 1473 if (vp->v_usecount == 1) { 1474 1475 vp->v_usecount--; 1476 if (VSHOULDFREE(vp)) 1477 vfree(vp); 1478 /* 1479 * If we are doing a vput, the node is already locked, and we must 1480 * call VOP_INACTIVE with the node locked. So, in the case of 1481 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. 1482 */ 1483 simple_unlock(&vp->v_interlock); 1484 VOP_INACTIVE(vp, p); 1485 1486 } else { 1487 #ifdef DIAGNOSTIC 1488 vprint("vput: negative ref count", vp); 1489 #endif 1490 panic("vput: negative ref cnt"); 1491 } 1492 } 1493 1494 /* 1495 * Somebody doesn't want the vnode recycled. 1496 */ 1497 void 1498 vhold(vp) 1499 register struct vnode *vp; 1500 { 1501 int s; 1502 1503 s = splbio(); 1504 vp->v_holdcnt++; 1505 if (VSHOULDBUSY(vp)) 1506 vbusy(vp); 1507 splx(s); 1508 } 1509 1510 /* 1511 * One less who cares about this vnode. 1512 */ 1513 void 1514 vdrop(vp) 1515 register struct vnode *vp; 1516 { 1517 int s; 1518 1519 s = splbio(); 1520 if (vp->v_holdcnt <= 0) 1521 panic("vdrop: holdcnt"); 1522 vp->v_holdcnt--; 1523 if (VSHOULDFREE(vp)) 1524 vfree(vp); 1525 splx(s); 1526 } 1527 1528 /* 1529 * Remove any vnodes in the vnode table belonging to mount point mp. 1530 * 1531 * If MNT_NOFORCE is specified, there should not be any active ones, 1532 * return error if any are found (nb: this is a user error, not a 1533 * system error). If MNT_FORCE is specified, detach any active vnodes 1534 * that are found. 1535 */ 1536 #ifdef DIAGNOSTIC 1537 static int busyprt = 0; /* print out busy vnodes */ 1538 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1539 #endif 1540 1541 int 1542 vflush(mp, skipvp, flags) 1543 struct mount *mp; 1544 struct vnode *skipvp; 1545 int flags; 1546 { 1547 struct proc *p = curproc; /* XXX */ 1548 struct vnode *vp, *nvp; 1549 int busy = 0; 1550 1551 simple_lock(&mntvnode_slock); 1552 loop: 1553 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) { 1554 /* 1555 * Make sure this vnode wasn't reclaimed in getnewvnode(). 1556 * Start over if it has (it won't be on the list anymore). 1557 */ 1558 if (vp->v_mount != mp) 1559 goto loop; 1560 nvp = LIST_NEXT(vp, v_mntvnodes); 1561 /* 1562 * Skip over a selected vnode. 1563 */ 1564 if (vp == skipvp) 1565 continue; 1566 1567 simple_lock(&vp->v_interlock); 1568 /* 1569 * Skip over a vnodes marked VSYSTEM. 1570 */ 1571 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1572 simple_unlock(&vp->v_interlock); 1573 continue; 1574 } 1575 /* 1576 * If WRITECLOSE is set, only flush out regular file vnodes 1577 * open for writing. 1578 */ 1579 if ((flags & WRITECLOSE) && 1580 (vp->v_writecount == 0 || vp->v_type != VREG)) { 1581 simple_unlock(&vp->v_interlock); 1582 continue; 1583 } 1584 1585 /* 1586 * With v_usecount == 0, all we need to do is clear out the 1587 * vnode data structures and we are done. 1588 */ 1589 if (vp->v_usecount == 0) { 1590 simple_unlock(&mntvnode_slock); 1591 vgonel(vp, p); 1592 simple_lock(&mntvnode_slock); 1593 continue; 1594 } 1595 1596 /* 1597 * If FORCECLOSE is set, forcibly close the vnode. For block 1598 * or character devices, revert to an anonymous device. For 1599 * all other files, just kill them. 1600 */ 1601 if (flags & FORCECLOSE) { 1602 simple_unlock(&mntvnode_slock); 1603 if (vp->v_type != VBLK && vp->v_type != VCHR) { 1604 vgonel(vp, p); 1605 } else { 1606 vclean(vp, 0, p); 1607 vp->v_op = spec_vnodeop_p; 1608 insmntque(vp, (struct mount *) 0); 1609 } 1610 simple_lock(&mntvnode_slock); 1611 continue; 1612 } 1613 #ifdef DIAGNOSTIC 1614 if (busyprt) 1615 vprint("vflush: busy vnode", vp); 1616 #endif 1617 simple_unlock(&vp->v_interlock); 1618 busy++; 1619 } 1620 simple_unlock(&mntvnode_slock); 1621 if (busy) 1622 return (EBUSY); 1623 return (0); 1624 } 1625 1626 /* 1627 * Disassociate the underlying file system from a vnode. 1628 */ 1629 static void 1630 vclean(vp, flags, p) 1631 struct vnode *vp; 1632 int flags; 1633 struct proc *p; 1634 { 1635 int active; 1636 vm_object_t obj; 1637 1638 /* 1639 * Check to see if the vnode is in use. If so we have to reference it 1640 * before we clean it out so that its count cannot fall to zero and 1641 * generate a race against ourselves to recycle it. 1642 */ 1643 if ((active = vp->v_usecount)) 1644 vp->v_usecount++; 1645 1646 /* 1647 * Prevent the vnode from being recycled or brought into use while we 1648 * clean it out. 1649 */ 1650 if (vp->v_flag & VXLOCK) 1651 panic("vclean: deadlock"); 1652 vp->v_flag |= VXLOCK; 1653 /* 1654 * Even if the count is zero, the VOP_INACTIVE routine may still 1655 * have the object locked while it cleans it out. The VOP_LOCK 1656 * ensures that the VOP_INACTIVE routine is done with its work. 1657 * For active vnodes, it ensures that no other activity can 1658 * occur while the underlying object is being cleaned out. 1659 */ 1660 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p); 1661 1662 /* 1663 * Clean out any buffers associated with the vnode. 1664 */ 1665 vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); 1666 if ((obj = vp->v_object) != NULL) { 1667 if (obj->ref_count == 0) { 1668 /* 1669 * vclean() may be called twice. The first time removes the 1670 * primary reference to the object, the second time goes 1671 * one further and is a special-case to terminate the object. 1672 */ 1673 vm_object_terminate(obj); 1674 } else { 1675 /* 1676 * Woe to the process that tries to page now :-). 1677 */ 1678 vm_pager_deallocate(obj); 1679 } 1680 } 1681 1682 /* 1683 * If purging an active vnode, it must be closed and 1684 * deactivated before being reclaimed. Note that the 1685 * VOP_INACTIVE will unlock the vnode. 1686 */ 1687 if (active) { 1688 if (flags & DOCLOSE) 1689 VOP_CLOSE(vp, FNONBLOCK, NOCRED, p); 1690 VOP_INACTIVE(vp, p); 1691 } else { 1692 /* 1693 * Any other processes trying to obtain this lock must first 1694 * wait for VXLOCK to clear, then call the new lock operation. 1695 */ 1696 VOP_UNLOCK(vp, 0, p); 1697 } 1698 /* 1699 * Reclaim the vnode. 1700 */ 1701 if (VOP_RECLAIM(vp, p)) 1702 panic("vclean: cannot reclaim"); 1703 1704 if (active) 1705 vrele(vp); 1706 1707 cache_purge(vp); 1708 if (vp->v_vnlock) { 1709 FREE(vp->v_vnlock, M_VNODE); 1710 vp->v_vnlock = NULL; 1711 } 1712 1713 if (VSHOULDFREE(vp)) 1714 vfree(vp); 1715 1716 /* 1717 * Done with purge, notify sleepers of the grim news. 1718 */ 1719 vp->v_op = dead_vnodeop_p; 1720 vn_pollgone(vp); 1721 vp->v_tag = VT_NON; 1722 vp->v_flag &= ~VXLOCK; 1723 if (vp->v_flag & VXWANT) { 1724 vp->v_flag &= ~VXWANT; 1725 wakeup((caddr_t) vp); 1726 } 1727 } 1728 1729 /* 1730 * Eliminate all activity associated with the requested vnode 1731 * and with all vnodes aliased to the requested vnode. 1732 */ 1733 int 1734 vop_revoke(ap) 1735 struct vop_revoke_args /* { 1736 struct vnode *a_vp; 1737 int a_flags; 1738 } */ *ap; 1739 { 1740 struct vnode *vp, *vq; 1741 dev_t dev; 1742 1743 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 1744 1745 vp = ap->a_vp; 1746 /* 1747 * If a vgone (or vclean) is already in progress, 1748 * wait until it is done and return. 1749 */ 1750 if (vp->v_flag & VXLOCK) { 1751 vp->v_flag |= VXWANT; 1752 simple_unlock(&vp->v_interlock); 1753 tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0); 1754 return (0); 1755 } 1756 dev = vp->v_rdev; 1757 for (;;) { 1758 simple_lock(&spechash_slock); 1759 vq = SLIST_FIRST(&dev->si_hlist); 1760 simple_unlock(&spechash_slock); 1761 if (!vq) 1762 break; 1763 vgone(vq); 1764 } 1765 return (0); 1766 } 1767 1768 /* 1769 * Recycle an unused vnode to the front of the free list. 1770 * Release the passed interlock if the vnode will be recycled. 1771 */ 1772 int 1773 vrecycle(vp, inter_lkp, p) 1774 struct vnode *vp; 1775 struct simplelock *inter_lkp; 1776 struct proc *p; 1777 { 1778 1779 simple_lock(&vp->v_interlock); 1780 if (vp->v_usecount == 0) { 1781 if (inter_lkp) { 1782 simple_unlock(inter_lkp); 1783 } 1784 vgonel(vp, p); 1785 return (1); 1786 } 1787 simple_unlock(&vp->v_interlock); 1788 return (0); 1789 } 1790 1791 /* 1792 * Eliminate all activity associated with a vnode 1793 * in preparation for reuse. 1794 */ 1795 void 1796 vgone(vp) 1797 register struct vnode *vp; 1798 { 1799 struct proc *p = curproc; /* XXX */ 1800 1801 simple_lock(&vp->v_interlock); 1802 vgonel(vp, p); 1803 } 1804 1805 /* 1806 * vgone, with the vp interlock held. 1807 */ 1808 static void 1809 vgonel(vp, p) 1810 struct vnode *vp; 1811 struct proc *p; 1812 { 1813 int s; 1814 1815 /* 1816 * If a vgone (or vclean) is already in progress, 1817 * wait until it is done and return. 1818 */ 1819 if (vp->v_flag & VXLOCK) { 1820 vp->v_flag |= VXWANT; 1821 simple_unlock(&vp->v_interlock); 1822 tsleep((caddr_t)vp, PINOD, "vgone", 0); 1823 return; 1824 } 1825 1826 /* 1827 * Clean out the filesystem specific data. 1828 */ 1829 vclean(vp, DOCLOSE, p); 1830 simple_lock(&vp->v_interlock); 1831 1832 /* 1833 * Delete from old mount point vnode list, if on one. 1834 */ 1835 if (vp->v_mount != NULL) 1836 insmntque(vp, (struct mount *)0); 1837 /* 1838 * If special device, remove it from special device alias list 1839 * if it is on one. 1840 */ 1841 if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) { 1842 simple_lock(&spechash_slock); 1843 SLIST_REMOVE(&vp->v_hashchain, vp, vnode, v_specnext); 1844 freedev(vp->v_rdev); 1845 simple_unlock(&spechash_slock); 1846 vp->v_rdev = NULL; 1847 } 1848 1849 /* 1850 * If it is on the freelist and not already at the head, 1851 * move it to the head of the list. The test of the back 1852 * pointer and the reference count of zero is because 1853 * it will be removed from the free list by getnewvnode, 1854 * but will not have its reference count incremented until 1855 * after calling vgone. If the reference count were 1856 * incremented first, vgone would (incorrectly) try to 1857 * close the previous instance of the underlying object. 1858 */ 1859 if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 1860 s = splbio(); 1861 simple_lock(&vnode_free_list_slock); 1862 if (vp->v_flag & VFREE) { 1863 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 1864 } else if (vp->v_flag & VTBFREE) { 1865 TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist); 1866 vp->v_flag &= ~VTBFREE; 1867 freevnodes++; 1868 } else 1869 freevnodes++; 1870 vp->v_flag |= VFREE; 1871 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 1872 simple_unlock(&vnode_free_list_slock); 1873 splx(s); 1874 } 1875 1876 vp->v_type = VBAD; 1877 simple_unlock(&vp->v_interlock); 1878 } 1879 1880 /* 1881 * Lookup a vnode by device number. 1882 */ 1883 int 1884 vfinddev(dev, type, vpp) 1885 dev_t dev; 1886 enum vtype type; 1887 struct vnode **vpp; 1888 { 1889 struct vnode *vp; 1890 1891 simple_lock(&spechash_slock); 1892 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 1893 if (type == vp->v_type) { 1894 *vpp = vp; 1895 simple_unlock(&spechash_slock); 1896 return (1); 1897 } 1898 } 1899 simple_unlock(&spechash_slock); 1900 return (0); 1901 } 1902 1903 /* 1904 * Calculate the total number of references to a special device. 1905 */ 1906 int 1907 vcount(vp) 1908 struct vnode *vp; 1909 { 1910 struct vnode *vq; 1911 int count; 1912 1913 count = 0; 1914 simple_lock(&spechash_slock); 1915 SLIST_FOREACH(vq, &vp->v_hashchain, v_specnext) 1916 count += vq->v_usecount; 1917 simple_unlock(&spechash_slock); 1918 return (count); 1919 } 1920 1921 /* 1922 * Print out a description of a vnode. 1923 */ 1924 static char *typename[] = 1925 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 1926 1927 void 1928 vprint(label, vp) 1929 char *label; 1930 struct vnode *vp; 1931 { 1932 char buf[96]; 1933 1934 if (label != NULL) 1935 printf("%s: %p: ", label, (void *)vp); 1936 else 1937 printf("%p: ", (void *)vp); 1938 printf("type %s, usecount %d, writecount %d, refcount %d,", 1939 typename[vp->v_type], vp->v_usecount, vp->v_writecount, 1940 vp->v_holdcnt); 1941 buf[0] = '\0'; 1942 if (vp->v_flag & VROOT) 1943 strcat(buf, "|VROOT"); 1944 if (vp->v_flag & VTEXT) 1945 strcat(buf, "|VTEXT"); 1946 if (vp->v_flag & VSYSTEM) 1947 strcat(buf, "|VSYSTEM"); 1948 if (vp->v_flag & VXLOCK) 1949 strcat(buf, "|VXLOCK"); 1950 if (vp->v_flag & VXWANT) 1951 strcat(buf, "|VXWANT"); 1952 if (vp->v_flag & VBWAIT) 1953 strcat(buf, "|VBWAIT"); 1954 if (vp->v_flag & VDOOMED) 1955 strcat(buf, "|VDOOMED"); 1956 if (vp->v_flag & VFREE) 1957 strcat(buf, "|VFREE"); 1958 if (vp->v_flag & VOBJBUF) 1959 strcat(buf, "|VOBJBUF"); 1960 if (buf[0] != '\0') 1961 printf(" flags (%s)", &buf[1]); 1962 if (vp->v_data == NULL) { 1963 printf("\n"); 1964 } else { 1965 printf("\n\t"); 1966 VOP_PRINT(vp); 1967 } 1968 } 1969 1970 #ifdef DDB 1971 #include <ddb/ddb.h> 1972 /* 1973 * List all of the locked vnodes in the system. 1974 * Called when debugging the kernel. 1975 */ 1976 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 1977 { 1978 struct proc *p = curproc; /* XXX */ 1979 struct mount *mp, *nmp; 1980 struct vnode *vp; 1981 1982 printf("Locked vnodes\n"); 1983 simple_lock(&mountlist_slock); 1984 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1985 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { 1986 nmp = TAILQ_NEXT(mp, mnt_list); 1987 continue; 1988 } 1989 LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { 1990 if (VOP_ISLOCKED(vp, NULL)) 1991 vprint((char *)0, vp); 1992 } 1993 simple_lock(&mountlist_slock); 1994 nmp = TAILQ_NEXT(mp, mnt_list); 1995 vfs_unbusy(mp, p); 1996 } 1997 simple_unlock(&mountlist_slock); 1998 } 1999 #endif 2000 2001 /* 2002 * Top level filesystem related information gathering. 2003 */ 2004 static int sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS); 2005 2006 static int 2007 vfs_sysctl SYSCTL_HANDLER_ARGS 2008 { 2009 int *name = (int *)arg1 - 1; /* XXX */ 2010 u_int namelen = arg2 + 1; /* XXX */ 2011 struct vfsconf *vfsp; 2012 2013 #if 1 || defined(COMPAT_PRELITE2) 2014 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2015 if (namelen == 1) 2016 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2017 #endif 2018 2019 #ifdef notyet 2020 /* all sysctl names at this level are at least name and field */ 2021 if (namelen < 2) 2022 return (ENOTDIR); /* overloaded */ 2023 if (name[0] != VFS_GENERIC) { 2024 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2025 if (vfsp->vfc_typenum == name[0]) 2026 break; 2027 if (vfsp == NULL) 2028 return (EOPNOTSUPP); 2029 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 2030 oldp, oldlenp, newp, newlen, p)); 2031 } 2032 #endif 2033 switch (name[1]) { 2034 case VFS_MAXTYPENUM: 2035 if (namelen != 2) 2036 return (ENOTDIR); 2037 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2038 case VFS_CONF: 2039 if (namelen != 3) 2040 return (ENOTDIR); /* overloaded */ 2041 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2042 if (vfsp->vfc_typenum == name[2]) 2043 break; 2044 if (vfsp == NULL) 2045 return (EOPNOTSUPP); 2046 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 2047 } 2048 return (EOPNOTSUPP); 2049 } 2050 2051 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 2052 "Generic filesystem"); 2053 2054 #if 1 || defined(COMPAT_PRELITE2) 2055 2056 static int 2057 sysctl_ovfs_conf SYSCTL_HANDLER_ARGS 2058 { 2059 int error; 2060 struct vfsconf *vfsp; 2061 struct ovfsconf ovfs; 2062 2063 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 2064 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2065 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2066 ovfs.vfc_index = vfsp->vfc_typenum; 2067 ovfs.vfc_refcount = vfsp->vfc_refcount; 2068 ovfs.vfc_flags = vfsp->vfc_flags; 2069 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2070 if (error) 2071 return error; 2072 } 2073 return 0; 2074 } 2075 2076 #endif /* 1 || COMPAT_PRELITE2 */ 2077 2078 #if 0 2079 #define KINFO_VNODESLOP 10 2080 /* 2081 * Dump vnode list (via sysctl). 2082 * Copyout address of vnode followed by vnode. 2083 */ 2084 /* ARGSUSED */ 2085 static int 2086 sysctl_vnode SYSCTL_HANDLER_ARGS 2087 { 2088 struct proc *p = curproc; /* XXX */ 2089 struct mount *mp, *nmp; 2090 struct vnode *nvp, *vp; 2091 int error; 2092 2093 #define VPTRSZ sizeof (struct vnode *) 2094 #define VNODESZ sizeof (struct vnode) 2095 2096 req->lock = 0; 2097 if (!req->oldptr) /* Make an estimate */ 2098 return (SYSCTL_OUT(req, 0, 2099 (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); 2100 2101 simple_lock(&mountlist_slock); 2102 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2103 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { 2104 nmp = TAILQ_NEXT(mp, mnt_list); 2105 continue; 2106 } 2107 again: 2108 simple_lock(&mntvnode_slock); 2109 for (vp = LIST_FIRST(&mp->mnt_vnodelist); 2110 vp != NULL; 2111 vp = nvp) { 2112 /* 2113 * Check that the vp is still associated with 2114 * this filesystem. RACE: could have been 2115 * recycled onto the same filesystem. 2116 */ 2117 if (vp->v_mount != mp) { 2118 simple_unlock(&mntvnode_slock); 2119 goto again; 2120 } 2121 nvp = LIST_NEXT(vp, v_mntvnodes); 2122 simple_unlock(&mntvnode_slock); 2123 if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || 2124 (error = SYSCTL_OUT(req, vp, VNODESZ))) 2125 return (error); 2126 simple_lock(&mntvnode_slock); 2127 } 2128 simple_unlock(&mntvnode_slock); 2129 simple_lock(&mountlist_slock); 2130 nmp = TAILQ_NEXT(mp, mnt_list); 2131 vfs_unbusy(mp, p); 2132 } 2133 simple_unlock(&mountlist_slock); 2134 2135 return (0); 2136 } 2137 #endif 2138 2139 /* 2140 * XXX 2141 * Exporting the vnode list on large systems causes them to crash. 2142 * Exporting the vnode list on medium systems causes sysctl to coredump. 2143 */ 2144 #if 0 2145 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2146 0, 0, sysctl_vnode, "S,vnode", ""); 2147 #endif 2148 2149 /* 2150 * Check to see if a filesystem is mounted on a block device. 2151 */ 2152 int 2153 vfs_mountedon(vp) 2154 struct vnode *vp; 2155 { 2156 2157 if (vp->v_specmountpoint != NULL) 2158 return (EBUSY); 2159 return (0); 2160 } 2161 2162 /* 2163 * Unmount all filesystems. The list is traversed in reverse order 2164 * of mounting to avoid dependencies. 2165 */ 2166 void 2167 vfs_unmountall() 2168 { 2169 struct mount *mp; 2170 struct proc *p; 2171 int error; 2172 2173 if (curproc != NULL) 2174 p = curproc; 2175 else 2176 p = initproc; /* XXX XXX should this be proc0? */ 2177 /* 2178 * Since this only runs when rebooting, it is not interlocked. 2179 */ 2180 while(!TAILQ_EMPTY(&mountlist)) { 2181 mp = TAILQ_LAST(&mountlist, mntlist); 2182 error = dounmount(mp, MNT_FORCE, p); 2183 if (error) { 2184 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2185 printf("unmount of %s failed (", 2186 mp->mnt_stat.f_mntonname); 2187 if (error == EBUSY) 2188 printf("BUSY)\n"); 2189 else 2190 printf("%d)\n", error); 2191 } else { 2192 /* The unmount has removed mp from the mountlist */ 2193 } 2194 } 2195 } 2196 2197 /* 2198 * Build hash lists of net addresses and hang them off the mount point. 2199 * Called by ufs_mount() to set up the lists of export addresses. 2200 */ 2201 static int 2202 vfs_hang_addrlist(mp, nep, argp) 2203 struct mount *mp; 2204 struct netexport *nep; 2205 struct export_args *argp; 2206 { 2207 register struct netcred *np; 2208 register struct radix_node_head *rnh; 2209 register int i; 2210 struct radix_node *rn; 2211 struct sockaddr *saddr, *smask = 0; 2212 struct domain *dom; 2213 int error; 2214 2215 if (argp->ex_addrlen == 0) { 2216 if (mp->mnt_flag & MNT_DEFEXPORTED) 2217 return (EPERM); 2218 np = &nep->ne_defexported; 2219 np->netc_exflags = argp->ex_flags; 2220 np->netc_anon = argp->ex_anon; 2221 np->netc_anon.cr_ref = 1; 2222 mp->mnt_flag |= MNT_DEFEXPORTED; 2223 return (0); 2224 } 2225 i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; 2226 np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK); 2227 bzero((caddr_t) np, i); 2228 saddr = (struct sockaddr *) (np + 1); 2229 if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen))) 2230 goto out; 2231 if (saddr->sa_len > argp->ex_addrlen) 2232 saddr->sa_len = argp->ex_addrlen; 2233 if (argp->ex_masklen) { 2234 smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen); 2235 error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen); 2236 if (error) 2237 goto out; 2238 if (smask->sa_len > argp->ex_masklen) 2239 smask->sa_len = argp->ex_masklen; 2240 } 2241 i = saddr->sa_family; 2242 if ((rnh = nep->ne_rtable[i]) == 0) { 2243 /* 2244 * Seems silly to initialize every AF when most are not used, 2245 * do so on demand here 2246 */ 2247 for (dom = domains; dom; dom = dom->dom_next) 2248 if (dom->dom_family == i && dom->dom_rtattach) { 2249 dom->dom_rtattach((void **) &nep->ne_rtable[i], 2250 dom->dom_rtoffset); 2251 break; 2252 } 2253 if ((rnh = nep->ne_rtable[i]) == 0) { 2254 error = ENOBUFS; 2255 goto out; 2256 } 2257 } 2258 rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh, 2259 np->netc_rnodes); 2260 if (rn == 0 || np != (struct netcred *) rn) { /* already exists */ 2261 error = EPERM; 2262 goto out; 2263 } 2264 np->netc_exflags = argp->ex_flags; 2265 np->netc_anon = argp->ex_anon; 2266 np->netc_anon.cr_ref = 1; 2267 return (0); 2268 out: 2269 free(np, M_NETADDR); 2270 return (error); 2271 } 2272 2273 /* ARGSUSED */ 2274 static int 2275 vfs_free_netcred(rn, w) 2276 struct radix_node *rn; 2277 void *w; 2278 { 2279 register struct radix_node_head *rnh = (struct radix_node_head *) w; 2280 2281 (*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh); 2282 free((caddr_t) rn, M_NETADDR); 2283 return (0); 2284 } 2285 2286 /* 2287 * Free the net address hash lists that are hanging off the mount points. 2288 */ 2289 static void 2290 vfs_free_addrlist(nep) 2291 struct netexport *nep; 2292 { 2293 register int i; 2294 register struct radix_node_head *rnh; 2295 2296 for (i = 0; i <= AF_MAX; i++) 2297 if ((rnh = nep->ne_rtable[i])) { 2298 (*rnh->rnh_walktree) (rnh, vfs_free_netcred, 2299 (caddr_t) rnh); 2300 free((caddr_t) rnh, M_RTABLE); 2301 nep->ne_rtable[i] = 0; 2302 } 2303 } 2304 2305 int 2306 vfs_export(mp, nep, argp) 2307 struct mount *mp; 2308 struct netexport *nep; 2309 struct export_args *argp; 2310 { 2311 int error; 2312 2313 if (argp->ex_flags & MNT_DELEXPORT) { 2314 if (mp->mnt_flag & MNT_EXPUBLIC) { 2315 vfs_setpublicfs(NULL, NULL, NULL); 2316 mp->mnt_flag &= ~MNT_EXPUBLIC; 2317 } 2318 vfs_free_addrlist(nep); 2319 mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); 2320 } 2321 if (argp->ex_flags & MNT_EXPORTED) { 2322 if (argp->ex_flags & MNT_EXPUBLIC) { 2323 if ((error = vfs_setpublicfs(mp, nep, argp)) != 0) 2324 return (error); 2325 mp->mnt_flag |= MNT_EXPUBLIC; 2326 } 2327 if ((error = vfs_hang_addrlist(mp, nep, argp))) 2328 return (error); 2329 mp->mnt_flag |= MNT_EXPORTED; 2330 } 2331 return (0); 2332 } 2333 2334 2335 /* 2336 * Set the publicly exported filesystem (WebNFS). Currently, only 2337 * one public filesystem is possible in the spec (RFC 2054 and 2055) 2338 */ 2339 int 2340 vfs_setpublicfs(mp, nep, argp) 2341 struct mount *mp; 2342 struct netexport *nep; 2343 struct export_args *argp; 2344 { 2345 int error; 2346 struct vnode *rvp; 2347 char *cp; 2348 2349 /* 2350 * mp == NULL -> invalidate the current info, the FS is 2351 * no longer exported. May be called from either vfs_export 2352 * or unmount, so check if it hasn't already been done. 2353 */ 2354 if (mp == NULL) { 2355 if (nfs_pub.np_valid) { 2356 nfs_pub.np_valid = 0; 2357 if (nfs_pub.np_index != NULL) { 2358 FREE(nfs_pub.np_index, M_TEMP); 2359 nfs_pub.np_index = NULL; 2360 } 2361 } 2362 return (0); 2363 } 2364 2365 /* 2366 * Only one allowed at a time. 2367 */ 2368 if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount) 2369 return (EBUSY); 2370 2371 /* 2372 * Get real filehandle for root of exported FS. 2373 */ 2374 bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle)); 2375 nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid; 2376 2377 if ((error = VFS_ROOT(mp, &rvp))) 2378 return (error); 2379 2380 if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid))) 2381 return (error); 2382 2383 vput(rvp); 2384 2385 /* 2386 * If an indexfile was specified, pull it in. 2387 */ 2388 if (argp->ex_indexfile != NULL) { 2389 MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP, 2390 M_WAITOK); 2391 error = copyinstr(argp->ex_indexfile, nfs_pub.np_index, 2392 MAXNAMLEN, (size_t *)0); 2393 if (!error) { 2394 /* 2395 * Check for illegal filenames. 2396 */ 2397 for (cp = nfs_pub.np_index; *cp; cp++) { 2398 if (*cp == '/') { 2399 error = EINVAL; 2400 break; 2401 } 2402 } 2403 } 2404 if (error) { 2405 FREE(nfs_pub.np_index, M_TEMP); 2406 return (error); 2407 } 2408 } 2409 2410 nfs_pub.np_mount = mp; 2411 nfs_pub.np_valid = 1; 2412 return (0); 2413 } 2414 2415 struct netcred * 2416 vfs_export_lookup(mp, nep, nam) 2417 register struct mount *mp; 2418 struct netexport *nep; 2419 struct sockaddr *nam; 2420 { 2421 register struct netcred *np; 2422 register struct radix_node_head *rnh; 2423 struct sockaddr *saddr; 2424 2425 np = NULL; 2426 if (mp->mnt_flag & MNT_EXPORTED) { 2427 /* 2428 * Lookup in the export list first. 2429 */ 2430 if (nam != NULL) { 2431 saddr = nam; 2432 rnh = nep->ne_rtable[saddr->sa_family]; 2433 if (rnh != NULL) { 2434 np = (struct netcred *) 2435 (*rnh->rnh_matchaddr)((caddr_t)saddr, 2436 rnh); 2437 if (np && np->netc_rnodes->rn_flags & RNF_ROOT) 2438 np = NULL; 2439 } 2440 } 2441 /* 2442 * If no address match, use the default if it exists. 2443 */ 2444 if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) 2445 np = &nep->ne_defexported; 2446 } 2447 return (np); 2448 } 2449 2450 /* 2451 * perform msync on all vnodes under a mount point 2452 * the mount point must be locked. 2453 */ 2454 void 2455 vfs_msync(struct mount *mp, int flags) { 2456 struct vnode *vp, *nvp; 2457 struct vm_object *obj; 2458 int anyio, tries; 2459 2460 tries = 5; 2461 loop: 2462 anyio = 0; 2463 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { 2464 2465 nvp = LIST_NEXT(vp, v_mntvnodes); 2466 2467 if (vp->v_mount != mp) { 2468 goto loop; 2469 } 2470 2471 if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */ 2472 continue; 2473 2474 if (flags != MNT_WAIT) { 2475 obj = vp->v_object; 2476 if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0) 2477 continue; 2478 if (VOP_ISLOCKED(vp, NULL)) 2479 continue; 2480 } 2481 2482 simple_lock(&vp->v_interlock); 2483 if (vp->v_object && 2484 (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) { 2485 if (!vget(vp, 2486 LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) { 2487 if (vp->v_object) { 2488 vm_object_page_clean(vp->v_object, 0, 0, flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC); 2489 anyio = 1; 2490 } 2491 vput(vp); 2492 } 2493 } else { 2494 simple_unlock(&vp->v_interlock); 2495 } 2496 } 2497 if (anyio && (--tries > 0)) 2498 goto loop; 2499 } 2500 2501 /* 2502 * Create the VM object needed for VMIO and mmap support. This 2503 * is done for all VREG files in the system. Some filesystems might 2504 * afford the additional metadata buffering capability of the 2505 * VMIO code by making the device node be VMIO mode also. 2506 * 2507 * vp must be locked when vfs_object_create is called. 2508 */ 2509 int 2510 vfs_object_create(vp, p, cred) 2511 struct vnode *vp; 2512 struct proc *p; 2513 struct ucred *cred; 2514 { 2515 struct vattr vat; 2516 vm_object_t object; 2517 int error = 0; 2518 2519 if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE) 2520 return 0; 2521 2522 retry: 2523 if ((object = vp->v_object) == NULL) { 2524 if (vp->v_type == VREG || vp->v_type == VDIR) { 2525 if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0) 2526 goto retn; 2527 object = vnode_pager_alloc(vp, vat.va_size, 0, 0); 2528 } else if (devsw(vp->v_rdev) != NULL) { 2529 /* 2530 * This simply allocates the biggest object possible 2531 * for a disk vnode. This should be fixed, but doesn't 2532 * cause any problems (yet). 2533 */ 2534 object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0); 2535 } else { 2536 goto retn; 2537 } 2538 /* 2539 * Dereference the reference we just created. This assumes 2540 * that the object is associated with the vp. 2541 */ 2542 object->ref_count--; 2543 vp->v_usecount--; 2544 } else { 2545 if (object->flags & OBJ_DEAD) { 2546 VOP_UNLOCK(vp, 0, p); 2547 tsleep(object, PVM, "vodead", 0); 2548 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 2549 goto retry; 2550 } 2551 } 2552 2553 KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object")); 2554 vp->v_flag |= VOBJBUF; 2555 2556 retn: 2557 return error; 2558 } 2559 2560 static void 2561 vfree(vp) 2562 struct vnode *vp; 2563 { 2564 int s; 2565 2566 s = splbio(); 2567 simple_lock(&vnode_free_list_slock); 2568 if (vp->v_flag & VTBFREE) { 2569 TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist); 2570 vp->v_flag &= ~VTBFREE; 2571 } 2572 if (vp->v_flag & VAGE) { 2573 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2574 } else { 2575 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2576 } 2577 freevnodes++; 2578 simple_unlock(&vnode_free_list_slock); 2579 vp->v_flag &= ~VAGE; 2580 vp->v_flag |= VFREE; 2581 splx(s); 2582 } 2583 2584 void 2585 vbusy(vp) 2586 struct vnode *vp; 2587 { 2588 int s; 2589 2590 s = splbio(); 2591 simple_lock(&vnode_free_list_slock); 2592 if (vp->v_flag & VTBFREE) { 2593 TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist); 2594 vp->v_flag &= ~VTBFREE; 2595 } else { 2596 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2597 freevnodes--; 2598 } 2599 simple_unlock(&vnode_free_list_slock); 2600 vp->v_flag &= ~(VFREE|VAGE); 2601 splx(s); 2602 } 2603 2604 /* 2605 * Record a process's interest in events which might happen to 2606 * a vnode. Because poll uses the historic select-style interface 2607 * internally, this routine serves as both the ``check for any 2608 * pending events'' and the ``record my interest in future events'' 2609 * functions. (These are done together, while the lock is held, 2610 * to avoid race conditions.) 2611 */ 2612 int 2613 vn_pollrecord(vp, p, events) 2614 struct vnode *vp; 2615 struct proc *p; 2616 short events; 2617 { 2618 simple_lock(&vp->v_pollinfo.vpi_lock); 2619 if (vp->v_pollinfo.vpi_revents & events) { 2620 /* 2621 * This leaves events we are not interested 2622 * in available for the other process which 2623 * which presumably had requested them 2624 * (otherwise they would never have been 2625 * recorded). 2626 */ 2627 events &= vp->v_pollinfo.vpi_revents; 2628 vp->v_pollinfo.vpi_revents &= ~events; 2629 2630 simple_unlock(&vp->v_pollinfo.vpi_lock); 2631 return events; 2632 } 2633 vp->v_pollinfo.vpi_events |= events; 2634 selrecord(p, &vp->v_pollinfo.vpi_selinfo); 2635 simple_unlock(&vp->v_pollinfo.vpi_lock); 2636 return 0; 2637 } 2638 2639 /* 2640 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 2641 * it is possible for us to miss an event due to race conditions, but 2642 * that condition is expected to be rare, so for the moment it is the 2643 * preferred interface. 2644 */ 2645 void 2646 vn_pollevent(vp, events) 2647 struct vnode *vp; 2648 short events; 2649 { 2650 simple_lock(&vp->v_pollinfo.vpi_lock); 2651 if (vp->v_pollinfo.vpi_events & events) { 2652 /* 2653 * We clear vpi_events so that we don't 2654 * call selwakeup() twice if two events are 2655 * posted before the polling process(es) is 2656 * awakened. This also ensures that we take at 2657 * most one selwakeup() if the polling process 2658 * is no longer interested. However, it does 2659 * mean that only one event can be noticed at 2660 * a time. (Perhaps we should only clear those 2661 * event bits which we note?) XXX 2662 */ 2663 vp->v_pollinfo.vpi_events = 0; /* &= ~events ??? */ 2664 vp->v_pollinfo.vpi_revents |= events; 2665 selwakeup(&vp->v_pollinfo.vpi_selinfo); 2666 } 2667 simple_unlock(&vp->v_pollinfo.vpi_lock); 2668 } 2669 2670 /* 2671 * Wake up anyone polling on vp because it is being revoked. 2672 * This depends on dead_poll() returning POLLHUP for correct 2673 * behavior. 2674 */ 2675 void 2676 vn_pollgone(vp) 2677 struct vnode *vp; 2678 { 2679 simple_lock(&vp->v_pollinfo.vpi_lock); 2680 if (vp->v_pollinfo.vpi_events) { 2681 vp->v_pollinfo.vpi_events = 0; 2682 selwakeup(&vp->v_pollinfo.vpi_selinfo); 2683 } 2684 simple_unlock(&vp->v_pollinfo.vpi_lock); 2685 } 2686 2687 2688 2689 /* 2690 * Routine to create and manage a filesystem syncer vnode. 2691 */ 2692 #define sync_close ((int (*) __P((struct vop_close_args *)))nullop) 2693 static int sync_fsync __P((struct vop_fsync_args *)); 2694 static int sync_inactive __P((struct vop_inactive_args *)); 2695 static int sync_reclaim __P((struct vop_reclaim_args *)); 2696 #define sync_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) 2697 #define sync_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock) 2698 static int sync_print __P((struct vop_print_args *)); 2699 #define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked) 2700 2701 static vop_t **sync_vnodeop_p; 2702 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 2703 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 2704 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 2705 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 2706 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 2707 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 2708 { &vop_lock_desc, (vop_t *) sync_lock }, /* lock */ 2709 { &vop_unlock_desc, (vop_t *) sync_unlock }, /* unlock */ 2710 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 2711 { &vop_islocked_desc, (vop_t *) sync_islocked }, /* islocked */ 2712 { NULL, NULL } 2713 }; 2714 static struct vnodeopv_desc sync_vnodeop_opv_desc = 2715 { &sync_vnodeop_p, sync_vnodeop_entries }; 2716 2717 VNODEOP_SET(sync_vnodeop_opv_desc); 2718 2719 /* 2720 * Create a new filesystem syncer vnode for the specified mount point. 2721 */ 2722 int 2723 vfs_allocate_syncvnode(mp) 2724 struct mount *mp; 2725 { 2726 struct vnode *vp; 2727 static long start, incr, next; 2728 int error; 2729 2730 /* Allocate a new vnode */ 2731 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 2732 mp->mnt_syncer = NULL; 2733 return (error); 2734 } 2735 vp->v_type = VNON; 2736 /* 2737 * Place the vnode onto the syncer worklist. We attempt to 2738 * scatter them about on the list so that they will go off 2739 * at evenly distributed times even if all the filesystems 2740 * are mounted at once. 2741 */ 2742 next += incr; 2743 if (next == 0 || next > syncer_maxdelay) { 2744 start /= 2; 2745 incr /= 2; 2746 if (start == 0) { 2747 start = syncer_maxdelay / 2; 2748 incr = syncer_maxdelay; 2749 } 2750 next = start; 2751 } 2752 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 2753 mp->mnt_syncer = vp; 2754 return (0); 2755 } 2756 2757 /* 2758 * Do a lazy sync of the filesystem. 2759 */ 2760 static int 2761 sync_fsync(ap) 2762 struct vop_fsync_args /* { 2763 struct vnode *a_vp; 2764 struct ucred *a_cred; 2765 int a_waitfor; 2766 struct proc *a_p; 2767 } */ *ap; 2768 { 2769 struct vnode *syncvp = ap->a_vp; 2770 struct mount *mp = syncvp->v_mount; 2771 struct proc *p = ap->a_p; 2772 int asyncflag; 2773 2774 /* 2775 * We only need to do something if this is a lazy evaluation. 2776 */ 2777 if (ap->a_waitfor != MNT_LAZY) 2778 return (0); 2779 2780 /* 2781 * Move ourselves to the back of the sync list. 2782 */ 2783 vn_syncer_add_to_worklist(syncvp, syncdelay); 2784 2785 /* 2786 * Walk the list of vnodes pushing all that are dirty and 2787 * not already on the sync list. 2788 */ 2789 simple_lock(&mountlist_slock); 2790 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) { 2791 simple_unlock(&mountlist_slock); 2792 return (0); 2793 } 2794 asyncflag = mp->mnt_flag & MNT_ASYNC; 2795 mp->mnt_flag &= ~MNT_ASYNC; 2796 vfs_msync(mp, MNT_NOWAIT); 2797 VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p); 2798 if (asyncflag) 2799 mp->mnt_flag |= MNT_ASYNC; 2800 vfs_unbusy(mp, p); 2801 return (0); 2802 } 2803 2804 /* 2805 * The syncer vnode is no referenced. 2806 */ 2807 static int 2808 sync_inactive(ap) 2809 struct vop_inactive_args /* { 2810 struct vnode *a_vp; 2811 struct proc *a_p; 2812 } */ *ap; 2813 { 2814 2815 vgone(ap->a_vp); 2816 return (0); 2817 } 2818 2819 /* 2820 * The syncer vnode is no longer needed and is being decommissioned. 2821 * 2822 * Modifications to the worklist must be protected at splbio(). 2823 */ 2824 static int 2825 sync_reclaim(ap) 2826 struct vop_reclaim_args /* { 2827 struct vnode *a_vp; 2828 } */ *ap; 2829 { 2830 struct vnode *vp = ap->a_vp; 2831 int s; 2832 2833 s = splbio(); 2834 vp->v_mount->mnt_syncer = NULL; 2835 if (vp->v_flag & VONWORKLST) { 2836 LIST_REMOVE(vp, v_synclist); 2837 vp->v_flag &= ~VONWORKLST; 2838 } 2839 splx(s); 2840 2841 return (0); 2842 } 2843 2844 /* 2845 * Print out a syncer vnode. 2846 */ 2847 static int 2848 sync_print(ap) 2849 struct vop_print_args /* { 2850 struct vnode *a_vp; 2851 } */ *ap; 2852 { 2853 struct vnode *vp = ap->a_vp; 2854 2855 printf("syncer vnode"); 2856 if (vp->v_vnlock != NULL) 2857 lockmgr_printinfo(vp->v_vnlock); 2858 printf("\n"); 2859 return (0); 2860 } 2861 2862 /* 2863 * extract the dev_t from a VBLK or VCHR 2864 */ 2865 dev_t 2866 vn_todev(vp) 2867 struct vnode *vp; 2868 { 2869 if (vp->v_type != VBLK && vp->v_type != VCHR) 2870 return (NODEV); 2871 return (vp->v_rdev); 2872 } 2873 2874 /* 2875 * Check if vnode represents a disk device 2876 */ 2877 int 2878 vn_isdisk(vp, errp) 2879 struct vnode *vp; 2880 int *errp; 2881 { 2882 if (vp->v_type != VBLK && vp->v_type != VCHR) { 2883 if (errp != NULL) 2884 *errp = ENOTBLK; 2885 return (0); 2886 } 2887 if (!devsw(vp->v_rdev)) { 2888 if (errp != NULL) 2889 *errp = ENXIO; 2890 return (0); 2891 } 2892 if (!(devsw(vp->v_rdev)->d_flags & D_DISK)) { 2893 if (errp != NULL) 2894 *errp = ENOTBLK; 2895 return (0); 2896 } 2897 if (errp != NULL) 2898 *errp = 0; 2899 return (1); 2900 } 2901 2902 void 2903 NDFREE(ndp, flags) 2904 struct nameidata *ndp; 2905 const uint flags; 2906 { 2907 if (!(flags & NDF_NO_FREE_PNBUF) && 2908 (ndp->ni_cnd.cn_flags & HASBUF)) { 2909 zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 2910 ndp->ni_cnd.cn_flags &= ~HASBUF; 2911 } 2912 if (!(flags & NDF_NO_DVP_UNLOCK) && 2913 (ndp->ni_cnd.cn_flags & LOCKPARENT) && 2914 ndp->ni_dvp != ndp->ni_vp) 2915 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_proc); 2916 if (!(flags & NDF_NO_DVP_RELE) && 2917 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) { 2918 vrele(ndp->ni_dvp); 2919 ndp->ni_dvp = NULL; 2920 } 2921 if (!(flags & NDF_NO_VP_UNLOCK) && 2922 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp) 2923 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_proc); 2924 if (!(flags & NDF_NO_VP_RELE) && 2925 ndp->ni_vp) { 2926 vrele(ndp->ni_vp); 2927 ndp->ni_vp = NULL; 2928 } 2929 if (!(flags & NDF_NO_STARTDIR_RELE) && 2930 (ndp->ni_cnd.cn_flags & SAVESTART)) { 2931 vrele(ndp->ni_startdir); 2932 ndp->ni_startdir = NULL; 2933 } 2934 } 2935