/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD$
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_ffs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias __P((struct vnode *vp, dev_t nvp_rdev));
static void	insmntque __P((struct vnode *vp, struct mount *mp));
static void	vclean __P((struct vnode *vp, int flags, struct thread *td));
/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long	numvnodes;
SYSCTL_LONG(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer free vnodes than
 * this, getnewvnode() will return a newly allocated vnode.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes = 0;
SYSCTL_LONG(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
/* Number of vnode allocations. */
static u_long vnodeallocs = 0;
SYSCTL_LONG(_debug, OID_AUTO, vnodeallocs, CTLFLAG_RD, &vnodeallocs, 0, "");
/* Period, in vnode allocations, between attempts to recycle vnodes from the namecache. */
static u_long vnoderecycleperiod = 1000;
SYSCTL_LONG(_debug, OID_AUTO, vnoderecycleperiod, CTLFLAG_RW, &vnoderecycleperiod, 0, "");
/* Minimum number of total vnodes required to invoke vnode recycle from namecache. */
static u_long vnoderecyclemintotalvn = 2000;
SYSCTL_LONG(_debug, OID_AUTO, vnoderecyclemintotalvn, CTLFLAG_RW, &vnoderecyclemintotalvn, 0, "");
/* Minimum number of free vnodes required to invoke vnode recycle from namecache. */
static u_long vnoderecycleminfreevn = 2000;
SYSCTL_LONG(_debug, OID_AUTO, vnoderecycleminfreevn, CTLFLAG_RW, &vnoderecycleminfreevn, 0, "");
/* Number of vnodes attempted to recycle at a time. */
static u_long vnoderecyclenumber = 3000;
SYSCTL_LONG(_debug, OID_AUTO, vnoderecyclenumber, CTLFLAG_RW, &vnoderecyclenumber, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
/* Set to 0 for old insertion-sort based reassignbuf, 1 for modern method. */
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");

#ifdef ENABLE_VFS_IOOPT
/* See NOTES for a description of this setting. */
int vfs_ioopt = 0;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);

/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;

/* For any iteration/modification of mnt_vnodelist */
struct mtx mntvnode_mtx;
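/*
 * Illustrative sketch (not compiled): the locking protocol the two
 * mutexes above are meant to support.  This mirrors the traversal
 * pattern used by the lockedvnodes DDB command later in this file;
 * the function name is hypothetical.
 */
#if 0
static void
example_walk_mounts(struct thread *td)
{
	struct mount *mp, *nmp;
	struct vnode *vp;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		/* On success, vfs_busy() releases the interlock passed in. */
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		mtx_lock(&mntvnode_mtx);
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
			;	/* examine vp while holding mntvnode_mtx */
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);
}
#endif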
/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static vm_zone_t vnode_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive = 0;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata for filesystems
 * mounted on block devices is delayed only about half the time that
 * file data is delayed.  Similarly, directory updates are more critical,
 * so they are delayed only about a third of the time that file data is
 * delayed.  Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process).  The syncer_delayno variable indicates the next queue
 * that is to be processed.  Items that need to be processed soon are
 * placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 * (An illustrative sketch of this slot selection follows the
 * declarations below.)
 */
static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
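/*
 * Illustrative sketch (not compiled) of the slot arithmetic described
 * in the workitem queue comment above: a delay in seconds selects a
 * bucket relative to syncer_delayno.  The "& syncer_mask" trick works
 * because hashinit() allocates a power-of-two number of buckets.  The
 * helper name is hypothetical.
 */
#if 0
static struct synclist *
example_slot_for_delay(int delay)
{
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	return (&syncer_workitem_pending[(syncer_delayno + delay) &
	    syncer_mask]);
}
#endif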
/*
 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
	mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, td)
	struct mount *mp;
	int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep((caddr_t)mp, interlkp, PVFS, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, td)
	struct mount *mp;
	struct thread *td;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(fstypename, devname, mpp)
	char *fstypename;
	char *devname;
	struct mount **mpp;
{
	struct thread *td = curthread;	/* XXX */
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	if (vfsp == NULL)
		return (ENODEV);
	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
	(void)vfs_busy(mp, LK_NOWAIT, 0, td);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	mp->mnt_vnodecovered = NULLVP;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_mntonname[0] = '/';
	mp->mnt_stat.f_mntonname[1] = 0;
	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}
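/*
 * Illustrative sketch (not compiled): how a filesystem's mountroot
 * routine typically uses vfs_rootmountalloc().  The "ufs" name and
 * example_mountfs() are assumptions for illustration only; the error
 * path undoes the refcount and busy state set up above.
 */
#if 0
static int
example_mountroot(void)
{
	struct mount *mp;
	int error;

	if ((error = vfs_rootmountalloc("ufs", "root_device", &mp)) != 0)
		return (error);
	if ((error = example_mountfs(mp)) != 0) {
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp, curthread);
		free(mp, M_MOUNT);
		return (error);
	}
	/* On success the caller adds mp to mountlist and unbusies it. */
	return (0);
}
#endif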
/*
 * Find an appropriate filesystem to use for the root.  If a filesystem
 * has not been preselected, walk through the list of known filesystems,
 * trying those that have mountroot routines, until one works or we have
 * tried them all.
 */
#ifdef notdef	/* XXX JH */
int
lite2_vfs_mountroot()
{
	struct vfsconf *vfsp;
	extern int (*lite2_mountroot) __P((void));
	int error;

	if (lite2_mountroot != NULL)
		return ((*lite2_mountroot)());
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL)
			continue;
		if ((error = (*vfsp->vfc_mountroot)()) == 0)
			return (0);
		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
	}
	return (ENODEV);
}
#endif

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");
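/*
 * Usage note (illustrative): the knob above is exposed as the
 * vfs.timestamp_precision sysctl, so e.g.
 *
 *	sysctl vfs.timestamp_precision=2
 *
 * selects microsecond resolution per the table above.
 */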
/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s, count;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = NULL;
	struct mount *vnmp;
	vm_object_t object;

	/*
	 * We take the least recently used vnode from the free list
	 * if we can get it and it has no cached pages and no
	 * namecache entries relative to it.  Otherwise we allocate
	 * a new vnode.
	 */

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
		/*
		 * XXX: this is only here to be backwards compatible
		 */
		vp = NULL;
	} else for (count = 0; count < freevnodes; count++) {
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == NULL || vp->v_usecount)
			panic("getnewvnode: free vnode isn't");
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);

		/*
		 * Don't recycle if active in the namecache or
		 * if it still has cached pages or we cannot get
		 * its interlock.
		 */
		if (LIST_FIRST(&vp->v_cache_src) != NULL ||
		    (VOP_GETVOBJECT(vp, &object) == 0 &&
		    (object->resident_page_count || object->ref_count)) ||
		    !mtx_trylock(&vp->v_interlock)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
			continue;
		}
		/*
		 * Skip over it if its filesystem is being suspended.
		 */
		if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
			break;
		mtx_unlock(&vp->v_interlock);
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		vp = NULL;
	}
	if (vp) {
		vp->v_flag |= VDOOMED;
		vp->v_flag &= ~VFREE;
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
			vgonel(vp, td);
		} else {
			mtx_unlock(&vp->v_interlock);
		}
		vn_finished_write(vnmp);

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		vp->v_flag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
	} else {
		mtx_unlock(&vnode_free_list_mtx);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
		vp->v_dd = vp;
		mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	lockinit(&vp->v_lock, PVFS, "vnlock", 0, LK_NOPAUSE);
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;

	splx(s);

	vfs_object_create(vp, td, td->td_proc->p_ucred);

	vnodeallocs++;
	if (vnodeallocs % vnoderecycleperiod == 0 &&
	    freevnodes < vnoderecycleminfreevn &&
	    vnoderecyclemintotalvn < numvnodes) {
		/* Recycle vnodes. */
		cache_purgeleafdirs(vnoderecyclenumber);
	}

	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		mtx_unlock(&mntvnode_mtx);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}
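/*
 * Illustrative sketch (not compiled): the waiting side of the
 * v_numoutput/VBWAIT protocol that vwakeup() above completes.  A
 * thread that must drain a vnode's pending writes sleeps like this;
 * the same pattern appears in vinvalbuf() and vtruncbuf() below.
 */
#if 0
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep((caddr_t)&vp->v_numoutput, PVM, "example", 0);
	}
#endif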
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	GIANT_REQUIRED;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (;;) {
		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
		if (!blist)
			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
				error = BUF_TIMELOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL,
				    "vinvalbuf", slpflag, slptimeo);
				if (error == ENOLCK)
					break;
				splx(s);
				return (error);
			}
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.  Note that vfs_bio_awrite expects
			 * buffers to reside on a queue, while BUF_WRITE and
			 * brelse do not.
			 */
			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
			    (flags & V_SAVE)) {

				if (bp->b_vp == vp) {
					if (bp->b_flags & B_CLUSTEROK) {
						BUF_UNLOCK(bp);
						vfs_bio_awrite(bp);
					} else {
						bremfree(bp);
						bp->b_flags |= B_ASYNC;
						BUF_WRITE(bp);
					}
				} else {
					bremfree(bp);
					(void) BUF_WRITE(bp);
				}
				break;
			}
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	mtx_lock(&vp->v_interlock);
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	mtx_unlock(&vp->v_interlock);

	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, td, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct thread *td;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					BUF_WRITE(bp);
				}
				goto restartsync;
			}

		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	vhold(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bp->b_xflags |= BX_VNCLEAN;
	bp->b_xflags &= ~BX_VNDIRTY;
	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	struct buflists *listheadp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		if (bp->b_xflags & BX_VNDIRTY)
			listheadp = &vp->v_dirtyblkhd;
		else
			listheadp = &vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
	}
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
}
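/*
 * Illustrative sketch (not compiled): bgetvp() and brelvp() above are
 * always paired around a buffer's association with a vnode.  Between
 * the calls the buffer sits on exactly one of v_cleanblkhd or
 * v_dirtyblkhd; reassignbuf() below moves it between the two.
 */
#if 0
	bgetvp(vp, bp);		/* bp onto vp->v_cleanblkhd, vp held */
	/* ... buffer in use; may migrate lists via reassignbuf() ... */
	brelvp(bp);		/* off vp's lists, hold dropped via vdrop() */
#endif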
/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}

struct proc *updateproc;
static void sched_sync __P((void));
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	int s;
	struct thread *td = &updateproc->p_thread;	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(td->td_proc);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) == 0 &&
			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
				(void) VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_LAZY, td);
				VOP_UNLOCK(vp, 0, td);
				vn_finished_write(mp);
			}
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: v_tag VT_VFS vps can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL))
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
#ifdef SOFTUPDATES
		softdep_process_worklist(NULL);
#endif

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 * XXXKSE  only one update?
 */
int
speedup_syncer()
{

	mtx_lock_spin(&sched_lock);
	if (updateproc->p_thread.td_wchan == &lbolt) /* XXXKSE */
		setrunnable(&updateproc->p_thread);
	mtx_unlock_spin(&sched_lock);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer, i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Change the vnode a pager buffer is associated with.
 */
void
pbreassignbuf(bp, newvp)
	struct buf *bp;
	struct vnode *newvp;
{

	KASSERT(bp->b_flags & B_PAGING,
	    ("pbreassignbuf() on non phys bp %p", bp));
	bp->b_vp = newvp;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	struct buflists *listheadp;
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		if (bp->b_xflags & BX_VNDIRTY)
			listheadp = &bp->b_vp->v_dirtyblkhd;
		else
			listheadp = &bp->b_vp->v_cleanblkhd;
		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
		if (bp->b_vp != newvp) {
			vdrop(bp->b_vp);
			bp->b_vp = NULL;	/* for clarification */
		}
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		listheadp = &newvp->v_dirtyblkhd;
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
				if (newvp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		bp->b_xflags |= BX_VNDIRTY;
		tbp = TAILQ_FIRST(listheadp);
		if (tbp == NULL ||
		    bp->b_lblkno == 0 ||
		    (bp->b_lblkno > 0 && tbp->b_lblkno < 0) ||
		    (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) {
			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
			++reassignbufsortgood;
		} else if (bp->b_lblkno < 0) {
			TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
			++reassignbufsortgood;
		} else if (reassignbufmethod == 1) {
			/*
			 * New sorting algorithm, only handle sequential case,
			 * otherwise append to end (but before metadata)
			 */
			if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL &&
			    (tbp->b_xflags & BX_VNDIRTY)) {
				/*
				 * Found the best place to insert the buffer
				 */
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
				++reassignbufsortgood;
			} else {
				/*
				 * Missed, append to end, but before meta-data.
				 * We know that the head buffer in the list is
				 * not meta-data due to prior conditionals.
				 *
				 * Indirect effects: NFS second stage write
				 * tends to wind up here, giving maximum
				 * distance between the unstable write and the
				 * commit rpc.
				 */
				tbp = TAILQ_LAST(listheadp, buflists);
				while (tbp && tbp->b_lblkno < 0)
					tbp = TAILQ_PREV(tbp, buflists, b_vnbufs);
				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
				++reassignbufsortbad;
			}
		} else {
			/*
			 * Old sorting algorithm, scan queue and insert
			 */
			struct buf *ttbp;
			while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
			    (ttbp->b_lblkno < bp->b_lblkno)) {
				++reassignbufloops;
				tbp = ttbp;
			}
			TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
		}
	} else {
		bp->b_xflags |= BX_VNCLEAN;
		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	if (bp->b_vp != newvp) {
		bp->b_vp = newvp;
		vhold(bp->b_vp);
	}
	splx(s);
}

/*
 * Create a vnode for a device.
 * Used for mounting the root file system.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	if (vfinddev(dev, VCHR, vpp))
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	addalias(vp, dev);
	*vpp = vp;
	return (0);
}

/*
 * Add vnode to the alias list hung off the dev_t.
 *
 * The reason for this gunk is that multiple vnodes can reference
 * the same physical device, so checking vp->v_usecount to see
 * how many users there are is inadequate; the v_usecounts for
 * all the vnodes need to be accumulated.  vcount() does that.
 */
struct vnode *
addaliasu(nvp, nvp_rdev)
	struct vnode *nvp;
	udev_t nvp_rdev;
{
	struct vnode *ovp;
	vop_t **ops;
	dev_t dev;

	if (nvp->v_type == VBLK)
		return (nvp);
	if (nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	dev = udev2dev(nvp_rdev, 0);
	/*
	 * Check to see if we have a bdevvp vnode with no associated
	 * filesystem.  If so, we want to associate the filesystem of
	 * the newly created vnode with the bdevvp vnode and discard
	 * the newly created vnode rather than leave the bdevvp vnode
	 * lying around with no associated filesystem.
	 */
	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
		addalias(nvp, dev);
		return (nvp);
	}
	/*
	 * Discard unneeded vnode, but save its node specific data.
	 * Note that if there is a lock, it is carried over in the
	 * node specific data to the replacement vnode.
	 */
	vref(ovp);
	ovp->v_data = nvp->v_data;
	ovp->v_tag = nvp->v_tag;
	nvp->v_data = NULL;
	lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg,
	    nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK);
	if (nvp->v_vnlock)
		ovp->v_vnlock = &ovp->v_lock;
	ops = ovp->v_op;
	ovp->v_op = nvp->v_op;
	if (VOP_ISLOCKED(nvp, curthread)) {
		VOP_UNLOCK(nvp, 0, curthread);
		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
	}
	nvp->v_op = ops;
	insmntque(ovp, nvp->v_mount);
	vrele(nvp);
	vgone(nvp);
	return (ovp);
}

/*
 * This is a local helper function that does the same as addaliasu, but
 * for a dev_t instead of a udev_t.
 */
static void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
	mtx_lock(&spechash_mtx);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	mtx_unlock(&spechash_mtx);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  The vnode lock bit is set if the
 * vnode is being eliminated in vgone.  The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, td)
	register struct vnode *vp;
	int flags;
	struct thread *td;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		mtx_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (vp->v_vxproc == curthread) {
			printf("VXLOCK interlock avoided\n");
		} else {
			vp->v_flag |= VXWANT;
			msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
			    "vget", 0);
			return (ENOENT);
		}
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active.
			 * We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			mtx_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			mtx_unlock(&vp->v_interlock);
		}
		return (error);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Increase the reference count of a vnode.
 */
void
vref(struct vnode *vp)
{
	mtx_lock(&vp->v_interlock);
	vp->v_usecount++;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	mtx_lock(&vp->v_interlock);

	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vrele: missed vn_close"));

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		mtx_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we
		 * must call VOP_INACTIVE with the node locked.  So, in the
		 * case of vrele, we explicitly lock the vnode before calling
		 * VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
			VOP_INACTIVE(vp, td);
		}

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		mtx_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

/*
 * Release an already locked vnode.  This gives the same effect as
 * unlock+vrele(), but takes less time and avoids releasing and
 * re-acquiring the lock (as vrele() acquires the lock internally).
 */
void
vput(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	KASSERT(vp != NULL, ("vput: null vp"));
	mtx_lock(&vp->v_interlock);
	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vput: missed vn_close"));

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, td);
		return;

	}

	if (vp->v_usecount == 1) {

		vp->v_usecount--;
		if (VSHOULDFREE(vp))
			vfree(vp);
		/*
		 * If we are doing a vput, the node is already locked, and we
		 * must call VOP_INACTIVE with the node locked.  So, in the
		 * case of vrele, we explicitly lock the vnode before calling
		 * VOP_INACTIVE.
		 */
		mtx_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, td);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}

/*
 * Somebody doesn't want the vnode recycled.
 */
void
vhold(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	vp->v_holdcnt++;
	if (VSHOULDBUSY(vp))
		vbusy(vp);
	splx(s);
}
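/*
 * Illustrative sketch (not compiled): the pairing rules for the
 * reference primitives above.  vget()/vput() bracket locked use of a
 * vnode; vref()/vrele() manage plain references; vhold()/vdrop() (the
 * latter follows below) only prevent recycling and do not keep the
 * vnode active.
 */
#if 0
	if (vget(vp, LK_EXCLUSIVE, td) == 0) {
		/* ... operate on the locked, referenced vnode ... */
		vput(vp);	/* unlock and release in one call */
	}
#endif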
/*
 * Note that there is one less who cares about this vnode.  vdrop() is the
 * opposite of vhold().
 */
void
vdrop(vp)
	register struct vnode *vp;
{
	int s;

	s = splbio();
	if (vp->v_holdcnt <= 0)
		panic("vdrop: holdcnt");
	vp->v_holdcnt--;
	if (VSHOULDFREE(vp))
		vfree(vp);
	splx(s);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_usecount exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, rootrefs, flags)
	struct mount *mp;
	int rootrefs;
	int flags;
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp, *nvp, *rootvp = NULL;
	int busy = 0, error;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);
	}
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has been (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = LIST_NEXT(vp, v_mntvnodes);

		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			mtx_unlock(&vp->v_interlock);
			mtx_lock(&mntvnode_mtx);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			mtx_unlock(&vp->v_interlock);
			mtx_lock(&mntvnode_mtx);
			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, td);
			mtx_lock(&mntvnode_mtx);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode.  For block
		 * or character devices, revert to an anonymous device.  For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VCHR) {
				vgonel(vp, td);
			} else {
				vclean(vp, 0, td);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			mtx_lock(&mntvnode_mtx);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		mtx_unlock(&vp->v_interlock);
		mtx_lock(&mntvnode_mtx);
		busy++;
	}
	mtx_unlock(&mntvnode_mtx);
	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		mtx_lock(&rootvp->v_interlock);
		KASSERT(busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (busy == 1 && rootvp->v_usecount == rootrefs) {
			vgonel(rootvp, td);
			busy = 0;
		} else
			mtx_unlock(&rootvp->v_interlock);
	}
	if (busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 */
static void
vclean(vp, flags, td)
	struct vnode *vp;
	int flags;
	struct thread *td;
{
	int active;

	/*
	 * Check to see if the vnode is in use.  If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	vp->v_vxproc = curthread;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out.  The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);

	/*
	 * Clean out any buffers associated with the vnode.
	 * If the flush fails, just toss the buffers.
	 */
	if (flags & DOCLOSE) {
		if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL)
			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
	}

	VOP_DESTROYVOBJECT(vp);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
		VOP_INACTIVE(vp, td);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, td);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, td))
		panic("vclean: cannot reclaim");

	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		mtx_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			vfree(vp);
		}
		mtx_unlock(&vp->v_interlock);
	}

	cache_purge(vp);
	vp->v_vnlock = NULL;
	lockdestroy(&vp->v_lock);

	if (VSHOULDFREE(vp))
		vfree(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	vp->v_vxproc = NULL;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	dev_t dev;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));

	vp = ap->a_vp;
	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
		    "vop_revokeall", 0);
		return (0);
	}
	dev = vp->v_rdev;
	for (;;) {
		mtx_lock(&spechash_mtx);
		vq = SLIST_FIRST(&dev->si_hlist);
		mtx_unlock(&spechash_mtx);
		if (!vq)
			break;
		vgone(vq);
	}
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, td)
	struct vnode *vp;
	struct mtx *inter_lkp;
	struct thread *td;
{

	mtx_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			mtx_unlock(inter_lkp);
		}
		vgonel(vp, td);
		return (1);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	mtx_lock(&vp->v_interlock);
	vgonel(vp, td);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, td)
	struct vnode *vp;
	struct thread *td;
{
	int s;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
		    "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, td);
	mtx_lock(&vp->v_interlock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
		mtx_lock(&spechash_mtx);
		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
		freedev(vp->v_rdev);
		mtx_unlock(&spechash_mtx);
		vp->v_rdev = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list.  The test of the
	 * VDOOMED flag and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone.  If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		mtx_lock(&vnode_free_list_mtx);
		if (vp->v_flag & VFREE)
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		mtx_unlock(&vnode_free_list_mtx);
		splx(s);
	}

	vp->v_type = VBAD;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;

	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			mtx_unlock(&spechash_mtx);
			return (1);
		}
	}
	mtx_unlock(&spechash_mtx);
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int count;

	count = 0;
	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
		count += vq->v_usecount;
	mtx_unlock(&spechash_mtx);
	return (count);
}

/*
 * Same as above, but using the dev_t as the argument.
 */
int
count_dev(dev)
	dev_t dev;
{
	struct vnode *vp;

	vp = SLIST_FIRST(&dev->si_hlist);
	if (vp == NULL)
		return (0);
	return (vcount(vp));
}
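/*
 * Illustrative sketch (not compiled): because a device may be reached
 * through several aliased vnodes on the spechash list, "is this device
 * still open?" checks use vcount()/count_dev() above rather than a
 * single vnode's v_usecount.
 */
#if 0
	if (vcount(vp) > 1)
		printf("device still referenced through an alias\n");
#endif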
#ifdef DDB
#include <ddb/ddb.h>
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	struct thread *td = curthread;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		mtx_lock(&mntvnode_mtx);
		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
			if (VOP_ISLOCKED(vp, NULL))
				vprint((char *)0, vp);
		}
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf __P((SYSCTL_HANDLER_ARGS));

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

	/* XXX the below code does not compile; vfs_sysctl does not exist. */
#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, td));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");
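/*
 * Editor's note (illustrative, not original source): because vfs_sysctl()
 * is attached at the vfs.generic node, the handler backs "name" up by one
 * slot so that the node's own identifier is visible again.  A userland
 * request for, say, { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum } therefore
 * arrives in the switch above as name[] = { VFS_GENERIC, VFS_CONF,
 * typenum } with namelen == 3, which is exactly what the VFS_CONF case
 * checks for.
 */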
#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vfsconf *vfsp;
	struct ovfsconf ovfs;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
		strcpy(ovfs.vfc_name, vfsp->vfc_name);
		ovfs.vfc_index = vfsp->vfc_typenum;
		ovfs.vfc_refcount = vfsp->vfc_refcount;
		ovfs.vfc_flags = vfsp->vfc_flags;
		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
		if (error)
			return (error);
	}
	return (0);
}

#endif /* 1 || COMPAT_PRELITE2 */

#if COMPILING_LINT
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		mtx_lock(&mntvnode_mtx);
again:
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem.  RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp)
				goto again;
			nvp = LIST_NEXT(vp, v_mntvnodes);
			mtx_unlock(&mntvnode_mtx);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
			mtx_lock(&mntvnode_mtx);
		}
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);

	return (0);
}

/*
 * XXX
 * Exporting the vnode list on large systems causes them to crash.
 * Exporting the vnode list on medium systems causes sysctl to coredump.
 */
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");
#endif
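/*
 * Editor's illustrative sketch (not original source, not compiled): when
 * the block above is built, a userland consumer such as pstat(8) reads
 * the exported (vnode pointer, vnode) pairs via sysctl(3), sizing the
 * buffer with a NULL oldp first:
 *
 *	int mib[2] = { CTL_KERN, KERN_VNODE };
 *	size_t len;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == 0) {
 *		char *buf = malloc(len);
 *		if (buf != NULL && sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *			...parse (vnode pointer, vnode) pairs...
 *	}
 */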
/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{

	if (vp->v_rdev->si_mountpoint != NULL)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
void
vfs_unmountall()
{
	struct mount *mp;
	struct thread *td;
	int error;

	if (curthread != NULL)
		td = curthread;
	else
		td = &initproc->p_thread;	/* XXX XXX should this be proc0? */
	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	while (!TAILQ_EMPTY(&mountlist)) {
		mp = TAILQ_LAST(&mountlist, mntlist);
		error = dounmount(mp, MNT_FORCE, td);
		if (error) {
			TAILQ_REMOVE(&mountlist, mp, mnt_list);
			printf("unmount of %s failed (",
			    mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		} else {
			/* The unmount has removed mp from the mountlist */
		}
	}
}

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
	struct vm_object *obj;
	int anyio, tries;

	GIANT_REQUIRED;

	tries = 5;
loop:
	anyio = 0;
	mtx_lock(&mntvnode_mtx);
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		nvp = LIST_NEXT(vp, v_mntvnodes);

		if (vp->v_mount != mp) {
			mtx_unlock(&mntvnode_mtx);
			goto loop;
		}

		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
			continue;

		if (vp->v_flag & VNOSYNC)	/* unlinked, skip it */
			continue;

		if (flags != MNT_WAIT) {
			if (VOP_GETVOBJECT(vp, &obj) != 0 ||
			    (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
				continue;
			if (VOP_ISLOCKED(vp, NULL))
				continue;
		}

		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&vp->v_interlock);
		if (VOP_GETVOBJECT(vp, &obj) == 0 &&
		    (obj->flags & OBJ_MIGHTBEDIRTY)) {
			if (!vget(vp,
			    LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curthread)) {
				if (VOP_GETVOBJECT(vp, &obj) == 0) {
					vm_object_page_clean(obj, 0, 0,
					    flags == MNT_WAIT ?
					    OBJPC_SYNC : OBJPC_NOSYNC);
					anyio = 1;
				}
				vput(vp);
			}
		} else {
			mtx_unlock(&vp->v_interlock);
		}
		mtx_lock(&mntvnode_mtx);
	}
	mtx_unlock(&mntvnode_mtx);
	if (anyio && (--tries > 0))
		goto loop;
}

/*
 * Create the VM object needed for VMIO and mmap support.  This
 * is done for all VREG files in the system.  Some filesystems might
 * want to take advantage of the additional metadata buffering
 * capability of the VMIO code by making the device node be VMIO
 * mode also.
 *
 * vp must be locked when vfs_object_create is called.
 */
int
vfs_object_create(vp, td, cred)
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
{
	GIANT_REQUIRED;
	return (VOP_CREATEVOBJECT(vp, cred, td));
}
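/*
 * Editor's illustrative sketch (hypothetical caller, not original
 * source): a filesystem typically calls vfs_object_create() while
 * bringing a regular file into service, e.g. from its VOP_OPEN routine,
 * with the vnode locked as required above:
 *
 *	error = vfs_object_create(vp, ap->a_td, ap->a_cred);
 *	if (error)
 *		return (error);
 */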
/*
 * Mark a vnode as free, putting it up for recycling.
 */
void
vfree(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
	splx(s);
}

/*
 * Opposite of vfree() - mark a vnode as in use.
 */
void
vbusy(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_flag &= ~(VFREE|VAGE);
	splx(s);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(vp, td, events)
	struct vnode *vp;
	struct thread *td;
	short events;
{
	mtx_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		mtx_unlock(&vp->v_pollinfo.vpi_lock);
		return (events);
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(td, &vp->v_pollinfo.vpi_selinfo);
	mtx_unlock(&vp->v_pollinfo.vpi_lock);
	return (0);
}

/*
 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{
	mtx_lock(&vp->v_pollinfo.vpi_lock);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened.  This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested.  However, it does
		 * mean that only one event can be noticed at
		 * a time.  (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	mtx_unlock(&vp->v_pollinfo.vpi_lock);
}

#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_pollinfo.vpi_selinfo.si_note, (b))
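#if 0
/*
 * Editor's illustrative sketch (hypothetical filesystem, not compiled
 * and not original source): a VOP_POLL implementation built on
 * vn_pollrecord().  The names examplefs_poll and the vop_poll_args
 * layout follow the usual vnode-op conventions but are assumptions.
 */
static int
examplefs_poll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	/* Report pending events, or record interest in future ones. */
	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}
#endif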
/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(vp)
	struct vnode *vp;
{
	mtx_lock(&vp->v_pollinfo.vpi_lock);
	VN_KNOTE(vp, NOTE_REVOKE);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	mtx_unlock(&vp->v_pollinfo.vpi_lock);
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close ((int (*) __P((struct vop_close_args *)))nullop)
static int	sync_fsync __P((struct vop_fsync_args *));
static int	sync_inactive __P((struct vop_inactive_args *));
static int	sync_reclaim __P((struct vop_reclaim_args *));
#define sync_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
#define sync_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
static int	sync_print __P((struct vop_print_args *));
#define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)

static vop_t **sync_vnodeop_p;
static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
	{ NULL, NULL }
};
static struct vnodeopv_desc sync_vnodeop_opv_desc =
	{ &sync_vnodeop_p, sync_vnodeop_entries };

VNODEOP_SET(sync_vnodeop_opv_desc);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
	mp->mnt_syncer = vp;
	return (0);
}
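/*
 * Editor's worked example (illustrative, not original source) for the
 * slot scattering above: assuming syncer_maxdelay == 32, successive
 * calls assign next = 16, then 8, 24, then 4, 12, 20, 28, then 2, ...
 * Each pass halves the stride and fills in the gaps left by the
 * previous one, so filesystems mounted together still sync at
 * staggered times.
 */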
/*
 * Do a lazy sync of the filesystem.
 */
static int
sync_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	struct thread *td = ap->a_td;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	mtx_lock(&mountlist_mtx);
	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
		mtx_unlock(&mountlist_mtx);
		return (0);
	}
	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
		vfs_unbusy(mp, td);
		return (0);
	}
	asyncflag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	vfs_msync(mp, MNT_NOWAIT);
	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
	if (asyncflag)
		mp->mnt_flag |= MNT_ASYNC;
	vn_finished_write(mp);
	vfs_unbusy(mp, td);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	vgone(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 *
 * Modifications to the worklist must be protected at splbio().
 */
static int
sync_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int s;

	s = splbio();
	vp->v_mount->mnt_syncer = NULL;
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vp->v_flag &= ~VONWORKLST;
	}
	splx(s);

	return (0);
}

/*
 * Print out a syncer vnode.
 */
static int
sync_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}

/*
 * Extract the dev_t from a VCHR vnode.
 */
dev_t
vn_todev(vp)
	struct vnode *vp;
{
	if (vp->v_type != VCHR)
		return (NODEV);
	return (vp->v_rdev);
}

/*
 * Check if a vnode represents a disk device.
 */
int
vn_isdisk(vp, errp)
	struct vnode *vp;
	int *errp;
{
	struct cdevsw *cdevsw;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (vp->v_rdev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	cdevsw = devsw(vp->v_rdev);
	if (cdevsw == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (!(cdevsw->d_flags & D_DISK)) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}
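/*
 * Editor's illustrative sketch (hypothetical caller, not original
 * source) for NDFREE() below: a typical lookup-use-release sequence,
 * where NDINIT() and NDF_ONLY_PNBUF are the usual namei(9) companions:
 *
 *	struct nameidata nd;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);	release only the pathname buffer
 *	...use nd.ni_vp...
 *	vput(nd.ni_vp);
 */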
/*
 * Free data allocated by namei(); see namei(9) for details.
 */
void
NDFREE(ndp, flags)
	struct nameidata *ndp;
	const uint flags;
{
	if (!(flags & NDF_NO_FREE_PNBUF) &&
	    (ndp->ni_cnd.cn_flags & HASBUF)) {
		zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
		ndp->ni_cnd.cn_flags &= ~HASBUF;
	}
	if (!(flags & NDF_NO_DVP_UNLOCK) &&
	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
	    ndp->ni_dvp != ndp->ni_vp)
		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
	if (!(flags & NDF_NO_DVP_RELE) &&
	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
		vrele(ndp->ni_dvp);
		ndp->ni_dvp = NULL;
	}
	if (!(flags & NDF_NO_VP_UNLOCK) &&
	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
	if (!(flags & NDF_NO_VP_RELE) &&
	    ndp->ni_vp) {
		vrele(ndp->ni_vp);
		ndp->ni_vp = NULL;
	}
	if (!(flags & NDF_NO_STARTDIR_RELE) &&
	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
		vrele(ndp->ni_startdir);
		ndp->ni_startdir = NULL;
	}
}

/*
 * Common file system object access control check routine.  Accepts a
 * vnode's type, "mode", uid and gid, requested access mode, credentials,
 * and optional call-by-reference privused argument allowing vaccess()
 * to indicate to the caller whether privilege was used to satisfy the
 * request.  Returns 0 on success, or an errno on failure.
 */
int
vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
	enum vtype type;
	mode_t file_mode;
	uid_t file_uid;
	gid_t file_gid;
	mode_t acc_mode;
	struct ucred *cred;
	int *privused;
{
	mode_t dac_granted;
#ifdef CAPABILITIES
	mode_t cap_granted;
#endif

	/*
	 * Look for a normal, non-privileged way to access the file/directory
	 * as requested.  If it exists, go with that.
	 */

	if (privused != NULL)
		*privused = 0;

	dac_granted = 0;

	/* Check the owner. */
	if (cred->cr_uid == file_uid) {
		dac_granted |= VADMIN;
		if (file_mode & S_IXUSR)
			dac_granted |= VEXEC;
		if (file_mode & S_IRUSR)
			dac_granted |= VREAD;
		if (file_mode & S_IWUSR)
			dac_granted |= VWRITE;

		if ((acc_mode & dac_granted) == acc_mode)
			return (0);

		goto privcheck;
	}

	/* Otherwise, check the groups (first match). */
	if (groupmember(file_gid, cred)) {
		if (file_mode & S_IXGRP)
			dac_granted |= VEXEC;
		if (file_mode & S_IRGRP)
			dac_granted |= VREAD;
		if (file_mode & S_IWGRP)
			dac_granted |= VWRITE;

		if ((acc_mode & dac_granted) == acc_mode)
			return (0);

		goto privcheck;
	}

	/* Otherwise, check everyone else. */
	if (file_mode & S_IXOTH)
		dac_granted |= VEXEC;
	if (file_mode & S_IROTH)
		dac_granted |= VREAD;
	if (file_mode & S_IWOTH)
		dac_granted |= VWRITE;
	if ((acc_mode & dac_granted) == acc_mode)
		return (0);

privcheck:
	if (!suser_xxx(cred, NULL, PRISON_ROOT)) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}

#ifdef CAPABILITIES
	/*
	 * Build a capability mask to determine if the set of capabilities
	 * satisfies the requirements when combined with the granted mask
	 * from above.
	 * For each capability, if the capability is required, bitwise
	 * or the request type onto the cap_granted mask.
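	 *
	 * Editor's worked example (illustrative, not original source): a
	 * request for VREAD|VWRITE where discretionary access granted only
	 * VREAD leaves VWRITE outstanding; holding CAP_DAC_WRITE then puts
	 * VWRITE into cap_granted, so the final test
	 * (acc_mode & (cap_granted | dac_granted)) == acc_mode succeeds.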
	 */
	cap_granted = 0;

	if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
		cap_granted |= VEXEC;

	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
		cap_granted |= VREAD;

	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
		cap_granted |= VWRITE;

	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
	    !cap_check_xxx(cred, NULL, CAP_FOWNER, PRISON_ROOT))
		cap_granted |= VADMIN;

	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}
#endif

	return ((acc_mode & VADMIN) ? EPERM : EACCES);
}
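#if 0
/*
 * Editor's illustrative sketch (hypothetical filesystem, not compiled
 * and not original source): how a VOP_ACCESS routine typically feeds
 * its on-disk attributes to vaccess().  "struct example_inode", VTOEX()
 * and the ie_* fields are assumptions made for the example.
 */
static int
examplefs_access(vp, acc_mode, cred, td)
	struct vnode *vp;
	mode_t acc_mode;
	struct ucred *cred;
	struct thread *td;
{
	struct example_inode *ip = VTOEX(vp);

	/* Hand the mode/uid/gid triple to the common check routine. */
	return (vaccess(vp->v_type, ip->ie_mode, ip->ie_uid, ip->ie_gid,
	    acc_mode, cred, NULL));
}
#endif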