1 /* 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 39 * $FreeBSD$ 40 */ 41 42 /* 43 * External virtual filesystem routines 44 */ 45 #include "opt_ddb.h" 46 #include "opt_ffs.h" 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/bio.h> 51 #include <sys/buf.h> 52 #include <sys/conf.h> 53 #include <sys/eventhandler.h> 54 #include <sys/fcntl.h> 55 #include <sys/kernel.h> 56 #include <sys/kthread.h> 57 #include <sys/malloc.h> 58 #include <sys/mount.h> 59 #include <sys/namei.h> 60 #include <sys/stat.h> 61 #include <sys/sysctl.h> 62 #include <sys/syslog.h> 63 #include <sys/vmmeter.h> 64 #include <sys/vnode.h> 65 66 #include <vm/vm.h> 67 #include <vm/vm_object.h> 68 #include <vm/vm_extern.h> 69 #include <vm/pmap.h> 70 #include <vm/vm_map.h> 71 #include <vm/vm_page.h> 72 #include <vm/uma.h> 73 74 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); 75 76 static void addalias(struct vnode *vp, dev_t nvp_rdev); 77 static void insmntque(struct vnode *vp, struct mount *mp); 78 static void vclean(struct vnode *vp, int flags, struct thread *td); 79 static void vlruvp(struct vnode *vp); 80 81 /* 82 * Number of vnodes in existence. Increased whenever getnewvnode() 83 * allocates a new vnode, never decreased. 
84 */ 85 static unsigned long numvnodes; 86 87 SYSCTL_LONG(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, ""); 88 89 /* 90 * Conversion tables for conversion from vnode types to inode formats 91 * and back. 92 */ 93 enum vtype iftovt_tab[16] = { 94 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 95 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 96 }; 97 int vttoif_tab[9] = { 98 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 99 S_IFSOCK, S_IFIFO, S_IFMT, 100 }; 101 102 /* 103 * List of vnodes that are ready for recycling. 104 */ 105 static TAILQ_HEAD(freelst, vnode) vnode_free_list; 106 107 /* 108 * Minimum number of free vnodes. If there are fewer than this free vnodes, 109 * getnewvnode() will return a newly allocated vnode. 110 */ 111 static u_long wantfreevnodes = 25; 112 SYSCTL_LONG(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, ""); 113 /* Number of vnodes in the free list. */ 114 static u_long freevnodes; 115 SYSCTL_LONG(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, ""); 116 117 /* 118 * Various variables used for debugging the new implementation of 119 * reassignbuf(). 120 * XXX these are probably of (very) limited utility now. 121 */ 122 static int reassignbufcalls; 123 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, ""); 124 static int reassignbufloops; 125 SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, ""); 126 static int reassignbufsortgood; 127 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, ""); 128 static int reassignbufsortbad; 129 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, ""); 130 /* Set to 0 for old insertion-sort based reassignbuf, 1 for modern method. */ 131 static int reassignbufmethod = 1; 132 SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, ""); 133 static int nameileafonly; 134 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, ""); 135 136 #ifdef ENABLE_VFS_IOOPT 137 /* See NOTES for a description of this setting. */ 138 int vfs_ioopt; 139 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, ""); 140 #endif 141 142 /* List of mounted filesystems. */ 143 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); 144 145 /* For any iteration/modification of mountlist */ 146 struct mtx mountlist_mtx; 147 148 /* For any iteration/modification of mnt_vnodelist */ 149 struct mtx mntvnode_mtx; 150 151 /* 152 * Cache for the mount type id assigned to NFS. This is used for 153 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c. 154 */ 155 int nfs_mount_type = -1; 156 157 /* To keep more than one thread at a time from running vfs_getnewfsid */ 158 static struct mtx mntid_mtx; 159 160 /* For any iteration/modification of vnode_free_list */ 161 static struct mtx vnode_free_list_mtx; 162 163 /* 164 * For any iteration/modification of dev->si_hlist (linked through 165 * v_specnext) 166 */ 167 static struct mtx spechash_mtx; 168 169 /* Publicly exported FS */ 170 struct nfs_public nfs_pub; 171 172 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 173 static uma_zone_t vnode_zone; 174 static uma_zone_t vnodepoll_zone; 175 176 /* Set to 1 to print out reclaim of active vnodes */ 177 int prtactive; 178 179 /* 180 * The workitem queue. 
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, metadata updates on
 * mounted block devices are delayed only about half the time that file
 * data is delayed. Similarly, directory updates are more critical, so
 * they are delayed only about a third of the time that file data is
 * delayed. Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process). The syncer_delayno variable indicates the next queue
 * that is to be processed. Items that need to be processed soon are
 * placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");

/*
 * Number of vnodes we want to exist at any one time. This is mostly used
 * to size hash tables in vnode-related code. It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

void
v_addpollinfo(struct vnode *vp)
{
	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
}

/*
 * Initialize the vnode management data structures.
248 */ 249 static void 250 vntblinit(void *dummy __unused) 251 { 252 253 desiredvnodes = maxproc + cnt.v_page_count / 4; 254 minvnodes = desiredvnodes / 4; 255 mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF); 256 mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF); 257 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 258 mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF); 259 TAILQ_INIT(&vnode_free_list); 260 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 261 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 262 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 263 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 264 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 265 /* 266 * Initialize the filesystem syncer. 267 */ 268 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 269 &syncer_mask); 270 syncer_maxdelay = syncer_mask + 1; 271 } 272 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL) 273 274 275 /* 276 * Mark a mount point as busy. Used to synchronize access and to delay 277 * unmounting. Interlock is not released on failure. 278 */ 279 int 280 vfs_busy(mp, flags, interlkp, td) 281 struct mount *mp; 282 int flags; 283 struct mtx *interlkp; 284 struct thread *td; 285 { 286 int lkflags; 287 288 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 289 if (flags & LK_NOWAIT) 290 return (ENOENT); 291 mp->mnt_kern_flag |= MNTK_MWAIT; 292 /* 293 * Since all busy locks are shared except the exclusive 294 * lock granted when unmounting, the only place that a 295 * wakeup needs to be done is at the release of the 296 * exclusive lock at the end of dounmount. 297 */ 298 msleep((caddr_t)mp, interlkp, PVFS, "vfs_busy", 0); 299 return (ENOENT); 300 } 301 lkflags = LK_SHARED | LK_NOPAUSE; 302 if (interlkp) 303 lkflags |= LK_INTERLOCK; 304 if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td)) 305 panic("vfs_busy: unexpected lock failure"); 306 return (0); 307 } 308 309 /* 310 * Free a busy filesystem. 311 */ 312 void 313 vfs_unbusy(mp, td) 314 struct mount *mp; 315 struct thread *td; 316 { 317 318 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td); 319 } 320 321 /* 322 * Lookup a filesystem type, and if found allocate and initialize 323 * a mount structure for it. 324 * 325 * Devname is usually updated by mount(8) after booting. 
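 *
 * As a non-authoritative sketch (the filesystem name, device string and
 * error handling below are illustrative only), a root-mount routine
 * might call this roughly as follows:
 *
 *	struct mount *mp;
 *	int error;
 *
 *	if ((error = vfs_rootmountalloc("ufs", "root_device", &mp)) != 0)
 *		return (error);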
326 */ 327 int 328 vfs_rootmountalloc(fstypename, devname, mpp) 329 char *fstypename; 330 char *devname; 331 struct mount **mpp; 332 { 333 struct thread *td = curthread; /* XXX */ 334 struct vfsconf *vfsp; 335 struct mount *mp; 336 337 if (fstypename == NULL) 338 return (ENODEV); 339 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 340 if (!strcmp(vfsp->vfc_name, fstypename)) 341 break; 342 if (vfsp == NULL) 343 return (ENODEV); 344 mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO); 345 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE); 346 (void)vfs_busy(mp, LK_NOWAIT, 0, td); 347 TAILQ_INIT(&mp->mnt_nvnodelist); 348 TAILQ_INIT(&mp->mnt_reservedvnlist); 349 mp->mnt_vfc = vfsp; 350 mp->mnt_op = vfsp->vfc_vfsops; 351 mp->mnt_flag = MNT_RDONLY; 352 mp->mnt_vnodecovered = NULLVP; 353 vfsp->vfc_refcount++; 354 mp->mnt_iosize_max = DFLTPHYS; 355 mp->mnt_stat.f_type = vfsp->vfc_typenum; 356 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; 357 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); 358 mp->mnt_stat.f_mntonname[0] = '/'; 359 mp->mnt_stat.f_mntonname[1] = 0; 360 (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); 361 *mpp = mp; 362 return (0); 363 } 364 365 /* 366 * Find an appropriate filesystem to use for the root. If a filesystem 367 * has not been preselected, walk through the list of known filesystems 368 * trying those that have mountroot routines, and try them until one 369 * works or we have tried them all. 370 */ 371 #ifdef notdef /* XXX JH */ 372 int 373 lite2_vfs_mountroot() 374 { 375 struct vfsconf *vfsp; 376 extern int (*lite2_mountroot)(void); 377 int error; 378 379 if (lite2_mountroot != NULL) 380 return ((*lite2_mountroot)()); 381 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 382 if (vfsp->vfc_mountroot == NULL) 383 continue; 384 if ((error = (*vfsp->vfc_mountroot)()) == 0) 385 return (0); 386 printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error); 387 } 388 return (ENODEV); 389 } 390 #endif 391 392 /* 393 * Lookup a mount point by filesystem identifier. 394 */ 395 struct mount * 396 vfs_getvfs(fsid) 397 fsid_t *fsid; 398 { 399 register struct mount *mp; 400 401 mtx_lock(&mountlist_mtx); 402 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 403 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 404 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 405 mtx_unlock(&mountlist_mtx); 406 return (mp); 407 } 408 } 409 mtx_unlock(&mountlist_mtx); 410 return ((struct mount *) 0); 411 } 412 413 /* 414 * Get a new unique fsid. Try to make its val[0] unique, since this value 415 * will be used to create fake device numbers for stat(). Also try (but 416 * not so hard) make its val[0] unique mod 2^16, since some emulators only 417 * support 16-bit device numbers. We end up with unique val[0]'s for the 418 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 419 * 420 * Keep in mind that several mounts may be running in parallel. Starting 421 * the search one past where the previous search terminated is both a 422 * micro-optimization and a defense against returning the same fsid to 423 * different mounts. 
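 *
 * As a rough illustration (hypothetical values, not taken from any real
 * mount), a filesystem type with vfc_typenum 5 gets val[1] = 5, and two
 * successive calls starting from mntid_base == 0 would compute
 *
 *	tfsid.val[0] = makeudev(255, (5 << 24) | 0);	/* first call */
 *	tfsid.val[0] = makeudev(255, (5 << 24) | 1);	/* second call */
 *
 * so val[0] differs in its low bits even when truncated to 16 bits.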
424 */ 425 void 426 vfs_getnewfsid(mp) 427 struct mount *mp; 428 { 429 static u_int16_t mntid_base; 430 fsid_t tfsid; 431 int mtype; 432 433 mtx_lock(&mntid_mtx); 434 mtype = mp->mnt_vfc->vfc_typenum; 435 tfsid.val[1] = mtype; 436 mtype = (mtype & 0xFF) << 24; 437 for (;;) { 438 tfsid.val[0] = makeudev(255, 439 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 440 mntid_base++; 441 if (vfs_getvfs(&tfsid) == NULL) 442 break; 443 } 444 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 445 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 446 mtx_unlock(&mntid_mtx); 447 } 448 449 /* 450 * Knob to control the precision of file timestamps: 451 * 452 * 0 = seconds only; nanoseconds zeroed. 453 * 1 = seconds and nanoseconds, accurate within 1/HZ. 454 * 2 = seconds and nanoseconds, truncated to microseconds. 455 * >=3 = seconds and nanoseconds, maximum precision. 456 */ 457 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 458 459 static int timestamp_precision = TSP_SEC; 460 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 461 ×tamp_precision, 0, ""); 462 463 /* 464 * Get a current timestamp. 465 */ 466 void 467 vfs_timestamp(tsp) 468 struct timespec *tsp; 469 { 470 struct timeval tv; 471 472 switch (timestamp_precision) { 473 case TSP_SEC: 474 tsp->tv_sec = time_second; 475 tsp->tv_nsec = 0; 476 break; 477 case TSP_HZ: 478 getnanotime(tsp); 479 break; 480 case TSP_USEC: 481 microtime(&tv); 482 TIMEVAL_TO_TIMESPEC(&tv, tsp); 483 break; 484 case TSP_NSEC: 485 default: 486 nanotime(tsp); 487 break; 488 } 489 } 490 491 /* 492 * Get a mount option by its name. 493 * 494 * Return 0 if the option was found, ENOENT otherwise. 495 * If len is non-NULL it will be filled with the length 496 * of the option. If buf is non-NULL, it will be filled 497 * with the address of the option. 498 */ 499 int 500 vfs_getopt(opts, name, buf, len) 501 struct vfsoptlist *opts; 502 const char *name; 503 void **buf; 504 int *len; 505 { 506 struct vfsopt *opt; 507 int i; 508 509 i = 0; 510 opt = opts->opt; 511 while (i++ < opts->optcnt) { 512 if (strcmp(name, opt->name) == 0) { 513 if (len != NULL) 514 *len = opt->len; 515 if (buf != NULL) 516 *buf = opt->value; 517 return (0); 518 } 519 opt++; 520 } 521 return (ENOENT); 522 } 523 524 /* 525 * Find and copy a mount option. 526 * The size of the buffer has to be specified 527 * in len, if it is not big enough, EINVAL is 528 * returned. Returns ENOENT if the option is 529 * not found. Otherwise, the number of bytes 530 * actually copied are put in done if it's 531 * non-NULL and 0 is returned. 
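 *
 * A hypothetical caller (the option name and variables below are
 * illustrative, not taken from this file) might fetch a path option as:
 *
 *	struct vfsoptlist *opts;	/* supplied with the mount request */
 *	char fspath[MAXPATHLEN];
 *	int error, done;
 *
 *	error = vfs_copyopt(opts, "fspath", fspath, sizeof(fspath), &done);
 *	if (error == EINVAL)
 *		...		/* buffer too small for the option */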
 */
int
vfs_copyopt(opts, name, dest, len, done)
	struct vfsoptlist *opts;
	const char *name;
	void *dest;
	int len, *done;
{
	struct vfsopt *opt;
	int i;

	i = 0;
	opt = opts->opt;
	while (i++ < opts->optcnt) {
		if (strcmp(name, opt->name) == 0) {
			if (len < opt->len)
				return (EINVAL);
			bcopy(opt->value, dest, opt->len);
			if (done != NULL)
				*done = opt->len;
			return (0);
		}
		opt++;
	}
	return (ENOENT);
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes. It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this). Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use. It is not
 * desirable to reuse such vnodes. These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp, int count)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up. This prevents us from
	 * recycling vnodes with lots of resident pages. We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VMIGHTFREE(vp) &&		/* critical path opt */
		    (vp->v_object == NULL || vp->v_object->resident_page_count < trigger) &&
		    mtx_trylock(&vp->v_interlock)
		) {
			mtx_unlock(&mntvnode_mtx);
			if (VMIGHTFREE(vp)) {
				vgonel(vp, curthread);
				done++;
			} else {
				mtx_unlock(&vp->v_interlock);
			}
			mtx_lock(&mntvnode_mtx);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
655 */ 656 static struct proc *vnlruproc; 657 static int vnlruproc_sig; 658 659 static void 660 vnlru_proc(void) 661 { 662 struct mount *mp, *nmp; 663 int s; 664 int done; 665 struct proc *p = vnlruproc; 666 struct thread *td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ 667 668 mtx_lock(&Giant); 669 670 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, 671 SHUTDOWN_PRI_FIRST); 672 673 s = splbio(); 674 for (;;) { 675 kthread_suspend_check(p); 676 if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) { 677 vnlruproc_sig = 0; 678 tsleep(vnlruproc, PVFS, "vlruwt", 0); 679 continue; 680 } 681 done = 0; 682 mtx_lock(&mountlist_mtx); 683 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 684 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 685 nmp = TAILQ_NEXT(mp, mnt_list); 686 continue; 687 } 688 done += vlrureclaim(mp, 10); 689 mtx_lock(&mountlist_mtx); 690 nmp = TAILQ_NEXT(mp, mnt_list); 691 vfs_unbusy(mp, td); 692 } 693 mtx_unlock(&mountlist_mtx); 694 if (done == 0) { 695 #if 0 696 /* These messages are temporary debugging aids */ 697 if (vnlru_nowhere < 5) 698 printf("vnlru process getting nowhere..\n"); 699 else if (vnlru_nowhere == 5) 700 printf("vnlru process messages stopped.\n"); 701 #endif 702 vnlru_nowhere++; 703 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 704 } 705 } 706 splx(s); 707 } 708 709 static struct kproc_desc vnlru_kp = { 710 "vnlru", 711 vnlru_proc, 712 &vnlruproc 713 }; 714 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp) 715 716 717 /* 718 * Routines having to do with the management of the vnode table. 719 */ 720 721 /* 722 * Return the next vnode from the free list. 723 */ 724 int 725 getnewvnode(tag, mp, vops, vpp) 726 enum vtagtype tag; 727 struct mount *mp; 728 vop_t **vops; 729 struct vnode **vpp; 730 { 731 int s; 732 struct thread *td = curthread; /* XXX */ 733 struct vnode *vp = NULL; 734 struct mount *vnmp; 735 vm_object_t object; 736 737 s = splbio(); 738 /* 739 * Try to reuse vnodes if we hit the max. This situation only 740 * occurs in certain large-memory (2G+) situations. We cannot 741 * attempt to directly reclaim vnodes due to nasty recursion 742 * problems. 743 */ 744 if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) { 745 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 746 wakeup(vnlruproc); 747 } 748 749 /* 750 * Attempt to reuse a vnode already on the free list, allocating 751 * a new vnode if we can't find one or if we have not reached a 752 * good minimum for good LRU performance. 753 */ 754 755 mtx_lock(&vnode_free_list_mtx); 756 757 if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) { 758 int count; 759 760 for (count = 0; count < freevnodes; count++) { 761 vp = TAILQ_FIRST(&vnode_free_list); 762 if (vp == NULL || vp->v_usecount) 763 panic("getnewvnode: free vnode isn't"); 764 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 765 766 /* Don't recycle if we can't get the interlock */ 767 if (!mtx_trylock(&vp->v_interlock)) { 768 vp = NULL; 769 continue; 770 } 771 772 /* We should be able to immediately acquire this */ 773 if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) 774 continue; 775 /* 776 * Don't recycle if we still have cached pages. 
777 */ 778 if (VOP_GETVOBJECT(vp, &object) == 0 && 779 (object->resident_page_count || 780 object->ref_count)) { 781 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 782 v_freelist); 783 vp = NULL; 784 VOP_UNLOCK(vp, 0, td); 785 continue; 786 } 787 if (LIST_FIRST(&vp->v_cache_src)) { 788 /* 789 * note: nameileafonly sysctl is temporary, 790 * for debugging only, and will eventually be 791 * removed. 792 */ 793 if (nameileafonly > 0) { 794 /* 795 * Do not reuse namei-cached directory 796 * vnodes that have cached 797 * subdirectories. 798 */ 799 if (cache_leaf_test(vp) < 0) { 800 VOP_UNLOCK(vp, 0, td); 801 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 802 vp = NULL; 803 continue; 804 } 805 } else if (nameileafonly < 0 || 806 vmiodirenable == 0) { 807 /* 808 * Do not reuse namei-cached directory 809 * vnodes if nameileafonly is -1 or 810 * if VMIO backing for directories is 811 * turned off (otherwise we reuse them 812 * too quickly). 813 */ 814 VOP_UNLOCK(vp, 0, td); 815 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 816 vp = NULL; 817 continue; 818 } 819 } 820 /* 821 * Skip over it if its filesystem is being suspended. 822 */ 823 if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0) 824 break; 825 VOP_UNLOCK(vp, 0, td); 826 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 827 vp = NULL; 828 } 829 } 830 if (vp) { 831 vp->v_flag |= VDOOMED; 832 vp->v_flag &= ~VFREE; 833 freevnodes--; 834 mtx_unlock(&vnode_free_list_mtx); 835 cache_purge(vp); 836 if (vp->v_type != VBAD) { 837 VOP_UNLOCK(vp, 0, td); 838 vgone(vp); 839 } else { 840 VOP_UNLOCK(vp, 0, td); 841 } 842 vn_finished_write(vnmp); 843 844 #ifdef INVARIANTS 845 { 846 int s; 847 848 if (vp->v_data) 849 panic("cleaned vnode isn't"); 850 s = splbio(); 851 if (vp->v_numoutput) 852 panic("Clean vnode has pending I/O's"); 853 splx(s); 854 if (vp->v_writecount != 0) 855 panic("Non-zero write count"); 856 } 857 #endif 858 if (vp->v_pollinfo) { 859 mtx_destroy(&vp->v_pollinfo->vpi_lock); 860 uma_zfree(vnodepoll_zone, vp->v_pollinfo); 861 } 862 vp->v_pollinfo = NULL; 863 vp->v_flag = 0; 864 vp->v_lastw = 0; 865 vp->v_lasta = 0; 866 vp->v_cstart = 0; 867 vp->v_clen = 0; 868 vp->v_socket = 0; 869 } else { 870 mtx_unlock(&vnode_free_list_mtx); 871 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); 872 bzero((char *) vp, sizeof *vp); 873 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 874 vp->v_dd = vp; 875 cache_purge(vp); 876 LIST_INIT(&vp->v_cache_src); 877 TAILQ_INIT(&vp->v_cache_dst); 878 numvnodes++; 879 } 880 881 TAILQ_INIT(&vp->v_cleanblkhd); 882 TAILQ_INIT(&vp->v_dirtyblkhd); 883 vp->v_type = VNON; 884 vp->v_tag = tag; 885 vp->v_op = vops; 886 lockinit(&vp->v_lock, PVFS, "vnlock", VLKTIMEOUT, LK_NOPAUSE); 887 insmntque(vp, mp); 888 *vpp = vp; 889 vp->v_usecount = 1; 890 vp->v_data = 0; 891 892 splx(s); 893 894 #if 0 895 vnodeallocs++; 896 if (vnodeallocs % vnoderecycleperiod == 0 && 897 freevnodes < vnoderecycleminfreevn && 898 vnoderecyclemintotalvn < numvnodes) { 899 /* Recycle vnodes. */ 900 cache_purgeleafdirs(vnoderecyclenumber); 901 } 902 #endif 903 904 return (0); 905 } 906 907 /* 908 * Move a vnode from one mount queue to another. 909 */ 910 static void 911 insmntque(vp, mp) 912 register struct vnode *vp; 913 register struct mount *mp; 914 { 915 916 mtx_lock(&mntvnode_mtx); 917 /* 918 * Delete from old mount point vnode list, if on one. 919 */ 920 if (vp->v_mount != NULL) 921 TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes); 922 /* 923 * Insert into list of vnodes for the new mount point, if available. 
924 */ 925 if ((vp->v_mount = mp) == NULL) { 926 mtx_unlock(&mntvnode_mtx); 927 return; 928 } 929 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 930 mtx_unlock(&mntvnode_mtx); 931 } 932 933 /* 934 * Update outstanding I/O count and do wakeup if requested. 935 */ 936 void 937 vwakeup(bp) 938 register struct buf *bp; 939 { 940 register struct vnode *vp; 941 942 bp->b_flags &= ~B_WRITEINPROG; 943 if ((vp = bp->b_vp)) { 944 vp->v_numoutput--; 945 if (vp->v_numoutput < 0) 946 panic("vwakeup: neg numoutput"); 947 if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) { 948 vp->v_flag &= ~VBWAIT; 949 wakeup((caddr_t) &vp->v_numoutput); 950 } 951 } 952 } 953 954 /* 955 * Flush out and invalidate all buffers associated with a vnode. 956 * Called with the underlying object locked. 957 */ 958 int 959 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo) 960 register struct vnode *vp; 961 int flags; 962 struct ucred *cred; 963 struct thread *td; 964 int slpflag, slptimeo; 965 { 966 register struct buf *bp; 967 struct buf *nbp, *blist; 968 int s, error; 969 vm_object_t object; 970 971 GIANT_REQUIRED; 972 973 if (flags & V_SAVE) { 974 s = splbio(); 975 while (vp->v_numoutput) { 976 vp->v_flag |= VBWAIT; 977 error = tsleep((caddr_t)&vp->v_numoutput, 978 slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo); 979 if (error) { 980 splx(s); 981 return (error); 982 } 983 } 984 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 985 splx(s); 986 if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0) 987 return (error); 988 s = splbio(); 989 if (vp->v_numoutput > 0 || 990 !TAILQ_EMPTY(&vp->v_dirtyblkhd)) 991 panic("vinvalbuf: dirty bufs"); 992 } 993 splx(s); 994 } 995 s = splbio(); 996 for (;;) { 997 blist = TAILQ_FIRST(&vp->v_cleanblkhd); 998 if (!blist) 999 blist = TAILQ_FIRST(&vp->v_dirtyblkhd); 1000 if (!blist) 1001 break; 1002 1003 for (bp = blist; bp; bp = nbp) { 1004 nbp = TAILQ_NEXT(bp, b_vnbufs); 1005 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1006 error = BUF_TIMELOCK(bp, 1007 LK_EXCLUSIVE | LK_SLEEPFAIL, 1008 "vinvalbuf", slpflag, slptimeo); 1009 if (error == ENOLCK) 1010 break; 1011 splx(s); 1012 return (error); 1013 } 1014 /* 1015 * XXX Since there are no node locks for NFS, I 1016 * believe there is a slight chance that a delayed 1017 * write will occur while sleeping just above, so 1018 * check for it. Note that vfs_bio_awrite expects 1019 * buffers to reside on a queue, while BUF_WRITE and 1020 * brelse do not. 1021 */ 1022 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1023 (flags & V_SAVE)) { 1024 1025 if (bp->b_vp == vp) { 1026 if (bp->b_flags & B_CLUSTEROK) { 1027 BUF_UNLOCK(bp); 1028 vfs_bio_awrite(bp); 1029 } else { 1030 bremfree(bp); 1031 bp->b_flags |= B_ASYNC; 1032 BUF_WRITE(bp); 1033 } 1034 } else { 1035 bremfree(bp); 1036 (void) BUF_WRITE(bp); 1037 } 1038 break; 1039 } 1040 bremfree(bp); 1041 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF); 1042 bp->b_flags &= ~B_ASYNC; 1043 brelse(bp); 1044 } 1045 } 1046 1047 /* 1048 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1049 * have write I/O in-progress but if there is a VM object then the 1050 * VM object can also have read-I/O in-progress. 1051 */ 1052 do { 1053 while (vp->v_numoutput > 0) { 1054 vp->v_flag |= VBWAIT; 1055 tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0); 1056 } 1057 if (VOP_GETVOBJECT(vp, &object) == 0) { 1058 while (object->paging_in_progress) 1059 vm_object_pip_sleep(object, "vnvlbx"); 1060 } 1061 } while (vp->v_numoutput > 0); 1062 1063 splx(s); 1064 1065 /* 1066 * Destroy the copy in the VM cache, too. 
1067 */ 1068 mtx_lock(&vp->v_interlock); 1069 if (VOP_GETVOBJECT(vp, &object) == 0) { 1070 vm_object_page_remove(object, 0, 0, 1071 (flags & V_SAVE) ? TRUE : FALSE); 1072 } 1073 mtx_unlock(&vp->v_interlock); 1074 1075 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd)) 1076 panic("vinvalbuf: flush failed"); 1077 return (0); 1078 } 1079 1080 /* 1081 * Truncate a file's buffer and pages to a specified length. This 1082 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1083 * sync activity. 1084 */ 1085 int 1086 vtruncbuf(vp, cred, td, length, blksize) 1087 register struct vnode *vp; 1088 struct ucred *cred; 1089 struct thread *td; 1090 off_t length; 1091 int blksize; 1092 { 1093 register struct buf *bp; 1094 struct buf *nbp; 1095 int s, anyfreed; 1096 int trunclbn; 1097 1098 /* 1099 * Round up to the *next* lbn. 1100 */ 1101 trunclbn = (length + blksize - 1) / blksize; 1102 1103 s = splbio(); 1104 restart: 1105 anyfreed = 1; 1106 for (;anyfreed;) { 1107 anyfreed = 0; 1108 for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) { 1109 nbp = TAILQ_NEXT(bp, b_vnbufs); 1110 if (bp->b_lblkno >= trunclbn) { 1111 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1112 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1113 goto restart; 1114 } else { 1115 bremfree(bp); 1116 bp->b_flags |= (B_INVAL | B_RELBUF); 1117 bp->b_flags &= ~B_ASYNC; 1118 brelse(bp); 1119 anyfreed = 1; 1120 } 1121 if (nbp && 1122 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1123 (nbp->b_vp != vp) || 1124 (nbp->b_flags & B_DELWRI))) { 1125 goto restart; 1126 } 1127 } 1128 } 1129 1130 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1131 nbp = TAILQ_NEXT(bp, b_vnbufs); 1132 if (bp->b_lblkno >= trunclbn) { 1133 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1134 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1135 goto restart; 1136 } else { 1137 bremfree(bp); 1138 bp->b_flags |= (B_INVAL | B_RELBUF); 1139 bp->b_flags &= ~B_ASYNC; 1140 brelse(bp); 1141 anyfreed = 1; 1142 } 1143 if (nbp && 1144 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1145 (nbp->b_vp != vp) || 1146 (nbp->b_flags & B_DELWRI) == 0)) { 1147 goto restart; 1148 } 1149 } 1150 } 1151 } 1152 1153 if (length > 0) { 1154 restartsync: 1155 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 1156 nbp = TAILQ_NEXT(bp, b_vnbufs); 1157 if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) { 1158 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 1159 BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL); 1160 goto restart; 1161 } else { 1162 bremfree(bp); 1163 if (bp->b_vp == vp) { 1164 bp->b_flags |= B_ASYNC; 1165 } else { 1166 bp->b_flags &= ~B_ASYNC; 1167 } 1168 BUF_WRITE(bp); 1169 } 1170 goto restartsync; 1171 } 1172 1173 } 1174 } 1175 1176 while (vp->v_numoutput > 0) { 1177 vp->v_flag |= VBWAIT; 1178 tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0); 1179 } 1180 1181 splx(s); 1182 1183 vnode_pager_setsize(vp, length); 1184 1185 return (0); 1186 } 1187 1188 /* 1189 * Associate a buffer with a vnode. 1190 */ 1191 void 1192 bgetvp(vp, bp) 1193 register struct vnode *vp; 1194 register struct buf *bp; 1195 { 1196 int s; 1197 1198 KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); 1199 1200 vhold(vp); 1201 bp->b_vp = vp; 1202 bp->b_dev = vn_todev(vp); 1203 /* 1204 * Insert onto list for new vnode. 1205 */ 1206 s = splbio(); 1207 bp->b_xflags |= BX_VNCLEAN; 1208 bp->b_xflags &= ~BX_VNDIRTY; 1209 TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs); 1210 splx(s); 1211 } 1212 1213 /* 1214 * Disassociate a buffer from a vnode. 
1215 */ 1216 void 1217 brelvp(bp) 1218 register struct buf *bp; 1219 { 1220 struct vnode *vp; 1221 struct buflists *listheadp; 1222 int s; 1223 1224 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1225 1226 /* 1227 * Delete from old vnode list, if on one. 1228 */ 1229 vp = bp->b_vp; 1230 s = splbio(); 1231 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1232 if (bp->b_xflags & BX_VNDIRTY) 1233 listheadp = &vp->v_dirtyblkhd; 1234 else 1235 listheadp = &vp->v_cleanblkhd; 1236 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1237 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1238 } 1239 if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 1240 vp->v_flag &= ~VONWORKLST; 1241 LIST_REMOVE(vp, v_synclist); 1242 } 1243 splx(s); 1244 bp->b_vp = (struct vnode *) 0; 1245 vdrop(vp); 1246 } 1247 1248 /* 1249 * Add an item to the syncer work queue. 1250 */ 1251 static void 1252 vn_syncer_add_to_worklist(struct vnode *vp, int delay) 1253 { 1254 int s, slot; 1255 1256 s = splbio(); 1257 1258 if (vp->v_flag & VONWORKLST) { 1259 LIST_REMOVE(vp, v_synclist); 1260 } 1261 1262 if (delay > syncer_maxdelay - 2) 1263 delay = syncer_maxdelay - 2; 1264 slot = (syncer_delayno + delay) & syncer_mask; 1265 1266 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist); 1267 vp->v_flag |= VONWORKLST; 1268 splx(s); 1269 } 1270 1271 struct proc *updateproc; 1272 static void sched_sync(void); 1273 static struct kproc_desc up_kp = { 1274 "syncer", 1275 sched_sync, 1276 &updateproc 1277 }; 1278 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 1279 1280 /* 1281 * System filesystem synchronizer daemon. 1282 */ 1283 void 1284 sched_sync(void) 1285 { 1286 struct synclist *slp; 1287 struct vnode *vp; 1288 struct mount *mp; 1289 long starttime; 1290 int s; 1291 struct thread *td = FIRST_THREAD_IN_PROC(updateproc); /* XXXKSE */ 1292 1293 mtx_lock(&Giant); 1294 1295 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc, 1296 SHUTDOWN_PRI_LAST); 1297 1298 for (;;) { 1299 kthread_suspend_check(td->td_proc); 1300 1301 starttime = time_second; 1302 1303 /* 1304 * Push files whose dirty time has expired. Be careful 1305 * of interrupt race on slp queue. 1306 */ 1307 s = splbio(); 1308 slp = &syncer_workitem_pending[syncer_delayno]; 1309 syncer_delayno += 1; 1310 if (syncer_delayno == syncer_maxdelay) 1311 syncer_delayno = 0; 1312 splx(s); 1313 1314 while ((vp = LIST_FIRST(slp)) != NULL) { 1315 if (VOP_ISLOCKED(vp, NULL) == 0 && 1316 vn_start_write(vp, &mp, V_NOWAIT) == 0) { 1317 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 1318 (void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td); 1319 VOP_UNLOCK(vp, 0, td); 1320 vn_finished_write(mp); 1321 } 1322 s = splbio(); 1323 if (LIST_FIRST(slp) == vp) { 1324 /* 1325 * Note: v_tag VT_VFS vps can remain on the 1326 * worklist too with no dirty blocks, but 1327 * since sync_fsync() moves it to a different 1328 * slot we are safe. 1329 */ 1330 if (TAILQ_EMPTY(&vp->v_dirtyblkhd) && 1331 !vn_isdisk(vp, NULL)) 1332 panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag); 1333 /* 1334 * Put us back on the worklist. The worklist 1335 * routine will remove us from our current 1336 * position and then add us back in at a later 1337 * position. 1338 */ 1339 vn_syncer_add_to_worklist(vp, syncdelay); 1340 } 1341 splx(s); 1342 } 1343 1344 /* 1345 * Do soft update processing. 
1346 */ 1347 #ifdef SOFTUPDATES 1348 softdep_process_worklist(NULL); 1349 #endif 1350 1351 /* 1352 * The variable rushjob allows the kernel to speed up the 1353 * processing of the filesystem syncer process. A rushjob 1354 * value of N tells the filesystem syncer to process the next 1355 * N seconds worth of work on its queue ASAP. Currently rushjob 1356 * is used by the soft update code to speed up the filesystem 1357 * syncer process when the incore state is getting so far 1358 * ahead of the disk that the kernel memory pool is being 1359 * threatened with exhaustion. 1360 */ 1361 if (rushjob > 0) { 1362 rushjob -= 1; 1363 continue; 1364 } 1365 /* 1366 * If it has taken us less than a second to process the 1367 * current work, then wait. Otherwise start right over 1368 * again. We can still lose time if any single round 1369 * takes more than two seconds, but it does not really 1370 * matter as we are just trying to generally pace the 1371 * filesystem activity. 1372 */ 1373 if (time_second == starttime) 1374 tsleep(&lbolt, PPAUSE, "syncer", 0); 1375 } 1376 } 1377 1378 /* 1379 * Request the syncer daemon to speed up its work. 1380 * We never push it to speed up more than half of its 1381 * normal turn time, otherwise it could take over the cpu. 1382 * XXXKSE only one update? 1383 */ 1384 int 1385 speedup_syncer() 1386 { 1387 1388 mtx_lock_spin(&sched_lock); 1389 if (FIRST_THREAD_IN_PROC(updateproc)->td_wchan == &lbolt) /* XXXKSE */ 1390 setrunnable(FIRST_THREAD_IN_PROC(updateproc)); 1391 mtx_unlock_spin(&sched_lock); 1392 if (rushjob < syncdelay / 2) { 1393 rushjob += 1; 1394 stat_rush_requests += 1; 1395 return (1); 1396 } 1397 return(0); 1398 } 1399 1400 /* 1401 * Associate a p-buffer with a vnode. 1402 * 1403 * Also sets B_PAGING flag to indicate that vnode is not fully associated 1404 * with the buffer. i.e. the bp has not been linked into the vnode or 1405 * ref-counted. 1406 */ 1407 void 1408 pbgetvp(vp, bp) 1409 register struct vnode *vp; 1410 register struct buf *bp; 1411 { 1412 1413 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free")); 1414 1415 bp->b_vp = vp; 1416 bp->b_flags |= B_PAGING; 1417 bp->b_dev = vn_todev(vp); 1418 } 1419 1420 /* 1421 * Disassociate a p-buffer from a vnode. 1422 */ 1423 void 1424 pbrelvp(bp) 1425 register struct buf *bp; 1426 { 1427 1428 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL")); 1429 1430 /* XXX REMOVE ME */ 1431 if (TAILQ_NEXT(bp, b_vnbufs) != NULL) { 1432 panic( 1433 "relpbuf(): b_vp was probably reassignbuf()d %p %x", 1434 bp, 1435 (int)bp->b_flags 1436 ); 1437 } 1438 bp->b_vp = (struct vnode *) 0; 1439 bp->b_flags &= ~B_PAGING; 1440 } 1441 1442 /* 1443 * Reassign a buffer from one vnode to another. 1444 * Used to assign file specific control information 1445 * (indirect blocks) to the vnode to which they belong. 1446 */ 1447 void 1448 reassignbuf(bp, newvp) 1449 register struct buf *bp; 1450 register struct vnode *newvp; 1451 { 1452 struct buflists *listheadp; 1453 int delay; 1454 int s; 1455 1456 if (newvp == NULL) { 1457 printf("reassignbuf: NULL"); 1458 return; 1459 } 1460 ++reassignbufcalls; 1461 1462 /* 1463 * B_PAGING flagged buffers cannot be reassigned because their vp 1464 * is not fully linked in. 1465 */ 1466 if (bp->b_flags & B_PAGING) 1467 panic("cannot reassign paging buffer"); 1468 1469 s = splbio(); 1470 /* 1471 * Delete from old vnode list, if on one. 
1472 */ 1473 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) { 1474 if (bp->b_xflags & BX_VNDIRTY) 1475 listheadp = &bp->b_vp->v_dirtyblkhd; 1476 else 1477 listheadp = &bp->b_vp->v_cleanblkhd; 1478 TAILQ_REMOVE(listheadp, bp, b_vnbufs); 1479 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1480 if (bp->b_vp != newvp) { 1481 vdrop(bp->b_vp); 1482 bp->b_vp = NULL; /* for clarification */ 1483 } 1484 } 1485 /* 1486 * If dirty, put on list of dirty buffers; otherwise insert onto list 1487 * of clean buffers. 1488 */ 1489 if (bp->b_flags & B_DELWRI) { 1490 struct buf *tbp; 1491 1492 listheadp = &newvp->v_dirtyblkhd; 1493 if ((newvp->v_flag & VONWORKLST) == 0) { 1494 switch (newvp->v_type) { 1495 case VDIR: 1496 delay = dirdelay; 1497 break; 1498 case VCHR: 1499 if (newvp->v_rdev->si_mountpoint != NULL) { 1500 delay = metadelay; 1501 break; 1502 } 1503 /* fall through */ 1504 default: 1505 delay = filedelay; 1506 } 1507 vn_syncer_add_to_worklist(newvp, delay); 1508 } 1509 bp->b_xflags |= BX_VNDIRTY; 1510 tbp = TAILQ_FIRST(listheadp); 1511 if (tbp == NULL || 1512 bp->b_lblkno == 0 || 1513 (bp->b_lblkno > 0 && tbp->b_lblkno < 0) || 1514 (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) { 1515 TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs); 1516 ++reassignbufsortgood; 1517 } else if (bp->b_lblkno < 0) { 1518 TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs); 1519 ++reassignbufsortgood; 1520 } else if (reassignbufmethod == 1) { 1521 /* 1522 * New sorting algorithm, only handle sequential case, 1523 * otherwise append to end (but before metadata) 1524 */ 1525 if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL && 1526 (tbp->b_xflags & BX_VNDIRTY)) { 1527 /* 1528 * Found the best place to insert the buffer 1529 */ 1530 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1531 ++reassignbufsortgood; 1532 } else { 1533 /* 1534 * Missed, append to end, but before meta-data. 1535 * We know that the head buffer in the list is 1536 * not meta-data due to prior conditionals. 1537 * 1538 * Indirect effects: NFS second stage write 1539 * tends to wind up here, giving maximum 1540 * distance between the unstable write and the 1541 * commit rpc. 1542 */ 1543 tbp = TAILQ_LAST(listheadp, buflists); 1544 while (tbp && tbp->b_lblkno < 0) 1545 tbp = TAILQ_PREV(tbp, buflists, b_vnbufs); 1546 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1547 ++reassignbufsortbad; 1548 } 1549 } else { 1550 /* 1551 * Old sorting algorithm, scan queue and insert 1552 */ 1553 struct buf *ttbp; 1554 while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) && 1555 (ttbp->b_lblkno < bp->b_lblkno)) { 1556 ++reassignbufloops; 1557 tbp = ttbp; 1558 } 1559 TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs); 1560 } 1561 } else { 1562 bp->b_xflags |= BX_VNCLEAN; 1563 TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs); 1564 if ((newvp->v_flag & VONWORKLST) && 1565 TAILQ_EMPTY(&newvp->v_dirtyblkhd)) { 1566 newvp->v_flag &= ~VONWORKLST; 1567 LIST_REMOVE(newvp, v_synclist); 1568 } 1569 } 1570 if (bp->b_vp != newvp) { 1571 bp->b_vp = newvp; 1572 vhold(bp->b_vp); 1573 } 1574 splx(s); 1575 } 1576 1577 /* 1578 * Create a vnode for a device. 1579 * Used for mounting the root filesystem. 
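 *
 * Sketch of a typical use (the panic message is illustrative only):
 *
 *	if (bdevvp(rootdev, &rootvp) != 0)
 *		panic("bdevvp: cannot get root vnode");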
1580 */ 1581 int 1582 bdevvp(dev, vpp) 1583 dev_t dev; 1584 struct vnode **vpp; 1585 { 1586 register struct vnode *vp; 1587 struct vnode *nvp; 1588 int error; 1589 1590 if (dev == NODEV) { 1591 *vpp = NULLVP; 1592 return (ENXIO); 1593 } 1594 if (vfinddev(dev, VCHR, vpp)) 1595 return (0); 1596 error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp); 1597 if (error) { 1598 *vpp = NULLVP; 1599 return (error); 1600 } 1601 vp = nvp; 1602 vp->v_type = VCHR; 1603 addalias(vp, dev); 1604 *vpp = vp; 1605 return (0); 1606 } 1607 1608 /* 1609 * Add vnode to the alias list hung off the dev_t. 1610 * 1611 * The reason for this gunk is that multiple vnodes can reference 1612 * the same physical device, so checking vp->v_usecount to see 1613 * how many users there are is inadequate; the v_usecount for 1614 * the vnodes need to be accumulated. vcount() does that. 1615 */ 1616 struct vnode * 1617 addaliasu(nvp, nvp_rdev) 1618 struct vnode *nvp; 1619 udev_t nvp_rdev; 1620 { 1621 struct vnode *ovp; 1622 vop_t **ops; 1623 dev_t dev; 1624 1625 if (nvp->v_type == VBLK) 1626 return (nvp); 1627 if (nvp->v_type != VCHR) 1628 panic("addaliasu on non-special vnode"); 1629 dev = udev2dev(nvp_rdev, 0); 1630 /* 1631 * Check to see if we have a bdevvp vnode with no associated 1632 * filesystem. If so, we want to associate the filesystem of 1633 * the new newly instigated vnode with the bdevvp vnode and 1634 * discard the newly created vnode rather than leaving the 1635 * bdevvp vnode lying around with no associated filesystem. 1636 */ 1637 if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) { 1638 addalias(nvp, dev); 1639 return (nvp); 1640 } 1641 /* 1642 * Discard unneeded vnode, but save its node specific data. 1643 * Note that if there is a lock, it is carried over in the 1644 * node specific data to the replacement vnode. 1645 */ 1646 vref(ovp); 1647 ovp->v_data = nvp->v_data; 1648 ovp->v_tag = nvp->v_tag; 1649 nvp->v_data = NULL; 1650 lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg, 1651 nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK); 1652 if (nvp->v_vnlock) 1653 ovp->v_vnlock = &ovp->v_lock; 1654 ops = ovp->v_op; 1655 ovp->v_op = nvp->v_op; 1656 if (VOP_ISLOCKED(nvp, curthread)) { 1657 VOP_UNLOCK(nvp, 0, curthread); 1658 vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread); 1659 } 1660 nvp->v_op = ops; 1661 insmntque(ovp, nvp->v_mount); 1662 vrele(nvp); 1663 vgone(nvp); 1664 return (ovp); 1665 } 1666 1667 /* This is a local helper function that do the same as addaliasu, but for a 1668 * dev_t instead of an udev_t. */ 1669 static void 1670 addalias(nvp, dev) 1671 struct vnode *nvp; 1672 dev_t dev; 1673 { 1674 1675 KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode")); 1676 nvp->v_rdev = dev; 1677 mtx_lock(&spechash_mtx); 1678 SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext); 1679 mtx_unlock(&spechash_mtx); 1680 } 1681 1682 /* 1683 * Grab a particular vnode from the free list, increment its 1684 * reference count and lock it. The vnode lock bit is set if the 1685 * vnode is being eliminated in vgone. The process is awakened 1686 * when the transition is completed, and an error returned to 1687 * indicate that the vnode is no longer usable (possibly having 1688 * been changed to a new filesystem type). 
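 *
 * A common calling pattern looks roughly like the following (sketch
 * only; a nonzero error means the vnode was reclaimed while we slept):
 *
 *	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0)
 *		return (error);
 *	...
 *	vput(vp);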
 */
int
vget(vp, flags, td)
	register struct vnode *vp;
	int flags;
	struct thread *td;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		mtx_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (vp->v_vxproc == curthread) {
#if 0
			/* this can now occur in normal operation */
			log(LOG_INFO, "VXLOCK interlock avoided\n");
#endif
		} else {
			vp->v_flag |= VXWANT;
			msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
			    "vget", 0);
			return (ENOENT);
		}
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
			/*
			 * We must expand vrele() here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active. We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			mtx_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			else
				vlruvp(vp);
			mtx_unlock(&vp->v_interlock);
		}
		return (error);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Increase the reference count of a vnode.
 */
void
vref(struct vnode *vp)
{
	mtx_lock(&vp->v_interlock);
	vp->v_usecount++;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	mtx_lock(&vp->v_interlock);

	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vrele: missed vn_close"));

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		mtx_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {
		vp->v_usecount--;
		/*
		 * We must call VOP_INACTIVE with the node locked.
		 * If we are doing a vput, the node is already locked,
		 * but, in the case of vrele, we must explicitly lock
		 * the vnode before calling VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0)
			VOP_INACTIVE(vp, td);
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		mtx_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

/*
 * Release an already locked vnode. This gives the same effect as
 * unlock+vrele(), but takes less time and avoids releasing and
 * re-acquiring the lock (as vrele() acquires the lock internally).
 */
void
vput(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	KASSERT(vp != NULL, ("vput: null vp"));
	mtx_lock(&vp->v_interlock);
	/* Skip this v_writecount check if we're going to panic below.
*/ 1825 KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, 1826 ("vput: missed vn_close")); 1827 1828 if (vp->v_usecount > 1) { 1829 vp->v_usecount--; 1830 VOP_UNLOCK(vp, LK_INTERLOCK, td); 1831 return; 1832 } 1833 1834 if (vp->v_usecount == 1) { 1835 vp->v_usecount--; 1836 /* 1837 * We must call VOP_INACTIVE with the node locked. 1838 * If we are doing a vput, the node is already locked, 1839 * so we just need to release the vnode mutex. 1840 */ 1841 mtx_unlock(&vp->v_interlock); 1842 VOP_INACTIVE(vp, td); 1843 if (VSHOULDFREE(vp)) 1844 vfree(vp); 1845 else 1846 vlruvp(vp); 1847 1848 } else { 1849 #ifdef DIAGNOSTIC 1850 vprint("vput: negative ref count", vp); 1851 #endif 1852 panic("vput: negative ref cnt"); 1853 } 1854 } 1855 1856 /* 1857 * Somebody doesn't want the vnode recycled. 1858 */ 1859 void 1860 vhold(vp) 1861 register struct vnode *vp; 1862 { 1863 int s; 1864 1865 s = splbio(); 1866 vp->v_holdcnt++; 1867 if (VSHOULDBUSY(vp)) 1868 vbusy(vp); 1869 splx(s); 1870 } 1871 1872 /* 1873 * Note that there is one less who cares about this vnode. vdrop() is the 1874 * opposite of vhold(). 1875 */ 1876 void 1877 vdrop(vp) 1878 register struct vnode *vp; 1879 { 1880 int s; 1881 1882 s = splbio(); 1883 if (vp->v_holdcnt <= 0) 1884 panic("vdrop: holdcnt"); 1885 vp->v_holdcnt--; 1886 if (VSHOULDFREE(vp)) 1887 vfree(vp); 1888 else 1889 vlruvp(vp); 1890 splx(s); 1891 } 1892 1893 /* 1894 * Remove any vnodes in the vnode table belonging to mount point mp. 1895 * 1896 * If FORCECLOSE is not specified, there should not be any active ones, 1897 * return error if any are found (nb: this is a user error, not a 1898 * system error). If FORCECLOSE is specified, detach any active vnodes 1899 * that are found. 1900 * 1901 * If WRITECLOSE is set, only flush out regular file vnodes open for 1902 * writing. 1903 * 1904 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped. 1905 * 1906 * `rootrefs' specifies the base reference count for the root vnode 1907 * of this filesystem. The root vnode is considered busy if its 1908 * v_usecount exceeds this value. On a successful return, vflush() 1909 * will call vrele() on the root vnode exactly rootrefs times. 1910 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 1911 * be zero. 1912 */ 1913 #ifdef DIAGNOSTIC 1914 static int busyprt = 0; /* print out busy vnodes */ 1915 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 1916 #endif 1917 1918 int 1919 vflush(mp, rootrefs, flags) 1920 struct mount *mp; 1921 int rootrefs; 1922 int flags; 1923 { 1924 struct thread *td = curthread; /* XXX */ 1925 struct vnode *vp, *nvp, *rootvp = NULL; 1926 struct vattr vattr; 1927 int busy = 0, error; 1928 1929 if (rootrefs > 0) { 1930 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 1931 ("vflush: bad args")); 1932 /* 1933 * Get the filesystem root vnode. We can vput() it 1934 * immediately, since with rootrefs > 0, it won't go away. 1935 */ 1936 if ((error = VFS_ROOT(mp, &rootvp)) != 0) 1937 return (error); 1938 vput(rootvp); 1939 } 1940 mtx_lock(&mntvnode_mtx); 1941 loop: 1942 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) { 1943 /* 1944 * Make sure this vnode wasn't reclaimed in getnewvnode(). 1945 * Start over if it has (it won't be on the list anymore). 1946 */ 1947 if (vp->v_mount != mp) 1948 goto loop; 1949 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 1950 1951 mtx_unlock(&mntvnode_mtx); 1952 mtx_lock(&vp->v_interlock); 1953 /* 1954 * Skip over a vnodes marked VSYSTEM. 
1955 */ 1956 if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { 1957 mtx_unlock(&vp->v_interlock); 1958 mtx_lock(&mntvnode_mtx); 1959 continue; 1960 } 1961 /* 1962 * If WRITECLOSE is set, flush out unlinked but still open 1963 * files (even if open only for reading) and regular file 1964 * vnodes open for writing. 1965 */ 1966 if ((flags & WRITECLOSE) && 1967 (vp->v_type == VNON || 1968 (VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 && 1969 vattr.va_nlink > 0)) && 1970 (vp->v_writecount == 0 || vp->v_type != VREG)) { 1971 mtx_unlock(&vp->v_interlock); 1972 mtx_lock(&mntvnode_mtx); 1973 continue; 1974 } 1975 1976 /* 1977 * With v_usecount == 0, all we need to do is clear out the 1978 * vnode data structures and we are done. 1979 */ 1980 if (vp->v_usecount == 0) { 1981 vgonel(vp, td); 1982 mtx_lock(&mntvnode_mtx); 1983 continue; 1984 } 1985 1986 /* 1987 * If FORCECLOSE is set, forcibly close the vnode. For block 1988 * or character devices, revert to an anonymous device. For 1989 * all other files, just kill them. 1990 */ 1991 if (flags & FORCECLOSE) { 1992 if (vp->v_type != VCHR) { 1993 vgonel(vp, td); 1994 } else { 1995 vclean(vp, 0, td); 1996 vp->v_op = spec_vnodeop_p; 1997 insmntque(vp, (struct mount *) 0); 1998 } 1999 mtx_lock(&mntvnode_mtx); 2000 continue; 2001 } 2002 #ifdef DIAGNOSTIC 2003 if (busyprt) 2004 vprint("vflush: busy vnode", vp); 2005 #endif 2006 mtx_unlock(&vp->v_interlock); 2007 mtx_lock(&mntvnode_mtx); 2008 busy++; 2009 } 2010 mtx_unlock(&mntvnode_mtx); 2011 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2012 /* 2013 * If just the root vnode is busy, and if its refcount 2014 * is equal to `rootrefs', then go ahead and kill it. 2015 */ 2016 mtx_lock(&rootvp->v_interlock); 2017 KASSERT(busy > 0, ("vflush: not busy")); 2018 KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs")); 2019 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2020 vgonel(rootvp, td); 2021 busy = 0; 2022 } else 2023 mtx_unlock(&rootvp->v_interlock); 2024 } 2025 if (busy) 2026 return (EBUSY); 2027 for (; rootrefs > 0; rootrefs--) 2028 vrele(rootvp); 2029 return (0); 2030 } 2031 2032 /* 2033 * This moves a now (likely recyclable) vnode to the end of the 2034 * mountlist. XXX However, it is temporarily disabled until we 2035 * can clean up ffs_sync() and friends, which have loop restart 2036 * conditions which this code causes to operate O(N^2). 2037 */ 2038 static void 2039 vlruvp(struct vnode *vp) 2040 { 2041 #if 0 2042 struct mount *mp; 2043 2044 if ((mp = vp->v_mount) != NULL) { 2045 mtx_lock(&mntvnode_mtx); 2046 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2047 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2048 mtx_unlock(&mntvnode_mtx); 2049 } 2050 #endif 2051 } 2052 2053 /* 2054 * Disassociate the underlying filesystem from a vnode. 2055 */ 2056 static void 2057 vclean(vp, flags, td) 2058 struct vnode *vp; 2059 int flags; 2060 struct thread *td; 2061 { 2062 int active; 2063 2064 /* 2065 * Check to see if the vnode is in use. If so we have to reference it 2066 * before we clean it out so that its count cannot fall to zero and 2067 * generate a race against ourselves to recycle it. 2068 */ 2069 if ((active = vp->v_usecount)) 2070 vp->v_usecount++; 2071 2072 /* 2073 * Prevent the vnode from being recycled or brought into use while we 2074 * clean it out. 
2075 */ 2076 if (vp->v_flag & VXLOCK) 2077 panic("vclean: deadlock"); 2078 vp->v_flag |= VXLOCK; 2079 vp->v_vxproc = curthread; 2080 /* 2081 * Even if the count is zero, the VOP_INACTIVE routine may still 2082 * have the object locked while it cleans it out. The VOP_LOCK 2083 * ensures that the VOP_INACTIVE routine is done with its work. 2084 * For active vnodes, it ensures that no other activity can 2085 * occur while the underlying object is being cleaned out. 2086 */ 2087 VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td); 2088 2089 /* 2090 * Clean out any buffers associated with the vnode. 2091 * If the flush fails, just toss the buffers. 2092 */ 2093 if (flags & DOCLOSE) { 2094 if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL) 2095 (void) vn_write_suspend_wait(vp, NULL, V_WAIT); 2096 if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0) 2097 vinvalbuf(vp, 0, NOCRED, td, 0, 0); 2098 } 2099 2100 VOP_DESTROYVOBJECT(vp); 2101 2102 /* 2103 * If purging an active vnode, it must be closed and 2104 * deactivated before being reclaimed. Note that the 2105 * VOP_INACTIVE will unlock the vnode. 2106 */ 2107 if (active) { 2108 if (flags & DOCLOSE) 2109 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2110 VOP_INACTIVE(vp, td); 2111 } else { 2112 /* 2113 * Any other processes trying to obtain this lock must first 2114 * wait for VXLOCK to clear, then call the new lock operation. 2115 */ 2116 VOP_UNLOCK(vp, 0, td); 2117 } 2118 /* 2119 * Reclaim the vnode. 2120 */ 2121 if (VOP_RECLAIM(vp, td)) 2122 panic("vclean: cannot reclaim"); 2123 2124 if (active) { 2125 /* 2126 * Inline copy of vrele() since VOP_INACTIVE 2127 * has already been called. 2128 */ 2129 mtx_lock(&vp->v_interlock); 2130 if (--vp->v_usecount <= 0) { 2131 #ifdef DIAGNOSTIC 2132 if (vp->v_usecount < 0 || vp->v_writecount != 0) { 2133 vprint("vclean: bad ref count", vp); 2134 panic("vclean: ref cnt"); 2135 } 2136 #endif 2137 vfree(vp); 2138 } 2139 mtx_unlock(&vp->v_interlock); 2140 } 2141 2142 cache_purge(vp); 2143 vp->v_vnlock = NULL; 2144 lockdestroy(&vp->v_lock); 2145 2146 if (VSHOULDFREE(vp)) 2147 vfree(vp); 2148 2149 /* 2150 * Done with purge, notify sleepers of the grim news. 2151 */ 2152 vp->v_op = dead_vnodeop_p; 2153 if (vp->v_pollinfo != NULL) 2154 vn_pollgone(vp); 2155 vp->v_tag = VT_NON; 2156 vp->v_flag &= ~VXLOCK; 2157 vp->v_vxproc = NULL; 2158 if (vp->v_flag & VXWANT) { 2159 vp->v_flag &= ~VXWANT; 2160 wakeup((caddr_t) vp); 2161 } 2162 } 2163 2164 /* 2165 * Eliminate all activity associated with the requested vnode 2166 * and with all vnodes aliased to the requested vnode. 2167 */ 2168 int 2169 vop_revoke(ap) 2170 struct vop_revoke_args /* { 2171 struct vnode *a_vp; 2172 int a_flags; 2173 } */ *ap; 2174 { 2175 struct vnode *vp, *vq; 2176 dev_t dev; 2177 2178 KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke")); 2179 2180 vp = ap->a_vp; 2181 /* 2182 * If a vgone (or vclean) is already in progress, 2183 * wait until it is done and return. 2184 */ 2185 if (vp->v_flag & VXLOCK) { 2186 vp->v_flag |= VXWANT; 2187 msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP, 2188 "vop_revokeall", 0); 2189 return (0); 2190 } 2191 dev = vp->v_rdev; 2192 for (;;) { 2193 mtx_lock(&spechash_mtx); 2194 vq = SLIST_FIRST(&dev->si_hlist); 2195 mtx_unlock(&spechash_mtx); 2196 if (!vq) 2197 break; 2198 vgone(vq); 2199 } 2200 return (0); 2201 } 2202 2203 /* 2204 * Recycle an unused vnode to the front of the free list. 2205 * Release the passed interlock if the vnode will be recycled. 
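 *
 * Illustrative sketch (not code from this file): a typical caller is a
 * filesystem's VOP_INACTIVE routine, which recycles the vnode at once
 * when the underlying object no longer exists, e.g. for a UFS-style
 * inode (ip is a hypothetical per-file private data pointer) whose link
 * count has dropped to zero:
 *
 *	if (ip->i_mode == 0)
 *		vrecycle(vp, NULL, td);
 *
 * A NULL interlock simply means there is no caller-held mutex to
 * release; when one is passed, it is dropped only if the vnode is
 * actually recycled.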
2206 */ 2207 int 2208 vrecycle(vp, inter_lkp, td) 2209 struct vnode *vp; 2210 struct mtx *inter_lkp; 2211 struct thread *td; 2212 { 2213 2214 mtx_lock(&vp->v_interlock); 2215 if (vp->v_usecount == 0) { 2216 if (inter_lkp) { 2217 mtx_unlock(inter_lkp); 2218 } 2219 vgonel(vp, td); 2220 return (1); 2221 } 2222 mtx_unlock(&vp->v_interlock); 2223 return (0); 2224 } 2225 2226 /* 2227 * Eliminate all activity associated with a vnode 2228 * in preparation for reuse. 2229 */ 2230 void 2231 vgone(vp) 2232 register struct vnode *vp; 2233 { 2234 struct thread *td = curthread; /* XXX */ 2235 2236 mtx_lock(&vp->v_interlock); 2237 vgonel(vp, td); 2238 } 2239 2240 /* 2241 * vgone, with the vp interlock held. 2242 */ 2243 void 2244 vgonel(vp, td) 2245 struct vnode *vp; 2246 struct thread *td; 2247 { 2248 int s; 2249 2250 /* 2251 * If a vgone (or vclean) is already in progress, 2252 * wait until it is done and return. 2253 */ 2254 if (vp->v_flag & VXLOCK) { 2255 vp->v_flag |= VXWANT; 2256 msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP, 2257 "vgone", 0); 2258 return; 2259 } 2260 2261 /* 2262 * Clean out the filesystem specific data. 2263 */ 2264 vclean(vp, DOCLOSE, td); 2265 mtx_lock(&vp->v_interlock); 2266 2267 /* 2268 * Delete from old mount point vnode list, if on one. 2269 */ 2270 if (vp->v_mount != NULL) 2271 insmntque(vp, (struct mount *)0); 2272 /* 2273 * If special device, remove it from special device alias list 2274 * if it is on one. 2275 */ 2276 if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) { 2277 mtx_lock(&spechash_mtx); 2278 SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext); 2279 freedev(vp->v_rdev); 2280 mtx_unlock(&spechash_mtx); 2281 vp->v_rdev = NULL; 2282 } 2283 2284 /* 2285 * If it is on the freelist and not already at the head, 2286 * move it to the head of the list. The test of the 2287 * VDOOMED flag and the reference count of zero is because 2288 * it will be removed from the free list by getnewvnode, 2289 * but will not have its reference count incremented until 2290 * after calling vgone. If the reference count were 2291 * incremented first, vgone would (incorrectly) try to 2292 * close the previous instance of the underlying object. 2293 */ 2294 if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { 2295 s = splbio(); 2296 mtx_lock(&vnode_free_list_mtx); 2297 if (vp->v_flag & VFREE) 2298 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2299 else 2300 freevnodes++; 2301 vp->v_flag |= VFREE; 2302 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2303 mtx_unlock(&vnode_free_list_mtx); 2304 splx(s); 2305 } 2306 2307 vp->v_type = VBAD; 2308 mtx_unlock(&vp->v_interlock); 2309 } 2310 2311 /* 2312 * Lookup a vnode by device number. 2313 */ 2314 int 2315 vfinddev(dev, type, vpp) 2316 dev_t dev; 2317 enum vtype type; 2318 struct vnode **vpp; 2319 { 2320 struct vnode *vp; 2321 2322 mtx_lock(&spechash_mtx); 2323 SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { 2324 if (type == vp->v_type) { 2325 *vpp = vp; 2326 mtx_unlock(&spechash_mtx); 2327 return (1); 2328 } 2329 } 2330 mtx_unlock(&spechash_mtx); 2331 return (0); 2332 } 2333 2334 /* 2335 * Calculate the total number of references to a special device. 
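 *
 * Illustrative sketch (not code from this file): because several vnodes
 * may alias the same device, a close routine that only wants to tear
 * things down on the very last close checks the device, not the vnode:
 *
 *	if (vcount(vp) > 1)
 *		return (0);		(device is still open elsewhere)
 *
 * count_dev() below gives the same answer when only the dev_t is at
 * hand.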
2336 */ 2337 int 2338 vcount(vp) 2339 struct vnode *vp; 2340 { 2341 struct vnode *vq; 2342 int count; 2343 2344 count = 0; 2345 mtx_lock(&spechash_mtx); 2346 SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext) 2347 count += vq->v_usecount; 2348 mtx_unlock(&spechash_mtx); 2349 return (count); 2350 } 2351 2352 /* 2353 * Same as above, but using the dev_t as argument 2354 */ 2355 int 2356 count_dev(dev) 2357 dev_t dev; 2358 { 2359 struct vnode *vp; 2360 2361 vp = SLIST_FIRST(&dev->si_hlist); 2362 if (vp == NULL) 2363 return (0); 2364 return(vcount(vp)); 2365 } 2366 2367 /* 2368 * Print out a description of a vnode. 2369 */ 2370 static char *typename[] = 2371 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"}; 2372 2373 void 2374 vprint(label, vp) 2375 char *label; 2376 struct vnode *vp; 2377 { 2378 char buf[96]; 2379 2380 if (label != NULL) 2381 printf("%s: %p: ", label, (void *)vp); 2382 else 2383 printf("%p: ", (void *)vp); 2384 printf("type %s, usecount %d, writecount %d, refcount %d,", 2385 typename[vp->v_type], vp->v_usecount, vp->v_writecount, 2386 vp->v_holdcnt); 2387 buf[0] = '\0'; 2388 if (vp->v_flag & VROOT) 2389 strcat(buf, "|VROOT"); 2390 if (vp->v_flag & VTEXT) 2391 strcat(buf, "|VTEXT"); 2392 if (vp->v_flag & VSYSTEM) 2393 strcat(buf, "|VSYSTEM"); 2394 if (vp->v_flag & VXLOCK) 2395 strcat(buf, "|VXLOCK"); 2396 if (vp->v_flag & VXWANT) 2397 strcat(buf, "|VXWANT"); 2398 if (vp->v_flag & VBWAIT) 2399 strcat(buf, "|VBWAIT"); 2400 if (vp->v_flag & VDOOMED) 2401 strcat(buf, "|VDOOMED"); 2402 if (vp->v_flag & VFREE) 2403 strcat(buf, "|VFREE"); 2404 if (vp->v_flag & VOBJBUF) 2405 strcat(buf, "|VOBJBUF"); 2406 if (buf[0] != '\0') 2407 printf(" flags (%s)", &buf[1]); 2408 if (vp->v_data == NULL) { 2409 printf("\n"); 2410 } else { 2411 printf("\n\t"); 2412 VOP_PRINT(vp); 2413 } 2414 } 2415 2416 #ifdef DDB 2417 #include <ddb/ddb.h> 2418 /* 2419 * List all of the locked vnodes in the system. 2420 * Called when debugging the kernel. 2421 */ 2422 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes) 2423 { 2424 struct thread *td = curthread; /* XXX */ 2425 struct mount *mp, *nmp; 2426 struct vnode *vp; 2427 2428 printf("Locked vnodes\n"); 2429 mtx_lock(&mountlist_mtx); 2430 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2431 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 2432 nmp = TAILQ_NEXT(mp, mnt_list); 2433 continue; 2434 } 2435 mtx_lock(&mntvnode_mtx); 2436 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2437 if (VOP_ISLOCKED(vp, NULL)) 2438 vprint((char *)0, vp); 2439 } 2440 mtx_unlock(&mntvnode_mtx); 2441 mtx_lock(&mountlist_mtx); 2442 nmp = TAILQ_NEXT(mp, mnt_list); 2443 vfs_unbusy(mp, td); 2444 } 2445 mtx_unlock(&mountlist_mtx); 2446 } 2447 #endif 2448 2449 /* 2450 * Top level filesystem related information gathering. 2451 */ 2452 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 2453 2454 static int 2455 vfs_sysctl(SYSCTL_HANDLER_ARGS) 2456 { 2457 int *name = (int *)arg1 - 1; /* XXX */ 2458 u_int namelen = arg2 + 1; /* XXX */ 2459 struct vfsconf *vfsp; 2460 2461 #if 1 || defined(COMPAT_PRELITE2) 2462 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2463 if (namelen == 1) 2464 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2465 #endif 2466 2467 /* XXX the below code does not compile; vfs_sysctl does not exist. 
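 *
 * Illustrative sketch (not code from this file): the name vector decoded
 * by the switch further down corresponds to a userland sysctl(3) call of
 * roughly this shape, which fetches the struct vfsconf for a given
 * filesystem type number (fstypenum is a placeholder):
 *
 *	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, fstypenum };
 *	struct vfsconf vfc;
 *	size_t len = sizeof(vfc);
 *
 *	if (sysctl(mib, 4, &vfc, &len, NULL, 0) == -1)
 *		err(1, "sysctl");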
*/ 2468 #ifdef notyet 2469 /* all sysctl names at this level are at least name and field */ 2470 if (namelen < 2) 2471 return (ENOTDIR); /* overloaded */ 2472 if (name[0] != VFS_GENERIC) { 2473 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2474 if (vfsp->vfc_typenum == name[0]) 2475 break; 2476 if (vfsp == NULL) 2477 return (EOPNOTSUPP); 2478 return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, 2479 oldp, oldlenp, newp, newlen, td)); 2480 } 2481 #endif 2482 switch (name[1]) { 2483 case VFS_MAXTYPENUM: 2484 if (namelen != 2) 2485 return (ENOTDIR); 2486 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2487 case VFS_CONF: 2488 if (namelen != 3) 2489 return (ENOTDIR); /* overloaded */ 2490 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) 2491 if (vfsp->vfc_typenum == name[2]) 2492 break; 2493 if (vfsp == NULL) 2494 return (EOPNOTSUPP); 2495 return (SYSCTL_OUT(req, vfsp, sizeof *vfsp)); 2496 } 2497 return (EOPNOTSUPP); 2498 } 2499 2500 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl, 2501 "Generic filesystem"); 2502 2503 #if 1 || defined(COMPAT_PRELITE2) 2504 2505 static int 2506 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2507 { 2508 int error; 2509 struct vfsconf *vfsp; 2510 struct ovfsconf ovfs; 2511 2512 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { 2513 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2514 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2515 ovfs.vfc_index = vfsp->vfc_typenum; 2516 ovfs.vfc_refcount = vfsp->vfc_refcount; 2517 ovfs.vfc_flags = vfsp->vfc_flags; 2518 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2519 if (error) 2520 return error; 2521 } 2522 return 0; 2523 } 2524 2525 #endif /* 1 || COMPAT_PRELITE2 */ 2526 2527 #if COMPILING_LINT 2528 #define KINFO_VNODESLOP 10 2529 /* 2530 * Dump vnode list (via sysctl). 2531 * Copyout address of vnode followed by vnode. 2532 */ 2533 /* ARGSUSED */ 2534 static int 2535 sysctl_vnode(SYSCTL_HANDLER_ARGS) 2536 { 2537 struct thread *td = curthread; /* XXX */ 2538 struct mount *mp, *nmp; 2539 struct vnode *nvp, *vp; 2540 int error; 2541 2542 #define VPTRSZ sizeof (struct vnode *) 2543 #define VNODESZ sizeof (struct vnode) 2544 2545 req->lock = 0; 2546 if (!req->oldptr) /* Make an estimate */ 2547 return (SYSCTL_OUT(req, 0, 2548 (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); 2549 2550 mtx_lock(&mountlist_mtx); 2551 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2552 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) { 2553 nmp = TAILQ_NEXT(mp, mnt_list); 2554 continue; 2555 } 2556 mtx_lock(&mntvnode_mtx); 2557 again: 2558 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 2559 vp != NULL; 2560 vp = nvp) { 2561 /* 2562 * Check that the vp is still associated with 2563 * this filesystem. RACE: could have been 2564 * recycled onto the same filesystem. 2565 */ 2566 if (vp->v_mount != mp) 2567 goto again; 2568 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2569 mtx_unlock(&mntvnode_mtx); 2570 if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || 2571 (error = SYSCTL_OUT(req, vp, VNODESZ))) 2572 return (error); 2573 mtx_lock(&mntvnode_mtx); 2574 } 2575 mtx_unlock(&mntvnode_mtx); 2576 mtx_lock(&mountlist_mtx); 2577 nmp = TAILQ_NEXT(mp, mnt_list); 2578 vfs_unbusy(mp, td); 2579 } 2580 mtx_unlock(&mountlist_mtx); 2581 2582 return (0); 2583 } 2584 2585 /* 2586 * XXX 2587 * Exporting the vnode list on large systems causes them to crash. 2588 * Exporting the vnode list on medium systems causes sysctl to coredump. 
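 *
 * Illustrative sketch (not code from this file): the data exported by
 * sysctl_vnode() above is a packed sequence of pairs, the kernel
 * address of each vnode followed by a copy of the vnode itself, so a
 * consumer holding the result in buf/len would walk it as (using the
 * VPTRSZ and VNODESZ sizes defined above):
 *
 *	for (p = buf; p < buf + len; p += VPTRSZ + VNODESZ) {
 *		kvp = *(struct vnode **)p;		(kernel address)
 *		vn = (struct vnode *)(p + VPTRSZ);	(copied contents)
 *	}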
2589 */ 2590 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 2591 0, 0, sysctl_vnode, "S,vnode", ""); 2592 #endif 2593 2594 /* 2595 * Check to see if a filesystem is mounted on a block device. 2596 */ 2597 int 2598 vfs_mountedon(vp) 2599 struct vnode *vp; 2600 { 2601 2602 if (vp->v_rdev->si_mountpoint != NULL) 2603 return (EBUSY); 2604 return (0); 2605 } 2606 2607 /* 2608 * Unmount all filesystems. The list is traversed in reverse order 2609 * of mounting to avoid dependencies. 2610 */ 2611 void 2612 vfs_unmountall() 2613 { 2614 struct mount *mp; 2615 struct thread *td; 2616 int error; 2617 2618 if (curthread != NULL) 2619 td = curthread; 2620 else 2621 td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */ 2622 /* 2623 * Since this only runs when rebooting, it is not interlocked. 2624 */ 2625 while(!TAILQ_EMPTY(&mountlist)) { 2626 mp = TAILQ_LAST(&mountlist, mntlist); 2627 error = dounmount(mp, MNT_FORCE, td); 2628 if (error) { 2629 TAILQ_REMOVE(&mountlist, mp, mnt_list); 2630 printf("unmount of %s failed (", 2631 mp->mnt_stat.f_mntonname); 2632 if (error == EBUSY) 2633 printf("BUSY)\n"); 2634 else 2635 printf("%d)\n", error); 2636 } else { 2637 /* The unmount has removed mp from the mountlist */ 2638 } 2639 } 2640 } 2641 2642 /* 2643 * perform msync on all vnodes under a mount point 2644 * the mount point must be locked. 2645 */ 2646 void 2647 vfs_msync(struct mount *mp, int flags) 2648 { 2649 struct vnode *vp, *nvp; 2650 struct vm_object *obj; 2651 int tries; 2652 2653 GIANT_REQUIRED; 2654 2655 tries = 5; 2656 mtx_lock(&mntvnode_mtx); 2657 loop: 2658 for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) { 2659 if (vp->v_mount != mp) { 2660 if (--tries > 0) 2661 goto loop; 2662 break; 2663 } 2664 nvp = TAILQ_NEXT(vp, v_nmntvnodes); 2665 2666 if (vp->v_flag & VXLOCK) /* XXX: what if MNT_WAIT? */ 2667 continue; 2668 2669 if (vp->v_flag & VNOSYNC) /* unlinked, skip it */ 2670 continue; 2671 2672 if ((vp->v_flag & VOBJDIRTY) && 2673 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) { 2674 mtx_unlock(&mntvnode_mtx); 2675 if (!vget(vp, 2676 LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curthread)) { 2677 if (VOP_GETVOBJECT(vp, &obj) == 0) { 2678 vm_object_page_clean(obj, 0, 0, 2679 flags == MNT_WAIT ? 2680 OBJPC_SYNC : OBJPC_NOSYNC); 2681 } 2682 vput(vp); 2683 } 2684 mtx_lock(&mntvnode_mtx); 2685 if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) { 2686 if (--tries > 0) 2687 goto loop; 2688 break; 2689 } 2690 } 2691 } 2692 mtx_unlock(&mntvnode_mtx); 2693 } 2694 2695 /* 2696 * Create the VM object needed for VMIO and mmap support. This 2697 * is done for all VREG files in the system. Some filesystems might 2698 * afford the additional metadata buffering capability of the 2699 * VMIO code by making the device node be VMIO mode also. 2700 * 2701 * vp must be locked when vfs_object_create is called. 2702 */ 2703 int 2704 vfs_object_create(vp, td, cred) 2705 struct vnode *vp; 2706 struct thread *td; 2707 struct ucred *cred; 2708 { 2709 GIANT_REQUIRED; 2710 return (VOP_CREATEVOBJECT(vp, cred, td)); 2711 } 2712 2713 /* 2714 * Mark a vnode as free, putting it up for recycling. 
*/ 2716 void 2717 vfree(vp) 2718 struct vnode *vp; 2719 { 2720 int s; 2721 2722 s = splbio(); 2723 mtx_lock(&vnode_free_list_mtx); 2724 KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free")); 2725 if (vp->v_flag & VAGE) { 2726 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 2727 } else { 2728 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 2729 } 2730 freevnodes++; 2731 mtx_unlock(&vnode_free_list_mtx); 2732 vp->v_flag &= ~VAGE; 2733 vp->v_flag |= VFREE; 2734 splx(s); 2735 } 2736 2737 /* 2738 * Opposite of vfree() - mark a vnode as in use. 2739 */ 2740 void 2741 vbusy(vp) 2742 struct vnode *vp; 2743 { 2744 int s; 2745 2746 s = splbio(); 2747 mtx_lock(&vnode_free_list_mtx); 2748 KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free")); 2749 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 2750 freevnodes--; 2751 mtx_unlock(&vnode_free_list_mtx); 2752 vp->v_flag &= ~(VFREE|VAGE); 2753 splx(s); 2754 } 2755 2756 /* 2757 * Record a process's interest in events which might happen to 2758 * a vnode. Because poll uses the historic select-style interface 2759 * internally, this routine serves as both the ``check for any 2760 * pending events'' and the ``record my interest in future events'' 2761 * functions. (These are done together, while the lock is held, 2762 * to avoid race conditions.) 2763 */ 2764 int 2765 vn_pollrecord(vp, td, events) 2766 struct vnode *vp; 2767 struct thread *td; 2768 short events; 2769 { 2770 2771 if (vp->v_pollinfo == NULL) 2772 v_addpollinfo(vp); 2773 mtx_lock(&vp->v_pollinfo->vpi_lock); 2774 if (vp->v_pollinfo->vpi_revents & events) { 2775 /* 2776 * This leaves events we are not interested 2777 * in available for the other process 2778 * which presumably had requested them 2779 * (otherwise they would never have been 2780 * recorded). 2781 */ 2782 events &= vp->v_pollinfo->vpi_revents; 2783 vp->v_pollinfo->vpi_revents &= ~events; 2784 2785 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2786 return events; 2787 } 2788 vp->v_pollinfo->vpi_events |= events; 2789 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 2790 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2791 return 0; 2792 } 2793 2794 /* 2795 * Note the occurrence of an event. If the VN_POLLEVENT macro is used, 2796 * it is possible for us to miss an event due to race conditions, but 2797 * that condition is expected to be rare, so for the moment it is the 2798 * preferred interface. 2799 */ 2800 void 2801 vn_pollevent(vp, events) 2802 struct vnode *vp; 2803 short events; 2804 { 2805 2806 if (vp->v_pollinfo == NULL) 2807 v_addpollinfo(vp); 2808 mtx_lock(&vp->v_pollinfo->vpi_lock); 2809 if (vp->v_pollinfo->vpi_events & events) { 2810 /* 2811 * We clear vpi_events so that we don't 2812 * call selwakeup() twice if two events are 2813 * posted before the polling process(es) is 2814 * awakened. This also ensures that we take at 2815 * most one selwakeup() if the polling process 2816 * is no longer interested. However, it does 2817 * mean that only one event can be noticed at 2818 * a time. (Perhaps we should only clear those 2819 * event bits which we note?) XXX 2820 */ 2821 vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */ 2822 vp->v_pollinfo->vpi_revents |= events; 2823 selwakeup(&vp->v_pollinfo->vpi_selinfo); 2824 } 2825 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2826 } 2827 2828 /* 2829 * Wake up anyone polling on vp because it is being revoked. 2830 * This depends on dead_poll() returning POLLHUP for correct 2831 * behavior.
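 *
 * Illustrative sketch (not code from this file): vn_pollrecord() and
 * the VN_POLLEVENT() macro above are used in pairs.  A filesystem's
 * VOP_POLL implementation records interest when nothing is pending:
 *
 *	return (vn_pollrecord(vp, ap->a_td, ap->a_events));
 *
 * and its I/O paths post events as they happen, for example after
 * making new data readable:
 *
 *	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);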
2832 */ 2833 void 2834 vn_pollgone(vp) 2835 struct vnode *vp; 2836 { 2837 2838 mtx_lock(&vp->v_pollinfo->vpi_lock); 2839 VN_KNOTE(vp, NOTE_REVOKE); 2840 if (vp->v_pollinfo->vpi_events) { 2841 vp->v_pollinfo->vpi_events = 0; 2842 selwakeup(&vp->v_pollinfo->vpi_selinfo); 2843 } 2844 mtx_unlock(&vp->v_pollinfo->vpi_lock); 2845 } 2846 2847 2848 2849 /* 2850 * Routine to create and manage a filesystem syncer vnode. 2851 */ 2852 #define sync_close ((int (*)(struct vop_close_args *))nullop) 2853 static int sync_fsync(struct vop_fsync_args *); 2854 static int sync_inactive(struct vop_inactive_args *); 2855 static int sync_reclaim(struct vop_reclaim_args *); 2856 #define sync_lock ((int (*)(struct vop_lock_args *))vop_nolock) 2857 #define sync_unlock ((int (*)(struct vop_unlock_args *))vop_nounlock) 2858 static int sync_print(struct vop_print_args *); 2859 #define sync_islocked ((int(*)(struct vop_islocked_args *))vop_noislocked) 2860 2861 static vop_t **sync_vnodeop_p; 2862 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = { 2863 { &vop_default_desc, (vop_t *) vop_eopnotsupp }, 2864 { &vop_close_desc, (vop_t *) sync_close }, /* close */ 2865 { &vop_fsync_desc, (vop_t *) sync_fsync }, /* fsync */ 2866 { &vop_inactive_desc, (vop_t *) sync_inactive }, /* inactive */ 2867 { &vop_reclaim_desc, (vop_t *) sync_reclaim }, /* reclaim */ 2868 { &vop_lock_desc, (vop_t *) sync_lock }, /* lock */ 2869 { &vop_unlock_desc, (vop_t *) sync_unlock }, /* unlock */ 2870 { &vop_print_desc, (vop_t *) sync_print }, /* print */ 2871 { &vop_islocked_desc, (vop_t *) sync_islocked }, /* islocked */ 2872 { NULL, NULL } 2873 }; 2874 static struct vnodeopv_desc sync_vnodeop_opv_desc = 2875 { &sync_vnodeop_p, sync_vnodeop_entries }; 2876 2877 VNODEOP_SET(sync_vnodeop_opv_desc); 2878 2879 /* 2880 * Create a new filesystem syncer vnode for the specified mount point. 2881 */ 2882 int 2883 vfs_allocate_syncvnode(mp) 2884 struct mount *mp; 2885 { 2886 struct vnode *vp; 2887 static long start, incr, next; 2888 int error; 2889 2890 /* Allocate a new vnode */ 2891 if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) { 2892 mp->mnt_syncer = NULL; 2893 return (error); 2894 } 2895 vp->v_type = VNON; 2896 /* 2897 * Place the vnode onto the syncer worklist. We attempt to 2898 * scatter them about on the list so that they will go off 2899 * at evenly distributed times even if all the filesystems 2900 * are mounted at once. 2901 */ 2902 next += incr; 2903 if (next == 0 || next > syncer_maxdelay) { 2904 start /= 2; 2905 incr /= 2; 2906 if (start == 0) { 2907 start = syncer_maxdelay / 2; 2908 incr = syncer_maxdelay; 2909 } 2910 next = start; 2911 } 2912 vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0); 2913 mp->mnt_syncer = vp; 2914 return (0); 2915 } 2916 2917 /* 2918 * Do a lazy sync of the filesystem. 2919 */ 2920 static int 2921 sync_fsync(ap) 2922 struct vop_fsync_args /* { 2923 struct vnode *a_vp; 2924 struct ucred *a_cred; 2925 int a_waitfor; 2926 struct thread *a_td; 2927 } */ *ap; 2928 { 2929 struct vnode *syncvp = ap->a_vp; 2930 struct mount *mp = syncvp->v_mount; 2931 struct thread *td = ap->a_td; 2932 int asyncflag; 2933 2934 /* 2935 * We only need to do something if this is a lazy evaluation. 2936 */ 2937 if (ap->a_waitfor != MNT_LAZY) 2938 return (0); 2939 2940 /* 2941 * Move ourselves to the back of the sync list. 2942 */ 2943 vn_syncer_add_to_worklist(syncvp, syncdelay); 2944 2945 /* 2946 * Walk the list of vnodes pushing all that are dirty and 2947 * not already on the sync list. 
*/ 2949 mtx_lock(&mountlist_mtx); 2950 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) { 2951 mtx_unlock(&mountlist_mtx); 2952 return (0); 2953 } 2954 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 2955 vfs_unbusy(mp, td); 2956 return (0); 2957 } 2958 asyncflag = mp->mnt_flag & MNT_ASYNC; 2959 mp->mnt_flag &= ~MNT_ASYNC; 2960 vfs_msync(mp, MNT_NOWAIT); 2961 VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td); 2962 if (asyncflag) 2963 mp->mnt_flag |= MNT_ASYNC; 2964 vn_finished_write(mp); 2965 vfs_unbusy(mp, td); 2966 return (0); 2967 } 2968 2969 /* 2970 * The syncer vnode is no longer referenced. 2971 */ 2972 static int 2973 sync_inactive(ap) 2974 struct vop_inactive_args /* { 2975 struct vnode *a_vp; 2976 struct thread *a_td; 2977 } */ *ap; 2978 { 2979 2980 vgone(ap->a_vp); 2981 return (0); 2982 } 2983 2984 /* 2985 * The syncer vnode is no longer needed and is being decommissioned. 2986 * 2987 * Modifications to the worklist must be protected at splbio(). 2988 */ 2989 static int 2990 sync_reclaim(ap) 2991 struct vop_reclaim_args /* { 2992 struct vnode *a_vp; 2993 } */ *ap; 2994 { 2995 struct vnode *vp = ap->a_vp; 2996 int s; 2997 2998 s = splbio(); 2999 vp->v_mount->mnt_syncer = NULL; 3000 if (vp->v_flag & VONWORKLST) { 3001 LIST_REMOVE(vp, v_synclist); 3002 vp->v_flag &= ~VONWORKLST; 3003 } 3004 splx(s); 3005 3006 return (0); 3007 } 3008 3009 /* 3010 * Print out a syncer vnode. 3011 */ 3012 static int 3013 sync_print(ap) 3014 struct vop_print_args /* { 3015 struct vnode *a_vp; 3016 } */ *ap; 3017 { 3018 struct vnode *vp = ap->a_vp; 3019 3020 printf("syncer vnode"); 3021 if (vp->v_vnlock != NULL) 3022 lockmgr_printinfo(vp->v_vnlock); 3023 printf("\n"); 3024 return (0); 3025 } 3026 3027 /* 3028 * Extract the dev_t from a VCHR vnode. 3029 */ 3030 dev_t 3031 vn_todev(vp) 3032 struct vnode *vp; 3033 { 3034 if (vp->v_type != VCHR) 3035 return (NODEV); 3036 return (vp->v_rdev); 3037 } 3038 3039 /* 3040 * Check if a vnode represents a disk device. 3041 */ 3042 int 3043 vn_isdisk(vp, errp) 3044 struct vnode *vp; 3045 int *errp; 3046 { 3047 struct cdevsw *cdevsw; 3048 3049 if (vp->v_type != VCHR) { 3050 if (errp != NULL) 3051 *errp = ENOTBLK; 3052 return (0); 3053 } 3054 if (vp->v_rdev == NULL) { 3055 if (errp != NULL) 3056 *errp = ENXIO; 3057 return (0); 3058 } 3059 cdevsw = devsw(vp->v_rdev); 3060 if (cdevsw == NULL) { 3061 if (errp != NULL) 3062 *errp = ENXIO; 3063 return (0); 3064 } 3065 if (!(cdevsw->d_flags & D_DISK)) { 3066 if (errp != NULL) 3067 *errp = ENOTBLK; 3068 return (0); 3069 } 3070 if (errp != NULL) 3071 *errp = 0; 3072 return (1); 3073 } 3074 3075 /* 3076 * Free data allocated by namei(); see namei(9) for details.
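 *
 * Illustrative sketch (not code from this file; namei(9) is
 * authoritative): a typical lookup releases the pathname buffer with
 * NDFREE() as soon as it is no longer needed:
 *
 *	struct nameidata nd;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	... operate on the locked, referenced nd.ni_vp ...
 *	vput(nd.ni_vp);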
3077 */ 3078 void 3079 NDFREE(ndp, flags) 3080 struct nameidata *ndp; 3081 const uint flags; 3082 { 3083 if (!(flags & NDF_NO_FREE_PNBUF) && 3084 (ndp->ni_cnd.cn_flags & HASBUF)) { 3085 uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf); 3086 ndp->ni_cnd.cn_flags &= ~HASBUF; 3087 } 3088 if (!(flags & NDF_NO_DVP_UNLOCK) && 3089 (ndp->ni_cnd.cn_flags & LOCKPARENT) && 3090 ndp->ni_dvp != ndp->ni_vp) 3091 VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread); 3092 if (!(flags & NDF_NO_DVP_RELE) && 3093 (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) { 3094 vrele(ndp->ni_dvp); 3095 ndp->ni_dvp = NULL; 3096 } 3097 if (!(flags & NDF_NO_VP_UNLOCK) && 3098 (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp) 3099 VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread); 3100 if (!(flags & NDF_NO_VP_RELE) && 3101 ndp->ni_vp) { 3102 vrele(ndp->ni_vp); 3103 ndp->ni_vp = NULL; 3104 } 3105 if (!(flags & NDF_NO_STARTDIR_RELE) && 3106 (ndp->ni_cnd.cn_flags & SAVESTART)) { 3107 vrele(ndp->ni_startdir); 3108 ndp->ni_startdir = NULL; 3109 } 3110 } 3111 3112 /* 3113 * Common filesystem object access control check routine. Accepts a 3114 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3115 * and optional call-by-reference privused argument allowing vaccess() 3116 * to indicate to the caller whether privilege was used to satisfy the 3117 * request. Returns 0 on success, or an errno on failure. 3118 */ 3119 int 3120 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused) 3121 enum vtype type; 3122 mode_t file_mode; 3123 uid_t file_uid; 3124 gid_t file_gid; 3125 mode_t acc_mode; 3126 struct ucred *cred; 3127 int *privused; 3128 { 3129 mode_t dac_granted; 3130 #ifdef CAPABILITIES 3131 mode_t cap_granted; 3132 #endif 3133 3134 /* 3135 * Look for a normal, non-privileged way to access the file/directory 3136 * as requested. If it exists, go with that. 3137 */ 3138 3139 if (privused != NULL) 3140 *privused = 0; 3141 3142 dac_granted = 0; 3143 3144 /* Check the owner. */ 3145 if (cred->cr_uid == file_uid) { 3146 dac_granted |= VADMIN; 3147 if (file_mode & S_IXUSR) 3148 dac_granted |= VEXEC; 3149 if (file_mode & S_IRUSR) 3150 dac_granted |= VREAD; 3151 if (file_mode & S_IWUSR) 3152 dac_granted |= VWRITE; 3153 3154 if ((acc_mode & dac_granted) == acc_mode) 3155 return (0); 3156 3157 goto privcheck; 3158 } 3159 3160 /* Otherwise, check the groups (first match) */ 3161 if (groupmember(file_gid, cred)) { 3162 if (file_mode & S_IXGRP) 3163 dac_granted |= VEXEC; 3164 if (file_mode & S_IRGRP) 3165 dac_granted |= VREAD; 3166 if (file_mode & S_IWGRP) 3167 dac_granted |= VWRITE; 3168 3169 if ((acc_mode & dac_granted) == acc_mode) 3170 return (0); 3171 3172 goto privcheck; 3173 } 3174 3175 /* Otherwise, check everyone else. */ 3176 if (file_mode & S_IXOTH) 3177 dac_granted |= VEXEC; 3178 if (file_mode & S_IROTH) 3179 dac_granted |= VREAD; 3180 if (file_mode & S_IWOTH) 3181 dac_granted |= VWRITE; 3182 if ((acc_mode & dac_granted) == acc_mode) 3183 return (0); 3184 3185 privcheck: 3186 if (!suser_cred(cred, PRISON_ROOT)) { 3187 /* XXX audit: privilege used */ 3188 if (privused != NULL) 3189 *privused = 1; 3190 return (0); 3191 } 3192 3193 #ifdef CAPABILITIES 3194 /* 3195 * Build a capability mask to determine if the set of capabilities 3196 * satisfies the requirements when combined with the granted mask 3197 * from above. 3198 * For each capability, if the capability is required, bitwise 3199 * or the request type onto the cap_granted mask. 
3200 */ 3201 cap_granted = 0; 3202 3203 if (type == VDIR) { 3204 /* 3205 * For directories, use CAP_DAC_READ_SEARCH to satisfy 3206 * VEXEC requests, instead of CAP_DAC_EXECUTE. 3207 */ 3208 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3209 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT)) 3210 cap_granted |= VEXEC; 3211 } else { 3212 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3213 !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT)) 3214 cap_granted |= VEXEC; 3215 } 3216 3217 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) && 3218 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT)) 3219 cap_granted |= VREAD; 3220 3221 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3222 !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT)) 3223 cap_granted |= VWRITE; 3224 3225 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3226 !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT)) 3227 cap_granted |= VADMIN; 3228 3229 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) { 3230 /* XXX audit: privilege used */ 3231 if (privused != NULL) 3232 *privused = 1; 3233 return (0); 3234 } 3235 #endif 3236 3237 return ((acc_mode & VADMIN) ? EPERM : EACCES); 3238 } 3239 3240