/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
33 * 34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 35 */ 36 37 /* 38 * External virtual filesystem routines 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 #include "opt_ddb.h" 45 #include "opt_mac.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/bio.h> 50 #include <sys/buf.h> 51 #include <sys/condvar.h> 52 #include <sys/conf.h> 53 #include <sys/dirent.h> 54 #include <sys/event.h> 55 #include <sys/eventhandler.h> 56 #include <sys/extattr.h> 57 #include <sys/file.h> 58 #include <sys/fcntl.h> 59 #include <sys/jail.h> 60 #include <sys/kdb.h> 61 #include <sys/kernel.h> 62 #include <sys/kthread.h> 63 #include <sys/lockf.h> 64 #include <sys/malloc.h> 65 #include <sys/mount.h> 66 #include <sys/namei.h> 67 #include <sys/priv.h> 68 #include <sys/reboot.h> 69 #include <sys/sleepqueue.h> 70 #include <sys/stat.h> 71 #include <sys/sysctl.h> 72 #include <sys/syslog.h> 73 #include <sys/vmmeter.h> 74 #include <sys/vnode.h> 75 76 #include <machine/stdarg.h> 77 78 #include <security/mac/mac_framework.h> 79 80 #include <vm/vm.h> 81 #include <vm/vm_object.h> 82 #include <vm/vm_extern.h> 83 #include <vm/pmap.h> 84 #include <vm/vm_map.h> 85 #include <vm/vm_page.h> 86 #include <vm/vm_kern.h> 87 #include <vm/uma.h> 88 89 #ifdef DDB 90 #include <ddb/ddb.h> 91 #endif 92 93 #define WI_MPSAFEQ 0 94 #define WI_GIANTQ 1 95 96 static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure"); 97 98 static void delmntque(struct vnode *vp); 99 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 100 int slpflag, int slptimeo); 101 static void syncer_shutdown(void *arg, int howto); 102 static int vtryrecycle(struct vnode *vp); 103 static void vbusy(struct vnode *vp); 104 static void vinactive(struct vnode *, struct thread *); 105 static void v_incr_usecount(struct vnode *); 106 static void v_decr_usecount(struct vnode *); 107 static void v_decr_useonly(struct vnode *); 108 static void v_upgrade_usecount(struct vnode *); 109 static void vfree(struct vnode *); 110 static void vnlru_free(int); 111 static void vgonel(struct vnode *); 112 static void vfs_knllock(void *arg); 113 static void vfs_knlunlock(void *arg); 114 static int vfs_knllocked(void *arg); 115 static void destroy_vpollinfo(struct vpollinfo *vi); 116 117 /* 118 * Enable Giant pushdown based on whether or not the vm is mpsafe in this 119 * build. Without mpsafevm the buffer cache can not run Giant free. 120 */ 121 int mpsafe_vfs = 1; 122 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs); 123 SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0, 124 "MPSAFE VFS"); 125 126 /* 127 * Number of vnodes in existence. Increased whenever getnewvnode() 128 * allocates a new vnode, decreased on vdestroy() called on VI_DOOMed 129 * vnode. 130 */ 131 static unsigned long numvnodes; 132 133 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, ""); 134 135 /* 136 * Conversion tables for conversion from vnode types to inode formats 137 * and back. 138 */ 139 enum vtype iftovt_tab[16] = { 140 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 141 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 142 }; 143 int vttoif_tab[10] = { 144 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 145 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 146 }; 147 148 /* 149 * List of vnodes that are ready for recycling. 150 */ 151 static TAILQ_HEAD(freelst, vnode) vnode_free_list; 152 153 /* 154 * Free vnode target. Free vnodes may simply be files which have been stat'd 155 * but not read. 
This is somewhat common, and a small cache of such files
 * should be kept to avoid recreation costs.
 */
static u_long wantfreevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, writes to mounted block
 * devices are delayed only about half as long as file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third as long as file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
214 * Items that need to be processed soon are placed in this queue: 215 * 216 * syncer_workitem_pending[syncer_delayno] 217 * 218 * A delay of fifteen seconds is done by placing the request fifteen 219 * entries later in the queue: 220 * 221 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 222 * 223 */ 224 static int syncer_delayno; 225 static long syncer_mask; 226 LIST_HEAD(synclist, bufobj); 227 static struct synclist *syncer_workitem_pending[2]; 228 /* 229 * The sync_mtx protects: 230 * bo->bo_synclist 231 * sync_vnode_count 232 * syncer_delayno 233 * syncer_state 234 * syncer_workitem_pending 235 * syncer_worklist_len 236 * rushjob 237 */ 238 static struct mtx sync_mtx; 239 static struct cv sync_wakeup; 240 241 #define SYNCER_MAXDELAY 32 242 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 243 static int syncdelay = 30; /* max time to delay syncing data */ 244 static int filedelay = 30; /* time to delay syncing files */ 245 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, ""); 246 static int dirdelay = 29; /* time to delay syncing directories */ 247 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, ""); 248 static int metadelay = 28; /* time to delay syncing metadata */ 249 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, ""); 250 static int rushjob; /* number of slots to run ASAP */ 251 static int stat_rush_requests; /* number of times I/O speeded up */ 252 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, ""); 253 254 /* 255 * When shutting down the syncer, run it at four times normal speed. 256 */ 257 #define SYNCER_SHUTDOWN_SPEEDUP 4 258 static int sync_vnode_count; 259 static int syncer_worklist_len; 260 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 261 syncer_state; 262 263 /* 264 * Number of vnodes we want to exist at any one time. This is mostly used 265 * to size hash tables in vnode-related code. It is normally not used in 266 * getnewvnode(), as wantfreevnodes is normally nonzero.) 267 * 268 * XXX desiredvnodes is historical cruft and should not exist. 269 */ 270 int desiredvnodes; 271 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, 272 &desiredvnodes, 0, "Maximum number of vnodes"); 273 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 274 &wantfreevnodes, 0, "Minimum number of vnodes (legacy)"); 275 static int vnlru_nowhere; 276 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 277 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 278 279 /* 280 * Macros to control when a vnode is freed and recycled. All require 281 * the vnode interlock. 282 */ 283 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt) 284 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt) 285 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt) 286 287 288 /* 289 * Initialize the vnode management data structures. 290 */ 291 #ifndef MAXVNODES_MAX 292 #define MAXVNODES_MAX 100000 293 #endif 294 static void 295 vntblinit(void *dummy __unused) 296 { 297 298 /* 299 * Desiredvnodes is a function of the physical memory size and 300 * the kernel's heap size. Specifically, desiredvnodes scales 301 * in proportion to the physical memory size until two fifths 302 * of the kernel's heap size is consumed by vnodes and vm 303 * objects. 
304 */ 305 desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size / 306 (5 * (sizeof(struct vm_object) + sizeof(struct vnode)))); 307 if (desiredvnodes > MAXVNODES_MAX) { 308 if (bootverbose) 309 printf("Reducing kern.maxvnodes %d -> %d\n", 310 desiredvnodes, MAXVNODES_MAX); 311 desiredvnodes = MAXVNODES_MAX; 312 } 313 wantfreevnodes = desiredvnodes / 4; 314 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 315 TAILQ_INIT(&vnode_free_list); 316 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 317 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 318 NULL, NULL, UMA_ALIGN_PTR, 0); 319 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 320 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 321 /* 322 * Initialize the filesystem syncer. 323 */ 324 syncer_workitem_pending[WI_MPSAFEQ] = hashinit(syncer_maxdelay, M_VNODE, 325 &syncer_mask); 326 syncer_workitem_pending[WI_GIANTQ] = hashinit(syncer_maxdelay, M_VNODE, 327 &syncer_mask); 328 syncer_maxdelay = syncer_mask + 1; 329 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 330 cv_init(&sync_wakeup, "syncer"); 331 } 332 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 333 334 335 /* 336 * Mark a mount point as busy. Used to synchronize access and to delay 337 * unmounting. Eventually, mountlist_mtx is not released on failure. 338 */ 339 int 340 vfs_busy(struct mount *mp, int flags) 341 { 342 343 MPASS((flags & ~MBF_MASK) == 0); 344 345 MNT_ILOCK(mp); 346 MNT_REF(mp); 347 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 348 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 349 MNT_REL(mp); 350 MNT_IUNLOCK(mp); 351 return (ENOENT); 352 } 353 if (flags & MBF_MNTLSTLOCK) 354 mtx_unlock(&mountlist_mtx); 355 mp->mnt_kern_flag |= MNTK_MWAIT; 356 msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0); 357 MNT_REL(mp); 358 MNT_IUNLOCK(mp); 359 if (flags & MBF_MNTLSTLOCK) 360 mtx_lock(&mountlist_mtx); 361 return (ENOENT); 362 } 363 if (flags & MBF_MNTLSTLOCK) 364 mtx_unlock(&mountlist_mtx); 365 mp->mnt_lockref++; 366 MNT_IUNLOCK(mp); 367 return (0); 368 } 369 370 /* 371 * Free a busy filesystem. 372 */ 373 void 374 vfs_unbusy(struct mount *mp) 375 { 376 377 MNT_ILOCK(mp); 378 MNT_REL(mp); 379 mp->mnt_lockref--; 380 if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 381 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 382 mp->mnt_kern_flag &= ~MNTK_DRAINING; 383 wakeup(&mp->mnt_lockref); 384 } 385 MNT_IUNLOCK(mp); 386 } 387 388 /* 389 * Lookup a mount point by filesystem identifier. 390 */ 391 struct mount * 392 vfs_getvfs(fsid_t *fsid) 393 { 394 struct mount *mp; 395 396 mtx_lock(&mountlist_mtx); 397 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 398 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 399 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 400 vfs_ref(mp); 401 mtx_unlock(&mountlist_mtx); 402 return (mp); 403 } 404 } 405 mtx_unlock(&mountlist_mtx); 406 return ((struct mount *) 0); 407 } 408 409 /* 410 * Lookup a mount point by filesystem identifier, busying it before 411 * returning. 
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
	struct mount *mp;
	int error;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	/*
	 * If the thread is jailed, but this is not a jail-friendly file
	 * system, deny immediately.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
		return (EPERM);

	/*
	 * If the file system was mounted outside a jail and a jailed thread
	 * tries to access it, deny immediately.
	 */
	if (!jailed(mp->mnt_cred) && jailed(td->td_ucred))
		return (EPERM);

	/*
	 * If the file system was mounted inside a different jail than the
	 * jail of the calling thread, deny immediately.
	 */
	if (jailed(mp->mnt_cred) && jailed(td->td_ucred) &&
	    mp->mnt_cred->cr_prison != td->td_ucred->cr_prison) {
		return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
525 */ 526 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 527 528 static int timestamp_precision = TSP_SEC; 529 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 530 ×tamp_precision, 0, ""); 531 532 /* 533 * Get a current timestamp. 534 */ 535 void 536 vfs_timestamp(struct timespec *tsp) 537 { 538 struct timeval tv; 539 540 switch (timestamp_precision) { 541 case TSP_SEC: 542 tsp->tv_sec = time_second; 543 tsp->tv_nsec = 0; 544 break; 545 case TSP_HZ: 546 getnanotime(tsp); 547 break; 548 case TSP_USEC: 549 microtime(&tv); 550 TIMEVAL_TO_TIMESPEC(&tv, tsp); 551 break; 552 case TSP_NSEC: 553 default: 554 nanotime(tsp); 555 break; 556 } 557 } 558 559 /* 560 * Set vnode attributes to VNOVAL 561 */ 562 void 563 vattr_null(struct vattr *vap) 564 { 565 566 vap->va_type = VNON; 567 vap->va_size = VNOVAL; 568 vap->va_bytes = VNOVAL; 569 vap->va_mode = VNOVAL; 570 vap->va_nlink = VNOVAL; 571 vap->va_uid = VNOVAL; 572 vap->va_gid = VNOVAL; 573 vap->va_fsid = VNOVAL; 574 vap->va_fileid = VNOVAL; 575 vap->va_blocksize = VNOVAL; 576 vap->va_rdev = VNOVAL; 577 vap->va_atime.tv_sec = VNOVAL; 578 vap->va_atime.tv_nsec = VNOVAL; 579 vap->va_mtime.tv_sec = VNOVAL; 580 vap->va_mtime.tv_nsec = VNOVAL; 581 vap->va_ctime.tv_sec = VNOVAL; 582 vap->va_ctime.tv_nsec = VNOVAL; 583 vap->va_birthtime.tv_sec = VNOVAL; 584 vap->va_birthtime.tv_nsec = VNOVAL; 585 vap->va_flags = VNOVAL; 586 vap->va_gen = VNOVAL; 587 vap->va_vaflags = 0; 588 } 589 590 /* 591 * This routine is called when we have too many vnodes. It attempts 592 * to free <count> vnodes and will potentially free vnodes that still 593 * have VM backing store (VM backing store is typically the cause 594 * of a vnode blowout so we want to do this). Therefore, this operation 595 * is not considered cheap. 596 * 597 * A number of conditions may prevent a vnode from being reclaimed. 598 * the buffer cache may have references on the vnode, a directory 599 * vnode may still have references due to the namei cache representing 600 * underlying files, or the vnode may be in active use. It is not 601 * desireable to reuse such vnodes. These conditions may cause the 602 * number of vnodes to reach some minimum value regardless of what 603 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 604 */ 605 static int 606 vlrureclaim(struct mount *mp) 607 { 608 struct vnode *vp; 609 int done; 610 int trigger; 611 int usevnodes; 612 int count; 613 614 /* 615 * Calculate the trigger point, don't allow user 616 * screwups to blow us up. This prevents us from 617 * recycling vnodes with lots of resident pages. We 618 * aren't trying to free memory, we are trying to 619 * free vnodes. 620 */ 621 usevnodes = desiredvnodes; 622 if (usevnodes <= 0) 623 usevnodes = 1; 624 trigger = cnt.v_page_count * 2 / usevnodes; 625 done = 0; 626 vn_start_write(NULL, &mp, V_WAIT); 627 MNT_ILOCK(mp); 628 count = mp->mnt_nvnodelistsize / 10 + 1; 629 while (count != 0) { 630 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 631 while (vp != NULL && vp->v_type == VMARKER) 632 vp = TAILQ_NEXT(vp, v_nmntvnodes); 633 if (vp == NULL) 634 break; 635 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 636 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 637 --count; 638 if (!VI_TRYLOCK(vp)) 639 goto next_iter; 640 /* 641 * If it's been deconstructed already, it's still 642 * referenced, or it exceeds the trigger, skip it. 
		 */
		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if ((count % 256) != 0)
			goto relock_mnt;
		goto yield;
next_iter:
		if ((count % 256) != 0)
			continue;
		MNT_IUNLOCK(mp);
yield:
		uio_yield();
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

/*
 * Attempt to keep the free list at wantfreevnodes length.
 */
static void
vnlru_free(int count)
{
	struct vnode *vp;
	int vfslocked;

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (!vp)
			break;
		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/*
		 * Don't recycle if we can't get the interlock.
		 */
		if (!VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			continue;
		}
		VNASSERT(VCANRECYCLE(vp), vp,
		    ("vp inconsistent on freelist"));
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		vholdl(vp);
		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vtryrecycle(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
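 *
 * (Descriptive note added for clarity; not part of the original comment:
 * the dedicated vnlru kernel process defined below provides that safe
 * context.  getnewvnode() wakes it up once numvnodes exceeds
 * desiredvnodes, and it then walks the mount list, calling
 * vlrureclaim() on each filesystem in turn.)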
750 */ 751 static struct proc *vnlruproc; 752 static int vnlruproc_sig; 753 754 static void 755 vnlru_proc(void) 756 { 757 struct mount *mp, *nmp; 758 int done; 759 struct proc *p = vnlruproc; 760 761 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, 762 SHUTDOWN_PRI_FIRST); 763 764 mtx_lock(&Giant); 765 766 for (;;) { 767 kproc_suspend_check(p); 768 mtx_lock(&vnode_free_list_mtx); 769 if (freevnodes > wantfreevnodes) 770 vnlru_free(freevnodes - wantfreevnodes); 771 if (numvnodes <= desiredvnodes * 9 / 10) { 772 vnlruproc_sig = 0; 773 wakeup(&vnlruproc_sig); 774 msleep(vnlruproc, &vnode_free_list_mtx, 775 PVFS|PDROP, "vlruwt", hz); 776 continue; 777 } 778 mtx_unlock(&vnode_free_list_mtx); 779 done = 0; 780 mtx_lock(&mountlist_mtx); 781 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 782 int vfsunlocked; 783 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 784 nmp = TAILQ_NEXT(mp, mnt_list); 785 continue; 786 } 787 if (!VFS_NEEDSGIANT(mp)) { 788 mtx_unlock(&Giant); 789 vfsunlocked = 1; 790 } else 791 vfsunlocked = 0; 792 done += vlrureclaim(mp); 793 if (vfsunlocked) 794 mtx_lock(&Giant); 795 mtx_lock(&mountlist_mtx); 796 nmp = TAILQ_NEXT(mp, mnt_list); 797 vfs_unbusy(mp); 798 } 799 mtx_unlock(&mountlist_mtx); 800 if (done == 0) { 801 EVENTHANDLER_INVOKE(vfs_lowvnodes, desiredvnodes / 10); 802 #if 0 803 /* These messages are temporary debugging aids */ 804 if (vnlru_nowhere < 5) 805 printf("vnlru process getting nowhere..\n"); 806 else if (vnlru_nowhere == 5) 807 printf("vnlru process messages stopped.\n"); 808 #endif 809 vnlru_nowhere++; 810 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 811 } else 812 uio_yield(); 813 } 814 } 815 816 static struct kproc_desc vnlru_kp = { 817 "vnlru", 818 vnlru_proc, 819 &vnlruproc 820 }; 821 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 822 &vnlru_kp); 823 824 /* 825 * Routines having to do with the management of the vnode table. 826 */ 827 828 void 829 vdestroy(struct vnode *vp) 830 { 831 struct bufobj *bo; 832 833 CTR1(KTR_VFS, "vdestroy vp %p", vp); 834 mtx_lock(&vnode_free_list_mtx); 835 numvnodes--; 836 mtx_unlock(&vnode_free_list_mtx); 837 bo = &vp->v_bufobj; 838 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 839 ("cleaned vnode still on the free list.")); 840 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 841 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 842 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 843 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 844 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 845 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 846 VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL")); 847 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 848 VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL")); 849 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 850 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 851 VI_UNLOCK(vp); 852 #ifdef MAC 853 mac_vnode_destroy(vp); 854 #endif 855 if (vp->v_pollinfo != NULL) 856 destroy_vpollinfo(vp->v_pollinfo); 857 #ifdef INVARIANTS 858 /* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */ 859 vp->v_op = NULL; 860 #endif 861 lockdestroy(vp->v_vnlock); 862 mtx_destroy(&vp->v_interlock); 863 mtx_destroy(BO_MTX(bo)); 864 uma_zfree(vnode_zone, vp); 865 } 866 867 /* 868 * Try to recycle a freed vnode. 
We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR1(KTR_VFS, "vtryrecycle: trying vp %p", vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
		return (EWOULDBLOCK);
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0)
		vgonel(vp);
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	CTR1(KTR_VFS, "vtryrecycle: recycled vp %p", vp);
	return (0);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp = NULL;
	struct bufobj *bo;

	mtx_lock(&vnode_free_list_mtx);
	/*
	 * Lend our context to reclaim vnodes if they've exceeded the max.
	 */
	if (freevnodes > wantfreevnodes)
		vnlru_free(1);
	/*
	 * Wait for available vnodes.
	 */
	if (numvnodes > desiredvnodes) {
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
			/*
			 * The file system is being suspended; we cannot risk
			 * a deadlock here, so allocate a new vnode anyway.
			 */
			if (freevnodes > wantfreevnodes)
				vnlru_free(freevnodes - wantfreevnodes);
			goto alloc;
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (numvnodes > desiredvnodes) {
			mtx_unlock(&vnode_free_list_mtx);
			return (ENFILE);
		}
#endif
	}
alloc:
	numvnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems
	 * opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
	/*
	 * Initialize bufobj.
	 */
	bo = &vp->v_bufobj;
	bo->__bo_vnode = vp;
	mtx_init(BO_MTX(bo), "bufobj interlock", NULL, MTX_DEF);
	bo->bo_ops = &buf_ops_bio;
	bo->bo_private = vp;
	TAILQ_INIT(&bo->bo_clean.bv_hd);
	TAILQ_INIT(&bo->bo_dirty.bv_hd);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Finalize various vnode identity bits.
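	 *
	 * (Descriptive note added for clarity; not part of the original
	 * comment: the v_incr_usecount() call below gives the new vnode
	 * both a use and a hold reference, so the caller receives a vnode
	 * that must eventually be released with vrele() or vput().)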
987 */ 988 vp->v_type = VNON; 989 vp->v_tag = tag; 990 vp->v_op = vops; 991 v_incr_usecount(vp); 992 vp->v_data = 0; 993 #ifdef MAC 994 mac_vnode_init(vp); 995 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 996 mac_vnode_associate_singlelabel(mp, vp); 997 else if (mp == NULL && vops != &dead_vnodeops) 998 printf("NULL mp in getnewvnode()\n"); 999 #endif 1000 if (mp != NULL) { 1001 bo->bo_bsize = mp->mnt_stat.f_iosize; 1002 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1003 vp->v_vflag |= VV_NOKNOTE; 1004 } 1005 1006 CTR2(KTR_VFS, "getnewvnode: mp %p vp %p", mp, vp); 1007 *vpp = vp; 1008 return (0); 1009 } 1010 1011 /* 1012 * Delete from old mount point vnode list, if on one. 1013 */ 1014 static void 1015 delmntque(struct vnode *vp) 1016 { 1017 struct mount *mp; 1018 1019 mp = vp->v_mount; 1020 if (mp == NULL) 1021 return; 1022 MNT_ILOCK(mp); 1023 vp->v_mount = NULL; 1024 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1025 ("bad mount point vnode list size")); 1026 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1027 mp->mnt_nvnodelistsize--; 1028 MNT_REL(mp); 1029 MNT_IUNLOCK(mp); 1030 } 1031 1032 static void 1033 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1034 { 1035 1036 vp->v_data = NULL; 1037 vp->v_op = &dead_vnodeops; 1038 /* XXX non mp-safe fs may still call insmntque with vnode 1039 unlocked */ 1040 if (!VOP_ISLOCKED(vp)) 1041 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1042 vgone(vp); 1043 vput(vp); 1044 } 1045 1046 /* 1047 * Insert into list of vnodes for the new mount point, if available. 1048 */ 1049 int 1050 insmntque1(struct vnode *vp, struct mount *mp, 1051 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1052 { 1053 int locked; 1054 1055 KASSERT(vp->v_mount == NULL, 1056 ("insmntque: vnode already on per mount vnode list")); 1057 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1058 #ifdef DEBUG_VFS_LOCKS 1059 if (!VFS_NEEDSGIANT(mp)) 1060 ASSERT_VOP_ELOCKED(vp, 1061 "insmntque: mp-safe fs and non-locked vp"); 1062 #endif 1063 MNT_ILOCK(mp); 1064 if ((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && 1065 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1066 mp->mnt_nvnodelistsize == 0)) { 1067 locked = VOP_ISLOCKED(vp); 1068 if (!locked || (locked == LK_EXCLUSIVE && 1069 (vp->v_vflag & VV_FORCEINSMQ) == 0)) { 1070 MNT_IUNLOCK(mp); 1071 if (dtr != NULL) 1072 dtr(vp, dtr_arg); 1073 return (EBUSY); 1074 } 1075 } 1076 vp->v_mount = mp; 1077 MNT_REF(mp); 1078 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1079 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1080 ("neg mount point vnode list size")); 1081 mp->mnt_nvnodelistsize++; 1082 MNT_IUNLOCK(mp); 1083 return (0); 1084 } 1085 1086 int 1087 insmntque(struct vnode *vp, struct mount *mp) 1088 { 1089 1090 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1091 } 1092 1093 /* 1094 * Flush out and invalidate all buffers associated with a bufobj 1095 * Called with the underlying object locked. 
1096 */ 1097 int 1098 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1099 { 1100 int error; 1101 1102 BO_LOCK(bo); 1103 if (flags & V_SAVE) { 1104 error = bufobj_wwait(bo, slpflag, slptimeo); 1105 if (error) { 1106 BO_UNLOCK(bo); 1107 return (error); 1108 } 1109 if (bo->bo_dirty.bv_cnt > 0) { 1110 BO_UNLOCK(bo); 1111 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1112 return (error); 1113 /* 1114 * XXX We could save a lock/unlock if this was only 1115 * enabled under INVARIANTS 1116 */ 1117 BO_LOCK(bo); 1118 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1119 panic("vinvalbuf: dirty bufs"); 1120 } 1121 } 1122 /* 1123 * If you alter this loop please notice that interlock is dropped and 1124 * reacquired in flushbuflist. Special care is needed to ensure that 1125 * no race conditions occur from this. 1126 */ 1127 do { 1128 error = flushbuflist(&bo->bo_clean, 1129 flags, bo, slpflag, slptimeo); 1130 if (error == 0) 1131 error = flushbuflist(&bo->bo_dirty, 1132 flags, bo, slpflag, slptimeo); 1133 if (error != 0 && error != EAGAIN) { 1134 BO_UNLOCK(bo); 1135 return (error); 1136 } 1137 } while (error != 0); 1138 1139 /* 1140 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1141 * have write I/O in-progress but if there is a VM object then the 1142 * VM object can also have read-I/O in-progress. 1143 */ 1144 do { 1145 bufobj_wwait(bo, 0, 0); 1146 BO_UNLOCK(bo); 1147 if (bo->bo_object != NULL) { 1148 VM_OBJECT_LOCK(bo->bo_object); 1149 vm_object_pip_wait(bo->bo_object, "bovlbx"); 1150 VM_OBJECT_UNLOCK(bo->bo_object); 1151 } 1152 BO_LOCK(bo); 1153 } while (bo->bo_numoutput > 0); 1154 BO_UNLOCK(bo); 1155 1156 /* 1157 * Destroy the copy in the VM cache, too. 1158 */ 1159 if (bo->bo_object != NULL) { 1160 VM_OBJECT_LOCK(bo->bo_object); 1161 vm_object_page_remove(bo->bo_object, 0, 0, 1162 (flags & V_SAVE) ? TRUE : FALSE); 1163 VM_OBJECT_UNLOCK(bo->bo_object); 1164 } 1165 1166 #ifdef INVARIANTS 1167 BO_LOCK(bo); 1168 if ((flags & (V_ALT | V_NORMAL)) == 0 && 1169 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) 1170 panic("vinvalbuf: flush failed"); 1171 BO_UNLOCK(bo); 1172 #endif 1173 return (0); 1174 } 1175 1176 /* 1177 * Flush out and invalidate all buffers associated with a vnode. 1178 * Called with the underlying object locked. 1179 */ 1180 int 1181 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1182 { 1183 1184 CTR2(KTR_VFS, "vinvalbuf vp %p flags %d", vp, flags); 1185 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1186 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1187 } 1188 1189 /* 1190 * Flush out buffers on the specified list. 1191 * 1192 */ 1193 static int 1194 flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1195 int slptimeo) 1196 { 1197 struct buf *bp, *nbp; 1198 int retval, error; 1199 daddr_t lblkno; 1200 b_xflags_t xflags; 1201 1202 ASSERT_BO_LOCKED(bo); 1203 1204 retval = 0; 1205 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1206 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || 1207 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { 1208 continue; 1209 } 1210 lblkno = 0; 1211 xflags = 0; 1212 if (nbp != NULL) { 1213 lblkno = nbp->b_lblkno; 1214 xflags = nbp->b_xflags & 1215 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN); 1216 } 1217 retval = EAGAIN; 1218 error = BUF_TIMELOCK(bp, 1219 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo), 1220 "flushbuf", slpflag, slptimeo); 1221 if (error) { 1222 BO_LOCK(bo); 1223 return (error != ENOLCK ? 
error : EAGAIN); 1224 } 1225 KASSERT(bp->b_bufobj == bo, 1226 ("bp %p wrong b_bufobj %p should be %p", 1227 bp, bp->b_bufobj, bo)); 1228 if (bp->b_bufobj != bo) { /* XXX: necessary ? */ 1229 BUF_UNLOCK(bp); 1230 BO_LOCK(bo); 1231 return (EAGAIN); 1232 } 1233 /* 1234 * XXX Since there are no node locks for NFS, I 1235 * believe there is a slight chance that a delayed 1236 * write will occur while sleeping just above, so 1237 * check for it. 1238 */ 1239 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1240 (flags & V_SAVE)) { 1241 bremfree(bp); 1242 bp->b_flags |= B_ASYNC; 1243 bwrite(bp); 1244 BO_LOCK(bo); 1245 return (EAGAIN); /* XXX: why not loop ? */ 1246 } 1247 bremfree(bp); 1248 bp->b_flags |= (B_INVAL | B_RELBUF); 1249 bp->b_flags &= ~B_ASYNC; 1250 brelse(bp); 1251 BO_LOCK(bo); 1252 if (nbp != NULL && 1253 (nbp->b_bufobj != bo || 1254 nbp->b_lblkno != lblkno || 1255 (nbp->b_xflags & 1256 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags)) 1257 break; /* nbp invalid */ 1258 } 1259 return (retval); 1260 } 1261 1262 /* 1263 * Truncate a file's buffer and pages to a specified length. This 1264 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1265 * sync activity. 1266 */ 1267 int 1268 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td, 1269 off_t length, int blksize) 1270 { 1271 struct buf *bp, *nbp; 1272 int anyfreed; 1273 int trunclbn; 1274 struct bufobj *bo; 1275 1276 CTR2(KTR_VFS, "vtruncbuf vp %p length %jd", vp, length); 1277 /* 1278 * Round up to the *next* lbn. 1279 */ 1280 trunclbn = (length + blksize - 1) / blksize; 1281 1282 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1283 restart: 1284 bo = &vp->v_bufobj; 1285 BO_LOCK(bo); 1286 anyfreed = 1; 1287 for (;anyfreed;) { 1288 anyfreed = 0; 1289 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1290 if (bp->b_lblkno < trunclbn) 1291 continue; 1292 if (BUF_LOCK(bp, 1293 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1294 BO_MTX(bo)) == ENOLCK) 1295 goto restart; 1296 1297 bremfree(bp); 1298 bp->b_flags |= (B_INVAL | B_RELBUF); 1299 bp->b_flags &= ~B_ASYNC; 1300 brelse(bp); 1301 anyfreed = 1; 1302 1303 if (nbp != NULL && 1304 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1305 (nbp->b_vp != vp) || 1306 (nbp->b_flags & B_DELWRI))) { 1307 goto restart; 1308 } 1309 BO_LOCK(bo); 1310 } 1311 1312 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1313 if (bp->b_lblkno < trunclbn) 1314 continue; 1315 if (BUF_LOCK(bp, 1316 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1317 BO_MTX(bo)) == ENOLCK) 1318 goto restart; 1319 bremfree(bp); 1320 bp->b_flags |= (B_INVAL | B_RELBUF); 1321 bp->b_flags &= ~B_ASYNC; 1322 brelse(bp); 1323 anyfreed = 1; 1324 if (nbp != NULL && 1325 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1326 (nbp->b_vp != vp) || 1327 (nbp->b_flags & B_DELWRI) == 0)) { 1328 goto restart; 1329 } 1330 BO_LOCK(bo); 1331 } 1332 } 1333 1334 if (length > 0) { 1335 restartsync: 1336 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1337 if (bp->b_lblkno > 0) 1338 continue; 1339 /* 1340 * Since we hold the vnode lock this should only 1341 * fail if we're racing with the buf daemon. 
1342 */ 1343 if (BUF_LOCK(bp, 1344 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1345 BO_MTX(bo)) == ENOLCK) { 1346 goto restart; 1347 } 1348 VNASSERT((bp->b_flags & B_DELWRI), vp, 1349 ("buf(%p) on dirty queue without DELWRI", bp)); 1350 1351 bremfree(bp); 1352 bawrite(bp); 1353 BO_LOCK(bo); 1354 goto restartsync; 1355 } 1356 } 1357 1358 bufobj_wwait(bo, 0, 0); 1359 BO_UNLOCK(bo); 1360 vnode_pager_setsize(vp, length); 1361 1362 return (0); 1363 } 1364 1365 /* 1366 * buf_splay() - splay tree core for the clean/dirty list of buffers in 1367 * a vnode. 1368 * 1369 * NOTE: We have to deal with the special case of a background bitmap 1370 * buffer, a situation where two buffers will have the same logical 1371 * block offset. We want (1) only the foreground buffer to be accessed 1372 * in a lookup and (2) must differentiate between the foreground and 1373 * background buffer in the splay tree algorithm because the splay 1374 * tree cannot normally handle multiple entities with the same 'index'. 1375 * We accomplish this by adding differentiating flags to the splay tree's 1376 * numerical domain. 1377 */ 1378 static 1379 struct buf * 1380 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root) 1381 { 1382 struct buf dummy; 1383 struct buf *lefttreemax, *righttreemin, *y; 1384 1385 if (root == NULL) 1386 return (NULL); 1387 lefttreemax = righttreemin = &dummy; 1388 for (;;) { 1389 if (lblkno < root->b_lblkno || 1390 (lblkno == root->b_lblkno && 1391 (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) { 1392 if ((y = root->b_left) == NULL) 1393 break; 1394 if (lblkno < y->b_lblkno) { 1395 /* Rotate right. */ 1396 root->b_left = y->b_right; 1397 y->b_right = root; 1398 root = y; 1399 if ((y = root->b_left) == NULL) 1400 break; 1401 } 1402 /* Link into the new root's right tree. */ 1403 righttreemin->b_left = root; 1404 righttreemin = root; 1405 } else if (lblkno > root->b_lblkno || 1406 (lblkno == root->b_lblkno && 1407 (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) { 1408 if ((y = root->b_right) == NULL) 1409 break; 1410 if (lblkno > y->b_lblkno) { 1411 /* Rotate left. */ 1412 root->b_right = y->b_left; 1413 y->b_left = root; 1414 root = y; 1415 if ((y = root->b_right) == NULL) 1416 break; 1417 } 1418 /* Link into the new root's left tree. */ 1419 lefttreemax->b_right = root; 1420 lefttreemax = root; 1421 } else { 1422 break; 1423 } 1424 root = y; 1425 } 1426 /* Assemble the new root. 
*/ 1427 lefttreemax->b_right = root->b_left; 1428 righttreemin->b_left = root->b_right; 1429 root->b_left = dummy.b_right; 1430 root->b_right = dummy.b_left; 1431 return (root); 1432 } 1433 1434 static void 1435 buf_vlist_remove(struct buf *bp) 1436 { 1437 struct buf *root; 1438 struct bufv *bv; 1439 1440 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1441 ASSERT_BO_LOCKED(bp->b_bufobj); 1442 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1443 (BX_VNDIRTY|BX_VNCLEAN), 1444 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1445 if (bp->b_xflags & BX_VNDIRTY) 1446 bv = &bp->b_bufobj->bo_dirty; 1447 else 1448 bv = &bp->b_bufobj->bo_clean; 1449 if (bp != bv->bv_root) { 1450 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root); 1451 KASSERT(root == bp, ("splay lookup failed in remove")); 1452 } 1453 if (bp->b_left == NULL) { 1454 root = bp->b_right; 1455 } else { 1456 root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left); 1457 root->b_right = bp->b_right; 1458 } 1459 bv->bv_root = root; 1460 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1461 bv->bv_cnt--; 1462 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1463 } 1464 1465 /* 1466 * Add the buffer to the sorted clean or dirty block list using a 1467 * splay tree algorithm. 1468 * 1469 * NOTE: xflags is passed as a constant, optimizing this inline function! 1470 */ 1471 static void 1472 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1473 { 1474 struct buf *root; 1475 struct bufv *bv; 1476 1477 ASSERT_BO_LOCKED(bo); 1478 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1479 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1480 bp->b_xflags |= xflags; 1481 if (xflags & BX_VNDIRTY) 1482 bv = &bo->bo_dirty; 1483 else 1484 bv = &bo->bo_clean; 1485 1486 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root); 1487 if (root == NULL) { 1488 bp->b_left = NULL; 1489 bp->b_right = NULL; 1490 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1491 } else if (bp->b_lblkno < root->b_lblkno || 1492 (bp->b_lblkno == root->b_lblkno && 1493 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) { 1494 bp->b_left = root->b_left; 1495 bp->b_right = root; 1496 root->b_left = NULL; 1497 TAILQ_INSERT_BEFORE(root, bp, b_bobufs); 1498 } else { 1499 bp->b_right = root->b_right; 1500 bp->b_left = root; 1501 root->b_right = NULL; 1502 TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs); 1503 } 1504 bv->bv_cnt++; 1505 bv->bv_root = bp; 1506 } 1507 1508 /* 1509 * Lookup a buffer using the splay tree. Note that we specifically avoid 1510 * shadow buffers used in background bitmap writes. 1511 * 1512 * This code isn't quite efficient as it could be because we are maintaining 1513 * two sorted lists and do not know which list the block resides in. 1514 * 1515 * During a "make buildworld" the desired buffer is found at one of 1516 * the roots more than 60% of the time. Thus, checking both roots 1517 * before performing either splay eliminates unnecessary splays on the 1518 * first tree splayed. 
1519 */ 1520 struct buf * 1521 gbincore(struct bufobj *bo, daddr_t lblkno) 1522 { 1523 struct buf *bp; 1524 1525 ASSERT_BO_LOCKED(bo); 1526 if ((bp = bo->bo_clean.bv_root) != NULL && 1527 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER)) 1528 return (bp); 1529 if ((bp = bo->bo_dirty.bv_root) != NULL && 1530 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER)) 1531 return (bp); 1532 if ((bp = bo->bo_clean.bv_root) != NULL) { 1533 bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp); 1534 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER)) 1535 return (bp); 1536 } 1537 if ((bp = bo->bo_dirty.bv_root) != NULL) { 1538 bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp); 1539 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER)) 1540 return (bp); 1541 } 1542 return (NULL); 1543 } 1544 1545 /* 1546 * Associate a buffer with a vnode. 1547 */ 1548 void 1549 bgetvp(struct vnode *vp, struct buf *bp) 1550 { 1551 struct bufobj *bo; 1552 1553 bo = &vp->v_bufobj; 1554 ASSERT_BO_LOCKED(bo); 1555 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 1556 1557 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 1558 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 1559 ("bgetvp: bp already attached! %p", bp)); 1560 1561 vhold(vp); 1562 if (VFS_NEEDSGIANT(vp->v_mount) || bo->bo_flag & BO_NEEDSGIANT) 1563 bp->b_flags |= B_NEEDSGIANT; 1564 bp->b_vp = vp; 1565 bp->b_bufobj = bo; 1566 /* 1567 * Insert onto list for new vnode. 1568 */ 1569 buf_vlist_add(bp, bo, BX_VNCLEAN); 1570 } 1571 1572 /* 1573 * Disassociate a buffer from a vnode. 1574 */ 1575 void 1576 brelvp(struct buf *bp) 1577 { 1578 struct bufobj *bo; 1579 struct vnode *vp; 1580 1581 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1582 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1583 1584 /* 1585 * Delete from old vnode list, if on one. 1586 */ 1587 vp = bp->b_vp; /* XXX */ 1588 bo = bp->b_bufobj; 1589 BO_LOCK(bo); 1590 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1591 buf_vlist_remove(bp); 1592 else 1593 panic("brelvp: Buffer %p not on queue.", bp); 1594 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 1595 bo->bo_flag &= ~BO_ONWORKLST; 1596 mtx_lock(&sync_mtx); 1597 LIST_REMOVE(bo, bo_synclist); 1598 syncer_worklist_len--; 1599 mtx_unlock(&sync_mtx); 1600 } 1601 bp->b_flags &= ~B_NEEDSGIANT; 1602 bp->b_vp = NULL; 1603 bp->b_bufobj = NULL; 1604 BO_UNLOCK(bo); 1605 vdrop(vp); 1606 } 1607 1608 /* 1609 * Add an item to the syncer work queue. 1610 */ 1611 static void 1612 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 1613 { 1614 int queue, slot; 1615 1616 ASSERT_BO_LOCKED(bo); 1617 1618 mtx_lock(&sync_mtx); 1619 if (bo->bo_flag & BO_ONWORKLST) 1620 LIST_REMOVE(bo, bo_synclist); 1621 else { 1622 bo->bo_flag |= BO_ONWORKLST; 1623 syncer_worklist_len++; 1624 } 1625 1626 if (delay > syncer_maxdelay - 2) 1627 delay = syncer_maxdelay - 2; 1628 slot = (syncer_delayno + delay) & syncer_mask; 1629 1630 queue = VFS_NEEDSGIANT(bo->__bo_vnode->v_mount) ? 
WI_GIANTQ : 1631 WI_MPSAFEQ; 1632 LIST_INSERT_HEAD(&syncer_workitem_pending[queue][slot], bo, 1633 bo_synclist); 1634 mtx_unlock(&sync_mtx); 1635 } 1636 1637 static int 1638 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 1639 { 1640 int error, len; 1641 1642 mtx_lock(&sync_mtx); 1643 len = syncer_worklist_len - sync_vnode_count; 1644 mtx_unlock(&sync_mtx); 1645 error = SYSCTL_OUT(req, &len, sizeof(len)); 1646 return (error); 1647 } 1648 1649 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 1650 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 1651 1652 static struct proc *updateproc; 1653 static void sched_sync(void); 1654 static struct kproc_desc up_kp = { 1655 "syncer", 1656 sched_sync, 1657 &updateproc 1658 }; 1659 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 1660 1661 static int 1662 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 1663 { 1664 struct vnode *vp; 1665 struct mount *mp; 1666 1667 *bo = LIST_FIRST(slp); 1668 if (*bo == NULL) 1669 return (0); 1670 vp = (*bo)->__bo_vnode; /* XXX */ 1671 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 1672 return (1); 1673 /* 1674 * We use vhold in case the vnode does not 1675 * successfully sync. vhold prevents the vnode from 1676 * going away when we unlock the sync_mtx so that 1677 * we can acquire the vnode interlock. 1678 */ 1679 vholdl(vp); 1680 mtx_unlock(&sync_mtx); 1681 VI_UNLOCK(vp); 1682 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1683 vdrop(vp); 1684 mtx_lock(&sync_mtx); 1685 return (*bo == LIST_FIRST(slp)); 1686 } 1687 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1688 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1689 VOP_UNLOCK(vp, 0); 1690 vn_finished_write(mp); 1691 BO_LOCK(*bo); 1692 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 1693 /* 1694 * Put us back on the worklist. The worklist 1695 * routine will remove us from our current 1696 * position and then add us back in at a later 1697 * position. 1698 */ 1699 vn_syncer_add_to_worklist(*bo, syncdelay); 1700 } 1701 BO_UNLOCK(*bo); 1702 vdrop(vp); 1703 mtx_lock(&sync_mtx); 1704 return (0); 1705 } 1706 1707 /* 1708 * System filesystem synchronizer daemon. 1709 */ 1710 static void 1711 sched_sync(void) 1712 { 1713 struct synclist *gnext, *next; 1714 struct synclist *gslp, *slp; 1715 struct bufobj *bo; 1716 long starttime; 1717 struct thread *td = curthread; 1718 int last_work_seen; 1719 int net_worklist_len; 1720 int syncer_final_iter; 1721 int first_printf; 1722 int error; 1723 1724 last_work_seen = 0; 1725 syncer_final_iter = 0; 1726 first_printf = 1; 1727 syncer_state = SYNCER_RUNNING; 1728 starttime = time_uptime; 1729 td->td_pflags |= TDP_NORUNNINGBUF; 1730 1731 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 1732 SHUTDOWN_PRI_LAST); 1733 1734 mtx_lock(&sync_mtx); 1735 for (;;) { 1736 if (syncer_state == SYNCER_FINAL_DELAY && 1737 syncer_final_iter == 0) { 1738 mtx_unlock(&sync_mtx); 1739 kproc_suspend_check(td->td_proc); 1740 mtx_lock(&sync_mtx); 1741 } 1742 net_worklist_len = syncer_worklist_len - sync_vnode_count; 1743 if (syncer_state != SYNCER_RUNNING && 1744 starttime != time_uptime) { 1745 if (first_printf) { 1746 printf("\nSyncing disks, vnodes remaining..."); 1747 first_printf = 0; 1748 } 1749 printf("%d ", net_worklist_len); 1750 } 1751 starttime = time_uptime; 1752 1753 /* 1754 * Push files whose dirty time has expired. Be careful 1755 * of interrupt race on slp queue. 1756 * 1757 * Skip over empty worklist slots when shutting down. 
1758 */ 1759 do { 1760 slp = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno]; 1761 gslp = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno]; 1762 syncer_delayno += 1; 1763 if (syncer_delayno == syncer_maxdelay) 1764 syncer_delayno = 0; 1765 next = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno]; 1766 gnext = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno]; 1767 /* 1768 * If the worklist has wrapped since the 1769 * it was emptied of all but syncer vnodes, 1770 * switch to the FINAL_DELAY state and run 1771 * for one more second. 1772 */ 1773 if (syncer_state == SYNCER_SHUTTING_DOWN && 1774 net_worklist_len == 0 && 1775 last_work_seen == syncer_delayno) { 1776 syncer_state = SYNCER_FINAL_DELAY; 1777 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 1778 } 1779 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 1780 LIST_EMPTY(gslp) && syncer_worklist_len > 0); 1781 1782 /* 1783 * Keep track of the last time there was anything 1784 * on the worklist other than syncer vnodes. 1785 * Return to the SHUTTING_DOWN state if any 1786 * new work appears. 1787 */ 1788 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 1789 last_work_seen = syncer_delayno; 1790 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 1791 syncer_state = SYNCER_SHUTTING_DOWN; 1792 while (!LIST_EMPTY(slp)) { 1793 error = sync_vnode(slp, &bo, td); 1794 if (error == 1) { 1795 LIST_REMOVE(bo, bo_synclist); 1796 LIST_INSERT_HEAD(next, bo, bo_synclist); 1797 continue; 1798 } 1799 } 1800 if (!LIST_EMPTY(gslp)) { 1801 mtx_unlock(&sync_mtx); 1802 mtx_lock(&Giant); 1803 mtx_lock(&sync_mtx); 1804 while (!LIST_EMPTY(gslp)) { 1805 error = sync_vnode(gslp, &bo, td); 1806 if (error == 1) { 1807 LIST_REMOVE(bo, bo_synclist); 1808 LIST_INSERT_HEAD(gnext, bo, 1809 bo_synclist); 1810 continue; 1811 } 1812 } 1813 mtx_unlock(&Giant); 1814 } 1815 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 1816 syncer_final_iter--; 1817 /* 1818 * The variable rushjob allows the kernel to speed up the 1819 * processing of the filesystem syncer process. A rushjob 1820 * value of N tells the filesystem syncer to process the next 1821 * N seconds worth of work on its queue ASAP. Currently rushjob 1822 * is used by the soft update code to speed up the filesystem 1823 * syncer process when the incore state is getting so far 1824 * ahead of the disk that the kernel memory pool is being 1825 * threatened with exhaustion. 1826 */ 1827 if (rushjob > 0) { 1828 rushjob -= 1; 1829 continue; 1830 } 1831 /* 1832 * Just sleep for a short period of time between 1833 * iterations when shutting down to allow some I/O 1834 * to happen. 1835 * 1836 * If it has taken us less than a second to process the 1837 * current work, then wait. Otherwise start right over 1838 * again. We can still lose time if any single round 1839 * takes more than two seconds, but it does not really 1840 * matter as we are just trying to generally pace the 1841 * filesystem activity. 1842 */ 1843 if (syncer_state != SYNCER_RUNNING) 1844 cv_timedwait(&sync_wakeup, &sync_mtx, 1845 hz / SYNCER_SHUTDOWN_SPEEDUP); 1846 else if (time_uptime == starttime) 1847 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 1848 } 1849 } 1850 1851 /* 1852 * Request the syncer daemon to speed up its work. 1853 * We never push it to speed up more than half of its 1854 * normal turn time, otherwise it could take over the cpu. 
1855 */ 1856 int 1857 speedup_syncer(void) 1858 { 1859 int ret = 0; 1860 1861 mtx_lock(&sync_mtx); 1862 if (rushjob < syncdelay / 2) { 1863 rushjob += 1; 1864 stat_rush_requests += 1; 1865 ret = 1; 1866 } 1867 mtx_unlock(&sync_mtx); 1868 cv_broadcast(&sync_wakeup); 1869 return (ret); 1870 } 1871 1872 /* 1873 * Tell the syncer to speed up its work and run though its work 1874 * list several times, then tell it to shut down. 1875 */ 1876 static void 1877 syncer_shutdown(void *arg, int howto) 1878 { 1879 1880 if (howto & RB_NOSYNC) 1881 return; 1882 mtx_lock(&sync_mtx); 1883 syncer_state = SYNCER_SHUTTING_DOWN; 1884 rushjob = 0; 1885 mtx_unlock(&sync_mtx); 1886 cv_broadcast(&sync_wakeup); 1887 kproc_shutdown(arg, howto); 1888 } 1889 1890 /* 1891 * Reassign a buffer from one vnode to another. 1892 * Used to assign file specific control information 1893 * (indirect blocks) to the vnode to which they belong. 1894 */ 1895 void 1896 reassignbuf(struct buf *bp) 1897 { 1898 struct vnode *vp; 1899 struct bufobj *bo; 1900 int delay; 1901 #ifdef INVARIANTS 1902 struct bufv *bv; 1903 #endif 1904 1905 vp = bp->b_vp; 1906 bo = bp->b_bufobj; 1907 ++reassignbufcalls; 1908 1909 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 1910 bp, bp->b_vp, bp->b_flags); 1911 /* 1912 * B_PAGING flagged buffers cannot be reassigned because their vp 1913 * is not fully linked in. 1914 */ 1915 if (bp->b_flags & B_PAGING) 1916 panic("cannot reassign paging buffer"); 1917 1918 /* 1919 * Delete from old vnode list, if on one. 1920 */ 1921 BO_LOCK(bo); 1922 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1923 buf_vlist_remove(bp); 1924 else 1925 panic("reassignbuf: Buffer %p not on queue.", bp); 1926 /* 1927 * If dirty, put on list of dirty buffers; otherwise insert onto list 1928 * of clean buffers. 1929 */ 1930 if (bp->b_flags & B_DELWRI) { 1931 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 1932 switch (vp->v_type) { 1933 case VDIR: 1934 delay = dirdelay; 1935 break; 1936 case VCHR: 1937 delay = metadelay; 1938 break; 1939 default: 1940 delay = filedelay; 1941 } 1942 vn_syncer_add_to_worklist(bo, delay); 1943 } 1944 buf_vlist_add(bp, bo, BX_VNDIRTY); 1945 } else { 1946 buf_vlist_add(bp, bo, BX_VNCLEAN); 1947 1948 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 1949 mtx_lock(&sync_mtx); 1950 LIST_REMOVE(bo, bo_synclist); 1951 syncer_worklist_len--; 1952 mtx_unlock(&sync_mtx); 1953 bo->bo_flag &= ~BO_ONWORKLST; 1954 } 1955 } 1956 #ifdef INVARIANTS 1957 bv = &bo->bo_clean; 1958 bp = TAILQ_FIRST(&bv->bv_hd); 1959 KASSERT(bp == NULL || bp->b_bufobj == bo, 1960 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 1961 bp = TAILQ_LAST(&bv->bv_hd, buflists); 1962 KASSERT(bp == NULL || bp->b_bufobj == bo, 1963 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 1964 bv = &bo->bo_dirty; 1965 bp = TAILQ_FIRST(&bv->bv_hd); 1966 KASSERT(bp == NULL || bp->b_bufobj == bo, 1967 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 1968 bp = TAILQ_LAST(&bv->bv_hd, buflists); 1969 KASSERT(bp == NULL || bp->b_bufobj == bo, 1970 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 1971 #endif 1972 BO_UNLOCK(bo); 1973 } 1974 1975 /* 1976 * Increment the use and hold counts on the vnode, taking care to reference 1977 * the driver's usecount if this is a chardev. The vholdl() will remove 1978 * the vnode from the free list if it is presently free. Requires the 1979 * vnode interlock and returns with it held. 
1980 */ 1981 static void 1982 v_incr_usecount(struct vnode *vp) 1983 { 1984 1985 CTR3(KTR_VFS, "v_incr_usecount: vp %p holdcnt %d usecount %d\n", 1986 vp, vp->v_holdcnt, vp->v_usecount); 1987 vp->v_usecount++; 1988 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 1989 dev_lock(); 1990 vp->v_rdev->si_usecount++; 1991 dev_unlock(); 1992 } 1993 vholdl(vp); 1994 } 1995 1996 /* 1997 * Turn a holdcnt into a use+holdcnt such that only one call to 1998 * v_decr_usecount is needed. 1999 */ 2000 static void 2001 v_upgrade_usecount(struct vnode *vp) 2002 { 2003 2004 CTR3(KTR_VFS, "v_upgrade_usecount: vp %p holdcnt %d usecount %d\n", 2005 vp, vp->v_holdcnt, vp->v_usecount); 2006 vp->v_usecount++; 2007 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2008 dev_lock(); 2009 vp->v_rdev->si_usecount++; 2010 dev_unlock(); 2011 } 2012 } 2013 2014 /* 2015 * Decrement the vnode use and hold count along with the driver's usecount 2016 * if this is a chardev. The vdropl() below releases the vnode interlock 2017 * as it may free the vnode. 2018 */ 2019 static void 2020 v_decr_usecount(struct vnode *vp) 2021 { 2022 2023 CTR3(KTR_VFS, "v_decr_usecount: vp %p holdcnt %d usecount %d\n", 2024 vp, vp->v_holdcnt, vp->v_usecount); 2025 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2026 VNASSERT(vp->v_usecount > 0, vp, 2027 ("v_decr_usecount: negative usecount")); 2028 vp->v_usecount--; 2029 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2030 dev_lock(); 2031 vp->v_rdev->si_usecount--; 2032 dev_unlock(); 2033 } 2034 vdropl(vp); 2035 } 2036 2037 /* 2038 * Decrement only the use count and driver use count. This is intended to 2039 * be paired with a follow on vdropl() to release the remaining hold count. 2040 * In this way we may vgone() a vnode with a 0 usecount without risk of 2041 * having it end up on a free list because the hold count is kept above 0. 2042 */ 2043 static void 2044 v_decr_useonly(struct vnode *vp) 2045 { 2046 2047 CTR3(KTR_VFS, "v_decr_useonly: vp %p holdcnt %d usecount %d\n", 2048 vp, vp->v_holdcnt, vp->v_usecount); 2049 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2050 VNASSERT(vp->v_usecount > 0, vp, 2051 ("v_decr_useonly: negative usecount")); 2052 vp->v_usecount--; 2053 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2054 dev_lock(); 2055 vp->v_rdev->si_usecount--; 2056 dev_unlock(); 2057 } 2058 } 2059 2060 /* 2061 * Grab a particular vnode from the free list, increment its 2062 * reference count and lock it. VI_DOOMED is set if the vnode 2063 * is being destroyed. Only callers who specify LK_RETRY will 2064 * see doomed vnodes. If inactive processing was delayed in 2065 * vput try to do it here. 2066 */ 2067 int 2068 vget(struct vnode *vp, int flags, struct thread *td) 2069 { 2070 int error; 2071 2072 error = 0; 2073 VFS_ASSERT_GIANT(vp->v_mount); 2074 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2075 ("vget: invalid lock operation")); 2076 if ((flags & LK_INTERLOCK) == 0) 2077 VI_LOCK(vp); 2078 vholdl(vp); 2079 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2080 vdrop(vp); 2081 return (error); 2082 } 2083 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2084 panic("vget: vn_lock failed to return ENOENT\n"); 2085 VI_LOCK(vp); 2086 /* Upgrade our holdcnt to a usecount. */ 2087 v_upgrade_usecount(vp); 2088 /* 2089 * We don't guarantee that any particular close will 2090 * trigger inactive processing so just make a best effort 2091 * here at preventing a reference to a removed file. If 2092 * we don't succeed no harm is done. 
2093 */ 2094 if (vp->v_iflag & VI_OWEINACT) { 2095 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2096 (flags & LK_NOWAIT) == 0) 2097 vinactive(vp, td); 2098 vp->v_iflag &= ~VI_OWEINACT; 2099 } 2100 VI_UNLOCK(vp); 2101 return (0); 2102 } 2103 2104 /* 2105 * Increase the reference count of a vnode. 2106 */ 2107 void 2108 vref(struct vnode *vp) 2109 { 2110 2111 VI_LOCK(vp); 2112 v_incr_usecount(vp); 2113 VI_UNLOCK(vp); 2114 } 2115 2116 /* 2117 * Return reference count of a vnode. 2118 * 2119 * The results of this call are only guaranteed when some mechanism other 2120 * than the VI lock is used to stop other processes from gaining references 2121 * to the vnode. This may be the case if the caller holds the only reference. 2122 * This is also useful when stale data is acceptable as race conditions may 2123 * be accounted for by some other means. 2124 */ 2125 int 2126 vrefcnt(struct vnode *vp) 2127 { 2128 int usecnt; 2129 2130 VI_LOCK(vp); 2131 usecnt = vp->v_usecount; 2132 VI_UNLOCK(vp); 2133 2134 return (usecnt); 2135 } 2136 2137 2138 /* 2139 * Vnode put/release. 2140 * If count drops to zero, call inactive routine and return to freelist. 2141 */ 2142 void 2143 vrele(struct vnode *vp) 2144 { 2145 struct thread *td = curthread; /* XXX */ 2146 2147 KASSERT(vp != NULL, ("vrele: null vp")); 2148 VFS_ASSERT_GIANT(vp->v_mount); 2149 2150 VI_LOCK(vp); 2151 2152 /* Skip this v_writecount check if we're going to panic below. */ 2153 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2154 ("vrele: missed vn_close")); 2155 2156 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2157 vp->v_usecount == 1)) { 2158 v_decr_usecount(vp); 2159 return; 2160 } 2161 if (vp->v_usecount != 1) { 2162 #ifdef DIAGNOSTIC 2163 vprint("vrele: negative ref count", vp); 2164 #endif 2165 VI_UNLOCK(vp); 2166 panic("vrele: negative ref cnt"); 2167 } 2168 /* 2169 * We want to hold the vnode until the inactive finishes to 2170 * prevent vgone() races. We drop the use count here and the 2171 * hold count below when we're done. 2172 */ 2173 v_decr_useonly(vp); 2174 /* 2175 * We must call VOP_INACTIVE with the node locked. Mark 2176 * as VI_DOINGINACT to avoid recursion. 2177 */ 2178 vp->v_iflag |= VI_OWEINACT; 2179 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) { 2180 VI_LOCK(vp); 2181 if (vp->v_usecount > 0) 2182 vp->v_iflag &= ~VI_OWEINACT; 2183 if (vp->v_iflag & VI_OWEINACT) 2184 vinactive(vp, td); 2185 VOP_UNLOCK(vp, 0); 2186 } else { 2187 VI_LOCK(vp); 2188 if (vp->v_usecount > 0) 2189 vp->v_iflag &= ~VI_OWEINACT; 2190 } 2191 vdropl(vp); 2192 } 2193 2194 /* 2195 * Release an already locked vnode. This give the same effects as 2196 * unlock+vrele(), but takes less time and avoids releasing and 2197 * re-aquiring the lock (as vrele() acquires the lock internally.) 2198 */ 2199 void 2200 vput(struct vnode *vp) 2201 { 2202 struct thread *td = curthread; /* XXX */ 2203 int error; 2204 2205 KASSERT(vp != NULL, ("vput: null vp")); 2206 ASSERT_VOP_LOCKED(vp, "vput"); 2207 VFS_ASSERT_GIANT(vp->v_mount); 2208 VI_LOCK(vp); 2209 /* Skip this v_writecount check if we're going to panic below. 
*/ 2210 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2211 ("vput: missed vn_close")); 2212 error = 0; 2213 2214 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2215 vp->v_usecount == 1)) { 2216 VOP_UNLOCK(vp, 0); 2217 v_decr_usecount(vp); 2218 return; 2219 } 2220 2221 if (vp->v_usecount != 1) { 2222 #ifdef DIAGNOSTIC 2223 vprint("vput: negative ref count", vp); 2224 #endif 2225 panic("vput: negative ref cnt"); 2226 } 2227 /* 2228 * We want to hold the vnode until the inactive finishes to 2229 * prevent vgone() races. We drop the use count here and the 2230 * hold count below when we're done. 2231 */ 2232 v_decr_useonly(vp); 2233 vp->v_iflag |= VI_OWEINACT; 2234 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2235 error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT); 2236 VI_LOCK(vp); 2237 if (error) { 2238 if (vp->v_usecount > 0) 2239 vp->v_iflag &= ~VI_OWEINACT; 2240 goto done; 2241 } 2242 } 2243 if (vp->v_usecount > 0) 2244 vp->v_iflag &= ~VI_OWEINACT; 2245 if (vp->v_iflag & VI_OWEINACT) 2246 vinactive(vp, td); 2247 VOP_UNLOCK(vp, 0); 2248 done: 2249 vdropl(vp); 2250 } 2251 2252 /* 2253 * Somebody doesn't want the vnode recycled. 2254 */ 2255 void 2256 vhold(struct vnode *vp) 2257 { 2258 2259 VI_LOCK(vp); 2260 vholdl(vp); 2261 VI_UNLOCK(vp); 2262 } 2263 2264 void 2265 vholdl(struct vnode *vp) 2266 { 2267 2268 vp->v_holdcnt++; 2269 if (VSHOULDBUSY(vp)) 2270 vbusy(vp); 2271 } 2272 2273 /* 2274 * Note that there is one less who cares about this vnode. vdrop() is the 2275 * opposite of vhold(). 2276 */ 2277 void 2278 vdrop(struct vnode *vp) 2279 { 2280 2281 VI_LOCK(vp); 2282 vdropl(vp); 2283 } 2284 2285 /* 2286 * Drop the hold count of the vnode. If this is the last reference to 2287 * the vnode we will free it if it has been vgone'd otherwise it is 2288 * placed on the free list. 2289 */ 2290 void 2291 vdropl(struct vnode *vp) 2292 { 2293 2294 ASSERT_VI_LOCKED(vp, "vdropl"); 2295 if (vp->v_holdcnt <= 0) 2296 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2297 vp->v_holdcnt--; 2298 if (vp->v_holdcnt == 0) { 2299 if (vp->v_iflag & VI_DOOMED) { 2300 vdestroy(vp); 2301 return; 2302 } else 2303 vfree(vp); 2304 } 2305 VI_UNLOCK(vp); 2306 } 2307 2308 /* 2309 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2310 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2311 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2312 * failed lock upgrade. 2313 */ 2314 static void 2315 vinactive(struct vnode *vp, struct thread *td) 2316 { 2317 2318 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2319 ASSERT_VI_LOCKED(vp, "vinactive"); 2320 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2321 ("vinactive: recursed on VI_DOINGINACT")); 2322 vp->v_iflag |= VI_DOINGINACT; 2323 vp->v_iflag &= ~VI_OWEINACT; 2324 VI_UNLOCK(vp); 2325 VOP_INACTIVE(vp, td); 2326 VI_LOCK(vp); 2327 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2328 ("vinactive: lost VI_DOINGINACT")); 2329 vp->v_iflag &= ~VI_DOINGINACT; 2330 } 2331 2332 /* 2333 * Remove any vnodes in the vnode table belonging to mount point mp. 2334 * 2335 * If FORCECLOSE is not specified, there should not be any active ones, 2336 * return error if any are found (nb: this is a user error, not a 2337 * system error). If FORCECLOSE is specified, detach any active vnodes 2338 * that are found. 2339 * 2340 * If WRITECLOSE is set, only flush out regular file vnodes open for 2341 * writing. 2342 * 2343 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 
2344 * 2345 * `rootrefs' specifies the base reference count for the root vnode 2346 * of this filesystem. The root vnode is considered busy if its 2347 * v_usecount exceeds this value. On a successful return, vflush(, td) 2348 * will call vrele() on the root vnode exactly rootrefs times. 2349 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2350 * be zero. 2351 */ 2352 #ifdef DIAGNOSTIC 2353 static int busyprt = 0; /* print out busy vnodes */ 2354 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, ""); 2355 #endif 2356 2357 int 2358 vflush( struct mount *mp, int rootrefs, int flags, struct thread *td) 2359 { 2360 struct vnode *vp, *mvp, *rootvp = NULL; 2361 struct vattr vattr; 2362 int busy = 0, error; 2363 2364 CTR1(KTR_VFS, "vflush: mp %p", mp); 2365 if (rootrefs > 0) { 2366 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2367 ("vflush: bad args")); 2368 /* 2369 * Get the filesystem root vnode. We can vput() it 2370 * immediately, since with rootrefs > 0, it won't go away. 2371 */ 2372 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp, td)) != 0) 2373 return (error); 2374 vput(rootvp); 2375 2376 } 2377 MNT_ILOCK(mp); 2378 loop: 2379 MNT_VNODE_FOREACH(vp, mp, mvp) { 2380 2381 VI_LOCK(vp); 2382 vholdl(vp); 2383 MNT_IUNLOCK(mp); 2384 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2385 if (error) { 2386 vdrop(vp); 2387 MNT_ILOCK(mp); 2388 MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); 2389 goto loop; 2390 } 2391 /* 2392 * Skip over a vnodes marked VV_SYSTEM. 2393 */ 2394 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2395 VOP_UNLOCK(vp, 0); 2396 vdrop(vp); 2397 MNT_ILOCK(mp); 2398 continue; 2399 } 2400 /* 2401 * If WRITECLOSE is set, flush out unlinked but still open 2402 * files (even if open only for reading) and regular file 2403 * vnodes open for writing. 2404 */ 2405 if (flags & WRITECLOSE) { 2406 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2407 VI_LOCK(vp); 2408 2409 if ((vp->v_type == VNON || 2410 (error == 0 && vattr.va_nlink > 0)) && 2411 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2412 VOP_UNLOCK(vp, 0); 2413 vdropl(vp); 2414 MNT_ILOCK(mp); 2415 continue; 2416 } 2417 } else 2418 VI_LOCK(vp); 2419 /* 2420 * With v_usecount == 0, all we need to do is clear out the 2421 * vnode data structures and we are done. 2422 * 2423 * If FORCECLOSE is set, forcibly close the vnode. 2424 */ 2425 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2426 VNASSERT(vp->v_usecount == 0 || 2427 (vp->v_type != VCHR && vp->v_type != VBLK), vp, 2428 ("device VNODE %p is FORCECLOSED", vp)); 2429 vgonel(vp); 2430 } else { 2431 busy++; 2432 #ifdef DIAGNOSTIC 2433 if (busyprt) 2434 vprint("vflush: busy vnode", vp); 2435 #endif 2436 } 2437 VOP_UNLOCK(vp, 0); 2438 vdropl(vp); 2439 MNT_ILOCK(mp); 2440 } 2441 MNT_IUNLOCK(mp); 2442 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2443 /* 2444 * If just the root vnode is busy, and if its refcount 2445 * is equal to `rootrefs', then go ahead and kill it. 
2446 */ 2447 VI_LOCK(rootvp); 2448 KASSERT(busy > 0, ("vflush: not busy")); 2449 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2450 ("vflush: usecount %d < rootrefs %d", 2451 rootvp->v_usecount, rootrefs)); 2452 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2453 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2454 vgone(rootvp); 2455 VOP_UNLOCK(rootvp, 0); 2456 busy = 0; 2457 } else 2458 VI_UNLOCK(rootvp); 2459 } 2460 if (busy) 2461 return (EBUSY); 2462 for (; rootrefs > 0; rootrefs--) 2463 vrele(rootvp); 2464 return (0); 2465 } 2466 2467 /* 2468 * Recycle an unused vnode to the front of the free list. 2469 */ 2470 int 2471 vrecycle(struct vnode *vp, struct thread *td) 2472 { 2473 int recycled; 2474 2475 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2476 recycled = 0; 2477 VI_LOCK(vp); 2478 if (vp->v_usecount == 0) { 2479 recycled = 1; 2480 vgonel(vp); 2481 } 2482 VI_UNLOCK(vp); 2483 return (recycled); 2484 } 2485 2486 /* 2487 * Eliminate all activity associated with a vnode 2488 * in preparation for reuse. 2489 */ 2490 void 2491 vgone(struct vnode *vp) 2492 { 2493 VI_LOCK(vp); 2494 vgonel(vp); 2495 VI_UNLOCK(vp); 2496 } 2497 2498 /* 2499 * vgone, with the vp interlock held. 2500 */ 2501 void 2502 vgonel(struct vnode *vp) 2503 { 2504 struct thread *td; 2505 int oweinact; 2506 int active; 2507 struct mount *mp; 2508 2509 CTR1(KTR_VFS, "vgonel: vp %p", vp); 2510 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2511 ASSERT_VI_LOCKED(vp, "vgonel"); 2512 VNASSERT(vp->v_holdcnt, vp, 2513 ("vgonel: vp %p has no reference.", vp)); 2514 td = curthread; 2515 2516 /* 2517 * Don't vgonel if we're already doomed. 2518 */ 2519 if (vp->v_iflag & VI_DOOMED) 2520 return; 2521 vp->v_iflag |= VI_DOOMED; 2522 /* 2523 * Check to see if the vnode is in use. If so, we have to call 2524 * VOP_CLOSE() and VOP_INACTIVE(). 2525 */ 2526 active = vp->v_usecount; 2527 oweinact = (vp->v_iflag & VI_OWEINACT); 2528 VI_UNLOCK(vp); 2529 /* 2530 * Clean out any buffers associated with the vnode. 2531 * If the flush fails, just toss the buffers. 2532 */ 2533 mp = NULL; 2534 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2535 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2536 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) 2537 vinvalbuf(vp, 0, 0, 0); 2538 2539 /* 2540 * If purging an active vnode, it must be closed and 2541 * deactivated before being reclaimed. 2542 */ 2543 if (active) 2544 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2545 if (oweinact || active) { 2546 VI_LOCK(vp); 2547 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2548 vinactive(vp, td); 2549 VI_UNLOCK(vp); 2550 } 2551 /* 2552 * Reclaim the vnode. 2553 */ 2554 if (VOP_RECLAIM(vp, td)) 2555 panic("vgone: cannot reclaim"); 2556 if (mp != NULL) 2557 vn_finished_secondary_write(mp); 2558 VNASSERT(vp->v_object == NULL, vp, 2559 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2560 /* 2561 * Clear the advisory locks and wake up waiting threads. 2562 */ 2563 lf_purgelocks(vp, &(vp->v_lockf)); 2564 /* 2565 * Delete from old mount point vnode list. 2566 */ 2567 delmntque(vp); 2568 cache_purge(vp); 2569 /* 2570 * Done with purge, reset to the standard lock and invalidate 2571 * the vnode. 2572 */ 2573 VI_LOCK(vp); 2574 vp->v_vnlock = &vp->v_lock; 2575 vp->v_op = &dead_vnodeops; 2576 vp->v_tag = "none"; 2577 vp->v_type = VBAD; 2578 } 2579 2580 /* 2581 * Calculate the total number of references to a special device. 
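 */

/*
 * [Editor's illustration -- not part of the original source.]
 * Before the device reference counters below, a minimal sketch of how the
 * vflush() interface above is typically driven from a filesystem's unmount
 * path: try a normal flush first, and pass FORCECLOSE only when the caller
 * asked for a forced unmount, so busy vnodes are vgone'd instead of merely
 * counted.  example_unmount_flush() is a hypothetical helper.
 */
#if 0	/* illustrative sketch only */
static int
example_unmount_flush(struct mount *mp, int mntflags, struct thread *td)
{
	int flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* rootrefs == 0: no extra references on the root vnode to release. */
	return (vflush(mp, 0, flags, td));	/* EBUSY if vnodes stay busy */
}
#endif

/*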
2582 */ 2583 int 2584 vcount(struct vnode *vp) 2585 { 2586 int count; 2587 2588 dev_lock(); 2589 count = vp->v_rdev->si_usecount; 2590 dev_unlock(); 2591 return (count); 2592 } 2593 2594 /* 2595 * Same as above, but using the struct cdev *as argument 2596 */ 2597 int 2598 count_dev(struct cdev *dev) 2599 { 2600 int count; 2601 2602 dev_lock(); 2603 count = dev->si_usecount; 2604 dev_unlock(); 2605 return(count); 2606 } 2607 2608 /* 2609 * Print out a description of a vnode. 2610 */ 2611 static char *typename[] = 2612 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 2613 "VMARKER"}; 2614 2615 void 2616 vn_printf(struct vnode *vp, const char *fmt, ...) 2617 { 2618 va_list ap; 2619 char buf[256], buf2[16]; 2620 u_long flags; 2621 2622 va_start(ap, fmt); 2623 vprintf(fmt, ap); 2624 va_end(ap); 2625 printf("%p: ", (void *)vp); 2626 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 2627 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 2628 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 2629 buf[0] = '\0'; 2630 buf[1] = '\0'; 2631 if (vp->v_vflag & VV_ROOT) 2632 strlcat(buf, "|VV_ROOT", sizeof(buf)); 2633 if (vp->v_vflag & VV_ISTTY) 2634 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 2635 if (vp->v_vflag & VV_NOSYNC) 2636 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 2637 if (vp->v_vflag & VV_CACHEDLABEL) 2638 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 2639 if (vp->v_vflag & VV_TEXT) 2640 strlcat(buf, "|VV_TEXT", sizeof(buf)); 2641 if (vp->v_vflag & VV_COPYONWRITE) 2642 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 2643 if (vp->v_vflag & VV_SYSTEM) 2644 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 2645 if (vp->v_vflag & VV_PROCDEP) 2646 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 2647 if (vp->v_vflag & VV_NOKNOTE) 2648 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 2649 if (vp->v_vflag & VV_DELETED) 2650 strlcat(buf, "|VV_DELETED", sizeof(buf)); 2651 if (vp->v_vflag & VV_MD) 2652 strlcat(buf, "|VV_MD", sizeof(buf)); 2653 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | 2654 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 2655 VV_NOKNOTE | VV_DELETED | VV_MD); 2656 if (flags != 0) { 2657 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 2658 strlcat(buf, buf2, sizeof(buf)); 2659 } 2660 if (vp->v_iflag & VI_MOUNT) 2661 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 2662 if (vp->v_iflag & VI_AGE) 2663 strlcat(buf, "|VI_AGE", sizeof(buf)); 2664 if (vp->v_iflag & VI_DOOMED) 2665 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 2666 if (vp->v_iflag & VI_FREE) 2667 strlcat(buf, "|VI_FREE", sizeof(buf)); 2668 if (vp->v_iflag & VI_OBJDIRTY) 2669 strlcat(buf, "|VI_OBJDIRTY", sizeof(buf)); 2670 if (vp->v_iflag & VI_DOINGINACT) 2671 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 2672 if (vp->v_iflag & VI_OWEINACT) 2673 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 2674 flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE | 2675 VI_OBJDIRTY | VI_DOINGINACT | VI_OWEINACT); 2676 if (flags != 0) { 2677 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 2678 strlcat(buf, buf2, sizeof(buf)); 2679 } 2680 printf(" flags (%s)\n", buf + 1); 2681 if (mtx_owned(VI_MTX(vp))) 2682 printf(" VI_LOCKed"); 2683 if (vp->v_object != NULL) 2684 printf(" v_object %p ref %d pages %d\n", 2685 vp->v_object, vp->v_object->ref_count, 2686 vp->v_object->resident_page_count); 2687 printf(" "); 2688 lockmgr_printinfo(vp->v_vnlock); 2689 printf("\n"); 2690 if (vp->v_data != NULL) 2691 VOP_PRINT(vp); 2692 } 2693 2694 #ifdef DDB 2695 /* 2696 * List all of the 
locked vnodes in the system. 2697 * Called when debugging the kernel. 2698 */ 2699 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2700 { 2701 struct mount *mp, *nmp; 2702 struct vnode *vp; 2703 2704 /* 2705 * Note: because this is DDB, we can't obey the locking semantics 2706 * for these structures, which means we could catch an inconsistent 2707 * state and dereference a nasty pointer. Not much to be done 2708 * about that. 2709 */ 2710 db_printf("Locked vnodes\n"); 2711 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2712 nmp = TAILQ_NEXT(mp, mnt_list); 2713 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2714 if (vp->v_type != VMARKER && 2715 VOP_ISLOCKED(vp)) 2716 vprint("", vp); 2717 } 2718 nmp = TAILQ_NEXT(mp, mnt_list); 2719 } 2720 } 2721 2722 /* 2723 * Show details about the given vnode. 2724 */ 2725 DB_SHOW_COMMAND(vnode, db_show_vnode) 2726 { 2727 struct vnode *vp; 2728 2729 if (!have_addr) 2730 return; 2731 vp = (struct vnode *)addr; 2732 vn_printf(vp, "vnode "); 2733 } 2734 2735 /* 2736 * Show details about the given mount point. 2737 */ 2738 DB_SHOW_COMMAND(mount, db_show_mount) 2739 { 2740 struct mount *mp; 2741 struct statfs *sp; 2742 struct vnode *vp; 2743 char buf[512]; 2744 u_int flags; 2745 2746 if (!have_addr) { 2747 /* No address given, print short info about all mount points. */ 2748 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2749 db_printf("%p %s on %s (%s)\n", mp, 2750 mp->mnt_stat.f_mntfromname, 2751 mp->mnt_stat.f_mntonname, 2752 mp->mnt_stat.f_fstypename); 2753 if (db_pager_quit) 2754 break; 2755 } 2756 db_printf("\nMore info: show mount <addr>\n"); 2757 return; 2758 } 2759 2760 mp = (struct mount *)addr; 2761 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 2762 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 2763 2764 buf[0] = '\0'; 2765 flags = mp->mnt_flag; 2766 #define MNT_FLAG(flag) do { \ 2767 if (flags & (flag)) { \ 2768 if (buf[0] != '\0') \ 2769 strlcat(buf, ", ", sizeof(buf)); \ 2770 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 2771 flags &= ~(flag); \ 2772 } \ 2773 } while (0) 2774 MNT_FLAG(MNT_RDONLY); 2775 MNT_FLAG(MNT_SYNCHRONOUS); 2776 MNT_FLAG(MNT_NOEXEC); 2777 MNT_FLAG(MNT_NOSUID); 2778 MNT_FLAG(MNT_UNION); 2779 MNT_FLAG(MNT_ASYNC); 2780 MNT_FLAG(MNT_SUIDDIR); 2781 MNT_FLAG(MNT_SOFTDEP); 2782 MNT_FLAG(MNT_NOSYMFOLLOW); 2783 MNT_FLAG(MNT_GJOURNAL); 2784 MNT_FLAG(MNT_MULTILABEL); 2785 MNT_FLAG(MNT_ACLS); 2786 MNT_FLAG(MNT_NOATIME); 2787 MNT_FLAG(MNT_NOCLUSTERR); 2788 MNT_FLAG(MNT_NOCLUSTERW); 2789 MNT_FLAG(MNT_EXRDONLY); 2790 MNT_FLAG(MNT_EXPORTED); 2791 MNT_FLAG(MNT_DEFEXPORTED); 2792 MNT_FLAG(MNT_EXPORTANON); 2793 MNT_FLAG(MNT_EXKERB); 2794 MNT_FLAG(MNT_EXPUBLIC); 2795 MNT_FLAG(MNT_LOCAL); 2796 MNT_FLAG(MNT_QUOTA); 2797 MNT_FLAG(MNT_ROOTFS); 2798 MNT_FLAG(MNT_USER); 2799 MNT_FLAG(MNT_IGNORE); 2800 MNT_FLAG(MNT_UPDATE); 2801 MNT_FLAG(MNT_DELEXPORT); 2802 MNT_FLAG(MNT_RELOAD); 2803 MNT_FLAG(MNT_FORCE); 2804 MNT_FLAG(MNT_SNAPSHOT); 2805 MNT_FLAG(MNT_BYFSID); 2806 #undef MNT_FLAG 2807 if (flags != 0) { 2808 if (buf[0] != '\0') 2809 strlcat(buf, ", ", sizeof(buf)); 2810 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 2811 "0x%08x", flags); 2812 } 2813 db_printf(" mnt_flag = %s\n", buf); 2814 2815 buf[0] = '\0'; 2816 flags = mp->mnt_kern_flag; 2817 #define MNT_KERN_FLAG(flag) do { \ 2818 if (flags & (flag)) { \ 2819 if (buf[0] != '\0') \ 2820 strlcat(buf, ", ", sizeof(buf)); \ 2821 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 2822 flags &= ~(flag); \ 2823 } \ 2824 } while (0) 2825 MNT_KERN_FLAG(MNTK_UNMOUNTF); 2826 
MNT_KERN_FLAG(MNTK_ASYNC); 2827 MNT_KERN_FLAG(MNTK_SOFTDEP); 2828 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 2829 MNT_KERN_FLAG(MNTK_UNMOUNT); 2830 MNT_KERN_FLAG(MNTK_MWAIT); 2831 MNT_KERN_FLAG(MNTK_SUSPEND); 2832 MNT_KERN_FLAG(MNTK_SUSPEND2); 2833 MNT_KERN_FLAG(MNTK_SUSPENDED); 2834 MNT_KERN_FLAG(MNTK_MPSAFE); 2835 MNT_KERN_FLAG(MNTK_NOKNOTE); 2836 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 2837 #undef MNT_KERN_FLAG 2838 if (flags != 0) { 2839 if (buf[0] != '\0') 2840 strlcat(buf, ", ", sizeof(buf)); 2841 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 2842 "0x%08x", flags); 2843 } 2844 db_printf(" mnt_kern_flag = %s\n", buf); 2845 2846 sp = &mp->mnt_stat; 2847 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 2848 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 2849 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 2850 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 2851 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 2852 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 2853 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 2854 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 2855 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 2856 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 2857 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 2858 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 2859 2860 db_printf(" mnt_cred = { uid=%u ruid=%u", 2861 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 2862 if (mp->mnt_cred->cr_prison != NULL) 2863 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 2864 db_printf(" }\n"); 2865 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 2866 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 2867 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 2868 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 2869 db_printf(" mnt_noasync = %u\n", mp->mnt_noasync); 2870 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 2871 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 2872 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 2873 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 2874 db_printf(" mnt_secondary_accwrites = %d\n", 2875 mp->mnt_secondary_accwrites); 2876 db_printf(" mnt_gjprovider = %s\n", 2877 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 2878 db_printf("\n"); 2879 2880 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2881 if (vp->v_type != VMARKER) { 2882 vn_printf(vp, "vnode "); 2883 if (db_pager_quit) 2884 break; 2885 } 2886 } 2887 } 2888 #endif /* DDB */ 2889 2890 /* 2891 * Fill in a struct xvfsconf based on a struct vfsconf. 2892 */ 2893 static void 2894 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp) 2895 { 2896 2897 strcpy(xvfsp->vfc_name, vfsp->vfc_name); 2898 xvfsp->vfc_typenum = vfsp->vfc_typenum; 2899 xvfsp->vfc_refcount = vfsp->vfc_refcount; 2900 xvfsp->vfc_flags = vfsp->vfc_flags; 2901 /* 2902 * These are unused in userland, we keep them 2903 * to not break binary compatibility. 2904 */ 2905 xvfsp->vfc_vfsops = NULL; 2906 xvfsp->vfc_next = NULL; 2907 } 2908 2909 /* 2910 * Top level filesystem related information gathering. 
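 */

/*
 * [Editor's illustration -- not part of the original source.]
 * The handler below exports one struct xvfsconf per configured filesystem
 * through the vfs.conflist sysctl.  A self-contained userland sketch that
 * reads it back; it assumes the userland struct xvfsconf from <sys/mount.h>
 * matches what vfsconf2x() fills in above.
 */
#if 0	/* illustrative userland sketch only */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct xvfsconf *xvfsp;
	size_t buflen, i;

	if (sysctlbyname("vfs.conflist", NULL, &buflen, NULL, 0) == -1)
		return (1);
	if ((xvfsp = malloc(buflen)) == NULL ||
	    sysctlbyname("vfs.conflist", xvfsp, &buflen, NULL, 0) == -1)
		return (1);
	for (i = 0; i < buflen / sizeof(*xvfsp); i++)
		printf("%s: type %d, %d references, flags 0x%x\n",
		    xvfsp[i].vfc_name, xvfsp[i].vfc_typenum,
		    xvfsp[i].vfc_refcount, (unsigned)xvfsp[i].vfc_flags);
	free(xvfsp);
	return (0);
}
#endif

/*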
2911 */ 2912 static int 2913 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 2914 { 2915 struct vfsconf *vfsp; 2916 struct xvfsconf xvfsp; 2917 int error; 2918 2919 error = 0; 2920 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 2921 bzero(&xvfsp, sizeof(xvfsp)); 2922 vfsconf2x(vfsp, &xvfsp); 2923 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp); 2924 if (error) 2925 break; 2926 } 2927 return (error); 2928 } 2929 2930 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist, 2931 "S,xvfsconf", "List of all configured filesystems"); 2932 2933 #ifndef BURN_BRIDGES 2934 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 2935 2936 static int 2937 vfs_sysctl(SYSCTL_HANDLER_ARGS) 2938 { 2939 int *name = (int *)arg1 - 1; /* XXX */ 2940 u_int namelen = arg2 + 1; /* XXX */ 2941 struct vfsconf *vfsp; 2942 struct xvfsconf xvfsp; 2943 2944 printf("WARNING: userland calling deprecated sysctl, " 2945 "please rebuild world\n"); 2946 2947 #if 1 || defined(COMPAT_PRELITE2) 2948 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 2949 if (namelen == 1) 2950 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 2951 #endif 2952 2953 switch (name[1]) { 2954 case VFS_MAXTYPENUM: 2955 if (namelen != 2) 2956 return (ENOTDIR); 2957 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 2958 case VFS_CONF: 2959 if (namelen != 3) 2960 return (ENOTDIR); /* overloaded */ 2961 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) 2962 if (vfsp->vfc_typenum == name[2]) 2963 break; 2964 if (vfsp == NULL) 2965 return (EOPNOTSUPP); 2966 bzero(&xvfsp, sizeof(xvfsp)); 2967 vfsconf2x(vfsp, &xvfsp); 2968 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 2969 } 2970 return (EOPNOTSUPP); 2971 } 2972 2973 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, 2974 vfs_sysctl, "Generic filesystem"); 2975 2976 #if 1 || defined(COMPAT_PRELITE2) 2977 2978 static int 2979 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 2980 { 2981 int error; 2982 struct vfsconf *vfsp; 2983 struct ovfsconf ovfs; 2984 2985 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 2986 bzero(&ovfs, sizeof(ovfs)); 2987 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 2988 strcpy(ovfs.vfc_name, vfsp->vfc_name); 2989 ovfs.vfc_index = vfsp->vfc_typenum; 2990 ovfs.vfc_refcount = vfsp->vfc_refcount; 2991 ovfs.vfc_flags = vfsp->vfc_flags; 2992 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 2993 if (error) 2994 return error; 2995 } 2996 return 0; 2997 } 2998 2999 #endif /* 1 || COMPAT_PRELITE2 */ 3000 #endif /* !BURN_BRIDGES */ 3001 3002 #define KINFO_VNODESLOP 10 3003 #ifdef notyet 3004 /* 3005 * Dump vnode list (via sysctl). 3006 */ 3007 /* ARGSUSED */ 3008 static int 3009 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3010 { 3011 struct xvnode *xvn; 3012 struct mount *mp; 3013 struct vnode *vp; 3014 int error, len, n; 3015 3016 /* 3017 * Stale numvnodes access is not fatal here. 
3018 */ 3019 req->lock = 0; 3020 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3021 if (!req->oldptr) 3022 /* Make an estimate */ 3023 return (SYSCTL_OUT(req, 0, len)); 3024 3025 error = sysctl_wire_old_buffer(req, 0); 3026 if (error != 0) 3027 return (error); 3028 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3029 n = 0; 3030 mtx_lock(&mountlist_mtx); 3031 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3032 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3033 continue; 3034 MNT_ILOCK(mp); 3035 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3036 if (n == len) 3037 break; 3038 vref(vp); 3039 xvn[n].xv_size = sizeof *xvn; 3040 xvn[n].xv_vnode = vp; 3041 xvn[n].xv_id = 0; /* XXX compat */ 3042 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3043 XV_COPY(usecount); 3044 XV_COPY(writecount); 3045 XV_COPY(holdcnt); 3046 XV_COPY(mount); 3047 XV_COPY(numoutput); 3048 XV_COPY(type); 3049 #undef XV_COPY 3050 xvn[n].xv_flag = vp->v_vflag; 3051 3052 switch (vp->v_type) { 3053 case VREG: 3054 case VDIR: 3055 case VLNK: 3056 break; 3057 case VBLK: 3058 case VCHR: 3059 if (vp->v_rdev == NULL) { 3060 vrele(vp); 3061 continue; 3062 } 3063 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3064 break; 3065 case VSOCK: 3066 xvn[n].xv_socket = vp->v_socket; 3067 break; 3068 case VFIFO: 3069 xvn[n].xv_fifo = vp->v_fifoinfo; 3070 break; 3071 case VNON: 3072 case VBAD: 3073 default: 3074 /* shouldn't happen? */ 3075 vrele(vp); 3076 continue; 3077 } 3078 vrele(vp); 3079 ++n; 3080 } 3081 MNT_IUNLOCK(mp); 3082 mtx_lock(&mountlist_mtx); 3083 vfs_unbusy(mp); 3084 if (n == len) 3085 break; 3086 } 3087 mtx_unlock(&mountlist_mtx); 3088 3089 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3090 free(xvn, M_TEMP); 3091 return (error); 3092 } 3093 3094 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 3095 0, 0, sysctl_vnode, "S,xvnode", ""); 3096 #endif 3097 3098 /* 3099 * Unmount all filesystems. The list is traversed in reverse order 3100 * of mounting to avoid dependencies. 3101 */ 3102 void 3103 vfs_unmountall(void) 3104 { 3105 struct mount *mp; 3106 struct thread *td; 3107 int error; 3108 3109 KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread")); 3110 td = curthread; 3111 /* 3112 * Since this only runs when rebooting, it is not interlocked. 3113 */ 3114 while(!TAILQ_EMPTY(&mountlist)) { 3115 mp = TAILQ_LAST(&mountlist, mntlist); 3116 error = dounmount(mp, MNT_FORCE, td); 3117 if (error) { 3118 TAILQ_REMOVE(&mountlist, mp, mnt_list); 3119 /* 3120 * XXX: Due to the way in which we mount the root 3121 * file system off of devfs, devfs will generate a 3122 * "busy" warning when we try to unmount it before 3123 * the root. Don't print a warning as a result in 3124 * order to avoid false positive errors that may 3125 * cause needless upset. 3126 */ 3127 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3128 printf("unmount of %s failed (", 3129 mp->mnt_stat.f_mntonname); 3130 if (error == EBUSY) 3131 printf("BUSY)\n"); 3132 else 3133 printf("%d)\n", error); 3134 } 3135 } else { 3136 /* The unmount has removed mp from the mountlist */ 3137 } 3138 } 3139 } 3140 3141 /* 3142 * perform msync on all vnodes under a mount point 3143 * the mount point must be locked. 
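 */

/*
 * [Editor's illustration -- not part of the original source.]
 * A minimal sketch of how the routine below is used: callers push dirty
 * memory-mapped pages for a whole mount before syncing or unmounting it.
 * The lazy syncer further below passes MNT_NOWAIT (just start the writes);
 * an unmount would typically pass MNT_WAIT to wait for them.
 * example_flush_mapped_pages() is a hypothetical helper.
 */
#if 0	/* illustrative sketch only */
static void
example_flush_mapped_pages(struct mount *mp, int waiting)
{
	vfs_msync(mp, waiting ? MNT_WAIT : MNT_NOWAIT);
}
#endif

/*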
3144 */ 3145 void 3146 vfs_msync(struct mount *mp, int flags) 3147 { 3148 struct vnode *vp, *mvp; 3149 struct vm_object *obj; 3150 3151 MNT_ILOCK(mp); 3152 MNT_VNODE_FOREACH(vp, mp, mvp) { 3153 VI_LOCK(vp); 3154 if ((vp->v_iflag & VI_OBJDIRTY) && 3155 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3156 MNT_IUNLOCK(mp); 3157 if (!vget(vp, 3158 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3159 curthread)) { 3160 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3161 vput(vp); 3162 MNT_ILOCK(mp); 3163 continue; 3164 } 3165 3166 obj = vp->v_object; 3167 if (obj != NULL) { 3168 VM_OBJECT_LOCK(obj); 3169 vm_object_page_clean(obj, 0, 0, 3170 flags == MNT_WAIT ? 3171 OBJPC_SYNC : OBJPC_NOSYNC); 3172 VM_OBJECT_UNLOCK(obj); 3173 } 3174 vput(vp); 3175 } 3176 MNT_ILOCK(mp); 3177 } else 3178 VI_UNLOCK(vp); 3179 } 3180 MNT_IUNLOCK(mp); 3181 } 3182 3183 /* 3184 * Mark a vnode as free, putting it up for recycling. 3185 */ 3186 static void 3187 vfree(struct vnode *vp) 3188 { 3189 3190 CTR1(KTR_VFS, "vfree vp %p", vp); 3191 ASSERT_VI_LOCKED(vp, "vfree"); 3192 mtx_lock(&vnode_free_list_mtx); 3193 VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed.")); 3194 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free")); 3195 VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't")); 3196 VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp, 3197 ("vfree: Freeing doomed vnode")); 3198 if (vp->v_iflag & VI_AGE) { 3199 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 3200 } else { 3201 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 3202 } 3203 freevnodes++; 3204 vp->v_iflag &= ~VI_AGE; 3205 vp->v_iflag |= VI_FREE; 3206 mtx_unlock(&vnode_free_list_mtx); 3207 } 3208 3209 /* 3210 * Opposite of vfree() - mark a vnode as in use. 3211 */ 3212 static void 3213 vbusy(struct vnode *vp) 3214 { 3215 CTR1(KTR_VFS, "vbusy vp %p", vp); 3216 ASSERT_VI_LOCKED(vp, "vbusy"); 3217 VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free")); 3218 VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed.")); 3219 3220 mtx_lock(&vnode_free_list_mtx); 3221 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 3222 freevnodes--; 3223 vp->v_iflag &= ~(VI_FREE|VI_AGE); 3224 mtx_unlock(&vnode_free_list_mtx); 3225 } 3226 3227 static void 3228 destroy_vpollinfo(struct vpollinfo *vi) 3229 { 3230 knlist_destroy(&vi->vpi_selinfo.si_note); 3231 mtx_destroy(&vi->vpi_lock); 3232 uma_zfree(vnodepoll_zone, vi); 3233 } 3234 3235 /* 3236 * Initalize per-vnode helper structure to hold poll-related state. 3237 */ 3238 void 3239 v_addpollinfo(struct vnode *vp) 3240 { 3241 struct vpollinfo *vi; 3242 3243 if (vp->v_pollinfo != NULL) 3244 return; 3245 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3246 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3247 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3248 vfs_knlunlock, vfs_knllocked); 3249 VI_LOCK(vp); 3250 if (vp->v_pollinfo != NULL) { 3251 VI_UNLOCK(vp); 3252 destroy_vpollinfo(vi); 3253 return; 3254 } 3255 vp->v_pollinfo = vi; 3256 VI_UNLOCK(vp); 3257 } 3258 3259 /* 3260 * Record a process's interest in events which might happen to 3261 * a vnode. Because poll uses the historic select-style interface 3262 * internally, this routine serves as both the ``check for any 3263 * pending events'' and the ``record my interest in future events'' 3264 * functions. (These are done together, while the lock is held, 3265 * to avoid race conditions.) 
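 */

/*
 * [Editor's illustration -- not part of the original source.]
 * A minimal sketch of a hypothetical filesystem's VOP_POLL using the
 * routine below: report whatever is ready immediately, and otherwise let
 * vn_pollrecord() remember the caller's interest so later activity on the
 * vnode can wake it.  Field names follow the standard vop_poll_args.
 */
#if 0	/* illustrative sketch only */
static int
example_fs_poll(struct vop_poll_args *ap)
{
	int revents;

	/* Pretend data is always readable in this toy filesystem. */
	revents = ap->a_events & (POLLIN | POLLRDNORM);
	if (revents == 0)
		revents = vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events);
	return (revents);
}
#endif

/*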
3266 */ 3267 int 3268 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3269 { 3270 3271 v_addpollinfo(vp); 3272 mtx_lock(&vp->v_pollinfo->vpi_lock); 3273 if (vp->v_pollinfo->vpi_revents & events) { 3274 /* 3275 * This leaves events we are not interested 3276 * in available for the other process which 3277 * which presumably had requested them 3278 * (otherwise they would never have been 3279 * recorded). 3280 */ 3281 events &= vp->v_pollinfo->vpi_revents; 3282 vp->v_pollinfo->vpi_revents &= ~events; 3283 3284 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3285 return (events); 3286 } 3287 vp->v_pollinfo->vpi_events |= events; 3288 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3289 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3290 return (0); 3291 } 3292 3293 /* 3294 * Routine to create and manage a filesystem syncer vnode. 3295 */ 3296 #define sync_close ((int (*)(struct vop_close_args *))nullop) 3297 static int sync_fsync(struct vop_fsync_args *); 3298 static int sync_inactive(struct vop_inactive_args *); 3299 static int sync_reclaim(struct vop_reclaim_args *); 3300 3301 static struct vop_vector sync_vnodeops = { 3302 .vop_bypass = VOP_EOPNOTSUPP, 3303 .vop_close = sync_close, /* close */ 3304 .vop_fsync = sync_fsync, /* fsync */ 3305 .vop_inactive = sync_inactive, /* inactive */ 3306 .vop_reclaim = sync_reclaim, /* reclaim */ 3307 .vop_lock1 = vop_stdlock, /* lock */ 3308 .vop_unlock = vop_stdunlock, /* unlock */ 3309 .vop_islocked = vop_stdislocked, /* islocked */ 3310 }; 3311 3312 /* 3313 * Create a new filesystem syncer vnode for the specified mount point. 3314 */ 3315 int 3316 vfs_allocate_syncvnode(struct mount *mp) 3317 { 3318 struct vnode *vp; 3319 struct bufobj *bo; 3320 static long start, incr, next; 3321 int error; 3322 3323 /* Allocate a new vnode */ 3324 if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) { 3325 mp->mnt_syncer = NULL; 3326 return (error); 3327 } 3328 vp->v_type = VNON; 3329 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3330 vp->v_vflag |= VV_FORCEINSMQ; 3331 error = insmntque(vp, mp); 3332 if (error != 0) 3333 panic("vfs_allocate_syncvnode: insmntque failed"); 3334 vp->v_vflag &= ~VV_FORCEINSMQ; 3335 VOP_UNLOCK(vp, 0); 3336 /* 3337 * Place the vnode onto the syncer worklist. We attempt to 3338 * scatter them about on the list so that they will go off 3339 * at evenly distributed times even if all the filesystems 3340 * are mounted at once. 3341 */ 3342 next += incr; 3343 if (next == 0 || next > syncer_maxdelay) { 3344 start /= 2; 3345 incr /= 2; 3346 if (start == 0) { 3347 start = syncer_maxdelay / 2; 3348 incr = syncer_maxdelay; 3349 } 3350 next = start; 3351 } 3352 bo = &vp->v_bufobj; 3353 BO_LOCK(bo); 3354 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3355 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3356 mtx_lock(&sync_mtx); 3357 sync_vnode_count++; 3358 mtx_unlock(&sync_mtx); 3359 BO_UNLOCK(bo); 3360 mp->mnt_syncer = vp; 3361 return (0); 3362 } 3363 3364 /* 3365 * Do a lazy sync of the filesystem. 3366 */ 3367 static int 3368 sync_fsync(struct vop_fsync_args *ap) 3369 { 3370 struct vnode *syncvp = ap->a_vp; 3371 struct mount *mp = syncvp->v_mount; 3372 int error; 3373 struct bufobj *bo; 3374 3375 /* 3376 * We only need to do something if this is a lazy evaluation. 3377 */ 3378 if (ap->a_waitfor != MNT_LAZY) 3379 return (0); 3380 3381 /* 3382 * Move ourselves to the back of the sync list. 
3383 */ 3384 bo = &syncvp->v_bufobj; 3385 BO_LOCK(bo); 3386 vn_syncer_add_to_worklist(bo, syncdelay); 3387 BO_UNLOCK(bo); 3388 3389 /* 3390 * Walk the list of vnodes pushing all that are dirty and 3391 * not already on the sync list. 3392 */ 3393 mtx_lock(&mountlist_mtx); 3394 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) != 0) { 3395 mtx_unlock(&mountlist_mtx); 3396 return (0); 3397 } 3398 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3399 vfs_unbusy(mp); 3400 return (0); 3401 } 3402 MNT_ILOCK(mp); 3403 mp->mnt_noasync++; 3404 mp->mnt_kern_flag &= ~MNTK_ASYNC; 3405 MNT_IUNLOCK(mp); 3406 vfs_msync(mp, MNT_NOWAIT); 3407 error = VFS_SYNC(mp, MNT_LAZY, ap->a_td); 3408 MNT_ILOCK(mp); 3409 mp->mnt_noasync--; 3410 if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0) 3411 mp->mnt_kern_flag |= MNTK_ASYNC; 3412 MNT_IUNLOCK(mp); 3413 vn_finished_write(mp); 3414 vfs_unbusy(mp); 3415 return (error); 3416 } 3417 3418 /* 3419 * The syncer vnode is no referenced. 3420 */ 3421 static int 3422 sync_inactive(struct vop_inactive_args *ap) 3423 { 3424 3425 vgone(ap->a_vp); 3426 return (0); 3427 } 3428 3429 /* 3430 * The syncer vnode is no longer needed and is being decommissioned. 3431 * 3432 * Modifications to the worklist must be protected by sync_mtx. 3433 */ 3434 static int 3435 sync_reclaim(struct vop_reclaim_args *ap) 3436 { 3437 struct vnode *vp = ap->a_vp; 3438 struct bufobj *bo; 3439 3440 bo = &vp->v_bufobj; 3441 BO_LOCK(bo); 3442 vp->v_mount->mnt_syncer = NULL; 3443 if (bo->bo_flag & BO_ONWORKLST) { 3444 mtx_lock(&sync_mtx); 3445 LIST_REMOVE(bo, bo_synclist); 3446 syncer_worklist_len--; 3447 sync_vnode_count--; 3448 mtx_unlock(&sync_mtx); 3449 bo->bo_flag &= ~BO_ONWORKLST; 3450 } 3451 BO_UNLOCK(bo); 3452 3453 return (0); 3454 } 3455 3456 /* 3457 * Check if vnode represents a disk device 3458 */ 3459 int 3460 vn_isdisk(struct vnode *vp, int *errp) 3461 { 3462 int error; 3463 3464 error = 0; 3465 dev_lock(); 3466 if (vp->v_type != VCHR) 3467 error = ENOTBLK; 3468 else if (vp->v_rdev == NULL) 3469 error = ENXIO; 3470 else if (vp->v_rdev->si_devsw == NULL) 3471 error = ENXIO; 3472 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3473 error = ENOTBLK; 3474 dev_unlock(); 3475 if (errp != NULL) 3476 *errp = error; 3477 return (error == 0); 3478 } 3479 3480 /* 3481 * Common filesystem object access control check routine. Accepts a 3482 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3483 * and optional call-by-reference privused argument allowing vaccess() 3484 * to indicate to the caller whether privilege was used to satisfy the 3485 * request (obsoleted). Returns 0 on success, or an errno on failure. 3486 * 3487 * The ifdef'd CAPABILITIES version is here for reference, but is not 3488 * actually used. 3489 */ 3490 int 3491 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3492 accmode_t accmode, struct ucred *cred, int *privused) 3493 { 3494 accmode_t dac_granted; 3495 accmode_t priv_granted; 3496 3497 /* 3498 * Look for a normal, non-privileged way to access the file/directory 3499 * as requested. If it exists, go with that. 3500 */ 3501 3502 if (privused != NULL) 3503 *privused = 0; 3504 3505 dac_granted = 0; 3506 3507 /* Check the owner. 
*/ 3508 if (cred->cr_uid == file_uid) { 3509 dac_granted |= VADMIN; 3510 if (file_mode & S_IXUSR) 3511 dac_granted |= VEXEC; 3512 if (file_mode & S_IRUSR) 3513 dac_granted |= VREAD; 3514 if (file_mode & S_IWUSR) 3515 dac_granted |= (VWRITE | VAPPEND); 3516 3517 if ((accmode & dac_granted) == accmode) 3518 return (0); 3519 3520 goto privcheck; 3521 } 3522 3523 /* Otherwise, check the groups (first match) */ 3524 if (groupmember(file_gid, cred)) { 3525 if (file_mode & S_IXGRP) 3526 dac_granted |= VEXEC; 3527 if (file_mode & S_IRGRP) 3528 dac_granted |= VREAD; 3529 if (file_mode & S_IWGRP) 3530 dac_granted |= (VWRITE | VAPPEND); 3531 3532 if ((accmode & dac_granted) == accmode) 3533 return (0); 3534 3535 goto privcheck; 3536 } 3537 3538 /* Otherwise, check everyone else. */ 3539 if (file_mode & S_IXOTH) 3540 dac_granted |= VEXEC; 3541 if (file_mode & S_IROTH) 3542 dac_granted |= VREAD; 3543 if (file_mode & S_IWOTH) 3544 dac_granted |= (VWRITE | VAPPEND); 3545 if ((accmode & dac_granted) == accmode) 3546 return (0); 3547 3548 privcheck: 3549 /* 3550 * Build a privilege mask to determine if the set of privileges 3551 * satisfies the requirements when combined with the granted mask 3552 * from above. For each privilege, if the privilege is required, 3553 * bitwise or the request type onto the priv_granted mask. 3554 */ 3555 priv_granted = 0; 3556 3557 if (type == VDIR) { 3558 /* 3559 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 3560 * requests, instead of PRIV_VFS_EXEC. 3561 */ 3562 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3563 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 3564 priv_granted |= VEXEC; 3565 } else { 3566 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3567 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 3568 priv_granted |= VEXEC; 3569 } 3570 3571 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 3572 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 3573 priv_granted |= VREAD; 3574 3575 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3576 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 3577 priv_granted |= (VWRITE | VAPPEND); 3578 3579 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3580 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 3581 priv_granted |= VADMIN; 3582 3583 if ((accmode & (priv_granted | dac_granted)) == accmode) { 3584 /* XXX audit: privilege used */ 3585 if (privused != NULL) 3586 *privused = 1; 3587 return (0); 3588 } 3589 3590 return ((accmode & VADMIN) ? EPERM : EACCES); 3591 } 3592 3593 /* 3594 * Credential check based on process requesting service, and per-attribute 3595 * permissions. 3596 */ 3597 int 3598 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 3599 struct thread *td, accmode_t accmode) 3600 { 3601 3602 /* 3603 * Kernel-invoked always succeeds. 3604 */ 3605 if (cred == NOCRED) 3606 return (0); 3607 3608 /* 3609 * Do not allow privileged processes in jail to directly manipulate 3610 * system attributes. 3611 */ 3612 switch (attrnamespace) { 3613 case EXTATTR_NAMESPACE_SYSTEM: 3614 /* Potentially should be: return (EPERM); */ 3615 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 3616 case EXTATTR_NAMESPACE_USER: 3617 return (VOP_ACCESS(vp, accmode, cred, td)); 3618 default: 3619 return (EPERM); 3620 } 3621 } 3622 3623 #ifdef DEBUG_VFS_LOCKS 3624 /* 3625 * This only exists to supress warnings from unlocked specfs accesses. It is 3626 * no longer ok to have an unlocked VFS. 
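 */

/*
 * [Editor's illustration -- not part of the original source.]
 * A minimal sketch of how filesystem code states its locking contract with
 * the assertion helpers defined below; violations are then printed, and can
 * drop into the debugger, the moment they happen.  example_fs_setattr() is
 * a hypothetical VOP implementation.
 */
#if 0	/* illustrative sketch only */
static int
example_fs_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;

	ASSERT_VOP_ELOCKED(vp, "example_fs_setattr");	/* vnode lock held... */
	ASSERT_VI_UNLOCKED(vp, "example_fs_setattr");	/* ...but not the interlock */
	/* ... update the attributes ... */
	return (0);
}
#endif

/*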
3627 */ 3628 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 3629 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 3630 3631 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 3632 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, ""); 3633 3634 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 3635 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, ""); 3636 3637 int vfs_badlock_print = 1; /* Print lock violations. */ 3638 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, ""); 3639 3640 #ifdef KDB 3641 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 3642 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, ""); 3643 #endif 3644 3645 static void 3646 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 3647 { 3648 3649 #ifdef KDB 3650 if (vfs_badlock_backtrace) 3651 kdb_backtrace(); 3652 #endif 3653 if (vfs_badlock_print) 3654 printf("%s: %p %s\n", str, (void *)vp, msg); 3655 if (vfs_badlock_ddb) 3656 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 3657 } 3658 3659 void 3660 assert_vi_locked(struct vnode *vp, const char *str) 3661 { 3662 3663 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 3664 vfs_badlock("interlock is not locked but should be", str, vp); 3665 } 3666 3667 void 3668 assert_vi_unlocked(struct vnode *vp, const char *str) 3669 { 3670 3671 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 3672 vfs_badlock("interlock is locked but should not be", str, vp); 3673 } 3674 3675 void 3676 assert_vop_locked(struct vnode *vp, const char *str) 3677 { 3678 3679 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0) 3680 vfs_badlock("is not locked but should be", str, vp); 3681 } 3682 3683 void 3684 assert_vop_unlocked(struct vnode *vp, const char *str) 3685 { 3686 3687 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 3688 vfs_badlock("is locked but should not be", str, vp); 3689 } 3690 3691 void 3692 assert_vop_elocked(struct vnode *vp, const char *str) 3693 { 3694 3695 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 3696 vfs_badlock("is not exclusive locked but should be", str, vp); 3697 } 3698 3699 #if 0 3700 void 3701 assert_vop_elocked_other(struct vnode *vp, const char *str) 3702 { 3703 3704 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 3705 vfs_badlock("is not exclusive locked by another thread", 3706 str, vp); 3707 } 3708 3709 void 3710 assert_vop_slocked(struct vnode *vp, const char *str) 3711 { 3712 3713 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 3714 vfs_badlock("is not locked shared but should be", str, vp); 3715 } 3716 #endif /* 0 */ 3717 #endif /* DEBUG_VFS_LOCKS */ 3718 3719 void 3720 vop_rename_pre(void *ap) 3721 { 3722 struct vop_rename_args *a = ap; 3723 3724 #ifdef DEBUG_VFS_LOCKS 3725 if (a->a_tvp) 3726 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 3727 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 3728 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 3729 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 3730 3731 /* Check the source (from). */ 3732 if (a->a_tdvp != a->a_fdvp && a->a_tvp != a->a_fdvp) 3733 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 3734 if (a->a_tvp != a->a_fvp) 3735 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 3736 3737 /* Check the target. 
*/ 3738 if (a->a_tvp) 3739 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 3740 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 3741 #endif 3742 if (a->a_tdvp != a->a_fdvp) 3743 vhold(a->a_fdvp); 3744 if (a->a_tvp != a->a_fvp) 3745 vhold(a->a_fvp); 3746 vhold(a->a_tdvp); 3747 if (a->a_tvp) 3748 vhold(a->a_tvp); 3749 } 3750 3751 void 3752 vop_strategy_pre(void *ap) 3753 { 3754 #ifdef DEBUG_VFS_LOCKS 3755 struct vop_strategy_args *a; 3756 struct buf *bp; 3757 3758 a = ap; 3759 bp = a->a_bp; 3760 3761 /* 3762 * Cluster ops lock their component buffers but not the IO container. 3763 */ 3764 if ((bp->b_flags & B_CLUSTER) != 0) 3765 return; 3766 3767 if (!BUF_ISLOCKED(bp)) { 3768 if (vfs_badlock_print) 3769 printf( 3770 "VOP_STRATEGY: bp is not locked but should be\n"); 3771 if (vfs_badlock_ddb) 3772 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 3773 } 3774 #endif 3775 } 3776 3777 void 3778 vop_lookup_pre(void *ap) 3779 { 3780 #ifdef DEBUG_VFS_LOCKS 3781 struct vop_lookup_args *a; 3782 struct vnode *dvp; 3783 3784 a = ap; 3785 dvp = a->a_dvp; 3786 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP"); 3787 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 3788 #endif 3789 } 3790 3791 void 3792 vop_lookup_post(void *ap, int rc) 3793 { 3794 #ifdef DEBUG_VFS_LOCKS 3795 struct vop_lookup_args *a; 3796 struct vnode *dvp; 3797 struct vnode *vp; 3798 3799 a = ap; 3800 dvp = a->a_dvp; 3801 vp = *(a->a_vpp); 3802 3803 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP"); 3804 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 3805 3806 if (!rc) 3807 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)"); 3808 #endif 3809 } 3810 3811 void 3812 vop_lock_pre(void *ap) 3813 { 3814 #ifdef DEBUG_VFS_LOCKS 3815 struct vop_lock1_args *a = ap; 3816 3817 if ((a->a_flags & LK_INTERLOCK) == 0) 3818 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 3819 else 3820 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 3821 #endif 3822 } 3823 3824 void 3825 vop_lock_post(void *ap, int rc) 3826 { 3827 #ifdef DEBUG_VFS_LOCKS 3828 struct vop_lock1_args *a = ap; 3829 3830 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 3831 if (rc == 0) 3832 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 3833 #endif 3834 } 3835 3836 void 3837 vop_unlock_pre(void *ap) 3838 { 3839 #ifdef DEBUG_VFS_LOCKS 3840 struct vop_unlock_args *a = ap; 3841 3842 if (a->a_flags & LK_INTERLOCK) 3843 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 3844 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 3845 #endif 3846 } 3847 3848 void 3849 vop_unlock_post(void *ap, int rc) 3850 { 3851 #ifdef DEBUG_VFS_LOCKS 3852 struct vop_unlock_args *a = ap; 3853 3854 if (a->a_flags & LK_INTERLOCK) 3855 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 3856 #endif 3857 } 3858 3859 void 3860 vop_create_post(void *ap, int rc) 3861 { 3862 struct vop_create_args *a = ap; 3863 3864 if (!rc) 3865 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3866 } 3867 3868 void 3869 vop_link_post(void *ap, int rc) 3870 { 3871 struct vop_link_args *a = ap; 3872 3873 if (!rc) { 3874 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 3875 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 3876 } 3877 } 3878 3879 void 3880 vop_mkdir_post(void *ap, int rc) 3881 { 3882 struct vop_mkdir_args *a = ap; 3883 3884 if (!rc) 3885 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 3886 } 3887 3888 void 3889 vop_mknod_post(void *ap, int rc) 3890 { 3891 struct vop_mknod_args *a = ap; 3892 3893 if (!rc) 3894 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3895 } 3896 3897 void 3898 vop_remove_post(void *ap, int rc) 3899 { 3900 struct vop_remove_args *a = ap; 3901 3902 if (!rc) { 3903 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3904 
VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 3905 } 3906 } 3907 3908 void 3909 vop_rename_post(void *ap, int rc) 3910 { 3911 struct vop_rename_args *a = ap; 3912 3913 if (!rc) { 3914 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 3915 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 3916 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 3917 if (a->a_tvp) 3918 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 3919 } 3920 if (a->a_tdvp != a->a_fdvp) 3921 vdrop(a->a_fdvp); 3922 if (a->a_tvp != a->a_fvp) 3923 vdrop(a->a_fvp); 3924 vdrop(a->a_tdvp); 3925 if (a->a_tvp) 3926 vdrop(a->a_tvp); 3927 } 3928 3929 void 3930 vop_rmdir_post(void *ap, int rc) 3931 { 3932 struct vop_rmdir_args *a = ap; 3933 3934 if (!rc) { 3935 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 3936 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 3937 } 3938 } 3939 3940 void 3941 vop_setattr_post(void *ap, int rc) 3942 { 3943 struct vop_setattr_args *a = ap; 3944 3945 if (!rc) 3946 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 3947 } 3948 3949 void 3950 vop_symlink_post(void *ap, int rc) 3951 { 3952 struct vop_symlink_args *a = ap; 3953 3954 if (!rc) 3955 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 3956 } 3957 3958 static struct knlist fs_knlist; 3959 3960 static void 3961 vfs_event_init(void *arg) 3962 { 3963 knlist_init(&fs_knlist, NULL, NULL, NULL, NULL); 3964 } 3965 /* XXX - correct order? */ 3966 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 3967 3968 void 3969 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused) 3970 { 3971 3972 KNOTE_UNLOCKED(&fs_knlist, event); 3973 } 3974 3975 static int filt_fsattach(struct knote *kn); 3976 static void filt_fsdetach(struct knote *kn); 3977 static int filt_fsevent(struct knote *kn, long hint); 3978 3979 struct filterops fs_filtops = 3980 { 0, filt_fsattach, filt_fsdetach, filt_fsevent }; 3981 3982 static int 3983 filt_fsattach(struct knote *kn) 3984 { 3985 3986 kn->kn_flags |= EV_CLEAR; 3987 knlist_add(&fs_knlist, kn, 0); 3988 return (0); 3989 } 3990 3991 static void 3992 filt_fsdetach(struct knote *kn) 3993 { 3994 3995 knlist_remove(&fs_knlist, kn, 0); 3996 } 3997 3998 static int 3999 filt_fsevent(struct knote *kn, long hint) 4000 { 4001 4002 kn->kn_fflags |= hint; 4003 return (kn->kn_fflags != 0); 4004 } 4005 4006 static int 4007 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4008 { 4009 struct vfsidctl vc; 4010 int error; 4011 struct mount *mp; 4012 4013 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4014 if (error) 4015 return (error); 4016 if (vc.vc_vers != VFS_CTL_VERS1) 4017 return (EINVAL); 4018 mp = vfs_getvfs(&vc.vc_fsid); 4019 if (mp == NULL) 4020 return (ENOENT); 4021 /* ensure that a specific sysctl goes to the right filesystem. */ 4022 if (strcmp(vc.vc_fstypename, "*") != 0 && 4023 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4024 vfs_rel(mp); 4025 return (EINVAL); 4026 } 4027 VCTLTOREQ(&vc, req); 4028 error = VFS_SYSCTL(mp, vc.vc_op, req); 4029 vfs_rel(mp); 4030 return (error); 4031 } 4032 4033 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR, NULL, 0, sysctl_vfs_ctl, "", 4034 "Sysctl by fsid"); 4035 4036 /* 4037 * Function to initialize a va_filerev field sensibly. 4038 * XXX: Wouldn't a random number make a lot more sense ?? 
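 */

/*
 * [Editor's illustration -- not part of the original source.]
 * Before the va_filerev helper below, the userland view of the notes posted
 * by the vop_*_post() hooks above: a self-contained sketch that watches one
 * file with EVFILT_VNODE and prints each NOTE_WRITE/DELETE/RENAME/ATTRIB
 * batch as the kernel KNOTEs it.
 */
#if 0	/* illustrative userland sketch only */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	struct kevent ev;
	int fd, kq;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) == -1)
		return (1);
	if ((kq = kqueue()) == -1)
		return (1);
	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_WRITE | NOTE_DELETE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
		return (1);
	while (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
		printf("%s: fflags 0x%x\n", argv[1], (unsigned)ev.fflags);
	return (0);
}
#endif

/*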
4039 */ 4040 u_quad_t 4041 init_va_filerev(void) 4042 { 4043 struct bintime bt; 4044 4045 getbinuptime(&bt); 4046 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4047 } 4048 4049 static int filt_vfsread(struct knote *kn, long hint); 4050 static int filt_vfswrite(struct knote *kn, long hint); 4051 static int filt_vfsvnode(struct knote *kn, long hint); 4052 static void filt_vfsdetach(struct knote *kn); 4053 static struct filterops vfsread_filtops = 4054 { 1, NULL, filt_vfsdetach, filt_vfsread }; 4055 static struct filterops vfswrite_filtops = 4056 { 1, NULL, filt_vfsdetach, filt_vfswrite }; 4057 static struct filterops vfsvnode_filtops = 4058 { 1, NULL, filt_vfsdetach, filt_vfsvnode }; 4059 4060 static void 4061 vfs_knllock(void *arg) 4062 { 4063 struct vnode *vp = arg; 4064 4065 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4066 } 4067 4068 static void 4069 vfs_knlunlock(void *arg) 4070 { 4071 struct vnode *vp = arg; 4072 4073 VOP_UNLOCK(vp, 0); 4074 } 4075 4076 static int 4077 vfs_knllocked(void *arg) 4078 { 4079 struct vnode *vp = arg; 4080 4081 return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE); 4082 } 4083 4084 int 4085 vfs_kqfilter(struct vop_kqfilter_args *ap) 4086 { 4087 struct vnode *vp = ap->a_vp; 4088 struct knote *kn = ap->a_kn; 4089 struct knlist *knl; 4090 4091 switch (kn->kn_filter) { 4092 case EVFILT_READ: 4093 kn->kn_fop = &vfsread_filtops; 4094 break; 4095 case EVFILT_WRITE: 4096 kn->kn_fop = &vfswrite_filtops; 4097 break; 4098 case EVFILT_VNODE: 4099 kn->kn_fop = &vfsvnode_filtops; 4100 break; 4101 default: 4102 return (EINVAL); 4103 } 4104 4105 kn->kn_hook = (caddr_t)vp; 4106 4107 v_addpollinfo(vp); 4108 if (vp->v_pollinfo == NULL) 4109 return (ENOMEM); 4110 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 4111 knlist_add(knl, kn, 0); 4112 4113 return (0); 4114 } 4115 4116 /* 4117 * Detach knote from vnode 4118 */ 4119 static void 4120 filt_vfsdetach(struct knote *kn) 4121 { 4122 struct vnode *vp = (struct vnode *)kn->kn_hook; 4123 4124 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 4125 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 4126 } 4127 4128 /*ARGSUSED*/ 4129 static int 4130 filt_vfsread(struct knote *kn, long hint) 4131 { 4132 struct vnode *vp = (struct vnode *)kn->kn_hook; 4133 struct vattr va; 4134 4135 /* 4136 * filesystem is gone, so set the EOF flag and schedule 4137 * the knote for deletion. 4138 */ 4139 if (hint == NOTE_REVOKE) { 4140 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4141 return (1); 4142 } 4143 4144 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 4145 return (0); 4146 4147 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 4148 return (kn->kn_data != 0); 4149 } 4150 4151 /*ARGSUSED*/ 4152 static int 4153 filt_vfswrite(struct knote *kn, long hint) 4154 { 4155 /* 4156 * filesystem is gone, so set the EOF flag and schedule 4157 * the knote for deletion. 
4158 */ 4159 if (hint == NOTE_REVOKE) 4160 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4161 4162 kn->kn_data = 0; 4163 return (1); 4164 } 4165 4166 static int 4167 filt_vfsvnode(struct knote *kn, long hint) 4168 { 4169 if (kn->kn_sfflags & hint) 4170 kn->kn_fflags |= hint; 4171 if (hint == NOTE_REVOKE) { 4172 kn->kn_flags |= EV_EOF; 4173 return (1); 4174 } 4175 return (kn->kn_fflags != 0); 4176 } 4177 4178 int 4179 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 4180 { 4181 int error; 4182 4183 if (dp->d_reclen > ap->a_uio->uio_resid) 4184 return (ENAMETOOLONG); 4185 error = uiomove(dp, dp->d_reclen, ap->a_uio); 4186 if (error) { 4187 if (ap->a_ncookies != NULL) { 4188 if (ap->a_cookies != NULL) 4189 free(ap->a_cookies, M_TEMP); 4190 ap->a_cookies = NULL; 4191 *ap->a_ncookies = 0; 4192 } 4193 return (error); 4194 } 4195 if (ap->a_ncookies == NULL) 4196 return (0); 4197 4198 KASSERT(ap->a_cookies, 4199 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 4200 4201 *ap->a_cookies = realloc(*ap->a_cookies, 4202 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 4203 (*ap->a_cookies)[*ap->a_ncookies] = off; 4204 return (0); 4205 } 4206 4207 /* 4208 * Mark for update the access time of the file if the filesystem 4209 * supports VA_MARK_ATIME. This functionality is used by execve 4210 * and mmap, so we want to avoid the synchronous I/O implied by 4211 * directly setting va_atime for the sake of efficiency. 4212 */ 4213 void 4214 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 4215 { 4216 struct vattr atimeattr; 4217 4218 if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) { 4219 VATTR_NULL(&atimeattr); 4220 atimeattr.va_vaflags |= VA_MARK_ATIME; 4221 (void)VOP_SETATTR(vp, &atimeattr, cred); 4222 } 4223 } 4224
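/*
 * [Editor's illustration -- not part of the original source.]
 * A minimal sketch of a VOP_READDIR loop built on vfs_read_dirent() above:
 * the helper copies one entry to the caller and grows the cookie array, and
 * it returns ENAMETOOLONG once the next entry no longer fits, which a
 * filesystem typically treats as "stop here, resume on the next call".
 * example_next_entry() is a made-up iterator standing in for the
 * filesystem's own directory walking; the whole function is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int
example_fs_readdir(struct vop_readdir_args *ap)
{
	struct dirent de;
	off_t off;
	int eofflag, error;

	eofflag = 1;
	error = 0;
	off = ap->a_uio->uio_offset;
	while (example_next_entry(ap->a_vp, &off, &de) == 0) {
		error = vfs_read_dirent(ap, &de, off);
		if (error == ENAMETOOLONG) {
			/* The caller's buffer is full; not an error. */
			error = 0;
			eofflag = 0;
			break;
		}
		if (error != 0)
			break;
		ap->a_uio->uio_offset = off;
	}
	if (ap->a_eofflag != NULL)
		*ap->a_eofflag = eofflag;
	return (error);
}
#endif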