/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_usecount(struct vnode *);
static void	v_incr_usecount_locked(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	vnlru_return_batches(struct vfsops *mnt_op);
static void	destroy_vpollinfo(struct vpollinfo *vi);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
 */
static unsigned long	numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

static u_long mnt_free_list_batch = 128;
SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
    &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
    "Number of calls to reassignbuf");

static counter_u64_t free_owe_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
    "Number of times free vnodes kept on active list due to VFS "
    "owing inactivation");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates on block
 * devices are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
int desiredvnodes;
static int gapvnodes;		/* gap between wanted and desired */
static int vhiwat;		/* enough extras after expansion */
static int vlowat;		/* minimal extras before expansion */
static int vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static int
sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
{
	int error, old_desiredvnodes;

	old_desiredvnodes = desiredvnodes;
	if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
		return (error);
	if (old_desiredvnodes != desiredvnodes) {
		wantfreevnodes = desiredvnodes / 4;
		/* XXX locking seems to be incomplete. */
		vfs_hash_changesize(desiredvnodes);
		cache_changesize(desiredvnodes);
	}
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
    sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return uma_zalloc(buf_trie_zone, M_NOWAIT);
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * 1024 * 1024 / 64)	/* 8M */
#endif

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	u_int i;
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	free_owe_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Eventually, mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 * C->A->B
 *        |
 *        +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	MNT_ILOCK(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount it tried to busy is no longer
	 * valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
	mp->mnt_lockref--;
	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even different filesystem, so we have
 * to check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it doing anything.)  The
		 * order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    ((vp->v_iflag & VI_FREE) != 0) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_iflag & VI_FREE) != 0 ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_USER);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0, "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static void
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp;
	struct mount *mp;
	bool tried_batches;

	tried_batches = false;
	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (vp == NULL) {
			if (tried_batches)
				break;
			mtx_unlock(&vnode_free_list_mtx);
			vnlru_return_batches(mnt_op);
			tried_batches = true;
			mtx_lock(&vnode_free_list_mtx);
			continue;
		}

		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		KASSERT((vp->v_iflag & VI_FREE) != 0,
		    ("Removing vnode not on freelist"));
		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
		    ("Mangling active vnode"));
		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);

		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if ((mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
			continue;
		}
		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
		    vp, ("vp inconsistent on freelist"));

		/*
		 * The clear of VI_FREE prevents activation of the
		 * vnode.  There is no sense in putting the vnode on
		 * the mount point active list, only to remove it
		 * later during recycling.  Inline the relevant part
		 * of vholdl(), to avoid triggering assertions or
		 * activating.
		 */
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		VNODE_REFCOUNT_FENCE_REL();
		refcount_acquire(&vp->v_holdcnt);

		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_free_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_free_list_mtx);
}


/* XXX some names and initialization are bad for limits and watermarks. */
static int
vspace(void)
{
	int space;

	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
	if (numvnodes > desiredvnodes)
		return (0);
	space = desiredvnodes - numvnodes;
	if (freevnodes > wantfreevnodes)
		space += freevnodes - wantfreevnodes;
	return (space);
}

static void
vnlru_return_batch_locked(struct mount *mp)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	if (mp->mnt_tmpfreevnodelistsize == 0)
		return;

	TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) {
		VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp,
		    ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist"));
		vp->v_mflag &= ~VMP_TMPMNTFREELIST;
	}
	mtx_lock(&vnode_free_list_mtx);
	TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist);
	freevnodes += mp->mnt_tmpfreevnodelistsize;
	mtx_unlock(&vnode_free_list_mtx);
	mp->mnt_tmpfreevnodelistsize = 0;
}

static void
vnlru_return_batch(struct mount *mp)
{

	mtx_lock(&mp->mnt_listmtx);
	vnlru_return_batch_locked(mp);
	mtx_unlock(&mp->mnt_listmtx);
}

static void
vnlru_return_batches(struct vfsops *mnt_op)
{
	struct mount *mp, *nmp;
	bool need_unbusy;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		need_unbusy = false;
		if (mnt_op != NULL && mp->mnt_op != mnt_op)
			goto next;
		if (mp->mnt_tmpfreevnodelistsize == 0)
			goto next;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) {
			vnlru_return_batch(mp);
			need_unbusy = true;
			mtx_lock(&mountlist_mtx);
		}
next:
		nmp = TAILQ_NEXT(mp, mnt_list);
		if (need_unbusy)
			vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	unsigned long onumvnodes;
	int done, force, reclaim_nc_src, trigger, usevnodes;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_free_list_mtx);
		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (numvnodes > desiredvnodes)
			vnlru_free_locked(numvnodes - desiredvnodes, NULL);
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or by not excessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (vspace() >= vlowat && force == 0) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		onumvnodes = numvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (numvnodes <= desiredvnodes)
			usevnodes = numvnodes - freevnodes;
		else
			usevnodes = numvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, reclaim_nc_src, trigger);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim();
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
		/*
		 * After becoming active to expand above low water, keep
		 * active until above high water.
		 */
		force = vspace() < vhiwat;
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0) {
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

static void
vcheckspace(void)
{

	if (vspace() < vlowat && vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

/*
 * Wait if necessary for space for a new vnode.
 */
static int
getnewvnode_wait(int suspended)
{

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (numvnodes >= desiredvnodes) {
		if (suspended) {
			/*
			 * The file system is being suspended.  We cannot
			 * risk a deadlock here, so allow allocation of
			 * another vnode even if this would give too many.
			 */
			return (0);
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
	}
	/* Post-adjust like the pre-adjust in getnewvnode(). */
	if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
		vnlru_free_locked(1, NULL);
	return (numvnodes >= desiredvnodes ? ENFILE : 0);
}

/*
 * This hack is fragile, and probably not needed any more now that the
 * watermark handling works.
 */
void
getnewvnode_reserve(u_int count)
{
	struct thread *td;

	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
	/* XXX no longer so quick, but this part is not racy. */
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
		vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes,
		    freevnodes - wantfreevnodes), NULL);
	mtx_unlock(&vnode_free_list_mtx);

	td = curthread;
	/* First try to be quick and racy. */
	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
		td->td_vp_reserv += count;
		vcheckspace();	/* XXX no longer so quick, but more racy */
		return;
	} else
		atomic_subtract_long(&numvnodes, count);

	mtx_lock(&vnode_free_list_mtx);
	while (count > 0) {
		if (getnewvnode_wait(0) == 0) {
			count--;
			td->td_vp_reserv++;
			atomic_add_long(&numvnodes, 1);
		}
	}
	vcheckspace();
	mtx_unlock(&vnode_free_list_mtx);
}

/*
 * This hack is fragile, especially if desiredvnodes or wantvnodes are
 * misconfigured or changed significantly.  Reducing desiredvnodes below
 * the reserved amount should cause bizarre behaviour like reducing it
 * below the number of active vnodes -- the system will try to reduce
 * numvnodes to match, but should fail, so the subtraction below should
 * not overflow.
 */
void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;
	static int cyclecount;
	int error __unused;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_free_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	counter_u64_add(vnodes_created, 1);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem use a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;
	int active;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
	    ("Active vnode list size %d > Vnode list size %d",
	    mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
	active = vp->v_iflag & VI_ACTIVE;
	vp->v_iflag &= ~VI_ACTIVE;
	if (active) {
		mtx_lock(&mp->mnt_listmtx);
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
	 * Hence the need to hold the vnode interlock throughout.
	 */
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) &&
	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		if (dtr != NULL)
			dtr(vp, dtr_arg);
		return (EBUSY);
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
	    ("Activating already active vnode"));
	vp->v_iflag |= VI_ACTIVE;
	mtx_lock(&mp->mnt_listmtx);
	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
	mp->mnt_activevnodelistsize++;
	mtx_unlock(&mp->mnt_listmtx);
	VI_UNLOCK(vp);
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0 && !(flags & V_CLEANONLY))
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		if ((flags & V_VMIO) == 0) {
			BO_UNLOCK(bo);
			if (bo->bo_object != NULL) {
				VM_OBJECT_WLOCK(bo->bo_object);
				vm_object_pip_wait(bo->bo_object, "bovlbx");
				VM_OBJECT_WUNLOCK(bo->bo_object);
			}
			BO_LOCK(bo);
		}
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL &&
	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
		VM_OBJECT_WLOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
		    OBJPR_CLEANONLY : 0);
		VM_OBJECT_WUNLOCK(bo->bo_object);
	}

#ifdef INVARIANTS
	BO_LOCK(bo);
	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO |
	    V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 ||
	    bo->bo_clean.bv_cnt > 0))
		panic("vinvalbuf: flush failed");
	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 &&
	    bo->bo_dirty.bv_cnt > 0)
		panic("vinvalbuf: flush dirty failed");
	BO_UNLOCK(bo);
#endif
	return (0);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{

	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
	if (vp->v_object != NULL && vp->v_object->handle != vp)
		return (0);
	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
}

/*
 * Flush out buffers on the specified list.
 *
 */
static int
flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
    int slptimeo)
{
	struct buf *bp, *nbp;
	int retval, error;
	daddr_t lblkno;
	b_xflags_t xflags;

	ASSERT_BO_WLOCKED(bo);

	retval = 0;
	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
		/*
		 * If we are flushing both V_NORMAL and V_ALT buffers then
		 * do not skip any buffers.  If we are flushing only V_NORMAL
		 * buffers then skip buffers marked as BX_ALTDATA.  If we are
		 * flushing only V_ALT buffers then skip buffers not marked
		 * as BX_ALTDATA.
		 */
		if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) &&
		    (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) {
			continue;
		}
		if (nbp != NULL) {
			lblkno = nbp->b_lblkno;
			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
		}
		retval = EAGAIN;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			BO_LOCK(bo);
			return (error != ENOLCK ? error : EAGAIN);
		}
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			bremfree(bp);
			bp->b_flags |= B_ASYNC;
			bwrite(bp);
			BO_LOCK(bo);
			return (EAGAIN);	/* XXX: why not loop ? */
*/ 1799 } 1800 bremfree(bp); 1801 bp->b_flags |= (B_INVAL | B_RELBUF); 1802 bp->b_flags &= ~B_ASYNC; 1803 brelse(bp); 1804 BO_LOCK(bo); 1805 if (nbp == NULL) 1806 break; 1807 nbp = gbincore(bo, lblkno); 1808 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1809 != xflags) 1810 break; /* nbp invalid */ 1811 } 1812 return (retval); 1813 } 1814 1815 int 1816 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 1817 { 1818 struct buf *bp; 1819 int error; 1820 daddr_t lblkno; 1821 1822 ASSERT_BO_LOCKED(bo); 1823 1824 for (lblkno = startn;;) { 1825 again: 1826 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 1827 if (bp == NULL || bp->b_lblkno >= endn || 1828 bp->b_lblkno < startn) 1829 break; 1830 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 1831 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 1832 if (error != 0) { 1833 BO_RLOCK(bo); 1834 if (error == ENOLCK) 1835 goto again; 1836 return (error); 1837 } 1838 KASSERT(bp->b_bufobj == bo, 1839 ("bp %p wrong b_bufobj %p should be %p", 1840 bp, bp->b_bufobj, bo)); 1841 lblkno = bp->b_lblkno + 1; 1842 if ((bp->b_flags & B_MANAGED) == 0) 1843 bremfree(bp); 1844 bp->b_flags |= B_RELBUF; 1845 /* 1846 * In the VMIO case, use the B_NOREUSE flag to hint that the 1847 * pages backing each buffer in the range are unlikely to be 1848 * reused. Dirty buffers will have the hint applied once 1849 * they've been written. 1850 */ 1851 if ((bp->b_flags & B_VMIO) != 0) 1852 bp->b_flags |= B_NOREUSE; 1853 brelse(bp); 1854 BO_RLOCK(bo); 1855 } 1856 return (0); 1857 } 1858 1859 /* 1860 * Truncate a file's buffer and pages to a specified length. This 1861 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1862 * sync activity. 1863 */ 1864 int 1865 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1866 { 1867 struct buf *bp, *nbp; 1868 int anyfreed; 1869 daddr_t trunclbn; 1870 struct bufobj *bo; 1871 1872 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1873 vp, cred, blksize, (uintmax_t)length); 1874 1875 /* 1876 * Round up to the *next* lbn. 
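 *
 * Worked example (illustrative numbers, not from the original source):
 * with blksize = 16384 and length = 20000,
 *
 *	trunclbn = howmany(20000, 16384) = 2
 *
 * so buffers at lblkno 0 and 1 are kept, while buffers at lblkno >= 2
 * are invalidated by the loops below.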
1877 */ 1878 trunclbn = howmany(length, blksize); 1879 1880 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1881 restart: 1882 bo = &vp->v_bufobj; 1883 BO_LOCK(bo); 1884 anyfreed = 1; 1885 for (;anyfreed;) { 1886 anyfreed = 0; 1887 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1888 if (bp->b_lblkno < trunclbn) 1889 continue; 1890 if (BUF_LOCK(bp, 1891 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1892 BO_LOCKPTR(bo)) == ENOLCK) 1893 goto restart; 1894 1895 bremfree(bp); 1896 bp->b_flags |= (B_INVAL | B_RELBUF); 1897 bp->b_flags &= ~B_ASYNC; 1898 brelse(bp); 1899 anyfreed = 1; 1900 1901 BO_LOCK(bo); 1902 if (nbp != NULL && 1903 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1904 (nbp->b_vp != vp) || 1905 (nbp->b_flags & B_DELWRI))) { 1906 BO_UNLOCK(bo); 1907 goto restart; 1908 } 1909 } 1910 1911 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1912 if (bp->b_lblkno < trunclbn) 1913 continue; 1914 if (BUF_LOCK(bp, 1915 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1916 BO_LOCKPTR(bo)) == ENOLCK) 1917 goto restart; 1918 bremfree(bp); 1919 bp->b_flags |= (B_INVAL | B_RELBUF); 1920 bp->b_flags &= ~B_ASYNC; 1921 brelse(bp); 1922 anyfreed = 1; 1923 1924 BO_LOCK(bo); 1925 if (nbp != NULL && 1926 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1927 (nbp->b_vp != vp) || 1928 (nbp->b_flags & B_DELWRI) == 0)) { 1929 BO_UNLOCK(bo); 1930 goto restart; 1931 } 1932 } 1933 } 1934 1935 if (length > 0) { 1936 restartsync: 1937 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1938 if (bp->b_lblkno > 0) 1939 continue; 1940 /* 1941 * Since we hold the vnode lock this should only 1942 * fail if we're racing with the buf daemon. 1943 */ 1944 if (BUF_LOCK(bp, 1945 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1946 BO_LOCKPTR(bo)) == ENOLCK) { 1947 goto restart; 1948 } 1949 VNASSERT((bp->b_flags & B_DELWRI), vp, 1950 ("buf(%p) on dirty queue without DELWRI", bp)); 1951 1952 bremfree(bp); 1953 bawrite(bp); 1954 BO_LOCK(bo); 1955 goto restartsync; 1956 } 1957 } 1958 1959 bufobj_wwait(bo, 0, 0); 1960 BO_UNLOCK(bo); 1961 vnode_pager_setsize(vp, length); 1962 1963 return (0); 1964 } 1965 1966 static void 1967 buf_vlist_remove(struct buf *bp) 1968 { 1969 struct bufv *bv; 1970 1971 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1972 ASSERT_BO_WLOCKED(bp->b_bufobj); 1973 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1974 (BX_VNDIRTY|BX_VNCLEAN), 1975 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1976 if (bp->b_xflags & BX_VNDIRTY) 1977 bv = &bp->b_bufobj->bo_dirty; 1978 else 1979 bv = &bp->b_bufobj->bo_clean; 1980 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1981 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1982 bv->bv_cnt--; 1983 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1984 } 1985 1986 /* 1987 * Add the buffer to the sorted clean or dirty block list. 1988 * 1989 * NOTE: xflags is passed as a constant, optimizing this inline function! 1990 */ 1991 static void 1992 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1993 { 1994 struct bufv *bv; 1995 struct buf *n; 1996 int error; 1997 1998 ASSERT_BO_WLOCKED(bo); 1999 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2000 ("dead bo %p", bo)); 2001 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2002 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2003 bp->b_xflags |= xflags; 2004 if (xflags & BX_VNDIRTY) 2005 bv = &bo->bo_dirty; 2006 else 2007 bv = &bo->bo_clean; 2008 2009 /* 2010 * Keep the list ordered. Optimize empty list insertion. 
Assume 2011 * we tend to grow at the tail so lookup_le should usually be cheaper 2012 * than _ge. 2013 */ 2014 if (bv->bv_cnt == 0 || 2015 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2016 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2017 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2018 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2019 else 2020 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2021 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2022 if (error) 2023 panic("buf_vlist_add: Preallocated nodes insufficient."); 2024 bv->bv_cnt++; 2025 } 2026 2027 /* 2028 * Look up a buffer using the buffer tries. 2029 */ 2030 struct buf * 2031 gbincore(struct bufobj *bo, daddr_t lblkno) 2032 { 2033 struct buf *bp; 2034 2035 ASSERT_BO_LOCKED(bo); 2036 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2037 if (bp != NULL) 2038 return (bp); 2039 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2040 } 2041 2042 /* 2043 * Associate a buffer with a vnode. 2044 */ 2045 void 2046 bgetvp(struct vnode *vp, struct buf *bp) 2047 { 2048 struct bufobj *bo; 2049 2050 bo = &vp->v_bufobj; 2051 ASSERT_BO_WLOCKED(bo); 2052 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2053 2054 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2055 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2056 ("bgetvp: bp already attached! %p", bp)); 2057 2058 vhold(vp); 2059 bp->b_vp = vp; 2060 bp->b_bufobj = bo; 2061 /* 2062 * Insert onto list for new vnode. 2063 */ 2064 buf_vlist_add(bp, bo, BX_VNCLEAN); 2065 } 2066 2067 /* 2068 * Disassociate a buffer from a vnode. 2069 */ 2070 void 2071 brelvp(struct buf *bp) 2072 { 2073 struct bufobj *bo; 2074 struct vnode *vp; 2075 2076 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2077 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2078 2079 /* 2080 * Delete from old vnode list, if on one. 2081 */ 2082 vp = bp->b_vp; /* XXX */ 2083 bo = bp->b_bufobj; 2084 BO_LOCK(bo); 2085 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2086 buf_vlist_remove(bp); 2087 else 2088 panic("brelvp: Buffer %p not on queue.", bp); 2089 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2090 bo->bo_flag &= ~BO_ONWORKLST; 2091 mtx_lock(&sync_mtx); 2092 LIST_REMOVE(bo, bo_synclist); 2093 syncer_worklist_len--; 2094 mtx_unlock(&sync_mtx); 2095 } 2096 bp->b_vp = NULL; 2097 bp->b_bufobj = NULL; 2098 BO_UNLOCK(bo); 2099 vdrop(vp); 2100 } 2101 2102 /* 2103 * Add an item to the syncer work queue. 
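 *
 * Illustrative sketch (the values are assumptions, not taken from a real
 * caller): a filesystem that just dirtied a regular file typically gets
 * here via reassignbuf() with delay = filedelay.  The bufobj lands in
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *
 * e.g. with syncer_delayno == 10, delay == 30 and syncer_mask == 31 the
 * entry is queued in slot (10 + 30) & 31 == 8, i.e. 30 slots ahead of
 * the current hand of the wheel.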
2104 */ 2105 static void 2106 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2107 { 2108 int slot; 2109 2110 ASSERT_BO_WLOCKED(bo); 2111 2112 mtx_lock(&sync_mtx); 2113 if (bo->bo_flag & BO_ONWORKLST) 2114 LIST_REMOVE(bo, bo_synclist); 2115 else { 2116 bo->bo_flag |= BO_ONWORKLST; 2117 syncer_worklist_len++; 2118 } 2119 2120 if (delay > syncer_maxdelay - 2) 2121 delay = syncer_maxdelay - 2; 2122 slot = (syncer_delayno + delay) & syncer_mask; 2123 2124 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2125 mtx_unlock(&sync_mtx); 2126 } 2127 2128 static int 2129 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2130 { 2131 int error, len; 2132 2133 mtx_lock(&sync_mtx); 2134 len = syncer_worklist_len - sync_vnode_count; 2135 mtx_unlock(&sync_mtx); 2136 error = SYSCTL_OUT(req, &len, sizeof(len)); 2137 return (error); 2138 } 2139 2140 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2141 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2142 2143 static struct proc *updateproc; 2144 static void sched_sync(void); 2145 static struct kproc_desc up_kp = { 2146 "syncer", 2147 sched_sync, 2148 &updateproc 2149 }; 2150 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2151 2152 static int 2153 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2154 { 2155 struct vnode *vp; 2156 struct mount *mp; 2157 2158 *bo = LIST_FIRST(slp); 2159 if (*bo == NULL) 2160 return (0); 2161 vp = bo2vnode(*bo); 2162 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2163 return (1); 2164 /* 2165 * We use vhold in case the vnode does not 2166 * successfully sync. vhold prevents the vnode from 2167 * going away when we unlock the sync_mtx so that 2168 * we can acquire the vnode interlock. 2169 */ 2170 vholdl(vp); 2171 mtx_unlock(&sync_mtx); 2172 VI_UNLOCK(vp); 2173 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2174 vdrop(vp); 2175 mtx_lock(&sync_mtx); 2176 return (*bo == LIST_FIRST(slp)); 2177 } 2178 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2179 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2180 VOP_UNLOCK(vp, 0); 2181 vn_finished_write(mp); 2182 BO_LOCK(*bo); 2183 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2184 /* 2185 * Put us back on the worklist. The worklist 2186 * routine will remove us from our current 2187 * position and then add us back in at a later 2188 * position. 2189 */ 2190 vn_syncer_add_to_worklist(*bo, syncdelay); 2191 } 2192 BO_UNLOCK(*bo); 2193 vdrop(vp); 2194 mtx_lock(&sync_mtx); 2195 return (0); 2196 } 2197 2198 static int first_printf = 1; 2199 2200 /* 2201 * System filesystem synchronizer daemon. 
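 *
 * (Overview, paraphrasing the code below: roughly once per second the
 * daemon advances syncer_delayno, drains the bucket it lands on through
 * sync_vnode(), and any bufobj that is still on the worklist after its
 * VOP_FSYNC(MNT_LAZY) is re-queued syncdelay seconds further along the
 * wheel.)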
2202 */ 2203 static void 2204 sched_sync(void) 2205 { 2206 struct synclist *next, *slp; 2207 struct bufobj *bo; 2208 long starttime; 2209 struct thread *td = curthread; 2210 int last_work_seen; 2211 int net_worklist_len; 2212 int syncer_final_iter; 2213 int error; 2214 2215 last_work_seen = 0; 2216 syncer_final_iter = 0; 2217 syncer_state = SYNCER_RUNNING; 2218 starttime = time_uptime; 2219 td->td_pflags |= TDP_NORUNNINGBUF; 2220 2221 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2222 SHUTDOWN_PRI_LAST); 2223 2224 mtx_lock(&sync_mtx); 2225 for (;;) { 2226 if (syncer_state == SYNCER_FINAL_DELAY && 2227 syncer_final_iter == 0) { 2228 mtx_unlock(&sync_mtx); 2229 kproc_suspend_check(td->td_proc); 2230 mtx_lock(&sync_mtx); 2231 } 2232 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2233 if (syncer_state != SYNCER_RUNNING && 2234 starttime != time_uptime) { 2235 if (first_printf) { 2236 printf("\nSyncing disks, vnodes remaining... "); 2237 first_printf = 0; 2238 } 2239 printf("%d ", net_worklist_len); 2240 } 2241 starttime = time_uptime; 2242 2243 /* 2244 * Push files whose dirty time has expired. Be careful 2245 * of interrupt race on slp queue. 2246 * 2247 * Skip over empty worklist slots when shutting down. 2248 */ 2249 do { 2250 slp = &syncer_workitem_pending[syncer_delayno]; 2251 syncer_delayno += 1; 2252 if (syncer_delayno == syncer_maxdelay) 2253 syncer_delayno = 0; 2254 next = &syncer_workitem_pending[syncer_delayno]; 2255 /* 2256 * If the worklist has wrapped since the 2257 * it was emptied of all but syncer vnodes, 2258 * switch to the FINAL_DELAY state and run 2259 * for one more second. 2260 */ 2261 if (syncer_state == SYNCER_SHUTTING_DOWN && 2262 net_worklist_len == 0 && 2263 last_work_seen == syncer_delayno) { 2264 syncer_state = SYNCER_FINAL_DELAY; 2265 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2266 } 2267 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2268 syncer_worklist_len > 0); 2269 2270 /* 2271 * Keep track of the last time there was anything 2272 * on the worklist other than syncer vnodes. 2273 * Return to the SHUTTING_DOWN state if any 2274 * new work appears. 2275 */ 2276 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2277 last_work_seen = syncer_delayno; 2278 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2279 syncer_state = SYNCER_SHUTTING_DOWN; 2280 while (!LIST_EMPTY(slp)) { 2281 error = sync_vnode(slp, &bo, td); 2282 if (error == 1) { 2283 LIST_REMOVE(bo, bo_synclist); 2284 LIST_INSERT_HEAD(next, bo, bo_synclist); 2285 continue; 2286 } 2287 2288 if (first_printf == 0) { 2289 /* 2290 * Drop the sync mutex, because some watchdog 2291 * drivers need to sleep while patting 2292 */ 2293 mtx_unlock(&sync_mtx); 2294 wdog_kern_pat(WD_LASTVAL); 2295 mtx_lock(&sync_mtx); 2296 } 2297 2298 } 2299 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2300 syncer_final_iter--; 2301 /* 2302 * The variable rushjob allows the kernel to speed up the 2303 * processing of the filesystem syncer process. A rushjob 2304 * value of N tells the filesystem syncer to process the next 2305 * N seconds worth of work on its queue ASAP. Currently rushjob 2306 * is used by the soft update code to speed up the filesystem 2307 * syncer process when the incore state is getting so far 2308 * ahead of the disk that the kernel memory pool is being 2309 * threatened with exhaustion. 
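 *
 * Minimal usage sketch (illustrative; soft updates is the in-tree
 * consumer): a subsystem that wants the next few seconds of queued work
 * flushed promptly simply calls
 *
 *	(void)speedup_syncer();
 *
 * once per extra second of work it wants pulled in; each successful call
 * bumps rushjob by one, and rushjob is never allowed to exceed
 * syncdelay / 2.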
2310 */ 2311 if (rushjob > 0) { 2312 rushjob -= 1; 2313 continue; 2314 } 2315 /* 2316 * Just sleep for a short period of time between 2317 * iterations when shutting down to allow some I/O 2318 * to happen. 2319 * 2320 * If it has taken us less than a second to process the 2321 * current work, then wait. Otherwise start right over 2322 * again. We can still lose time if any single round 2323 * takes more than two seconds, but it does not really 2324 * matter as we are just trying to generally pace the 2325 * filesystem activity. 2326 */ 2327 if (syncer_state != SYNCER_RUNNING || 2328 time_uptime == starttime) { 2329 thread_lock(td); 2330 sched_prio(td, PPAUSE); 2331 thread_unlock(td); 2332 } 2333 if (syncer_state != SYNCER_RUNNING) 2334 cv_timedwait(&sync_wakeup, &sync_mtx, 2335 hz / SYNCER_SHUTDOWN_SPEEDUP); 2336 else if (time_uptime == starttime) 2337 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2338 } 2339 } 2340 2341 /* 2342 * Request the syncer daemon to speed up its work. 2343 * We never push it to speed up more than half of its 2344 * normal turn time, otherwise it could take over the cpu. 2345 */ 2346 int 2347 speedup_syncer(void) 2348 { 2349 int ret = 0; 2350 2351 mtx_lock(&sync_mtx); 2352 if (rushjob < syncdelay / 2) { 2353 rushjob += 1; 2354 stat_rush_requests += 1; 2355 ret = 1; 2356 } 2357 mtx_unlock(&sync_mtx); 2358 cv_broadcast(&sync_wakeup); 2359 return (ret); 2360 } 2361 2362 /* 2363 * Tell the syncer to speed up its work and run though its work 2364 * list several times, then tell it to shut down. 2365 */ 2366 static void 2367 syncer_shutdown(void *arg, int howto) 2368 { 2369 2370 if (howto & RB_NOSYNC) 2371 return; 2372 mtx_lock(&sync_mtx); 2373 syncer_state = SYNCER_SHUTTING_DOWN; 2374 rushjob = 0; 2375 mtx_unlock(&sync_mtx); 2376 cv_broadcast(&sync_wakeup); 2377 kproc_shutdown(arg, howto); 2378 } 2379 2380 void 2381 syncer_suspend(void) 2382 { 2383 2384 syncer_shutdown(updateproc, 0); 2385 } 2386 2387 void 2388 syncer_resume(void) 2389 { 2390 2391 mtx_lock(&sync_mtx); 2392 first_printf = 1; 2393 syncer_state = SYNCER_RUNNING; 2394 mtx_unlock(&sync_mtx); 2395 cv_broadcast(&sync_wakeup); 2396 kproc_resume(updateproc); 2397 } 2398 2399 /* 2400 * Reassign a buffer from one vnode to another. 2401 * Used to assign file specific control information 2402 * (indirect blocks) to the vnode to which they belong. 2403 */ 2404 void 2405 reassignbuf(struct buf *bp) 2406 { 2407 struct vnode *vp; 2408 struct bufobj *bo; 2409 int delay; 2410 #ifdef INVARIANTS 2411 struct bufv *bv; 2412 #endif 2413 2414 vp = bp->b_vp; 2415 bo = bp->b_bufobj; 2416 ++reassignbufcalls; 2417 2418 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2419 bp, bp->b_vp, bp->b_flags); 2420 /* 2421 * B_PAGING flagged buffers cannot be reassigned because their vp 2422 * is not fully linked in. 2423 */ 2424 if (bp->b_flags & B_PAGING) 2425 panic("cannot reassign paging buffer"); 2426 2427 /* 2428 * Delete from old vnode list, if on one. 2429 */ 2430 BO_LOCK(bo); 2431 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2432 buf_vlist_remove(bp); 2433 else 2434 panic("reassignbuf: Buffer %p not on queue.", bp); 2435 /* 2436 * If dirty, put on list of dirty buffers; otherwise insert onto list 2437 * of clean buffers. 
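 *
 * (Illustrative path, assuming the usual buffer-cache flow: bdwrite()
 * marks a buffer B_DELWRI via bdirty() and calls reassignbuf(), which
 * moves the buffer to bo_dirty here and, if the bufobj was not already
 * on the syncer worklist, schedules it with the per-type delay chosen
 * below: dirdelay, metadelay or filedelay.)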
2438 */ 2439 if (bp->b_flags & B_DELWRI) { 2440 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2441 switch (vp->v_type) { 2442 case VDIR: 2443 delay = dirdelay; 2444 break; 2445 case VCHR: 2446 delay = metadelay; 2447 break; 2448 default: 2449 delay = filedelay; 2450 } 2451 vn_syncer_add_to_worklist(bo, delay); 2452 } 2453 buf_vlist_add(bp, bo, BX_VNDIRTY); 2454 } else { 2455 buf_vlist_add(bp, bo, BX_VNCLEAN); 2456 2457 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2458 mtx_lock(&sync_mtx); 2459 LIST_REMOVE(bo, bo_synclist); 2460 syncer_worklist_len--; 2461 mtx_unlock(&sync_mtx); 2462 bo->bo_flag &= ~BO_ONWORKLST; 2463 } 2464 } 2465 #ifdef INVARIANTS 2466 bv = &bo->bo_clean; 2467 bp = TAILQ_FIRST(&bv->bv_hd); 2468 KASSERT(bp == NULL || bp->b_bufobj == bo, 2469 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2470 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2471 KASSERT(bp == NULL || bp->b_bufobj == bo, 2472 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2473 bv = &bo->bo_dirty; 2474 bp = TAILQ_FIRST(&bv->bv_hd); 2475 KASSERT(bp == NULL || bp->b_bufobj == bo, 2476 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2477 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2478 KASSERT(bp == NULL || bp->b_bufobj == bo, 2479 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2480 #endif 2481 BO_UNLOCK(bo); 2482 } 2483 2484 static void 2485 v_init_counters(struct vnode *vp) 2486 { 2487 2488 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2489 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2490 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2491 2492 refcount_init(&vp->v_holdcnt, 1); 2493 refcount_init(&vp->v_usecount, 1); 2494 } 2495 2496 static void 2497 v_incr_usecount_locked(struct vnode *vp) 2498 { 2499 2500 ASSERT_VI_LOCKED(vp, __func__); 2501 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2502 VNASSERT(vp->v_usecount == 0, vp, 2503 ("vnode with usecount and VI_OWEINACT set")); 2504 vp->v_iflag &= ~VI_OWEINACT; 2505 } 2506 refcount_acquire(&vp->v_usecount); 2507 v_incr_devcount(vp); 2508 } 2509 2510 /* 2511 * Increment the use count on the vnode, taking care to reference 2512 * the driver's usecount if this is a chardev. 2513 */ 2514 static void 2515 v_incr_usecount(struct vnode *vp) 2516 { 2517 2518 ASSERT_VI_UNLOCKED(vp, __func__); 2519 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2520 2521 if (vp->v_type != VCHR && 2522 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2523 VNODE_REFCOUNT_FENCE_ACQ(); 2524 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2525 ("vnode with usecount and VI_OWEINACT set")); 2526 } else { 2527 VI_LOCK(vp); 2528 v_incr_usecount_locked(vp); 2529 VI_UNLOCK(vp); 2530 } 2531 } 2532 2533 /* 2534 * Increment si_usecount of the associated device, if any. 2535 */ 2536 static void 2537 v_incr_devcount(struct vnode *vp) 2538 { 2539 2540 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2541 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2542 dev_lock(); 2543 vp->v_rdev->si_usecount++; 2544 dev_unlock(); 2545 } 2546 } 2547 2548 /* 2549 * Decrement si_usecount of the associated device, if any. 2550 */ 2551 static void 2552 v_decr_devcount(struct vnode *vp) 2553 { 2554 2555 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2556 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2557 dev_lock(); 2558 vp->v_rdev->si_usecount--; 2559 dev_unlock(); 2560 } 2561 } 2562 2563 /* 2564 * Grab a particular vnode from the free list, increment its 2565 * reference count and lock it. VI_DOOMED is set if the vnode 2566 * is being destroyed. 
Only callers who specify LK_RETRY will 2567 * see doomed vnodes. If inactive processing was delayed in 2568 * vput try to do it here. 2569 * 2570 * Notes on lockless counter manipulation: 2571 * _vhold, vputx and other routines make various decisions based 2572 * on either holdcnt or usecount being 0. As long as either counter 2573 * is not transitioning 0->1 nor 1->0, the manipulation can be done 2574 * with atomic operations. Otherwise the interlock is taken covering 2575 * both the atomic and additional actions. 2576 */ 2577 int 2578 vget(struct vnode *vp, int flags, struct thread *td) 2579 { 2580 int error, oweinact; 2581 2582 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2583 ("vget: invalid lock operation")); 2584 2585 if ((flags & LK_INTERLOCK) != 0) 2586 ASSERT_VI_LOCKED(vp, __func__); 2587 else 2588 ASSERT_VI_UNLOCKED(vp, __func__); 2589 if ((flags & LK_VNHELD) != 0) 2590 VNASSERT((vp->v_holdcnt > 0), vp, 2591 ("vget: LK_VNHELD passed but vnode not held")); 2592 2593 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2594 2595 if ((flags & LK_VNHELD) == 0) 2596 _vhold(vp, (flags & LK_INTERLOCK) != 0); 2597 2598 if ((error = vn_lock(vp, flags)) != 0) { 2599 vdrop(vp); 2600 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2601 vp); 2602 return (error); 2603 } 2604 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2605 panic("vget: vn_lock failed to return ENOENT\n"); 2606 /* 2607 * We don't guarantee that any particular close will 2608 * trigger inactive processing so just make a best effort 2609 * here at preventing a reference to a removed file. If 2610 * we don't succeed no harm is done. 2611 * 2612 * Upgrade our holdcnt to a usecount. 2613 */ 2614 if (vp->v_type == VCHR || 2615 !refcount_acquire_if_not_zero(&vp->v_usecount)) { 2616 VI_LOCK(vp); 2617 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2618 oweinact = 0; 2619 } else { 2620 oweinact = 1; 2621 vp->v_iflag &= ~VI_OWEINACT; 2622 VNODE_REFCOUNT_FENCE_REL(); 2623 } 2624 refcount_acquire(&vp->v_usecount); 2625 v_incr_devcount(vp); 2626 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2627 (flags & LK_NOWAIT) == 0) 2628 vinactive(vp, td); 2629 VI_UNLOCK(vp); 2630 } 2631 return (0); 2632 } 2633 2634 /* 2635 * Increase the reference (use) and hold count of a vnode. 2636 * This will also remove the vnode from the free list if it is presently free. 2637 */ 2638 void 2639 vref(struct vnode *vp) 2640 { 2641 2642 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2643 _vhold(vp, false); 2644 v_incr_usecount(vp); 2645 } 2646 2647 void 2648 vrefl(struct vnode *vp) 2649 { 2650 2651 ASSERT_VI_LOCKED(vp, __func__); 2652 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2653 _vhold(vp, true); 2654 v_incr_usecount_locked(vp); 2655 } 2656 2657 void 2658 vrefact(struct vnode *vp) 2659 { 2660 2661 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2662 if (__predict_false(vp->v_type == VCHR)) { 2663 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2664 ("%s: wrong ref counts", __func__)); 2665 vref(vp); 2666 return; 2667 } 2668 #ifdef INVARIANTS 2669 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 2670 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2671 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2672 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2673 #else 2674 refcount_acquire(&vp->v_holdcnt); 2675 refcount_acquire(&vp->v_usecount); 2676 #endif 2677 } 2678 2679 /* 2680 * Return reference count of a vnode. 
2681 * 2682 * The results of this call are only guaranteed when some mechanism is used to 2683 * stop other processes from gaining references to the vnode. This may be the 2684 * case if the caller holds the only reference. This is also useful when stale 2685 * data is acceptable as race conditions may be accounted for by some other 2686 * means. 2687 */ 2688 int 2689 vrefcnt(struct vnode *vp) 2690 { 2691 2692 return (vp->v_usecount); 2693 } 2694 2695 #define VPUTX_VRELE 1 2696 #define VPUTX_VPUT 2 2697 #define VPUTX_VUNREF 3 2698 2699 /* 2700 * Decrement the use and hold counts for a vnode. 2701 * 2702 * See an explanation near vget() as to why atomic operation is safe. 2703 */ 2704 static void 2705 vputx(struct vnode *vp, int func) 2706 { 2707 int error; 2708 2709 KASSERT(vp != NULL, ("vputx: null vp")); 2710 if (func == VPUTX_VUNREF) 2711 ASSERT_VOP_LOCKED(vp, "vunref"); 2712 else if (func == VPUTX_VPUT) 2713 ASSERT_VOP_LOCKED(vp, "vput"); 2714 else 2715 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2716 ASSERT_VI_UNLOCKED(vp, __func__); 2717 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2718 2719 if (vp->v_type != VCHR && 2720 refcount_release_if_not_last(&vp->v_usecount)) { 2721 if (func == VPUTX_VPUT) 2722 VOP_UNLOCK(vp, 0); 2723 vdrop(vp); 2724 return; 2725 } 2726 2727 VI_LOCK(vp); 2728 2729 /* 2730 * We want to hold the vnode until the inactive finishes to 2731 * prevent vgone() races. We drop the use count here and the 2732 * hold count below when we're done. 2733 */ 2734 if (!refcount_release(&vp->v_usecount) || 2735 (vp->v_iflag & VI_DOINGINACT)) { 2736 if (func == VPUTX_VPUT) 2737 VOP_UNLOCK(vp, 0); 2738 v_decr_devcount(vp); 2739 vdropl(vp); 2740 return; 2741 } 2742 2743 v_decr_devcount(vp); 2744 2745 error = 0; 2746 2747 if (vp->v_usecount != 0) { 2748 vn_printf(vp, "vputx: usecount not zero for vnode "); 2749 panic("vputx: usecount not zero"); 2750 } 2751 2752 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2753 2754 /* 2755 * We must call VOP_INACTIVE with the node locked. Mark 2756 * as VI_DOINGINACT to avoid recursion. 2757 */ 2758 vp->v_iflag |= VI_OWEINACT; 2759 switch (func) { 2760 case VPUTX_VRELE: 2761 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2762 VI_LOCK(vp); 2763 break; 2764 case VPUTX_VPUT: 2765 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2766 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2767 LK_NOWAIT); 2768 VI_LOCK(vp); 2769 } 2770 break; 2771 case VPUTX_VUNREF: 2772 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2773 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2774 VI_LOCK(vp); 2775 } 2776 break; 2777 } 2778 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 2779 ("vnode with usecount and VI_OWEINACT set")); 2780 if (error == 0) { 2781 if (vp->v_iflag & VI_OWEINACT) 2782 vinactive(vp, curthread); 2783 if (func != VPUTX_VUNREF) 2784 VOP_UNLOCK(vp, 0); 2785 } 2786 vdropl(vp); 2787 } 2788 2789 /* 2790 * Vnode put/release. 2791 * If count drops to zero, call inactive routine and return to freelist. 2792 */ 2793 void 2794 vrele(struct vnode *vp) 2795 { 2796 2797 vputx(vp, VPUTX_VRELE); 2798 } 2799 2800 /* 2801 * Release an already locked vnode. This give the same effects as 2802 * unlock+vrele(), but takes less time and avoids releasing and 2803 * re-aquiring the lock (as vrele() acquires the lock internally.) 2804 */ 2805 void 2806 vput(struct vnode *vp) 2807 { 2808 2809 vputx(vp, VPUTX_VPUT); 2810 } 2811 2812 /* 2813 * Release an exclusively locked vnode. Do not unlock the vnode lock. 
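 *
 * Usage sketch (illustrative): the three release primitives differ only
 * in the vnode lock state they expect and leave behind:
 *
 *	vrele(vp);	caller typically does not hold the vnode lock
 *	vput(vp);	vnode locked on entry, unlocked on return
 *	vunref(vp);	vnode locked on entry and still locked on return
 *
 * All three drop one use reference and one hold reference.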
2814 */ 2815 void 2816 vunref(struct vnode *vp) 2817 { 2818 2819 vputx(vp, VPUTX_VUNREF); 2820 } 2821 2822 /* 2823 * Increase the hold count and activate if this is the first reference. 2824 */ 2825 void 2826 _vhold(struct vnode *vp, bool locked) 2827 { 2828 struct mount *mp; 2829 2830 if (locked) 2831 ASSERT_VI_LOCKED(vp, __func__); 2832 else 2833 ASSERT_VI_UNLOCKED(vp, __func__); 2834 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2835 if (!locked) { 2836 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 2837 VNODE_REFCOUNT_FENCE_ACQ(); 2838 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2839 ("_vhold: vnode with holdcnt is free")); 2840 return; 2841 } 2842 VI_LOCK(vp); 2843 } 2844 if ((vp->v_iflag & VI_FREE) == 0) { 2845 refcount_acquire(&vp->v_holdcnt); 2846 if (!locked) 2847 VI_UNLOCK(vp); 2848 return; 2849 } 2850 VNASSERT(vp->v_holdcnt == 0, vp, 2851 ("%s: wrong hold count", __func__)); 2852 VNASSERT(vp->v_op != NULL, vp, 2853 ("%s: vnode already reclaimed.", __func__)); 2854 /* 2855 * Remove a vnode from the free list, mark it as in use, 2856 * and put it on the active list. 2857 */ 2858 VNASSERT(vp->v_mount != NULL, vp, 2859 ("_vhold: vnode not on per mount vnode list")); 2860 mp = vp->v_mount; 2861 mtx_lock(&mp->mnt_listmtx); 2862 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 2863 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 2864 mp->mnt_tmpfreevnodelistsize--; 2865 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 2866 } else { 2867 mtx_lock(&vnode_free_list_mtx); 2868 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2869 freevnodes--; 2870 mtx_unlock(&vnode_free_list_mtx); 2871 } 2872 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2873 ("Activating already active vnode")); 2874 vp->v_iflag &= ~VI_FREE; 2875 vp->v_iflag |= VI_ACTIVE; 2876 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2877 mp->mnt_activevnodelistsize++; 2878 mtx_unlock(&mp->mnt_listmtx); 2879 refcount_acquire(&vp->v_holdcnt); 2880 if (!locked) 2881 VI_UNLOCK(vp); 2882 } 2883 2884 /* 2885 * Drop the hold count of the vnode. If this is the last reference to 2886 * the vnode we place it on the free list unless it has been vgone'd 2887 * (marked VI_DOOMED) in which case we will free it. 2888 * 2889 * Because the vnode vm object keeps a hold reference on the vnode if 2890 * there is at least one resident non-cached page, the vnode cannot 2891 * leave the active list without the page cleanup done. 2892 */ 2893 void 2894 _vdrop(struct vnode *vp, bool locked) 2895 { 2896 struct bufobj *bo; 2897 struct mount *mp; 2898 int active; 2899 2900 if (locked) 2901 ASSERT_VI_LOCKED(vp, __func__); 2902 else 2903 ASSERT_VI_UNLOCKED(vp, __func__); 2904 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2905 if ((int)vp->v_holdcnt <= 0) 2906 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2907 if (!locked) { 2908 if (refcount_release_if_not_last(&vp->v_holdcnt)) 2909 return; 2910 VI_LOCK(vp); 2911 } 2912 if (refcount_release(&vp->v_holdcnt) == 0) { 2913 VI_UNLOCK(vp); 2914 return; 2915 } 2916 if ((vp->v_iflag & VI_DOOMED) == 0) { 2917 /* 2918 * Mark a vnode as free: remove it from its active list 2919 * and put it up for recycling on the freelist. 
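 *
 * (Illustrative pairing: code that inspects a vnode without taking a use
 * reference brackets the access with vhold(vp) ... vdrop(vp); only when
 * the final hold is released does the free-list or destruction handling
 * below run.)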
2920 */ 2921 VNASSERT(vp->v_op != NULL, vp, 2922 ("vdropl: vnode already reclaimed.")); 2923 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2924 ("vnode already free")); 2925 VNASSERT(vp->v_holdcnt == 0, vp, 2926 ("vdropl: freeing when we shouldn't")); 2927 active = vp->v_iflag & VI_ACTIVE; 2928 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2929 vp->v_iflag &= ~VI_ACTIVE; 2930 mp = vp->v_mount; 2931 if (mp != NULL) { 2932 mtx_lock(&mp->mnt_listmtx); 2933 if (active) { 2934 TAILQ_REMOVE(&mp->mnt_activevnodelist, 2935 vp, v_actfreelist); 2936 mp->mnt_activevnodelistsize--; 2937 } 2938 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, 2939 vp, v_actfreelist); 2940 mp->mnt_tmpfreevnodelistsize++; 2941 vp->v_iflag |= VI_FREE; 2942 vp->v_mflag |= VMP_TMPMNTFREELIST; 2943 VI_UNLOCK(vp); 2944 if (mp->mnt_tmpfreevnodelistsize >= 2945 mnt_free_list_batch) 2946 vnlru_return_batch_locked(mp); 2947 mtx_unlock(&mp->mnt_listmtx); 2948 } else { 2949 VNASSERT(active == 0, vp, 2950 ("vdropl: active vnode not on per mount " 2951 "vnode list")); 2952 mtx_lock(&vnode_free_list_mtx); 2953 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 2954 v_actfreelist); 2955 freevnodes++; 2956 vp->v_iflag |= VI_FREE; 2957 VI_UNLOCK(vp); 2958 mtx_unlock(&vnode_free_list_mtx); 2959 } 2960 } else { 2961 VI_UNLOCK(vp); 2962 counter_u64_add(free_owe_inact, 1); 2963 } 2964 return; 2965 } 2966 /* 2967 * The vnode has been marked for destruction, so free it. 2968 * 2969 * The vnode will be returned to the zone where it will 2970 * normally remain until it is needed for another vnode. We 2971 * need to cleanup (or verify that the cleanup has already 2972 * been done) any residual data left from its current use 2973 * so as not to contaminate the freshly allocated vnode. 2974 */ 2975 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2976 atomic_subtract_long(&numvnodes, 1); 2977 bo = &vp->v_bufobj; 2978 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2979 ("cleaned vnode still on the free list.")); 2980 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2981 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2982 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2983 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2984 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2985 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2986 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2987 ("clean blk trie not empty")); 2988 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2989 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2990 ("dirty blk trie not empty")); 2991 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2992 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2993 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2994 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2995 ("Dangling rangelock waiters")); 2996 VI_UNLOCK(vp); 2997 #ifdef MAC 2998 mac_vnode_destroy(vp); 2999 #endif 3000 if (vp->v_pollinfo != NULL) { 3001 destroy_vpollinfo(vp->v_pollinfo); 3002 vp->v_pollinfo = NULL; 3003 } 3004 #ifdef INVARIANTS 3005 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. 
*/ 3006 vp->v_op = NULL; 3007 #endif 3008 vp->v_mountedhere = NULL; 3009 vp->v_unpcb = NULL; 3010 vp->v_rdev = NULL; 3011 vp->v_fifoinfo = NULL; 3012 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 3013 vp->v_iflag = 0; 3014 vp->v_vflag = 0; 3015 bo->bo_flag = 0; 3016 uma_zfree(vnode_zone, vp); 3017 } 3018 3019 /* 3020 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3021 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3022 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3023 * failed lock upgrade. 3024 */ 3025 void 3026 vinactive(struct vnode *vp, struct thread *td) 3027 { 3028 struct vm_object *obj; 3029 3030 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3031 ASSERT_VI_LOCKED(vp, "vinactive"); 3032 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3033 ("vinactive: recursed on VI_DOINGINACT")); 3034 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3035 vp->v_iflag |= VI_DOINGINACT; 3036 vp->v_iflag &= ~VI_OWEINACT; 3037 VI_UNLOCK(vp); 3038 /* 3039 * Before moving off the active list, we must be sure that any 3040 * modified pages are converted into the vnode's dirty 3041 * buffers, since these will no longer be checked once the 3042 * vnode is on the inactive list. 3043 * 3044 * The write-out of the dirty pages is asynchronous. At the 3045 * point that VOP_INACTIVE() is called, there could still be 3046 * pending I/O and dirty pages in the object. 3047 */ 3048 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3049 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 3050 VM_OBJECT_WLOCK(obj); 3051 vm_object_page_clean(obj, 0, 0, 0); 3052 VM_OBJECT_WUNLOCK(obj); 3053 } 3054 VOP_INACTIVE(vp, td); 3055 VI_LOCK(vp); 3056 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3057 ("vinactive: lost VI_DOINGINACT")); 3058 vp->v_iflag &= ~VI_DOINGINACT; 3059 } 3060 3061 /* 3062 * Remove any vnodes in the vnode table belonging to mount point mp. 3063 * 3064 * If FORCECLOSE is not specified, there should not be any active ones, 3065 * return error if any are found (nb: this is a user error, not a 3066 * system error). If FORCECLOSE is specified, detach any active vnodes 3067 * that are found. 3068 * 3069 * If WRITECLOSE is set, only flush out regular file vnodes open for 3070 * writing. 3071 * 3072 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3073 * 3074 * `rootrefs' specifies the base reference count for the root vnode 3075 * of this filesystem. The root vnode is considered busy if its 3076 * v_usecount exceeds this value. On a successful return, vflush(, td) 3077 * will call vrele() on the root vnode exactly rootrefs times. 3078 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3079 * be zero. 3080 */ 3081 #ifdef DIAGNOSTIC 3082 static int busyprt = 0; /* print out busy vnodes */ 3083 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3084 #endif 3085 3086 int 3087 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3088 { 3089 struct vnode *vp, *mvp, *rootvp = NULL; 3090 struct vattr vattr; 3091 int busy = 0, error; 3092 3093 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3094 rootrefs, flags); 3095 if (rootrefs > 0) { 3096 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3097 ("vflush: bad args")); 3098 /* 3099 * Get the filesystem root vnode. We can vput() it 3100 * immediately, since with rootrefs > 0, it won't go away. 
3101 */ 3102 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3103 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3104 __func__, error); 3105 return (error); 3106 } 3107 vput(rootvp); 3108 } 3109 loop: 3110 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3111 vholdl(vp); 3112 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3113 if (error) { 3114 vdrop(vp); 3115 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3116 goto loop; 3117 } 3118 /* 3119 * Skip over a vnodes marked VV_SYSTEM. 3120 */ 3121 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3122 VOP_UNLOCK(vp, 0); 3123 vdrop(vp); 3124 continue; 3125 } 3126 /* 3127 * If WRITECLOSE is set, flush out unlinked but still open 3128 * files (even if open only for reading) and regular file 3129 * vnodes open for writing. 3130 */ 3131 if (flags & WRITECLOSE) { 3132 if (vp->v_object != NULL) { 3133 VM_OBJECT_WLOCK(vp->v_object); 3134 vm_object_page_clean(vp->v_object, 0, 0, 0); 3135 VM_OBJECT_WUNLOCK(vp->v_object); 3136 } 3137 error = VOP_FSYNC(vp, MNT_WAIT, td); 3138 if (error != 0) { 3139 VOP_UNLOCK(vp, 0); 3140 vdrop(vp); 3141 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3142 return (error); 3143 } 3144 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3145 VI_LOCK(vp); 3146 3147 if ((vp->v_type == VNON || 3148 (error == 0 && vattr.va_nlink > 0)) && 3149 (vp->v_writecount == 0 || vp->v_type != VREG)) { 3150 VOP_UNLOCK(vp, 0); 3151 vdropl(vp); 3152 continue; 3153 } 3154 } else 3155 VI_LOCK(vp); 3156 /* 3157 * With v_usecount == 0, all we need to do is clear out the 3158 * vnode data structures and we are done. 3159 * 3160 * If FORCECLOSE is set, forcibly close the vnode. 3161 */ 3162 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3163 vgonel(vp); 3164 } else { 3165 busy++; 3166 #ifdef DIAGNOSTIC 3167 if (busyprt) 3168 vn_printf(vp, "vflush: busy vnode "); 3169 #endif 3170 } 3171 VOP_UNLOCK(vp, 0); 3172 vdropl(vp); 3173 } 3174 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3175 /* 3176 * If just the root vnode is busy, and if its refcount 3177 * is equal to `rootrefs', then go ahead and kill it. 3178 */ 3179 VI_LOCK(rootvp); 3180 KASSERT(busy > 0, ("vflush: not busy")); 3181 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3182 ("vflush: usecount %d < rootrefs %d", 3183 rootvp->v_usecount, rootrefs)); 3184 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3185 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3186 vgone(rootvp); 3187 VOP_UNLOCK(rootvp, 0); 3188 busy = 0; 3189 } else 3190 VI_UNLOCK(rootvp); 3191 } 3192 if (busy) { 3193 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3194 busy); 3195 return (EBUSY); 3196 } 3197 for (; rootrefs > 0; rootrefs--) 3198 vrele(rootvp); 3199 return (0); 3200 } 3201 3202 /* 3203 * Recycle an unused vnode to the front of the free list. 3204 */ 3205 int 3206 vrecycle(struct vnode *vp) 3207 { 3208 int recycled; 3209 3210 VI_LOCK(vp); 3211 recycled = vrecyclel(vp); 3212 VI_UNLOCK(vp); 3213 return (recycled); 3214 } 3215 3216 /* 3217 * vrecycle, with the vp interlock held. 3218 */ 3219 int 3220 vrecyclel(struct vnode *vp) 3221 { 3222 int recycled; 3223 3224 ASSERT_VOP_ELOCKED(vp, __func__); 3225 ASSERT_VI_LOCKED(vp, __func__); 3226 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3227 recycled = 0; 3228 if (vp->v_usecount == 0) { 3229 recycled = 1; 3230 vgonel(vp); 3231 } 3232 return (recycled); 3233 } 3234 3235 /* 3236 * Eliminate all activity associated with a vnode 3237 * in preparation for reuse. 
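 *
 * Usage sketch (illustrative): forced unmount (vflush() with FORCECLOSE)
 * and revoke(2)-style operations call vgone(), or vgonel() with the
 * interlock held, on a vnode they hold exclusively locked; afterwards the
 * vnode answers all VOPs through dead_vnodeops until it is recycled.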
3238 */ 3239 void 3240 vgone(struct vnode *vp) 3241 { 3242 VI_LOCK(vp); 3243 vgonel(vp); 3244 VI_UNLOCK(vp); 3245 } 3246 3247 static void 3248 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3249 struct vnode *lowervp __unused) 3250 { 3251 } 3252 3253 /* 3254 * Notify upper mounts about reclaimed or unlinked vnode. 3255 */ 3256 void 3257 vfs_notify_upper(struct vnode *vp, int event) 3258 { 3259 static struct vfsops vgonel_vfsops = { 3260 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3261 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3262 }; 3263 struct mount *mp, *ump, *mmp; 3264 3265 mp = vp->v_mount; 3266 if (mp == NULL) 3267 return; 3268 3269 MNT_ILOCK(mp); 3270 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3271 goto unlock; 3272 MNT_IUNLOCK(mp); 3273 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3274 mmp->mnt_op = &vgonel_vfsops; 3275 mmp->mnt_kern_flag |= MNTK_MARKER; 3276 MNT_ILOCK(mp); 3277 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3278 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3279 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3280 ump = TAILQ_NEXT(ump, mnt_upper_link); 3281 continue; 3282 } 3283 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3284 MNT_IUNLOCK(mp); 3285 switch (event) { 3286 case VFS_NOTIFY_UPPER_RECLAIM: 3287 VFS_RECLAIM_LOWERVP(ump, vp); 3288 break; 3289 case VFS_NOTIFY_UPPER_UNLINK: 3290 VFS_UNLINK_LOWERVP(ump, vp); 3291 break; 3292 default: 3293 KASSERT(0, ("invalid event %d", event)); 3294 break; 3295 } 3296 MNT_ILOCK(mp); 3297 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3298 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3299 } 3300 free(mmp, M_TEMP); 3301 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3302 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3303 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3304 wakeup(&mp->mnt_uppers); 3305 } 3306 unlock: 3307 MNT_IUNLOCK(mp); 3308 } 3309 3310 /* 3311 * vgone, with the vp interlock held. 3312 */ 3313 static void 3314 vgonel(struct vnode *vp) 3315 { 3316 struct thread *td; 3317 int oweinact; 3318 int active; 3319 struct mount *mp; 3320 3321 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3322 ASSERT_VI_LOCKED(vp, "vgonel"); 3323 VNASSERT(vp->v_holdcnt, vp, 3324 ("vgonel: vp %p has no reference.", vp)); 3325 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3326 td = curthread; 3327 3328 /* 3329 * Don't vgonel if we're already doomed. 3330 */ 3331 if (vp->v_iflag & VI_DOOMED) 3332 return; 3333 vp->v_iflag |= VI_DOOMED; 3334 3335 /* 3336 * Check to see if the vnode is in use. If so, we have to call 3337 * VOP_CLOSE() and VOP_INACTIVE(). 3338 */ 3339 active = vp->v_usecount; 3340 oweinact = (vp->v_iflag & VI_OWEINACT); 3341 VI_UNLOCK(vp); 3342 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3343 3344 /* 3345 * If purging an active vnode, it must be closed and 3346 * deactivated before being reclaimed. 3347 */ 3348 if (active) 3349 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3350 if (oweinact || active) { 3351 VI_LOCK(vp); 3352 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3353 vinactive(vp, td); 3354 VI_UNLOCK(vp); 3355 } 3356 if (vp->v_type == VSOCK) 3357 vfs_unp_reclaim(vp); 3358 3359 /* 3360 * Clean out any buffers associated with the vnode. 3361 * If the flush fails, just toss the buffers. 
3362 */ 3363 mp = NULL; 3364 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3365 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3366 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3367 while (vinvalbuf(vp, 0, 0, 0) != 0) 3368 ; 3369 } 3370 3371 BO_LOCK(&vp->v_bufobj); 3372 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3373 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3374 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3375 vp->v_bufobj.bo_clean.bv_cnt == 0, 3376 ("vp %p bufobj not invalidated", vp)); 3377 3378 /* 3379 * For VMIO bufobj, BO_DEAD is set in vm_object_terminate() 3380 * after the object's page queue is flushed. 3381 */ 3382 if (vp->v_bufobj.bo_object == NULL) 3383 vp->v_bufobj.bo_flag |= BO_DEAD; 3384 BO_UNLOCK(&vp->v_bufobj); 3385 3386 /* 3387 * Reclaim the vnode. 3388 */ 3389 if (VOP_RECLAIM(vp, td)) 3390 panic("vgone: cannot reclaim"); 3391 if (mp != NULL) 3392 vn_finished_secondary_write(mp); 3393 VNASSERT(vp->v_object == NULL, vp, 3394 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3395 /* 3396 * Clear the advisory locks and wake up waiting threads. 3397 */ 3398 (void)VOP_ADVLOCKPURGE(vp); 3399 vp->v_lockf = NULL; 3400 /* 3401 * Delete from old mount point vnode list. 3402 */ 3403 delmntque(vp); 3404 cache_purge(vp); 3405 /* 3406 * Done with purge, reset to the standard lock and invalidate 3407 * the vnode. 3408 */ 3409 VI_LOCK(vp); 3410 vp->v_vnlock = &vp->v_lock; 3411 vp->v_op = &dead_vnodeops; 3412 vp->v_tag = "none"; 3413 vp->v_type = VBAD; 3414 } 3415 3416 /* 3417 * Calculate the total number of references to a special device. 3418 */ 3419 int 3420 vcount(struct vnode *vp) 3421 { 3422 int count; 3423 3424 dev_lock(); 3425 count = vp->v_rdev->si_usecount; 3426 dev_unlock(); 3427 return (count); 3428 } 3429 3430 /* 3431 * Same as above, but using the struct cdev *as argument 3432 */ 3433 int 3434 count_dev(struct cdev *dev) 3435 { 3436 int count; 3437 3438 dev_lock(); 3439 count = dev->si_usecount; 3440 dev_unlock(); 3441 return(count); 3442 } 3443 3444 /* 3445 * Print out a description of a vnode. 3446 */ 3447 static char *typename[] = 3448 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3449 "VMARKER"}; 3450 3451 void 3452 vn_printf(struct vnode *vp, const char *fmt, ...) 
3453 { 3454 va_list ap; 3455 char buf[256], buf2[16]; 3456 u_long flags; 3457 3458 va_start(ap, fmt); 3459 vprintf(fmt, ap); 3460 va_end(ap); 3461 printf("%p: ", (void *)vp); 3462 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3463 printf(" usecount %d, writecount %d, refcount %d", 3464 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3465 switch (vp->v_type) { 3466 case VDIR: 3467 printf(" mountedhere %p\n", vp->v_mountedhere); 3468 break; 3469 case VCHR: 3470 printf(" rdev %p\n", vp->v_rdev); 3471 break; 3472 case VSOCK: 3473 printf(" socket %p\n", vp->v_unpcb); 3474 break; 3475 case VFIFO: 3476 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3477 break; 3478 default: 3479 printf("\n"); 3480 break; 3481 } 3482 buf[0] = '\0'; 3483 buf[1] = '\0'; 3484 if (vp->v_vflag & VV_ROOT) 3485 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3486 if (vp->v_vflag & VV_ISTTY) 3487 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3488 if (vp->v_vflag & VV_NOSYNC) 3489 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3490 if (vp->v_vflag & VV_ETERNALDEV) 3491 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3492 if (vp->v_vflag & VV_CACHEDLABEL) 3493 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3494 if (vp->v_vflag & VV_TEXT) 3495 strlcat(buf, "|VV_TEXT", sizeof(buf)); 3496 if (vp->v_vflag & VV_COPYONWRITE) 3497 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3498 if (vp->v_vflag & VV_SYSTEM) 3499 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3500 if (vp->v_vflag & VV_PROCDEP) 3501 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3502 if (vp->v_vflag & VV_NOKNOTE) 3503 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3504 if (vp->v_vflag & VV_DELETED) 3505 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3506 if (vp->v_vflag & VV_MD) 3507 strlcat(buf, "|VV_MD", sizeof(buf)); 3508 if (vp->v_vflag & VV_FORCEINSMQ) 3509 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3510 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3511 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3512 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3513 if (flags != 0) { 3514 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3515 strlcat(buf, buf2, sizeof(buf)); 3516 } 3517 if (vp->v_iflag & VI_MOUNT) 3518 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3519 if (vp->v_iflag & VI_DOOMED) 3520 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3521 if (vp->v_iflag & VI_FREE) 3522 strlcat(buf, "|VI_FREE", sizeof(buf)); 3523 if (vp->v_iflag & VI_ACTIVE) 3524 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3525 if (vp->v_iflag & VI_DOINGINACT) 3526 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3527 if (vp->v_iflag & VI_OWEINACT) 3528 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3529 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3530 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 3531 if (flags != 0) { 3532 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3533 strlcat(buf, buf2, sizeof(buf)); 3534 } 3535 printf(" flags (%s)\n", buf + 1); 3536 if (mtx_owned(VI_MTX(vp))) 3537 printf(" VI_LOCKed"); 3538 if (vp->v_object != NULL) 3539 printf(" v_object %p ref %d pages %d " 3540 "cleanbuf %d dirtybuf %d\n", 3541 vp->v_object, vp->v_object->ref_count, 3542 vp->v_object->resident_page_count, 3543 vp->v_bufobj.bo_clean.bv_cnt, 3544 vp->v_bufobj.bo_dirty.bv_cnt); 3545 printf(" "); 3546 lockmgr_printinfo(vp->v_vnlock); 3547 if (vp->v_data != NULL) 3548 VOP_PRINT(vp); 3549 } 3550 3551 #ifdef DDB 3552 /* 3553 * List all of the locked vnodes in the system. 3554 * Called when debugging the kernel. 
3555 */ 3556 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3557 { 3558 struct mount *mp; 3559 struct vnode *vp; 3560 3561 /* 3562 * Note: because this is DDB, we can't obey the locking semantics 3563 * for these structures, which means we could catch an inconsistent 3564 * state and dereference a nasty pointer. Not much to be done 3565 * about that. 3566 */ 3567 db_printf("Locked vnodes\n"); 3568 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3569 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3570 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3571 vn_printf(vp, "vnode "); 3572 } 3573 } 3574 } 3575 3576 /* 3577 * Show details about the given vnode. 3578 */ 3579 DB_SHOW_COMMAND(vnode, db_show_vnode) 3580 { 3581 struct vnode *vp; 3582 3583 if (!have_addr) 3584 return; 3585 vp = (struct vnode *)addr; 3586 vn_printf(vp, "vnode "); 3587 } 3588 3589 /* 3590 * Show details about the given mount point. 3591 */ 3592 DB_SHOW_COMMAND(mount, db_show_mount) 3593 { 3594 struct mount *mp; 3595 struct vfsopt *opt; 3596 struct statfs *sp; 3597 struct vnode *vp; 3598 char buf[512]; 3599 uint64_t mflags; 3600 u_int flags; 3601 3602 if (!have_addr) { 3603 /* No address given, print short info about all mount points. */ 3604 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3605 db_printf("%p %s on %s (%s)\n", mp, 3606 mp->mnt_stat.f_mntfromname, 3607 mp->mnt_stat.f_mntonname, 3608 mp->mnt_stat.f_fstypename); 3609 if (db_pager_quit) 3610 break; 3611 } 3612 db_printf("\nMore info: show mount <addr>\n"); 3613 return; 3614 } 3615 3616 mp = (struct mount *)addr; 3617 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3618 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3619 3620 buf[0] = '\0'; 3621 mflags = mp->mnt_flag; 3622 #define MNT_FLAG(flag) do { \ 3623 if (mflags & (flag)) { \ 3624 if (buf[0] != '\0') \ 3625 strlcat(buf, ", ", sizeof(buf)); \ 3626 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3627 mflags &= ~(flag); \ 3628 } \ 3629 } while (0) 3630 MNT_FLAG(MNT_RDONLY); 3631 MNT_FLAG(MNT_SYNCHRONOUS); 3632 MNT_FLAG(MNT_NOEXEC); 3633 MNT_FLAG(MNT_NOSUID); 3634 MNT_FLAG(MNT_NFS4ACLS); 3635 MNT_FLAG(MNT_UNION); 3636 MNT_FLAG(MNT_ASYNC); 3637 MNT_FLAG(MNT_SUIDDIR); 3638 MNT_FLAG(MNT_SOFTDEP); 3639 MNT_FLAG(MNT_NOSYMFOLLOW); 3640 MNT_FLAG(MNT_GJOURNAL); 3641 MNT_FLAG(MNT_MULTILABEL); 3642 MNT_FLAG(MNT_ACLS); 3643 MNT_FLAG(MNT_NOATIME); 3644 MNT_FLAG(MNT_NOCLUSTERR); 3645 MNT_FLAG(MNT_NOCLUSTERW); 3646 MNT_FLAG(MNT_SUJ); 3647 MNT_FLAG(MNT_EXRDONLY); 3648 MNT_FLAG(MNT_EXPORTED); 3649 MNT_FLAG(MNT_DEFEXPORTED); 3650 MNT_FLAG(MNT_EXPORTANON); 3651 MNT_FLAG(MNT_EXKERB); 3652 MNT_FLAG(MNT_EXPUBLIC); 3653 MNT_FLAG(MNT_LOCAL); 3654 MNT_FLAG(MNT_QUOTA); 3655 MNT_FLAG(MNT_ROOTFS); 3656 MNT_FLAG(MNT_USER); 3657 MNT_FLAG(MNT_IGNORE); 3658 MNT_FLAG(MNT_UPDATE); 3659 MNT_FLAG(MNT_DELEXPORT); 3660 MNT_FLAG(MNT_RELOAD); 3661 MNT_FLAG(MNT_FORCE); 3662 MNT_FLAG(MNT_SNAPSHOT); 3663 MNT_FLAG(MNT_BYFSID); 3664 #undef MNT_FLAG 3665 if (mflags != 0) { 3666 if (buf[0] != '\0') 3667 strlcat(buf, ", ", sizeof(buf)); 3668 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3669 "0x%016jx", mflags); 3670 } 3671 db_printf(" mnt_flag = %s\n", buf); 3672 3673 buf[0] = '\0'; 3674 flags = mp->mnt_kern_flag; 3675 #define MNT_KERN_FLAG(flag) do { \ 3676 if (flags & (flag)) { \ 3677 if (buf[0] != '\0') \ 3678 strlcat(buf, ", ", sizeof(buf)); \ 3679 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3680 flags &= ~(flag); \ 3681 } \ 3682 } while (0) 3683 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3684 MNT_KERN_FLAG(MNTK_ASYNC); 3685 
MNT_KERN_FLAG(MNTK_SOFTDEP); 3686 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3687 MNT_KERN_FLAG(MNTK_DRAINING); 3688 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3689 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3690 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3691 MNT_KERN_FLAG(MNTK_NO_IOPF); 3692 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3693 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3694 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3695 MNT_KERN_FLAG(MNTK_MARKER); 3696 MNT_KERN_FLAG(MNTK_USES_BCACHE); 3697 MNT_KERN_FLAG(MNTK_NOASYNC); 3698 MNT_KERN_FLAG(MNTK_UNMOUNT); 3699 MNT_KERN_FLAG(MNTK_MWAIT); 3700 MNT_KERN_FLAG(MNTK_SUSPEND); 3701 MNT_KERN_FLAG(MNTK_SUSPEND2); 3702 MNT_KERN_FLAG(MNTK_SUSPENDED); 3703 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3704 MNT_KERN_FLAG(MNTK_NOKNOTE); 3705 #undef MNT_KERN_FLAG 3706 if (flags != 0) { 3707 if (buf[0] != '\0') 3708 strlcat(buf, ", ", sizeof(buf)); 3709 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3710 "0x%08x", flags); 3711 } 3712 db_printf(" mnt_kern_flag = %s\n", buf); 3713 3714 db_printf(" mnt_opt = "); 3715 opt = TAILQ_FIRST(mp->mnt_opt); 3716 if (opt != NULL) { 3717 db_printf("%s", opt->name); 3718 opt = TAILQ_NEXT(opt, link); 3719 while (opt != NULL) { 3720 db_printf(", %s", opt->name); 3721 opt = TAILQ_NEXT(opt, link); 3722 } 3723 } 3724 db_printf("\n"); 3725 3726 sp = &mp->mnt_stat; 3727 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3728 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3729 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3730 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3731 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3732 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3733 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3734 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3735 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3736 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3737 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3738 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3739 3740 db_printf(" mnt_cred = { uid=%u ruid=%u", 3741 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3742 if (jailed(mp->mnt_cred)) 3743 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3744 db_printf(" }\n"); 3745 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3746 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3747 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3748 db_printf(" mnt_activevnodelistsize = %d\n", 3749 mp->mnt_activevnodelistsize); 3750 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3751 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3752 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3753 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3754 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); 3755 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3756 db_printf(" mnt_secondary_accwrites = %d\n", 3757 mp->mnt_secondary_accwrites); 3758 db_printf(" mnt_gjprovider = %s\n", 3759 mp->mnt_gjprovider != NULL ? 
mp->mnt_gjprovider : "NULL"); 3760 3761 db_printf("\n\nList of active vnodes\n"); 3762 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3763 if (vp->v_type != VMARKER) { 3764 vn_printf(vp, "vnode "); 3765 if (db_pager_quit) 3766 break; 3767 } 3768 } 3769 db_printf("\n\nList of inactive vnodes\n"); 3770 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3771 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3772 vn_printf(vp, "vnode "); 3773 if (db_pager_quit) 3774 break; 3775 } 3776 } 3777 } 3778 #endif /* DDB */ 3779 3780 /* 3781 * Fill in a struct xvfsconf based on a struct vfsconf. 3782 */ 3783 static int 3784 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3785 { 3786 struct xvfsconf xvfsp; 3787 3788 bzero(&xvfsp, sizeof(xvfsp)); 3789 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3790 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3791 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3792 xvfsp.vfc_flags = vfsp->vfc_flags; 3793 /* 3794 * These are unused in userland, we keep them 3795 * to not break binary compatibility. 3796 */ 3797 xvfsp.vfc_vfsops = NULL; 3798 xvfsp.vfc_next = NULL; 3799 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3800 } 3801 3802 #ifdef COMPAT_FREEBSD32 3803 struct xvfsconf32 { 3804 uint32_t vfc_vfsops; 3805 char vfc_name[MFSNAMELEN]; 3806 int32_t vfc_typenum; 3807 int32_t vfc_refcount; 3808 int32_t vfc_flags; 3809 uint32_t vfc_next; 3810 }; 3811 3812 static int 3813 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3814 { 3815 struct xvfsconf32 xvfsp; 3816 3817 bzero(&xvfsp, sizeof(xvfsp)); 3818 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3819 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3820 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3821 xvfsp.vfc_flags = vfsp->vfc_flags; 3822 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3823 } 3824 #endif 3825 3826 /* 3827 * Top level filesystem related information gathering. 3828 */ 3829 static int 3830 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3831 { 3832 struct vfsconf *vfsp; 3833 int error; 3834 3835 error = 0; 3836 vfsconf_slock(); 3837 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3838 #ifdef COMPAT_FREEBSD32 3839 if (req->flags & SCTL_MASK32) 3840 error = vfsconf2x32(req, vfsp); 3841 else 3842 #endif 3843 error = vfsconf2x(req, vfsp); 3844 if (error) 3845 break; 3846 } 3847 vfsconf_sunlock(); 3848 return (error); 3849 } 3850 3851 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3852 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3853 "S,xvfsconf", "List of all configured filesystems"); 3854 3855 #ifndef BURN_BRIDGES 3856 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3857 3858 static int 3859 vfs_sysctl(SYSCTL_HANDLER_ARGS) 3860 { 3861 int *name = (int *)arg1 - 1; /* XXX */ 3862 u_int namelen = arg2 + 1; /* XXX */ 3863 struct vfsconf *vfsp; 3864 3865 log(LOG_WARNING, "userland calling deprecated sysctl, " 3866 "please rebuild world\n"); 3867 3868 #if 1 || defined(COMPAT_PRELITE2) 3869 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
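 * (Illustrative reading of the check below, not new behaviour: both names
 * map to MIB number 0, so a request arriving here with a single remaining
 * component, e.g. sysctl(2) on { CTL_VFS, VFS_VFSCONF }, is
 * indistinguishable from { CTL_VFS, VFS_GENERIC } and is treated as the
 * old "return all ovfsconf entries" query.)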
*/ 3870 if (namelen == 1) 3871 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3872 #endif 3873 3874 switch (name[1]) { 3875 case VFS_MAXTYPENUM: 3876 if (namelen != 2) 3877 return (ENOTDIR); 3878 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3879 case VFS_CONF: 3880 if (namelen != 3) 3881 return (ENOTDIR); /* overloaded */ 3882 vfsconf_slock(); 3883 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3884 if (vfsp->vfc_typenum == name[2]) 3885 break; 3886 } 3887 vfsconf_sunlock(); 3888 if (vfsp == NULL) 3889 return (EOPNOTSUPP); 3890 #ifdef COMPAT_FREEBSD32 3891 if (req->flags & SCTL_MASK32) 3892 return (vfsconf2x32(req, vfsp)); 3893 else 3894 #endif 3895 return (vfsconf2x(req, vfsp)); 3896 } 3897 return (EOPNOTSUPP); 3898 } 3899 3900 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3901 CTLFLAG_MPSAFE, vfs_sysctl, 3902 "Generic filesystem"); 3903 3904 #if 1 || defined(COMPAT_PRELITE2) 3905 3906 static int 3907 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3908 { 3909 int error; 3910 struct vfsconf *vfsp; 3911 struct ovfsconf ovfs; 3912 3913 vfsconf_slock(); 3914 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3915 bzero(&ovfs, sizeof(ovfs)); 3916 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3917 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3918 ovfs.vfc_index = vfsp->vfc_typenum; 3919 ovfs.vfc_refcount = vfsp->vfc_refcount; 3920 ovfs.vfc_flags = vfsp->vfc_flags; 3921 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3922 if (error != 0) { 3923 vfsconf_sunlock(); 3924 return (error); 3925 } 3926 } 3927 vfsconf_sunlock(); 3928 return (0); 3929 } 3930 3931 #endif /* 1 || COMPAT_PRELITE2 */ 3932 #endif /* !BURN_BRIDGES */ 3933 3934 #define KINFO_VNODESLOP 10 3935 #ifdef notyet 3936 /* 3937 * Dump vnode list (via sysctl). 3938 */ 3939 /* ARGSUSED */ 3940 static int 3941 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3942 { 3943 struct xvnode *xvn; 3944 struct mount *mp; 3945 struct vnode *vp; 3946 int error, len, n; 3947 3948 /* 3949 * Stale numvnodes access is not fatal here. 3950 */ 3951 req->lock = 0; 3952 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3953 if (!req->oldptr) 3954 /* Make an estimate */ 3955 return (SYSCTL_OUT(req, 0, len)); 3956 3957 error = sysctl_wire_old_buffer(req, 0); 3958 if (error != 0) 3959 return (error); 3960 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3961 n = 0; 3962 mtx_lock(&mountlist_mtx); 3963 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3964 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3965 continue; 3966 MNT_ILOCK(mp); 3967 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3968 if (n == len) 3969 break; 3970 vref(vp); 3971 xvn[n].xv_size = sizeof *xvn; 3972 xvn[n].xv_vnode = vp; 3973 xvn[n].xv_id = 0; /* XXX compat */ 3974 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3975 XV_COPY(usecount); 3976 XV_COPY(writecount); 3977 XV_COPY(holdcnt); 3978 XV_COPY(mount); 3979 XV_COPY(numoutput); 3980 XV_COPY(type); 3981 #undef XV_COPY 3982 xvn[n].xv_flag = vp->v_vflag; 3983 3984 switch (vp->v_type) { 3985 case VREG: 3986 case VDIR: 3987 case VLNK: 3988 break; 3989 case VBLK: 3990 case VCHR: 3991 if (vp->v_rdev == NULL) { 3992 vrele(vp); 3993 continue; 3994 } 3995 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3996 break; 3997 case VSOCK: 3998 xvn[n].xv_socket = vp->v_socket; 3999 break; 4000 case VFIFO: 4001 xvn[n].xv_fifo = vp->v_fifoinfo; 4002 break; 4003 case VNON: 4004 case VBAD: 4005 default: 4006 /* shouldn't happen? 
*/ 4007 vrele(vp); 4008 continue; 4009 } 4010 vrele(vp); 4011 ++n; 4012 } 4013 MNT_IUNLOCK(mp); 4014 mtx_lock(&mountlist_mtx); 4015 vfs_unbusy(mp); 4016 if (n == len) 4017 break; 4018 } 4019 mtx_unlock(&mountlist_mtx); 4020 4021 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4022 free(xvn, M_TEMP); 4023 return (error); 4024 } 4025 4026 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4027 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4028 ""); 4029 #endif 4030 4031 static void 4032 unmount_or_warn(struct mount *mp) 4033 { 4034 int error; 4035 4036 error = dounmount(mp, MNT_FORCE, curthread); 4037 if (error != 0) { 4038 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4039 if (error == EBUSY) 4040 printf("BUSY)\n"); 4041 else 4042 printf("%d)\n", error); 4043 } 4044 } 4045 4046 /* 4047 * Unmount all filesystems. The list is traversed in reverse order 4048 * of mounting to avoid dependencies. 4049 */ 4050 void 4051 vfs_unmountall(void) 4052 { 4053 struct mount *mp, *tmp; 4054 4055 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4056 4057 /* 4058 * Since this only runs when rebooting, it is not interlocked. 4059 */ 4060 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4061 vfs_ref(mp); 4062 4063 /* 4064 * Forcibly unmounting "/dev" before "/" would prevent clean 4065 * unmount of the latter. 4066 */ 4067 if (mp == rootdevmp) 4068 continue; 4069 4070 unmount_or_warn(mp); 4071 } 4072 4073 if (rootdevmp != NULL) 4074 unmount_or_warn(rootdevmp); 4075 } 4076 4077 /* 4078 * perform msync on all vnodes under a mount point 4079 * the mount point must be locked. 4080 */ 4081 void 4082 vfs_msync(struct mount *mp, int flags) 4083 { 4084 struct vnode *vp, *mvp; 4085 struct vm_object *obj; 4086 4087 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4088 4089 vnlru_return_batch(mp); 4090 4091 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4092 obj = vp->v_object; 4093 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 4094 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4095 if (!vget(vp, 4096 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4097 curthread)) { 4098 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4099 vput(vp); 4100 continue; 4101 } 4102 4103 obj = vp->v_object; 4104 if (obj != NULL) { 4105 VM_OBJECT_WLOCK(obj); 4106 vm_object_page_clean(obj, 0, 0, 4107 flags == MNT_WAIT ? 4108 OBJPC_SYNC : OBJPC_NOSYNC); 4109 VM_OBJECT_WUNLOCK(obj); 4110 } 4111 vput(vp); 4112 } 4113 } else 4114 VI_UNLOCK(vp); 4115 } 4116 } 4117 4118 static void 4119 destroy_vpollinfo_free(struct vpollinfo *vi) 4120 { 4121 4122 knlist_destroy(&vi->vpi_selinfo.si_note); 4123 mtx_destroy(&vi->vpi_lock); 4124 uma_zfree(vnodepoll_zone, vi); 4125 } 4126 4127 static void 4128 destroy_vpollinfo(struct vpollinfo *vi) 4129 { 4130 4131 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4132 seldrain(&vi->vpi_selinfo); 4133 destroy_vpollinfo_free(vi); 4134 } 4135 4136 /* 4137 * Initialize per-vnode helper structure to hold poll-related state. 
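 *
 * (Hedged usage note, not part of the original comment: the routine is
 * idempotent and may be called without the interlock held.  Callers such
 * as vfs_kqfilter() below simply do
 *
 *	v_addpollinfo(vp);
 *	if (vp->v_pollinfo == NULL)
 *		return (ENOMEM);
 *
 * and rely on the double-checked VI_LOCK pattern inside to free the
 * newly allocated vpollinfo again if another thread installed one
 * first.)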
4138 */ 4139 void 4140 v_addpollinfo(struct vnode *vp) 4141 { 4142 struct vpollinfo *vi; 4143 4144 if (vp->v_pollinfo != NULL) 4145 return; 4146 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4147 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4148 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4149 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4150 VI_LOCK(vp); 4151 if (vp->v_pollinfo != NULL) { 4152 VI_UNLOCK(vp); 4153 destroy_vpollinfo_free(vi); 4154 return; 4155 } 4156 vp->v_pollinfo = vi; 4157 VI_UNLOCK(vp); 4158 } 4159 4160 /* 4161 * Record a process's interest in events which might happen to 4162 * a vnode. Because poll uses the historic select-style interface 4163 * internally, this routine serves as both the ``check for any 4164 * pending events'' and the ``record my interest in future events'' 4165 * functions. (These are done together, while the lock is held, 4166 * to avoid race conditions.) 4167 */ 4168 int 4169 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4170 { 4171 4172 v_addpollinfo(vp); 4173 mtx_lock(&vp->v_pollinfo->vpi_lock); 4174 if (vp->v_pollinfo->vpi_revents & events) { 4175 /* 4176 * This leaves events we are not interested 4177 * in available for the other process which 4178 * presumably had requested them 4179 * (otherwise they would never have been 4180 * recorded). 4181 */ 4182 events &= vp->v_pollinfo->vpi_revents; 4183 vp->v_pollinfo->vpi_revents &= ~events; 4184 4185 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4186 return (events); 4187 } 4188 vp->v_pollinfo->vpi_events |= events; 4189 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4190 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4191 return (0); 4192 } 4193 4194 /* 4195 * Routine to create and manage a filesystem syncer vnode. 4196 */ 4197 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4198 static int sync_fsync(struct vop_fsync_args *); 4199 static int sync_inactive(struct vop_inactive_args *); 4200 static int sync_reclaim(struct vop_reclaim_args *); 4201 4202 static struct vop_vector sync_vnodeops = { 4203 .vop_bypass = VOP_EOPNOTSUPP, 4204 .vop_close = sync_close, /* close */ 4205 .vop_fsync = sync_fsync, /* fsync */ 4206 .vop_inactive = sync_inactive, /* inactive */ 4207 .vop_reclaim = sync_reclaim, /* reclaim */ 4208 .vop_lock1 = vop_stdlock, /* lock */ 4209 .vop_unlock = vop_stdunlock, /* unlock */ 4210 .vop_islocked = vop_stdislocked, /* islocked */ 4211 }; 4212 4213 /* 4214 * Create a new filesystem syncer vnode for the specified mount point. 4215 */ 4216 void 4217 vfs_allocate_syncvnode(struct mount *mp) 4218 { 4219 struct vnode *vp; 4220 struct bufobj *bo; 4221 static long start, incr, next; 4222 int error; 4223 4224 /* Allocate a new vnode */ 4225 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4226 if (error != 0) 4227 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4228 vp->v_type = VNON; 4229 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4230 vp->v_vflag |= VV_FORCEINSMQ; 4231 error = insmntque(vp, mp); 4232 if (error != 0) 4233 panic("vfs_allocate_syncvnode: insmntque() failed"); 4234 vp->v_vflag &= ~VV_FORCEINSMQ; 4235 VOP_UNLOCK(vp, 0); 4236 /* 4237 * Place the vnode onto the syncer worklist. We attempt to 4238 * scatter them about on the list so that they will go off 4239 * at evenly distributed times even if all the filesystems 4240 * are mounted at once.
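 *
 * (Worked illustration, assuming the default syncer_maxdelay of 32:
 * starting from start == incr == next == 0, successive calls yield
 * next = 16, 8, 24, 4, 12, 20, 28, 2, ...  Each time the running value
 * would pass syncer_maxdelay the stride is halved, so mount points are
 * spread across the syncer wheel in a bit-reversal style pattern instead
 * of all firing in the same second.)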
4241 */ 4242 next += incr; 4243 if (next == 0 || next > syncer_maxdelay) { 4244 start /= 2; 4245 incr /= 2; 4246 if (start == 0) { 4247 start = syncer_maxdelay / 2; 4248 incr = syncer_maxdelay; 4249 } 4250 next = start; 4251 } 4252 bo = &vp->v_bufobj; 4253 BO_LOCK(bo); 4254 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4255 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 4256 mtx_lock(&sync_mtx); 4257 sync_vnode_count++; 4258 if (mp->mnt_syncer == NULL) { 4259 mp->mnt_syncer = vp; 4260 vp = NULL; 4261 } 4262 mtx_unlock(&sync_mtx); 4263 BO_UNLOCK(bo); 4264 if (vp != NULL) { 4265 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4266 vgone(vp); 4267 vput(vp); 4268 } 4269 } 4270 4271 void 4272 vfs_deallocate_syncvnode(struct mount *mp) 4273 { 4274 struct vnode *vp; 4275 4276 mtx_lock(&sync_mtx); 4277 vp = mp->mnt_syncer; 4278 if (vp != NULL) 4279 mp->mnt_syncer = NULL; 4280 mtx_unlock(&sync_mtx); 4281 if (vp != NULL) 4282 vrele(vp); 4283 } 4284 4285 /* 4286 * Do a lazy sync of the filesystem. 4287 */ 4288 static int 4289 sync_fsync(struct vop_fsync_args *ap) 4290 { 4291 struct vnode *syncvp = ap->a_vp; 4292 struct mount *mp = syncvp->v_mount; 4293 int error, save; 4294 struct bufobj *bo; 4295 4296 /* 4297 * We only need to do something if this is a lazy evaluation. 4298 */ 4299 if (ap->a_waitfor != MNT_LAZY) 4300 return (0); 4301 4302 /* 4303 * Move ourselves to the back of the sync list. 4304 */ 4305 bo = &syncvp->v_bufobj; 4306 BO_LOCK(bo); 4307 vn_syncer_add_to_worklist(bo, syncdelay); 4308 BO_UNLOCK(bo); 4309 4310 /* 4311 * Walk the list of vnodes pushing all that are dirty and 4312 * not already on the sync list. 4313 */ 4314 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4315 return (0); 4316 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4317 vfs_unbusy(mp); 4318 return (0); 4319 } 4320 save = curthread_pflags_set(TDP_SYNCIO); 4321 vfs_msync(mp, MNT_NOWAIT); 4322 error = VFS_SYNC(mp, MNT_LAZY); 4323 curthread_pflags_restore(save); 4324 vn_finished_write(mp); 4325 vfs_unbusy(mp); 4326 return (error); 4327 } 4328 4329 /* 4330 * The syncer vnode is no longer referenced. 4331 */ 4332 static int 4333 sync_inactive(struct vop_inactive_args *ap) 4334 { 4335 4336 vgone(ap->a_vp); 4337 return (0); 4338 } 4339 4340 /* 4341 * The syncer vnode is no longer needed and is being decommissioned. 4342 * 4343 * Modifications to the worklist must be protected by sync_mtx.
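 *
 * (Hedged sketch of the usual teardown order, pieced together from the
 * routines above: vfs_deallocate_syncvnode() detaches the vnode from
 * mp->mnt_syncer and vrele()s it; dropping the last use reaches
 * sync_inactive(), which calls vgone(), and reclaiming the doomed vnode
 * ends up here, where its bufobj is pulled off the syncer worklist with
 * sync_mtx held.)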
4344 */ 4345 static int 4346 sync_reclaim(struct vop_reclaim_args *ap) 4347 { 4348 struct vnode *vp = ap->a_vp; 4349 struct bufobj *bo; 4350 4351 bo = &vp->v_bufobj; 4352 BO_LOCK(bo); 4353 mtx_lock(&sync_mtx); 4354 if (vp->v_mount->mnt_syncer == vp) 4355 vp->v_mount->mnt_syncer = NULL; 4356 if (bo->bo_flag & BO_ONWORKLST) { 4357 LIST_REMOVE(bo, bo_synclist); 4358 syncer_worklist_len--; 4359 sync_vnode_count--; 4360 bo->bo_flag &= ~BO_ONWORKLST; 4361 } 4362 mtx_unlock(&sync_mtx); 4363 BO_UNLOCK(bo); 4364 4365 return (0); 4366 } 4367 4368 /* 4369 * Check if vnode represents a disk device 4370 */ 4371 int 4372 vn_isdisk(struct vnode *vp, int *errp) 4373 { 4374 int error; 4375 4376 if (vp->v_type != VCHR) { 4377 error = ENOTBLK; 4378 goto out; 4379 } 4380 error = 0; 4381 dev_lock(); 4382 if (vp->v_rdev == NULL) 4383 error = ENXIO; 4384 else if (vp->v_rdev->si_devsw == NULL) 4385 error = ENXIO; 4386 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4387 error = ENOTBLK; 4388 dev_unlock(); 4389 out: 4390 if (errp != NULL) 4391 *errp = error; 4392 return (error == 0); 4393 } 4394 4395 /* 4396 * Common filesystem object access control check routine. Accepts a 4397 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4398 * and optional call-by-reference privused argument allowing vaccess() 4399 * to indicate to the caller whether privilege was used to satisfy the 4400 * request (obsoleted). Returns 0 on success, or an errno on failure. 4401 */ 4402 int 4403 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4404 accmode_t accmode, struct ucred *cred, int *privused) 4405 { 4406 accmode_t dac_granted; 4407 accmode_t priv_granted; 4408 4409 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4410 ("invalid bit in accmode")); 4411 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4412 ("VAPPEND without VWRITE")); 4413 4414 /* 4415 * Look for a normal, non-privileged way to access the file/directory 4416 * as requested. If it exists, go with that. 4417 */ 4418 4419 if (privused != NULL) 4420 *privused = 0; 4421 4422 dac_granted = 0; 4423 4424 /* Check the owner. */ 4425 if (cred->cr_uid == file_uid) { 4426 dac_granted |= VADMIN; 4427 if (file_mode & S_IXUSR) 4428 dac_granted |= VEXEC; 4429 if (file_mode & S_IRUSR) 4430 dac_granted |= VREAD; 4431 if (file_mode & S_IWUSR) 4432 dac_granted |= (VWRITE | VAPPEND); 4433 4434 if ((accmode & dac_granted) == accmode) 4435 return (0); 4436 4437 goto privcheck; 4438 } 4439 4440 /* Otherwise, check the groups (first match) */ 4441 if (groupmember(file_gid, cred)) { 4442 if (file_mode & S_IXGRP) 4443 dac_granted |= VEXEC; 4444 if (file_mode & S_IRGRP) 4445 dac_granted |= VREAD; 4446 if (file_mode & S_IWGRP) 4447 dac_granted |= (VWRITE | VAPPEND); 4448 4449 if ((accmode & dac_granted) == accmode) 4450 return (0); 4451 4452 goto privcheck; 4453 } 4454 4455 /* Otherwise, check everyone else. */ 4456 if (file_mode & S_IXOTH) 4457 dac_granted |= VEXEC; 4458 if (file_mode & S_IROTH) 4459 dac_granted |= VREAD; 4460 if (file_mode & S_IWOTH) 4461 dac_granted |= (VWRITE | VAPPEND); 4462 if ((accmode & dac_granted) == accmode) 4463 return (0); 4464 4465 privcheck: 4466 /* 4467 * Build a privilege mask to determine if the set of privileges 4468 * satisfies the requirements when combined with the granted mask 4469 * from above. For each privilege, if the privilege is required, 4470 * bitwise or the request type onto the priv_granted mask. 
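 *
 * (Worked example, not part of the original comment: a VREAD request on
 * a 0600 file owned by someone else leaves dac_granted empty, so we end
 * up here; if priv_check_cred(cred, PRIV_VFS_READ) succeeds, VREAD is
 * or'ed into priv_granted, the combined mask then covers accmode, and
 * the caller is told through *privused that privilege was used.)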
4471 */ 4472 priv_granted = 0; 4473 4474 if (type == VDIR) { 4475 /* 4476 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4477 * requests, instead of PRIV_VFS_EXEC. 4478 */ 4479 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4480 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4481 priv_granted |= VEXEC; 4482 } else { 4483 /* 4484 * Ensure that at least one execute bit is on. Otherwise, 4485 * a privileged user will always succeed, and we don't want 4486 * this to happen unless the file really is executable. 4487 */ 4488 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4489 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4490 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4491 priv_granted |= VEXEC; 4492 } 4493 4494 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4495 !priv_check_cred(cred, PRIV_VFS_READ)) 4496 priv_granted |= VREAD; 4497 4498 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4499 !priv_check_cred(cred, PRIV_VFS_WRITE)) 4500 priv_granted |= (VWRITE | VAPPEND); 4501 4502 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4503 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 4504 priv_granted |= VADMIN; 4505 4506 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4507 /* XXX audit: privilege used */ 4508 if (privused != NULL) 4509 *privused = 1; 4510 return (0); 4511 } 4512 4513 return ((accmode & VADMIN) ? EPERM : EACCES); 4514 } 4515 4516 /* 4517 * Credential check based on process requesting service, and per-attribute 4518 * permissions. 4519 */ 4520 int 4521 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4522 struct thread *td, accmode_t accmode) 4523 { 4524 4525 /* 4526 * Kernel-invoked always succeeds. 4527 */ 4528 if (cred == NOCRED) 4529 return (0); 4530 4531 /* 4532 * Do not allow privileged processes in jail to directly manipulate 4533 * system attributes. 4534 */ 4535 switch (attrnamespace) { 4536 case EXTATTR_NAMESPACE_SYSTEM: 4537 /* Potentially should be: return (EPERM); */ 4538 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 4539 case EXTATTR_NAMESPACE_USER: 4540 return (VOP_ACCESS(vp, accmode, cred, td)); 4541 default: 4542 return (EPERM); 4543 } 4544 } 4545 4546 #ifdef DEBUG_VFS_LOCKS 4547 /* 4548 * This only exists to suppress warnings from unlocked specfs accesses. It is 4549 * no longer ok to have an unlocked VFS. 4550 */ 4551 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4552 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4553 4554 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4555 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4556 "Drop into debugger on lock violation"); 4557 4558 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4559 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4560 0, "Check for interlock across VOPs"); 4561 4562 int vfs_badlock_print = 1; /* Print lock violations. */ 4563 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4564 0, "Print lock violations"); 4565 4566 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4567 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4568 0, "Print vnode details on lock violations"); 4569 4570 #ifdef KDB 4571 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 4572 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4573 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4574 #endif 4575 4576 static void 4577 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4578 { 4579 4580 #ifdef KDB 4581 if (vfs_badlock_backtrace) 4582 kdb_backtrace(); 4583 #endif 4584 if (vfs_badlock_vnode) 4585 vn_printf(vp, "vnode "); 4586 if (vfs_badlock_print) 4587 printf("%s: %p %s\n", str, (void *)vp, msg); 4588 if (vfs_badlock_ddb) 4589 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4590 } 4591 4592 void 4593 assert_vi_locked(struct vnode *vp, const char *str) 4594 { 4595 4596 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4597 vfs_badlock("interlock is not locked but should be", str, vp); 4598 } 4599 4600 void 4601 assert_vi_unlocked(struct vnode *vp, const char *str) 4602 { 4603 4604 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4605 vfs_badlock("interlock is locked but should not be", str, vp); 4606 } 4607 4608 void 4609 assert_vop_locked(struct vnode *vp, const char *str) 4610 { 4611 int locked; 4612 4613 if (!IGNORE_LOCK(vp)) { 4614 locked = VOP_ISLOCKED(vp); 4615 if (locked == 0 || locked == LK_EXCLOTHER) 4616 vfs_badlock("is not locked but should be", str, vp); 4617 } 4618 } 4619 4620 void 4621 assert_vop_unlocked(struct vnode *vp, const char *str) 4622 { 4623 4624 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4625 vfs_badlock("is locked but should not be", str, vp); 4626 } 4627 4628 void 4629 assert_vop_elocked(struct vnode *vp, const char *str) 4630 { 4631 4632 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4633 vfs_badlock("is not exclusive locked but should be", str, vp); 4634 } 4635 #endif /* DEBUG_VFS_LOCKS */ 4636 4637 void 4638 vop_rename_fail(struct vop_rename_args *ap) 4639 { 4640 4641 if (ap->a_tvp != NULL) 4642 vput(ap->a_tvp); 4643 if (ap->a_tdvp == ap->a_tvp) 4644 vrele(ap->a_tdvp); 4645 else 4646 vput(ap->a_tdvp); 4647 vrele(ap->a_fdvp); 4648 vrele(ap->a_fvp); 4649 } 4650 4651 void 4652 vop_rename_pre(void *ap) 4653 { 4654 struct vop_rename_args *a = ap; 4655 4656 #ifdef DEBUG_VFS_LOCKS 4657 if (a->a_tvp) 4658 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4659 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4660 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4661 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4662 4663 /* Check the source (from). */ 4664 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4665 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4666 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4667 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4668 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4669 4670 /* Check the target. */ 4671 if (a->a_tvp) 4672 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4673 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4674 #endif 4675 if (a->a_tdvp != a->a_fdvp) 4676 vhold(a->a_fdvp); 4677 if (a->a_tvp != a->a_fvp) 4678 vhold(a->a_fvp); 4679 vhold(a->a_tdvp); 4680 if (a->a_tvp) 4681 vhold(a->a_tvp); 4682 } 4683 4684 #ifdef DEBUG_VFS_LOCKS 4685 void 4686 vop_strategy_pre(void *ap) 4687 { 4688 struct vop_strategy_args *a; 4689 struct buf *bp; 4690 4691 a = ap; 4692 bp = a->a_bp; 4693 4694 /* 4695 * Cluster ops lock their component buffers but not the IO container. 
4696 */ 4697 if ((bp->b_flags & B_CLUSTER) != 0) 4698 return; 4699 4700 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4701 if (vfs_badlock_print) 4702 printf( 4703 "VOP_STRATEGY: bp is not locked but should be\n"); 4704 if (vfs_badlock_ddb) 4705 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4706 } 4707 } 4708 4709 void 4710 vop_lock_pre(void *ap) 4711 { 4712 struct vop_lock1_args *a = ap; 4713 4714 if ((a->a_flags & LK_INTERLOCK) == 0) 4715 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4716 else 4717 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4718 } 4719 4720 void 4721 vop_lock_post(void *ap, int rc) 4722 { 4723 struct vop_lock1_args *a = ap; 4724 4725 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4726 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4727 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4728 } 4729 4730 void 4731 vop_unlock_pre(void *ap) 4732 { 4733 struct vop_unlock_args *a = ap; 4734 4735 if (a->a_flags & LK_INTERLOCK) 4736 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4737 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4738 } 4739 4740 void 4741 vop_unlock_post(void *ap, int rc) 4742 { 4743 struct vop_unlock_args *a = ap; 4744 4745 if (a->a_flags & LK_INTERLOCK) 4746 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4747 } 4748 #endif 4749 4750 void 4751 vop_create_post(void *ap, int rc) 4752 { 4753 struct vop_create_args *a = ap; 4754 4755 if (!rc) 4756 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4757 } 4758 4759 void 4760 vop_deleteextattr_post(void *ap, int rc) 4761 { 4762 struct vop_deleteextattr_args *a = ap; 4763 4764 if (!rc) 4765 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4766 } 4767 4768 void 4769 vop_link_post(void *ap, int rc) 4770 { 4771 struct vop_link_args *a = ap; 4772 4773 if (!rc) { 4774 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4775 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4776 } 4777 } 4778 4779 void 4780 vop_mkdir_post(void *ap, int rc) 4781 { 4782 struct vop_mkdir_args *a = ap; 4783 4784 if (!rc) 4785 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4786 } 4787 4788 void 4789 vop_mknod_post(void *ap, int rc) 4790 { 4791 struct vop_mknod_args *a = ap; 4792 4793 if (!rc) 4794 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4795 } 4796 4797 void 4798 vop_reclaim_post(void *ap, int rc) 4799 { 4800 struct vop_reclaim_args *a = ap; 4801 4802 if (!rc) 4803 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 4804 } 4805 4806 void 4807 vop_remove_post(void *ap, int rc) 4808 { 4809 struct vop_remove_args *a = ap; 4810 4811 if (!rc) { 4812 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4813 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4814 } 4815 } 4816 4817 void 4818 vop_rename_post(void *ap, int rc) 4819 { 4820 struct vop_rename_args *a = ap; 4821 long hint; 4822 4823 if (!rc) { 4824 hint = NOTE_WRITE; 4825 if (a->a_fdvp == a->a_tdvp) { 4826 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 4827 hint |= NOTE_LINK; 4828 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4829 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4830 } else { 4831 hint |= NOTE_EXTEND; 4832 if (a->a_fvp->v_type == VDIR) 4833 hint |= NOTE_LINK; 4834 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4835 4836 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 4837 a->a_tvp->v_type == VDIR) 4838 hint &= ~NOTE_LINK; 4839 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4840 } 4841 4842 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4843 if (a->a_tvp) 4844 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4845 } 4846 if (a->a_tdvp != a->a_fdvp) 4847 vdrop(a->a_fdvp); 4848 if (a->a_tvp != a->a_fvp) 4849 vdrop(a->a_fvp); 4850 vdrop(a->a_tdvp); 4851 if (a->a_tvp) 4852 vdrop(a->a_tvp); 4853 } 4854 4855 void 4856 
vop_rmdir_post(void *ap, int rc) 4857 { 4858 struct vop_rmdir_args *a = ap; 4859 4860 if (!rc) { 4861 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4862 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4863 } 4864 } 4865 4866 void 4867 vop_setattr_post(void *ap, int rc) 4868 { 4869 struct vop_setattr_args *a = ap; 4870 4871 if (!rc) 4872 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4873 } 4874 4875 void 4876 vop_setextattr_post(void *ap, int rc) 4877 { 4878 struct vop_setextattr_args *a = ap; 4879 4880 if (!rc) 4881 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4882 } 4883 4884 void 4885 vop_symlink_post(void *ap, int rc) 4886 { 4887 struct vop_symlink_args *a = ap; 4888 4889 if (!rc) 4890 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4891 } 4892 4893 void 4894 vop_open_post(void *ap, int rc) 4895 { 4896 struct vop_open_args *a = ap; 4897 4898 if (!rc) 4899 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 4900 } 4901 4902 void 4903 vop_close_post(void *ap, int rc) 4904 { 4905 struct vop_close_args *a = ap; 4906 4907 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 4908 (a->a_vp->v_iflag & VI_DOOMED) == 0)) { 4909 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 4910 NOTE_CLOSE_WRITE : NOTE_CLOSE); 4911 } 4912 } 4913 4914 void 4915 vop_read_post(void *ap, int rc) 4916 { 4917 struct vop_read_args *a = ap; 4918 4919 if (!rc) 4920 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 4921 } 4922 4923 void 4924 vop_readdir_post(void *ap, int rc) 4925 { 4926 struct vop_readdir_args *a = ap; 4927 4928 if (!rc) 4929 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 4930 } 4931 4932 static struct knlist fs_knlist; 4933 4934 static void 4935 vfs_event_init(void *arg) 4936 { 4937 knlist_init_mtx(&fs_knlist, NULL); 4938 } 4939 /* XXX - correct order? */ 4940 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4941 4942 void 4943 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4944 { 4945 4946 KNOTE_UNLOCKED(&fs_knlist, event); 4947 } 4948 4949 static int filt_fsattach(struct knote *kn); 4950 static void filt_fsdetach(struct knote *kn); 4951 static int filt_fsevent(struct knote *kn, long hint); 4952 4953 struct filterops fs_filtops = { 4954 .f_isfd = 0, 4955 .f_attach = filt_fsattach, 4956 .f_detach = filt_fsdetach, 4957 .f_event = filt_fsevent 4958 }; 4959 4960 static int 4961 filt_fsattach(struct knote *kn) 4962 { 4963 4964 kn->kn_flags |= EV_CLEAR; 4965 knlist_add(&fs_knlist, kn, 0); 4966 return (0); 4967 } 4968 4969 static void 4970 filt_fsdetach(struct knote *kn) 4971 { 4972 4973 knlist_remove(&fs_knlist, kn, 0); 4974 } 4975 4976 static int 4977 filt_fsevent(struct knote *kn, long hint) 4978 { 4979 4980 kn->kn_fflags |= hint; 4981 return (kn->kn_fflags != 0); 4982 } 4983 4984 static int 4985 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4986 { 4987 struct vfsidctl vc; 4988 int error; 4989 struct mount *mp; 4990 4991 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4992 if (error) 4993 return (error); 4994 if (vc.vc_vers != VFS_CTL_VERS1) 4995 return (EINVAL); 4996 mp = vfs_getvfs(&vc.vc_fsid); 4997 if (mp == NULL) 4998 return (ENOENT); 4999 /* ensure that a specific sysctl goes to the right filesystem. 
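 * (Hedged usage sketch: a userland tool would typically fill in a
 * struct vfsidctl with vc_vers = VFS_CTL_VERS1, the fsid obtained from
 * statfs(2), the filesystem type name (or "*" to bypass the check
 * below) and a filesystem-specific vc_op, then write it to the vfs.ctl
 * sysctl; the request is forwarded to the filesystem via VFS_SYSCTL().)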
*/ 5000 if (strcmp(vc.vc_fstypename, "*") != 0 && 5001 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5002 vfs_rel(mp); 5003 return (EINVAL); 5004 } 5005 VCTLTOREQ(&vc, req); 5006 error = VFS_SYSCTL(mp, vc.vc_op, req); 5007 vfs_rel(mp); 5008 return (error); 5009 } 5010 5011 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 5012 NULL, 0, sysctl_vfs_ctl, "", 5013 "Sysctl by fsid"); 5014 5015 /* 5016 * Function to initialize a va_filerev field sensibly. 5017 * XXX: Wouldn't a random number make a lot more sense ?? 5018 */ 5019 u_quad_t 5020 init_va_filerev(void) 5021 { 5022 struct bintime bt; 5023 5024 getbinuptime(&bt); 5025 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5026 } 5027 5028 static int filt_vfsread(struct knote *kn, long hint); 5029 static int filt_vfswrite(struct knote *kn, long hint); 5030 static int filt_vfsvnode(struct knote *kn, long hint); 5031 static void filt_vfsdetach(struct knote *kn); 5032 static struct filterops vfsread_filtops = { 5033 .f_isfd = 1, 5034 .f_detach = filt_vfsdetach, 5035 .f_event = filt_vfsread 5036 }; 5037 static struct filterops vfswrite_filtops = { 5038 .f_isfd = 1, 5039 .f_detach = filt_vfsdetach, 5040 .f_event = filt_vfswrite 5041 }; 5042 static struct filterops vfsvnode_filtops = { 5043 .f_isfd = 1, 5044 .f_detach = filt_vfsdetach, 5045 .f_event = filt_vfsvnode 5046 }; 5047 5048 static void 5049 vfs_knllock(void *arg) 5050 { 5051 struct vnode *vp = arg; 5052 5053 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5054 } 5055 5056 static void 5057 vfs_knlunlock(void *arg) 5058 { 5059 struct vnode *vp = arg; 5060 5061 VOP_UNLOCK(vp, 0); 5062 } 5063 5064 static void 5065 vfs_knl_assert_locked(void *arg) 5066 { 5067 #ifdef DEBUG_VFS_LOCKS 5068 struct vnode *vp = arg; 5069 5070 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5071 #endif 5072 } 5073 5074 static void 5075 vfs_knl_assert_unlocked(void *arg) 5076 { 5077 #ifdef DEBUG_VFS_LOCKS 5078 struct vnode *vp = arg; 5079 5080 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5081 #endif 5082 } 5083 5084 int 5085 vfs_kqfilter(struct vop_kqfilter_args *ap) 5086 { 5087 struct vnode *vp = ap->a_vp; 5088 struct knote *kn = ap->a_kn; 5089 struct knlist *knl; 5090 5091 switch (kn->kn_filter) { 5092 case EVFILT_READ: 5093 kn->kn_fop = &vfsread_filtops; 5094 break; 5095 case EVFILT_WRITE: 5096 kn->kn_fop = &vfswrite_filtops; 5097 break; 5098 case EVFILT_VNODE: 5099 kn->kn_fop = &vfsvnode_filtops; 5100 break; 5101 default: 5102 return (EINVAL); 5103 } 5104 5105 kn->kn_hook = (caddr_t)vp; 5106 5107 v_addpollinfo(vp); 5108 if (vp->v_pollinfo == NULL) 5109 return (ENOMEM); 5110 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5111 vhold(vp); 5112 knlist_add(knl, kn, 0); 5113 5114 return (0); 5115 } 5116 5117 /* 5118 * Detach knote from vnode 5119 */ 5120 static void 5121 filt_vfsdetach(struct knote *kn) 5122 { 5123 struct vnode *vp = (struct vnode *)kn->kn_hook; 5124 5125 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5126 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5127 vdrop(vp); 5128 } 5129 5130 /*ARGSUSED*/ 5131 static int 5132 filt_vfsread(struct knote *kn, long hint) 5133 { 5134 struct vnode *vp = (struct vnode *)kn->kn_hook; 5135 struct vattr va; 5136 int res; 5137 5138 /* 5139 * filesystem is gone, so set the EOF flag and schedule 5140 * the knote for deletion. 
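 *
 * (Hedged sketch of the userland side: a consumer registers with
 *
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * and a later revoke(2) or forced unmount of the backing filesystem is
 * then reported with EV_EOF set; EV_ONESHOT makes the kernel drop the
 * knote after delivering it once.)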
5141 */ 5142 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5143 VI_LOCK(vp); 5144 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5145 VI_UNLOCK(vp); 5146 return (1); 5147 } 5148 5149 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5150 return (0); 5151 5152 VI_LOCK(vp); 5153 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5154 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5155 VI_UNLOCK(vp); 5156 return (res); 5157 } 5158 5159 /*ARGSUSED*/ 5160 static int 5161 filt_vfswrite(struct knote *kn, long hint) 5162 { 5163 struct vnode *vp = (struct vnode *)kn->kn_hook; 5164 5165 VI_LOCK(vp); 5166 5167 /* 5168 * filesystem is gone, so set the EOF flag and schedule 5169 * the knote for deletion. 5170 */ 5171 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5172 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5173 5174 kn->kn_data = 0; 5175 VI_UNLOCK(vp); 5176 return (1); 5177 } 5178 5179 static int 5180 filt_vfsvnode(struct knote *kn, long hint) 5181 { 5182 struct vnode *vp = (struct vnode *)kn->kn_hook; 5183 int res; 5184 5185 VI_LOCK(vp); 5186 if (kn->kn_sfflags & hint) 5187 kn->kn_fflags |= hint; 5188 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5189 kn->kn_flags |= EV_EOF; 5190 VI_UNLOCK(vp); 5191 return (1); 5192 } 5193 res = (kn->kn_fflags != 0); 5194 VI_UNLOCK(vp); 5195 return (res); 5196 } 5197 5198 int 5199 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5200 { 5201 int error; 5202 5203 if (dp->d_reclen > ap->a_uio->uio_resid) 5204 return (ENAMETOOLONG); 5205 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5206 if (error) { 5207 if (ap->a_ncookies != NULL) { 5208 if (ap->a_cookies != NULL) 5209 free(ap->a_cookies, M_TEMP); 5210 ap->a_cookies = NULL; 5211 *ap->a_ncookies = 0; 5212 } 5213 return (error); 5214 } 5215 if (ap->a_ncookies == NULL) 5216 return (0); 5217 5218 KASSERT(ap->a_cookies, 5219 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5220 5221 *ap->a_cookies = realloc(*ap->a_cookies, 5222 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5223 (*ap->a_cookies)[*ap->a_ncookies] = off; 5224 *ap->a_ncookies += 1; 5225 return (0); 5226 } 5227 5228 /* 5229 * Mark for update the access time of the file if the filesystem 5230 * supports VOP_MARKATIME. This functionality is used by execve and 5231 * mmap, so we want to avoid the I/O implied by directly setting 5232 * va_atime for the sake of efficiency. 5233 */ 5234 void 5235 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5236 { 5237 struct mount *mp; 5238 5239 mp = vp->v_mount; 5240 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5241 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5242 (void)VOP_MARKATIME(vp); 5243 } 5244 5245 /* 5246 * The purpose of this routine is to remove granularity from accmode_t, 5247 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5248 * VADMIN and VAPPEND. 5249 * 5250 * If it returns 0, the caller is supposed to continue with the usual 5251 * access checks using 'accmode' as modified by this routine. If it 5252 * returns nonzero value, the caller is supposed to return that value 5253 * as errno. 5254 * 5255 * Note that after this routine runs, accmode may be zero. 5256 */ 5257 int 5258 vfs_unixify_accmode(accmode_t *accmode) 5259 { 5260 /* 5261 * There is no way to specify explicit "deny" rule using 5262 * file mode or POSIX.1e ACLs. 
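 * Such a request is therefore cleared below and reported as success.
 *
 * (Worked example for the remainder of the routine, hedged: an NFSv4
 * style request for VREAD | VREAD_ATTRIBUTES | VWRITE_ACL comes back as
 * VREAD | VADMIN, since the attribute-read bit is dropped with the other
 * VSTAT_PERMS and the ACL-write bit is folded into VADMIN as part of
 * VADMIN_PERMS; the caller then runs its usual check, e.g. vaccess(),
 * on the reduced mask.)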
5263 */ 5264 if (*accmode & VEXPLICIT_DENY) { 5265 *accmode = 0; 5266 return (0); 5267 } 5268 5269 /* 5270 * None of these can be translated into usual access bits. 5271 * Also, the common case for NFSv4 ACLs is to not contain 5272 * either of these bits. Caller should check for VWRITE 5273 * on the containing directory instead. 5274 */ 5275 if (*accmode & (VDELETE_CHILD | VDELETE)) 5276 return (EPERM); 5277 5278 if (*accmode & VADMIN_PERMS) { 5279 *accmode &= ~VADMIN_PERMS; 5280 *accmode |= VADMIN; 5281 } 5282 5283 /* 5284 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5285 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5286 */ 5287 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5288 5289 return (0); 5290 } 5291 5292 /* 5293 * These are helper functions for filesystems to traverse all 5294 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5295 * 5296 * This interface replaces MNT_VNODE_FOREACH. 5297 */ 5298 5299 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5300 5301 struct vnode * 5302 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5303 { 5304 struct vnode *vp; 5305 5306 if (should_yield()) 5307 kern_yield(PRI_USER); 5308 MNT_ILOCK(mp); 5309 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5310 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5311 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5312 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5313 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5314 continue; 5315 VI_LOCK(vp); 5316 if ((vp->v_iflag & VI_DOOMED) != 0) { 5317 VI_UNLOCK(vp); 5318 continue; 5319 } 5320 break; 5321 } 5322 if (vp == NULL) { 5323 __mnt_vnode_markerfree_all(mvp, mp); 5324 /* MNT_IUNLOCK(mp); -- done in above function */ 5325 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5326 return (NULL); 5327 } 5328 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5329 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5330 MNT_IUNLOCK(mp); 5331 return (vp); 5332 } 5333 5334 struct vnode * 5335 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5336 { 5337 struct vnode *vp; 5338 5339 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5340 MNT_ILOCK(mp); 5341 MNT_REF(mp); 5342 (*mvp)->v_mount = mp; 5343 (*mvp)->v_type = VMARKER; 5344 5345 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5346 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5347 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5348 continue; 5349 VI_LOCK(vp); 5350 if ((vp->v_iflag & VI_DOOMED) != 0) { 5351 VI_UNLOCK(vp); 5352 continue; 5353 } 5354 break; 5355 } 5356 if (vp == NULL) { 5357 MNT_REL(mp); 5358 MNT_IUNLOCK(mp); 5359 free(*mvp, M_VNODE_MARKER); 5360 *mvp = NULL; 5361 return (NULL); 5362 } 5363 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5364 MNT_IUNLOCK(mp); 5365 return (vp); 5366 } 5367 5368 void 5369 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5370 { 5371 5372 if (*mvp == NULL) { 5373 MNT_IUNLOCK(mp); 5374 return; 5375 } 5376 5377 mtx_assert(MNT_MTX(mp), MA_OWNED); 5378 5379 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5380 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5381 MNT_REL(mp); 5382 MNT_IUNLOCK(mp); 5383 free(*mvp, M_VNODE_MARKER); 5384 *mvp = NULL; 5385 } 5386 5387 /* 5388 * These are helper functions for filesystems to traverse their 5389 * active vnodes. 
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5390 */ 5391 static void 5392 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5393 { 5394 5395 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5396 5397 MNT_ILOCK(mp); 5398 MNT_REL(mp); 5399 MNT_IUNLOCK(mp); 5400 free(*mvp, M_VNODE_MARKER); 5401 *mvp = NULL; 5402 } 5403 5404 /* 5405 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5406 * conventional lock order during mnt_vnode_next_active iteration. 5407 * 5408 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5409 * The list lock is dropped and reacquired. On success, both locks are held. 5410 * On failure, the mount vnode list lock is held but the vnode interlock is 5411 * not, and the procedure may have yielded. 5412 */ 5413 static bool 5414 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5415 struct vnode *vp) 5416 { 5417 const struct vnode *tmp; 5418 bool held, ret; 5419 5420 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5421 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5422 ("%s: bad marker", __func__)); 5423 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5424 ("%s: inappropriate vnode", __func__)); 5425 ASSERT_VI_UNLOCKED(vp, __func__); 5426 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5427 5428 ret = false; 5429 5430 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5431 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5432 5433 /* 5434 * Use a hold to prevent vp from disappearing while the mount vnode 5435 * list lock is dropped and reacquired. Normally a hold would be 5436 * acquired with vhold(), but that might try to acquire the vnode 5437 * interlock, which would be a LOR with the mount vnode list lock. 5438 */ 5439 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5440 mtx_unlock(&mp->mnt_listmtx); 5441 if (!held) 5442 goto abort; 5443 VI_LOCK(vp); 5444 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5445 vdropl(vp); 5446 goto abort; 5447 } 5448 mtx_lock(&mp->mnt_listmtx); 5449 5450 /* 5451 * Determine whether the vnode is still the next one after the marker, 5452 * excepting any other markers. If the vnode has not been doomed by 5453 * vgone() then the hold should have ensured that it remained on the 5454 * active list. If it has been doomed but is still on the active list, 5455 * don't abort, but rather skip over it (avoid spinning on doomed 5456 * vnodes). 5457 */ 5458 tmp = mvp; 5459 do { 5460 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5461 } while (tmp != NULL && tmp->v_type == VMARKER); 5462 if (tmp != vp) { 5463 mtx_unlock(&mp->mnt_listmtx); 5464 VI_UNLOCK(vp); 5465 goto abort; 5466 } 5467 5468 ret = true; 5469 goto out; 5470 abort: 5471 maybe_yield(); 5472 mtx_lock(&mp->mnt_listmtx); 5473 out: 5474 if (ret) 5475 ASSERT_VI_LOCKED(vp, __func__); 5476 else 5477 ASSERT_VI_UNLOCKED(vp, __func__); 5478 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5479 return (ret); 5480 } 5481 5482 static struct vnode * 5483 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5484 { 5485 struct vnode *vp, *nvp; 5486 5487 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5488 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5489 restart: 5490 vp = TAILQ_NEXT(*mvp, v_actfreelist); 5491 while (vp != NULL) { 5492 if (vp->v_type == VMARKER) { 5493 vp = TAILQ_NEXT(vp, v_actfreelist); 5494 continue; 5495 } 5496 /* 5497 * Try-lock because this is the wrong lock order. 
If that does 5498 * not succeed, drop the mount vnode list lock and try to 5499 * reacquire it and the vnode interlock in the right order. 5500 */ 5501 if (!VI_TRYLOCK(vp) && 5502 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 5503 goto restart; 5504 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 5505 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 5506 ("alien vnode on the active list %p %p", vp, mp)); 5507 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 5508 break; 5509 nvp = TAILQ_NEXT(vp, v_actfreelist); 5510 VI_UNLOCK(vp); 5511 vp = nvp; 5512 } 5513 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5514 5515 /* Check if we are done */ 5516 if (vp == NULL) { 5517 mtx_unlock(&mp->mnt_listmtx); 5518 mnt_vnode_markerfree_active(mvp, mp); 5519 return (NULL); 5520 } 5521 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 5522 mtx_unlock(&mp->mnt_listmtx); 5523 ASSERT_VI_LOCKED(vp, "active iter"); 5524 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 5525 return (vp); 5526 } 5527 5528 struct vnode * 5529 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5530 { 5531 5532 if (should_yield()) 5533 kern_yield(PRI_USER); 5534 mtx_lock(&mp->mnt_listmtx); 5535 return (mnt_vnode_next_active(mvp, mp)); 5536 } 5537 5538 struct vnode * 5539 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 5540 { 5541 struct vnode *vp; 5542 5543 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5544 MNT_ILOCK(mp); 5545 MNT_REF(mp); 5546 MNT_IUNLOCK(mp); 5547 (*mvp)->v_type = VMARKER; 5548 (*mvp)->v_mount = mp; 5549 5550 mtx_lock(&mp->mnt_listmtx); 5551 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 5552 if (vp == NULL) { 5553 mtx_unlock(&mp->mnt_listmtx); 5554 mnt_vnode_markerfree_active(mvp, mp); 5555 return (NULL); 5556 } 5557 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 5558 return (mnt_vnode_next_active(mvp, mp)); 5559 } 5560 5561 void 5562 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5563 { 5564 5565 if (*mvp == NULL) 5566 return; 5567 5568 mtx_lock(&mp->mnt_listmtx); 5569 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5570 mtx_unlock(&mp->mnt_listmtx); 5571 mnt_vnode_markerfree_active(mvp, mp); 5572 } 5573
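
/*
 * Hedged usage sketch (not part of the original file): filesystems
 * normally consume the iterators above through the MNT_VNODE_FOREACH_ALL()
 * and MNT_VNODE_FOREACH_ACTIVE() wrappers in sys/mount.h rather than by
 * calling the __mnt_vnode_*() functions directly.  Each vnode is handed
 * back with its interlock held, so a typical walk looks roughly like
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) != 0)
 *			continue;
 *		...operate on the locked, referenced vnode...
 *		vput(vp);
 *	}
 *
 * Leaving the loop early requires the matching
 * MNT_VNODE_FOREACH_ALL_ABORT() so that the marker vnode allocated by
 * __mnt_vnode_first_all() is taken off the mount's vnode list and freed.
 */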