/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_usecount(struct vnode *);
static void	v_incr_usecount_locked(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	vnlru_return_batches(struct vfsops *mnt_op);
static void	destroy_vpollinfo(struct vpollinfo *vi);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
 */
static unsigned long	numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

static u_long mnt_free_list_batch = 128;
SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
    &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
    "Number of calls to reassignbuf");

static counter_u64_t free_owe_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
    "Number of times free vnodes kept on active list due to VFS "
    "owing inactivation");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata on block devices
 * is delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
int desiredvnodes;
static int gapvnodes;		/* gap between wanted and desired */
static int vhiwat;		/* enough extras after expansion */
static int vlowat;		/* minimal extras before expansion */
static int vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static int
sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
{
	int error, old_desiredvnodes;

	old_desiredvnodes = desiredvnodes;
	if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
		return (error);
	if (old_desiredvnodes != desiredvnodes) {
		wantfreevnodes = desiredvnodes / 4;
		/* XXX locking seems to be incomplete. */
		vfs_hash_changesize(desiredvnodes);
		cache_changesize(desiredvnodes);
	}
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
    sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return uma_zalloc(buf_trie_zone, M_NOWAIT);
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * 1024 * 1024 / 64)	/* 8M */
#endif

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	u_int i;
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
Generally speaking, it scales with the 440 * physical memory size. The ratio of desiredvnodes to the physical 441 * memory size is 1:16 until desiredvnodes exceeds 98,304. 442 * Thereafter, the 443 * marginal ratio of desiredvnodes to the physical memory size is 444 * 1:64. However, desiredvnodes is limited by the kernel's heap 445 * size. The memory required by desiredvnodes vnodes and vm objects 446 * must not exceed 1/10th of the kernel's heap size. 447 */ 448 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 449 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 450 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 451 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 452 desiredvnodes = min(physvnodes, virtvnodes); 453 if (desiredvnodes > MAXVNODES_MAX) { 454 if (bootverbose) 455 printf("Reducing kern.maxvnodes %d -> %d\n", 456 desiredvnodes, MAXVNODES_MAX); 457 desiredvnodes = MAXVNODES_MAX; 458 } 459 wantfreevnodes = desiredvnodes / 4; 460 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 461 TAILQ_INIT(&vnode_free_list); 462 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 463 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 464 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 465 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 466 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 467 /* 468 * Preallocate enough nodes to support one-per buf so that 469 * we can not fail an insert. reassignbuf() callers can not 470 * tolerate the insertion failure. 471 */ 472 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 473 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 474 UMA_ZONE_NOFREE | UMA_ZONE_VM); 475 uma_prealloc(buf_trie_zone, nbuf); 476 477 vnodes_created = counter_u64_alloc(M_WAITOK); 478 recycles_count = counter_u64_alloc(M_WAITOK); 479 free_owe_inact = counter_u64_alloc(M_WAITOK); 480 481 /* 482 * Initialize the filesystem syncer. 483 */ 484 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 485 &syncer_mask); 486 syncer_maxdelay = syncer_mask + 1; 487 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 488 cv_init(&sync_wakeup, "syncer"); 489 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 490 vnsz2log++; 491 vnsz2log--; 492 } 493 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 494 495 496 /* 497 * Mark a mount point as busy. Used to synchronize access and to delay 498 * unmounting. Eventually, mountlist_mtx is not released on failure. 499 * 500 * vfs_busy() is a custom lock, it can block the caller. 501 * vfs_busy() only sleeps if the unmount is active on the mount point. 502 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 503 * vnode belonging to mp. 504 * 505 * Lookup uses vfs_busy() to traverse mount points. 506 * root fs var fs 507 * / vnode lock A / vnode lock (/var) D 508 * /var vnode lock B /log vnode lock(/var/log) E 509 * vfs_busy lock C vfs_busy lock F 510 * 511 * Within each file system, the lock order is C->A->B and F->D->E. 
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	MNT_ILOCK(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount it tried to busy is no longer
	 * valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
	mp->mnt_lockref--;
	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In worst case we may
 * get pointer to unmounted or even different filesystem, so we have to
 * check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
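 *
 * (Illustration, derived from the loop below: a filesystem whose
 * vfc_typenum happens to be, say, 53 gets val[1] = 53, while val[0] is
 * built with makedev(255, ...), placing the low byte of that type number
 * in the top byte of the minor and splitting the 16-bit mntid_base
 * counter across the minor's lower bytes.)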
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
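 *
 * (Rough sizing note, taken from the code below: each pass over a mount
 * point tries to reclaim about one tenth of that mount's proportional
 * share of gapvnodes, i.e.
 * target = nvnodes * gapvnodes / desiredvnodes / 10 + 1.)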
 */
static int
vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it doing anything.)  The
		 * order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    ((vp->v_iflag & VI_FREE) != 0) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_iflag & VI_FREE) != 0 ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_USER);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0, "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static void
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp;
	struct mount *mp;
	bool tried_batches;

	tried_batches = false;
	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (vp == NULL) {
			if (tried_batches)
				break;
			mtx_unlock(&vnode_free_list_mtx);
			vnlru_return_batches(mnt_op);
			tried_batches = true;
			mtx_lock(&vnode_free_list_mtx);
			continue;
		}

		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		KASSERT((vp->v_iflag & VI_FREE) != 0,
		    ("Removing vnode not on freelist"));
		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
		    ("Mangling active vnode"));
		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);

		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if ((mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
			continue;
		}
		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
		    vp, ("vp inconsistent on freelist"));

		/*
		 * The clear of VI_FREE prevents activation of the
		 * vnode.  There is no sense in putting the vnode on
		 * the mount point active list, only to remove it
		 * later during recycling.  Inline the relevant part
		 * of vholdl(), to avoid triggering assertions or
		 * activating.
		 */
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		refcount_acquire(&vp->v_holdcnt);

		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_free_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_free_list_mtx);
}


/* XXX some names and initialization are bad for limits and watermarks. */
static int
vspace(void)
{
	int space;

	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
	if (numvnodes > desiredvnodes)
		return (0);
	space = desiredvnodes - numvnodes;
	if (freevnodes > wantfreevnodes)
		space += freevnodes - wantfreevnodes;
	return (space);
}

static void
vnlru_return_batch_locked(struct mount *mp)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	if (mp->mnt_tmpfreevnodelistsize == 0)
		return;

	TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) {
		VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp,
		    ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist"));
		vp->v_mflag &= ~VMP_TMPMNTFREELIST;
	}
	mtx_lock(&vnode_free_list_mtx);
	TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist);
	freevnodes += mp->mnt_tmpfreevnodelistsize;
	mtx_unlock(&vnode_free_list_mtx);
	mp->mnt_tmpfreevnodelistsize = 0;
}

static void
vnlru_return_batch(struct mount *mp)
{

	mtx_lock(&mp->mnt_listmtx);
	vnlru_return_batch_locked(mp);
	mtx_unlock(&mp->mnt_listmtx);
}

static void
vnlru_return_batches(struct vfsops *mnt_op)
{
	struct mount *mp, *nmp;
	bool need_unbusy;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		need_unbusy = false;
		if (mnt_op != NULL && mp->mnt_op != mnt_op)
			goto next;
		if (mp->mnt_tmpfreevnodelistsize == 0)
			goto next;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) {
			vnlru_return_batch(mp);
			need_unbusy = true;
			mtx_lock(&mountlist_mtx);
		}
next:
		nmp = TAILQ_NEXT(mp, mnt_list);
		if (need_unbusy)
			vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	unsigned long onumvnodes;
	int done, force, reclaim_nc_src, trigger, usevnodes;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_free_list_mtx);
		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (numvnodes > desiredvnodes)
			vnlru_free_locked(numvnodes - desiredvnodes, NULL);
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (vspace() >= vlowat && force == 0) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		onumvnodes = numvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (numvnodes <= desiredvnodes)
			usevnodes = numvnodes - freevnodes;
		else
			usevnodes = numvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, reclaim_nc_src, trigger);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim();
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
		/*
		 * After becoming active to expand above low water, keep
		 * active until above high water.
		 */
		force = vspace() < vhiwat;
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
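	 * vn_start_write() is called with V_NOWAIT below, so an in-progress
	 * suspension makes us give up on this vnode rather than block here.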
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0) {
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

static void
vcheckspace(void)
{

	if (vspace() < vlowat && vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

/*
 * Wait if necessary for space for a new vnode.
 */
static int
getnewvnode_wait(int suspended)
{

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (numvnodes >= desiredvnodes) {
		if (suspended) {
			/*
			 * The file system is being suspended.  We cannot
			 * risk a deadlock here, so allow allocation of
			 * another vnode even if this would give too many.
			 */
			return (0);
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
	}
	/* Post-adjust like the pre-adjust in getnewvnode(). */
	if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
		vnlru_free_locked(1, NULL);
	return (numvnodes >= desiredvnodes ? ENFILE : 0);
}

/*
 * This hack is fragile, and probably not needed any more now that the
 * watermark handling works.
 */
void
getnewvnode_reserve(u_int count)
{
	struct thread *td;

	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
	/* XXX no longer so quick, but this part is not racy. */
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
		vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes,
		    freevnodes - wantfreevnodes), NULL);
	mtx_unlock(&vnode_free_list_mtx);

	td = curthread;
	/* First try to be quick and racy. */
	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
		td->td_vp_reserv += count;
		vcheckspace();	/* XXX no longer so quick, but more racy */
		return;
	} else
		atomic_subtract_long(&numvnodes, count);

	mtx_lock(&vnode_free_list_mtx);
	while (count > 0) {
		if (getnewvnode_wait(0) == 0) {
			count--;
			td->td_vp_reserv++;
			atomic_add_long(&numvnodes, 1);
		}
	}
	vcheckspace();
	mtx_unlock(&vnode_free_list_mtx);
}

/*
 * This hack is fragile, especially if desiredvnodes or wantvnodes are
 * misconfigured or changed significantly.  Reducing desiredvnodes below
 * the reserved amount should cause bizarre behaviour like reducing it
 * below the number of active vnodes -- the system will try to reduce
 * numvnodes to match, but should fail, so the subtraction below should
 * not overflow.
 */
void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;
	static int cyclecount;
	int error __unused;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_free_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	counter_u64_add(vnodes_created, 1);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;
	int active;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
	    ("Active vnode list size %d > Vnode list size %d",
	     mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
	active = vp->v_iflag & VI_ACTIVE;
	vp->v_iflag &= ~VI_ACTIVE;
	if (active) {
		mtx_lock(&mp->mnt_listmtx);
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
	 * Hence the need to hold the vnode interlock throughout.
	 */
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) &&
	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		if (dtr != NULL)
			dtr(vp, dtr_arg);
		return (EBUSY);
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
	    ("Activating already active vnode"));
	vp->v_iflag |= VI_ACTIVE;
	mtx_lock(&mp->mnt_listmtx);
	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
	mp->mnt_activevnodelistsize++;
	mtx_unlock(&mp->mnt_listmtx);
	VI_UNLOCK(vp);
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0 && !(flags & V_CLEANONLY))
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		if ((flags & V_VMIO) == 0) {
			BO_UNLOCK(bo);
			if (bo->bo_object != NULL) {
				VM_OBJECT_WLOCK(bo->bo_object);
				vm_object_pip_wait(bo->bo_object, "bovlbx");
				VM_OBJECT_WUNLOCK(bo->bo_object);
			}
			BO_LOCK(bo);
		}
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL &&
	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
		VM_OBJECT_WLOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
		    OBJPR_CLEANONLY : 0);
		VM_OBJECT_WUNLOCK(bo->bo_object);
	}

#ifdef INVARIANTS
	BO_LOCK(bo);
	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO |
	    V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 ||
	    bo->bo_clean.bv_cnt > 0))
		panic("vinvalbuf: flush failed");
	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 &&
	    bo->bo_dirty.bv_cnt > 0)
		panic("vinvalbuf: flush dirty failed");
	BO_UNLOCK(bo);
#endif
	return (0);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{

	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
	if (vp->v_object != NULL && vp->v_object->handle != vp)
		return (0);
	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
}

/*
 * Flush out buffers on the specified list.
 *
 */
static int
flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
    int slptimeo)
{
	struct buf *bp, *nbp;
	int retval, error;
	daddr_t lblkno;
	b_xflags_t xflags;

	ASSERT_BO_WLOCKED(bo);

	retval = 0;
	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		if (nbp != NULL) {
			lblkno = nbp->b_lblkno;
			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
		}
		retval = EAGAIN;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			BO_LOCK(bo);
			return (error != ENOLCK ? error : EAGAIN);
		}
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			bremfree(bp);
			bp->b_flags |= B_ASYNC;
			bwrite(bp);
			BO_LOCK(bo);
			return (EAGAIN);	/* XXX: why not loop ? */
*/ 1774 } 1775 bremfree(bp); 1776 bp->b_flags |= (B_INVAL | B_RELBUF); 1777 bp->b_flags &= ~B_ASYNC; 1778 brelse(bp); 1779 BO_LOCK(bo); 1780 if (nbp == NULL) 1781 break; 1782 nbp = gbincore(bo, lblkno); 1783 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1784 != xflags) 1785 break; /* nbp invalid */ 1786 } 1787 return (retval); 1788 } 1789 1790 int 1791 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 1792 { 1793 struct buf *bp; 1794 int error; 1795 daddr_t lblkno; 1796 1797 ASSERT_BO_LOCKED(bo); 1798 1799 for (lblkno = startn;;) { 1800 again: 1801 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 1802 if (bp == NULL || bp->b_lblkno >= endn || 1803 bp->b_lblkno < startn) 1804 break; 1805 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 1806 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 1807 if (error != 0) { 1808 BO_RLOCK(bo); 1809 if (error == ENOLCK) 1810 goto again; 1811 return (error); 1812 } 1813 KASSERT(bp->b_bufobj == bo, 1814 ("bp %p wrong b_bufobj %p should be %p", 1815 bp, bp->b_bufobj, bo)); 1816 lblkno = bp->b_lblkno + 1; 1817 if ((bp->b_flags & B_MANAGED) == 0) 1818 bremfree(bp); 1819 bp->b_flags |= B_RELBUF; 1820 /* 1821 * In the VMIO case, use the B_NOREUSE flag to hint that the 1822 * pages backing each buffer in the range are unlikely to be 1823 * reused. Dirty buffers will have the hint applied once 1824 * they've been written. 1825 */ 1826 if (bp->b_vp->v_object != NULL) 1827 bp->b_flags |= B_NOREUSE; 1828 brelse(bp); 1829 BO_RLOCK(bo); 1830 } 1831 return (0); 1832 } 1833 1834 /* 1835 * Truncate a file's buffer and pages to a specified length. This 1836 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1837 * sync activity. 1838 */ 1839 int 1840 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1841 { 1842 struct buf *bp, *nbp; 1843 int anyfreed; 1844 int trunclbn; 1845 struct bufobj *bo; 1846 1847 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1848 vp, cred, blksize, (uintmax_t)length); 1849 1850 /* 1851 * Round up to the *next* lbn. 
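* E.g. truncating to length 1 with a 16K block size gives trunclbn 1: lbn 0 is kept and everything from lbn 1 on is invalidated.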
1852 */ 1853 trunclbn = howmany(length, blksize); 1854 1855 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1856 restart: 1857 bo = &vp->v_bufobj; 1858 BO_LOCK(bo); 1859 anyfreed = 1; 1860 for (;anyfreed;) { 1861 anyfreed = 0; 1862 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1863 if (bp->b_lblkno < trunclbn) 1864 continue; 1865 if (BUF_LOCK(bp, 1866 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1867 BO_LOCKPTR(bo)) == ENOLCK) 1868 goto restart; 1869 1870 bremfree(bp); 1871 bp->b_flags |= (B_INVAL | B_RELBUF); 1872 bp->b_flags &= ~B_ASYNC; 1873 brelse(bp); 1874 anyfreed = 1; 1875 1876 BO_LOCK(bo); 1877 if (nbp != NULL && 1878 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1879 (nbp->b_vp != vp) || 1880 (nbp->b_flags & B_DELWRI))) { 1881 BO_UNLOCK(bo); 1882 goto restart; 1883 } 1884 } 1885 1886 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1887 if (bp->b_lblkno < trunclbn) 1888 continue; 1889 if (BUF_LOCK(bp, 1890 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1891 BO_LOCKPTR(bo)) == ENOLCK) 1892 goto restart; 1893 bremfree(bp); 1894 bp->b_flags |= (B_INVAL | B_RELBUF); 1895 bp->b_flags &= ~B_ASYNC; 1896 brelse(bp); 1897 anyfreed = 1; 1898 1899 BO_LOCK(bo); 1900 if (nbp != NULL && 1901 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1902 (nbp->b_vp != vp) || 1903 (nbp->b_flags & B_DELWRI) == 0)) { 1904 BO_UNLOCK(bo); 1905 goto restart; 1906 } 1907 } 1908 } 1909 1910 if (length > 0) { 1911 restartsync: 1912 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1913 if (bp->b_lblkno > 0) 1914 continue; 1915 /* 1916 * Since we hold the vnode lock this should only 1917 * fail if we're racing with the buf daemon. 1918 */ 1919 if (BUF_LOCK(bp, 1920 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1921 BO_LOCKPTR(bo)) == ENOLCK) { 1922 goto restart; 1923 } 1924 VNASSERT((bp->b_flags & B_DELWRI), vp, 1925 ("buf(%p) on dirty queue without DELWRI", bp)); 1926 1927 bremfree(bp); 1928 bawrite(bp); 1929 BO_LOCK(bo); 1930 goto restartsync; 1931 } 1932 } 1933 1934 bufobj_wwait(bo, 0, 0); 1935 BO_UNLOCK(bo); 1936 vnode_pager_setsize(vp, length); 1937 1938 return (0); 1939 } 1940 1941 static void 1942 buf_vlist_remove(struct buf *bp) 1943 { 1944 struct bufv *bv; 1945 1946 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1947 ASSERT_BO_WLOCKED(bp->b_bufobj); 1948 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1949 (BX_VNDIRTY|BX_VNCLEAN), 1950 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1951 if (bp->b_xflags & BX_VNDIRTY) 1952 bv = &bp->b_bufobj->bo_dirty; 1953 else 1954 bv = &bp->b_bufobj->bo_clean; 1955 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1956 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1957 bv->bv_cnt--; 1958 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1959 } 1960 1961 /* 1962 * Add the buffer to the sorted clean or dirty block list. 1963 * 1964 * NOTE: xflags is passed as a constant, optimizing this inline function! 1965 */ 1966 static void 1967 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1968 { 1969 struct bufv *bv; 1970 struct buf *n; 1971 int error; 1972 1973 ASSERT_BO_WLOCKED(bo); 1974 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 1975 ("dead bo %p", bo)); 1976 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1977 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1978 bp->b_xflags |= xflags; 1979 if (xflags & BX_VNDIRTY) 1980 bv = &bo->bo_dirty; 1981 else 1982 bv = &bo->bo_clean; 1983 1984 /* 1985 * Keep the list ordered. Optimize empty list insertion. 
Assume 1986 * we tend to grow at the tail so lookup_le should usually be cheaper 1987 * than _ge. 1988 */ 1989 if (bv->bv_cnt == 0 || 1990 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 1991 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1992 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 1993 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 1994 else 1995 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 1996 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 1997 if (error) 1998 panic("buf_vlist_add: Preallocated nodes insufficient."); 1999 bv->bv_cnt++; 2000 } 2001 2002 /* 2003 * Look up a buffer using the buffer tries. 2004 */ 2005 struct buf * 2006 gbincore(struct bufobj *bo, daddr_t lblkno) 2007 { 2008 struct buf *bp; 2009 2010 ASSERT_BO_LOCKED(bo); 2011 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2012 if (bp != NULL) 2013 return (bp); 2014 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2015 } 2016 2017 /* 2018 * Associate a buffer with a vnode. 2019 */ 2020 void 2021 bgetvp(struct vnode *vp, struct buf *bp) 2022 { 2023 struct bufobj *bo; 2024 2025 bo = &vp->v_bufobj; 2026 ASSERT_BO_WLOCKED(bo); 2027 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2028 2029 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2030 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2031 ("bgetvp: bp already attached! %p", bp)); 2032 2033 vhold(vp); 2034 bp->b_vp = vp; 2035 bp->b_bufobj = bo; 2036 /* 2037 * Insert onto list for new vnode. 2038 */ 2039 buf_vlist_add(bp, bo, BX_VNCLEAN); 2040 } 2041 2042 /* 2043 * Disassociate a buffer from a vnode. 2044 */ 2045 void 2046 brelvp(struct buf *bp) 2047 { 2048 struct bufobj *bo; 2049 struct vnode *vp; 2050 2051 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2052 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2053 2054 /* 2055 * Delete from old vnode list, if on one. 2056 */ 2057 vp = bp->b_vp; /* XXX */ 2058 bo = bp->b_bufobj; 2059 BO_LOCK(bo); 2060 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2061 buf_vlist_remove(bp); 2062 else 2063 panic("brelvp: Buffer %p not on queue.", bp); 2064 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2065 bo->bo_flag &= ~BO_ONWORKLST; 2066 mtx_lock(&sync_mtx); 2067 LIST_REMOVE(bo, bo_synclist); 2068 syncer_worklist_len--; 2069 mtx_unlock(&sync_mtx); 2070 } 2071 bp->b_vp = NULL; 2072 bp->b_bufobj = NULL; 2073 BO_UNLOCK(bo); 2074 vdrop(vp); 2075 } 2076 2077 /* 2078 * Add an item to the syncer work queue. 
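* The bufobj is hashed into the slot the syncer will visit 'delay' seconds from now; if it is already on the worklist it is simply moved to the new slot.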
2079 */ 2080 static void 2081 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2082 { 2083 int slot; 2084 2085 ASSERT_BO_WLOCKED(bo); 2086 2087 mtx_lock(&sync_mtx); 2088 if (bo->bo_flag & BO_ONWORKLST) 2089 LIST_REMOVE(bo, bo_synclist); 2090 else { 2091 bo->bo_flag |= BO_ONWORKLST; 2092 syncer_worklist_len++; 2093 } 2094 2095 if (delay > syncer_maxdelay - 2) 2096 delay = syncer_maxdelay - 2; 2097 slot = (syncer_delayno + delay) & syncer_mask; 2098 2099 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2100 mtx_unlock(&sync_mtx); 2101 } 2102 2103 static int 2104 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2105 { 2106 int error, len; 2107 2108 mtx_lock(&sync_mtx); 2109 len = syncer_worklist_len - sync_vnode_count; 2110 mtx_unlock(&sync_mtx); 2111 error = SYSCTL_OUT(req, &len, sizeof(len)); 2112 return (error); 2113 } 2114 2115 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2116 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2117 2118 static struct proc *updateproc; 2119 static void sched_sync(void); 2120 static struct kproc_desc up_kp = { 2121 "syncer", 2122 sched_sync, 2123 &updateproc 2124 }; 2125 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2126 2127 static int 2128 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2129 { 2130 struct vnode *vp; 2131 struct mount *mp; 2132 2133 *bo = LIST_FIRST(slp); 2134 if (*bo == NULL) 2135 return (0); 2136 vp = bo2vnode(*bo); 2137 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2138 return (1); 2139 /* 2140 * We use vhold in case the vnode does not 2141 * successfully sync. vhold prevents the vnode from 2142 * going away when we unlock the sync_mtx so that 2143 * we can acquire the vnode interlock. 2144 */ 2145 vholdl(vp); 2146 mtx_unlock(&sync_mtx); 2147 VI_UNLOCK(vp); 2148 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2149 vdrop(vp); 2150 mtx_lock(&sync_mtx); 2151 return (*bo == LIST_FIRST(slp)); 2152 } 2153 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2154 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2155 VOP_UNLOCK(vp, 0); 2156 vn_finished_write(mp); 2157 BO_LOCK(*bo); 2158 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2159 /* 2160 * Put us back on the worklist. The worklist 2161 * routine will remove us from our current 2162 * position and then add us back in at a later 2163 * position. 2164 */ 2165 vn_syncer_add_to_worklist(*bo, syncdelay); 2166 } 2167 BO_UNLOCK(*bo); 2168 vdrop(vp); 2169 mtx_lock(&sync_mtx); 2170 return (0); 2171 } 2172 2173 static int first_printf = 1; 2174 2175 /* 2176 * System filesystem synchronizer daemon. 
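* Once a second the syncer advances to the next worklist slot and flushes every vnode queued there; during shutdown it keeps cycling until the worklist drains or the final-delay iterations run out.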
2177 */ 2178 static void 2179 sched_sync(void) 2180 { 2181 struct synclist *next, *slp; 2182 struct bufobj *bo; 2183 long starttime; 2184 struct thread *td = curthread; 2185 int last_work_seen; 2186 int net_worklist_len; 2187 int syncer_final_iter; 2188 int error; 2189 2190 last_work_seen = 0; 2191 syncer_final_iter = 0; 2192 syncer_state = SYNCER_RUNNING; 2193 starttime = time_uptime; 2194 td->td_pflags |= TDP_NORUNNINGBUF; 2195 2196 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2197 SHUTDOWN_PRI_LAST); 2198 2199 mtx_lock(&sync_mtx); 2200 for (;;) { 2201 if (syncer_state == SYNCER_FINAL_DELAY && 2202 syncer_final_iter == 0) { 2203 mtx_unlock(&sync_mtx); 2204 kproc_suspend_check(td->td_proc); 2205 mtx_lock(&sync_mtx); 2206 } 2207 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2208 if (syncer_state != SYNCER_RUNNING && 2209 starttime != time_uptime) { 2210 if (first_printf) { 2211 printf("\nSyncing disks, vnodes remaining... "); 2212 first_printf = 0; 2213 } 2214 printf("%d ", net_worklist_len); 2215 } 2216 starttime = time_uptime; 2217 2218 /* 2219 * Push files whose dirty time has expired. Be careful 2220 * of interrupt race on slp queue. 2221 * 2222 * Skip over empty worklist slots when shutting down. 2223 */ 2224 do { 2225 slp = &syncer_workitem_pending[syncer_delayno]; 2226 syncer_delayno += 1; 2227 if (syncer_delayno == syncer_maxdelay) 2228 syncer_delayno = 0; 2229 next = &syncer_workitem_pending[syncer_delayno]; 2230 /* 2231 * If the worklist has wrapped since 2232 * it was emptied of all but syncer vnodes, 2233 * switch to the FINAL_DELAY state and run 2234 * for one more second. 2235 */ 2236 if (syncer_state == SYNCER_SHUTTING_DOWN && 2237 net_worklist_len == 0 && 2238 last_work_seen == syncer_delayno) { 2239 syncer_state = SYNCER_FINAL_DELAY; 2240 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2241 } 2242 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2243 syncer_worklist_len > 0); 2244 2245 /* 2246 * Keep track of the last time there was anything 2247 * on the worklist other than syncer vnodes. 2248 * Return to the SHUTTING_DOWN state if any 2249 * new work appears. 2250 */ 2251 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2252 last_work_seen = syncer_delayno; 2253 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2254 syncer_state = SYNCER_SHUTTING_DOWN; 2255 while (!LIST_EMPTY(slp)) { 2256 error = sync_vnode(slp, &bo, td); 2257 if (error == 1) { 2258 LIST_REMOVE(bo, bo_synclist); 2259 LIST_INSERT_HEAD(next, bo, bo_synclist); 2260 continue; 2261 } 2262 2263 if (first_printf == 0) { 2264 /* 2265 * Drop the sync mutex, because some watchdog 2266 * drivers need to sleep while patting the watchdog. 2267 */ 2268 mtx_unlock(&sync_mtx); 2269 wdog_kern_pat(WD_LASTVAL); 2270 mtx_lock(&sync_mtx); 2271 } 2272 2273 } 2274 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2275 syncer_final_iter--; 2276 /* 2277 * The variable rushjob allows the kernel to speed up the 2278 * processing of the filesystem syncer process. A rushjob 2279 * value of N tells the filesystem syncer to process the next 2280 * N seconds worth of work on its queue ASAP. Currently rushjob 2281 * is used by the soft update code to speed up the filesystem 2282 * syncer process when the incore state is getting so far 2283 * ahead of the disk that the kernel memory pool is being 2284 * threatened with exhaustion.
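* Each pass through the main loop consumes one unit of rushjob and skips the one-second sleep below, so N queued seconds of work get processed back to back.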
2285 */ 2286 if (rushjob > 0) { 2287 rushjob -= 1; 2288 continue; 2289 } 2290 /* 2291 * Just sleep for a short period of time between 2292 * iterations when shutting down to allow some I/O 2293 * to happen. 2294 * 2295 * If it has taken us less than a second to process the 2296 * current work, then wait. Otherwise start right over 2297 * again. We can still lose time if any single round 2298 * takes more than two seconds, but it does not really 2299 * matter as we are just trying to generally pace the 2300 * filesystem activity. 2301 */ 2302 if (syncer_state != SYNCER_RUNNING || 2303 time_uptime == starttime) { 2304 thread_lock(td); 2305 sched_prio(td, PPAUSE); 2306 thread_unlock(td); 2307 } 2308 if (syncer_state != SYNCER_RUNNING) 2309 cv_timedwait(&sync_wakeup, &sync_mtx, 2310 hz / SYNCER_SHUTDOWN_SPEEDUP); 2311 else if (time_uptime == starttime) 2312 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2313 } 2314 } 2315 2316 /* 2317 * Request the syncer daemon to speed up its work. 2318 * We never push it to speed up more than half of its 2319 * normal turn time, otherwise it could take over the cpu. 2320 */ 2321 int 2322 speedup_syncer(void) 2323 { 2324 int ret = 0; 2325 2326 mtx_lock(&sync_mtx); 2327 if (rushjob < syncdelay / 2) { 2328 rushjob += 1; 2329 stat_rush_requests += 1; 2330 ret = 1; 2331 } 2332 mtx_unlock(&sync_mtx); 2333 cv_broadcast(&sync_wakeup); 2334 return (ret); 2335 } 2336 2337 /* 2338 * Tell the syncer to speed up its work and run through its work 2339 * list several times, then tell it to shut down. 2340 */ 2341 static void 2342 syncer_shutdown(void *arg, int howto) 2343 { 2344 2345 if (howto & RB_NOSYNC) 2346 return; 2347 mtx_lock(&sync_mtx); 2348 syncer_state = SYNCER_SHUTTING_DOWN; 2349 rushjob = 0; 2350 mtx_unlock(&sync_mtx); 2351 cv_broadcast(&sync_wakeup); 2352 kproc_shutdown(arg, howto); 2353 } 2354 2355 void 2356 syncer_suspend(void) 2357 { 2358 2359 syncer_shutdown(updateproc, 0); 2360 } 2361 2362 void 2363 syncer_resume(void) 2364 { 2365 2366 mtx_lock(&sync_mtx); 2367 first_printf = 1; 2368 syncer_state = SYNCER_RUNNING; 2369 mtx_unlock(&sync_mtx); 2370 cv_broadcast(&sync_wakeup); 2371 kproc_resume(updateproc); 2372 } 2373 2374 /* 2375 * Reassign a buffer from one vnode to another. 2376 * Used to assign file specific control information 2377 * (indirect blocks) to the vnode to which they belong. 2378 */ 2379 void 2380 reassignbuf(struct buf *bp) 2381 { 2382 struct vnode *vp; 2383 struct bufobj *bo; 2384 int delay; 2385 #ifdef INVARIANTS 2386 struct bufv *bv; 2387 #endif 2388 2389 vp = bp->b_vp; 2390 bo = bp->b_bufobj; 2391 ++reassignbufcalls; 2392 2393 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2394 bp, bp->b_vp, bp->b_flags); 2395 /* 2396 * B_PAGING flagged buffers cannot be reassigned because their vp 2397 * is not fully linked in. 2398 */ 2399 if (bp->b_flags & B_PAGING) 2400 panic("cannot reassign paging buffer"); 2401 2402 /* 2403 * Delete from old vnode list, if on one. 2404 */ 2405 BO_LOCK(bo); 2406 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2407 buf_vlist_remove(bp); 2408 else 2409 panic("reassignbuf: Buffer %p not on queue.", bp); 2410 /* 2411 * If dirty, put on list of dirty buffers; otherwise insert onto list 2412 * of clean buffers.
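* A buffer that becomes dirty while its bufobj is not yet on the syncer worklist also queues the bufobj there, using dirdelay for directories, metadelay for devices and filedelay for everything else.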
2413 */ 2414 if (bp->b_flags & B_DELWRI) { 2415 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2416 switch (vp->v_type) { 2417 case VDIR: 2418 delay = dirdelay; 2419 break; 2420 case VCHR: 2421 delay = metadelay; 2422 break; 2423 default: 2424 delay = filedelay; 2425 } 2426 vn_syncer_add_to_worklist(bo, delay); 2427 } 2428 buf_vlist_add(bp, bo, BX_VNDIRTY); 2429 } else { 2430 buf_vlist_add(bp, bo, BX_VNCLEAN); 2431 2432 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2433 mtx_lock(&sync_mtx); 2434 LIST_REMOVE(bo, bo_synclist); 2435 syncer_worklist_len--; 2436 mtx_unlock(&sync_mtx); 2437 bo->bo_flag &= ~BO_ONWORKLST; 2438 } 2439 } 2440 #ifdef INVARIANTS 2441 bv = &bo->bo_clean; 2442 bp = TAILQ_FIRST(&bv->bv_hd); 2443 KASSERT(bp == NULL || bp->b_bufobj == bo, 2444 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2445 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2446 KASSERT(bp == NULL || bp->b_bufobj == bo, 2447 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2448 bv = &bo->bo_dirty; 2449 bp = TAILQ_FIRST(&bv->bv_hd); 2450 KASSERT(bp == NULL || bp->b_bufobj == bo, 2451 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2452 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2453 KASSERT(bp == NULL || bp->b_bufobj == bo, 2454 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2455 #endif 2456 BO_UNLOCK(bo); 2457 } 2458 2459 static void 2460 v_init_counters(struct vnode *vp) 2461 { 2462 2463 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2464 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2465 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2466 2467 refcount_init(&vp->v_holdcnt, 1); 2468 refcount_init(&vp->v_usecount, 1); 2469 } 2470 2471 static void 2472 v_incr_usecount_locked(struct vnode *vp) 2473 { 2474 2475 ASSERT_VI_LOCKED(vp, __func__); 2476 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2477 VNASSERT(vp->v_usecount == 0, vp, 2478 ("vnode with usecount and VI_OWEINACT set")); 2479 vp->v_iflag &= ~VI_OWEINACT; 2480 } 2481 refcount_acquire(&vp->v_usecount); 2482 v_incr_devcount(vp); 2483 } 2484 2485 /* 2486 * Increment the use count on the vnode, taking care to reference 2487 * the driver's usecount if this is a chardev. 2488 */ 2489 static void 2490 v_incr_usecount(struct vnode *vp) 2491 { 2492 2493 ASSERT_VI_UNLOCKED(vp, __func__); 2494 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2495 2496 if (vp->v_type != VCHR && 2497 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2498 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2499 ("vnode with usecount and VI_OWEINACT set")); 2500 } else { 2501 VI_LOCK(vp); 2502 v_incr_usecount_locked(vp); 2503 VI_UNLOCK(vp); 2504 } 2505 } 2506 2507 /* 2508 * Increment si_usecount of the associated device, if any. 2509 */ 2510 static void 2511 v_incr_devcount(struct vnode *vp) 2512 { 2513 2514 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2515 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2516 dev_lock(); 2517 vp->v_rdev->si_usecount++; 2518 dev_unlock(); 2519 } 2520 } 2521 2522 /* 2523 * Decrement si_usecount of the associated device, if any. 2524 */ 2525 static void 2526 v_decr_devcount(struct vnode *vp) 2527 { 2528 2529 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2530 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2531 dev_lock(); 2532 vp->v_rdev->si_usecount--; 2533 dev_unlock(); 2534 } 2535 } 2536 2537 /* 2538 * Grab a particular vnode from the free list, increment its 2539 * reference count and lock it. VI_DOOMED is set if the vnode 2540 * is being destroyed. 
Only callers who specify LK_RETRY will 2541 * see doomed vnodes. If inactive processing was delayed in 2542 * vput try to do it here. 2543 * 2544 * Notes on lockless counter manipulation: 2545 * _vhold, vputx and other routines make various decisions based 2546 * on either holdcnt or usecount being 0. As long as either counter 2547 * is not transitioning 0->1 nor 1->0, the manipulation can be done 2548 * with atomic operations. Otherwise the interlock is taken covering 2549 * both the atomic and additional actions. 2550 */ 2551 int 2552 vget(struct vnode *vp, int flags, struct thread *td) 2553 { 2554 int error, oweinact; 2555 2556 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2557 ("vget: invalid lock operation")); 2558 2559 if ((flags & LK_INTERLOCK) != 0) 2560 ASSERT_VI_LOCKED(vp, __func__); 2561 else 2562 ASSERT_VI_UNLOCKED(vp, __func__); 2563 if ((flags & LK_VNHELD) != 0) 2564 VNASSERT((vp->v_holdcnt > 0), vp, 2565 ("vget: LK_VNHELD passed but vnode not held")); 2566 2567 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2568 2569 if ((flags & LK_VNHELD) == 0) 2570 _vhold(vp, (flags & LK_INTERLOCK) != 0); 2571 2572 if ((error = vn_lock(vp, flags)) != 0) { 2573 vdrop(vp); 2574 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2575 vp); 2576 return (error); 2577 } 2578 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2579 panic("vget: vn_lock failed to return ENOENT\n"); 2580 /* 2581 * We don't guarantee that any particular close will 2582 * trigger inactive processing so just make a best effort 2583 * here at preventing a reference to a removed file. If 2584 * we don't succeed no harm is done. 2585 * 2586 * Upgrade our holdcnt to a usecount. 2587 */ 2588 if (vp->v_type == VCHR || 2589 !refcount_acquire_if_not_zero(&vp->v_usecount)) { 2590 VI_LOCK(vp); 2591 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2592 oweinact = 0; 2593 } else { 2594 oweinact = 1; 2595 vp->v_iflag &= ~VI_OWEINACT; 2596 } 2597 refcount_acquire(&vp->v_usecount); 2598 v_incr_devcount(vp); 2599 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2600 (flags & LK_NOWAIT) == 0) 2601 vinactive(vp, td); 2602 VI_UNLOCK(vp); 2603 } 2604 return (0); 2605 } 2606 2607 /* 2608 * Increase the reference (use) and hold count of a vnode. 2609 * This will also remove the vnode from the free list if it is presently free. 2610 */ 2611 void 2612 vref(struct vnode *vp) 2613 { 2614 2615 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2616 _vhold(vp, false); 2617 v_incr_usecount(vp); 2618 } 2619 2620 void 2621 vrefl(struct vnode *vp) 2622 { 2623 2624 ASSERT_VI_LOCKED(vp, __func__); 2625 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2626 _vhold(vp, true); 2627 v_incr_usecount_locked(vp); 2628 } 2629 2630 void 2631 vrefact(struct vnode *vp) 2632 { 2633 2634 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2635 if (__predict_false(vp->v_type == VCHR)) { 2636 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2637 ("%s: wrong ref counts", __func__)); 2638 vref(vp); 2639 return; 2640 } 2641 #ifdef INVARIANTS 2642 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 2643 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2644 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2645 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2646 #else 2647 refcount_acquire(&vp->v_holdcnt); 2648 refcount_acquire(&vp->v_usecount); 2649 #endif 2650 } 2651 2652 /* 2653 * Return reference count of a vnode. 
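* (v_usecount is read without taking the interlock, so the value is only a snapshot.)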
2654 * 2655 * The results of this call are only guaranteed when some mechanism is used to 2656 * stop other processes from gaining references to the vnode. This may be the 2657 * case if the caller holds the only reference. This is also useful when stale 2658 * data is acceptable as race conditions may be accounted for by some other 2659 * means. 2660 */ 2661 int 2662 vrefcnt(struct vnode *vp) 2663 { 2664 2665 return (vp->v_usecount); 2666 } 2667 2668 #define VPUTX_VRELE 1 2669 #define VPUTX_VPUT 2 2670 #define VPUTX_VUNREF 3 2671 2672 /* 2673 * Decrement the use and hold counts for a vnode. 2674 * 2675 * See an explanation near vget() as to why atomic operation is safe. 2676 */ 2677 static void 2678 vputx(struct vnode *vp, int func) 2679 { 2680 int error; 2681 2682 KASSERT(vp != NULL, ("vputx: null vp")); 2683 if (func == VPUTX_VUNREF) 2684 ASSERT_VOP_LOCKED(vp, "vunref"); 2685 else if (func == VPUTX_VPUT) 2686 ASSERT_VOP_LOCKED(vp, "vput"); 2687 else 2688 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2689 ASSERT_VI_UNLOCKED(vp, __func__); 2690 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2691 2692 if (vp->v_type != VCHR && 2693 refcount_release_if_not_last(&vp->v_usecount)) { 2694 if (func == VPUTX_VPUT) 2695 VOP_UNLOCK(vp, 0); 2696 vdrop(vp); 2697 return; 2698 } 2699 2700 VI_LOCK(vp); 2701 2702 /* 2703 * We want to hold the vnode until the inactive finishes to 2704 * prevent vgone() races. We drop the use count here and the 2705 * hold count below when we're done. 2706 */ 2707 if (!refcount_release(&vp->v_usecount) || 2708 (vp->v_iflag & VI_DOINGINACT)) { 2709 if (func == VPUTX_VPUT) 2710 VOP_UNLOCK(vp, 0); 2711 v_decr_devcount(vp); 2712 vdropl(vp); 2713 return; 2714 } 2715 2716 v_decr_devcount(vp); 2717 2718 error = 0; 2719 2720 if (vp->v_usecount != 0) { 2721 vn_printf(vp, "vputx: usecount not zero for vnode "); 2722 panic("vputx: usecount not zero"); 2723 } 2724 2725 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2726 2727 /* 2728 * We must call VOP_INACTIVE with the node locked. Mark 2729 * as VI_DOINGINACT to avoid recursion. 2730 */ 2731 vp->v_iflag |= VI_OWEINACT; 2732 switch (func) { 2733 case VPUTX_VRELE: 2734 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2735 VI_LOCK(vp); 2736 break; 2737 case VPUTX_VPUT: 2738 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2739 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2740 LK_NOWAIT); 2741 VI_LOCK(vp); 2742 } 2743 break; 2744 case VPUTX_VUNREF: 2745 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2746 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2747 VI_LOCK(vp); 2748 } 2749 break; 2750 } 2751 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 2752 ("vnode with usecount and VI_OWEINACT set")); 2753 if (error == 0) { 2754 if (vp->v_iflag & VI_OWEINACT) 2755 vinactive(vp, curthread); 2756 if (func != VPUTX_VUNREF) 2757 VOP_UNLOCK(vp, 0); 2758 } 2759 vdropl(vp); 2760 } 2761 2762 /* 2763 * Vnode put/release. 2764 * If count drops to zero, call inactive routine and return to freelist. 2765 */ 2766 void 2767 vrele(struct vnode *vp) 2768 { 2769 2770 vputx(vp, VPUTX_VRELE); 2771 } 2772 2773 /* 2774 * Release an already locked vnode. This gives the same effect as 2775 * unlock+vrele(), but takes less time and avoids releasing and 2776 * re-acquiring the lock (as vrele() acquires the lock internally.) 2777 */ 2778 void 2779 vput(struct vnode *vp) 2780 { 2781 2782 vputx(vp, VPUTX_VPUT); 2783 } 2784 2785 /* 2786 * Release an exclusively locked vnode. Do not unlock the vnode lock.
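* The caller keeps the vnode lock across the call; only the use and hold references are dropped.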
2787 */ 2788 void 2789 vunref(struct vnode *vp) 2790 { 2791 2792 vputx(vp, VPUTX_VUNREF); 2793 } 2794 2795 /* 2796 * Increase the hold count and activate if this is the first reference. 2797 */ 2798 void 2799 _vhold(struct vnode *vp, bool locked) 2800 { 2801 struct mount *mp; 2802 2803 if (locked) 2804 ASSERT_VI_LOCKED(vp, __func__); 2805 else 2806 ASSERT_VI_UNLOCKED(vp, __func__); 2807 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2808 if (!locked) { 2809 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 2810 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2811 ("_vhold: vnode with holdcnt is free")); 2812 return; 2813 } 2814 VI_LOCK(vp); 2815 } 2816 if ((vp->v_iflag & VI_FREE) == 0) { 2817 refcount_acquire(&vp->v_holdcnt); 2818 if (!locked) 2819 VI_UNLOCK(vp); 2820 return; 2821 } 2822 VNASSERT(vp->v_holdcnt == 0, vp, 2823 ("%s: wrong hold count", __func__)); 2824 VNASSERT(vp->v_op != NULL, vp, 2825 ("%s: vnode already reclaimed.", __func__)); 2826 /* 2827 * Remove a vnode from the free list, mark it as in use, 2828 * and put it on the active list. 2829 */ 2830 VNASSERT(vp->v_mount != NULL, vp, 2831 ("_vhold: vnode not on per mount vnode list")); 2832 mp = vp->v_mount; 2833 mtx_lock(&mp->mnt_listmtx); 2834 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 2835 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 2836 mp->mnt_tmpfreevnodelistsize--; 2837 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 2838 } else { 2839 mtx_lock(&vnode_free_list_mtx); 2840 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2841 freevnodes--; 2842 mtx_unlock(&vnode_free_list_mtx); 2843 } 2844 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2845 ("Activating already active vnode")); 2846 vp->v_iflag &= ~VI_FREE; 2847 vp->v_iflag |= VI_ACTIVE; 2848 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2849 mp->mnt_activevnodelistsize++; 2850 mtx_unlock(&mp->mnt_listmtx); 2851 refcount_acquire(&vp->v_holdcnt); 2852 if (!locked) 2853 VI_UNLOCK(vp); 2854 } 2855 2856 /* 2857 * Drop the hold count of the vnode. If this is the last reference to 2858 * the vnode we place it on the free list unless it has been vgone'd 2859 * (marked VI_DOOMED) in which case we will free it. 2860 * 2861 * Because the vnode vm object keeps a hold reference on the vnode if 2862 * there is at least one resident non-cached page, the vnode cannot 2863 * leave the active list without the page cleanup done. 2864 */ 2865 void 2866 _vdrop(struct vnode *vp, bool locked) 2867 { 2868 struct bufobj *bo; 2869 struct mount *mp; 2870 int active; 2871 2872 if (locked) 2873 ASSERT_VI_LOCKED(vp, __func__); 2874 else 2875 ASSERT_VI_UNLOCKED(vp, __func__); 2876 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2877 if ((int)vp->v_holdcnt <= 0) 2878 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2879 if (!locked) { 2880 if (refcount_release_if_not_last(&vp->v_holdcnt)) 2881 return; 2882 VI_LOCK(vp); 2883 } 2884 if (refcount_release(&vp->v_holdcnt) == 0) { 2885 VI_UNLOCK(vp); 2886 return; 2887 } 2888 if ((vp->v_iflag & VI_DOOMED) == 0) { 2889 /* 2890 * Mark a vnode as free: remove it from its active list 2891 * and put it up for recycling on the freelist. 
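* The vnode first lands on the per-mount mnt_tmpfreevnodelist; once that batch reaches mnt_free_list_batch entries it is returned to the global free list in one pass.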
2892 */ 2893 VNASSERT(vp->v_op != NULL, vp, 2894 ("vdropl: vnode already reclaimed.")); 2895 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2896 ("vnode already free")); 2897 VNASSERT(vp->v_holdcnt == 0, vp, 2898 ("vdropl: freeing when we shouldn't")); 2899 active = vp->v_iflag & VI_ACTIVE; 2900 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2901 vp->v_iflag &= ~VI_ACTIVE; 2902 mp = vp->v_mount; 2903 if (mp != NULL) { 2904 mtx_lock(&mp->mnt_listmtx); 2905 if (active) { 2906 TAILQ_REMOVE(&mp->mnt_activevnodelist, 2907 vp, v_actfreelist); 2908 mp->mnt_activevnodelistsize--; 2909 } 2910 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, 2911 vp, v_actfreelist); 2912 mp->mnt_tmpfreevnodelistsize++; 2913 vp->v_iflag |= VI_FREE; 2914 vp->v_mflag |= VMP_TMPMNTFREELIST; 2915 VI_UNLOCK(vp); 2916 if (mp->mnt_tmpfreevnodelistsize >= 2917 mnt_free_list_batch) 2918 vnlru_return_batch_locked(mp); 2919 mtx_unlock(&mp->mnt_listmtx); 2920 } else { 2921 VNASSERT(active == 0, vp, 2922 ("vdropl: active vnode not on per mount " 2923 "vnode list")); 2924 mtx_lock(&vnode_free_list_mtx); 2925 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 2926 v_actfreelist); 2927 freevnodes++; 2928 vp->v_iflag |= VI_FREE; 2929 VI_UNLOCK(vp); 2930 mtx_unlock(&vnode_free_list_mtx); 2931 } 2932 } else { 2933 VI_UNLOCK(vp); 2934 counter_u64_add(free_owe_inact, 1); 2935 } 2936 return; 2937 } 2938 /* 2939 * The vnode has been marked for destruction, so free it. 2940 * 2941 * The vnode will be returned to the zone where it will 2942 * normally remain until it is needed for another vnode. We 2943 * need to cleanup (or verify that the cleanup has already 2944 * been done) any residual data left from its current use 2945 * so as not to contaminate the freshly allocated vnode. 2946 */ 2947 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2948 atomic_subtract_long(&numvnodes, 1); 2949 bo = &vp->v_bufobj; 2950 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2951 ("cleaned vnode still on the free list.")); 2952 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2953 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2954 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2955 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2956 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2957 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2958 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2959 ("clean blk trie not empty")); 2960 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2961 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2962 ("dirty blk trie not empty")); 2963 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2964 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2965 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2966 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2967 ("Dangling rangelock waiters")); 2968 VI_UNLOCK(vp); 2969 #ifdef MAC 2970 mac_vnode_destroy(vp); 2971 #endif 2972 if (vp->v_pollinfo != NULL) { 2973 destroy_vpollinfo(vp->v_pollinfo); 2974 vp->v_pollinfo = NULL; 2975 } 2976 #ifdef INVARIANTS 2977 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. 
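Clearing v_op here lets those checks catch a vnode that is used again after it has been returned to the zone.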
*/ 2978 vp->v_op = NULL; 2979 #endif 2980 vp->v_mountedhere = NULL; 2981 vp->v_unpcb = NULL; 2982 vp->v_rdev = NULL; 2983 vp->v_fifoinfo = NULL; 2984 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 2985 vp->v_iflag = 0; 2986 vp->v_vflag = 0; 2987 bo->bo_flag = 0; 2988 uma_zfree(vnode_zone, vp); 2989 } 2990 2991 /* 2992 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2993 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2994 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2995 * failed lock upgrade. 2996 */ 2997 void 2998 vinactive(struct vnode *vp, struct thread *td) 2999 { 3000 struct vm_object *obj; 3001 3002 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3003 ASSERT_VI_LOCKED(vp, "vinactive"); 3004 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3005 ("vinactive: recursed on VI_DOINGINACT")); 3006 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3007 vp->v_iflag |= VI_DOINGINACT; 3008 vp->v_iflag &= ~VI_OWEINACT; 3009 VI_UNLOCK(vp); 3010 /* 3011 * Before moving off the active list, we must be sure that any 3012 * modified pages are converted into the vnode's dirty 3013 * buffers, since these will no longer be checked once the 3014 * vnode is on the inactive list. 3015 * 3016 * The write-out of the dirty pages is asynchronous. At the 3017 * point that VOP_INACTIVE() is called, there could still be 3018 * pending I/O and dirty pages in the object. 3019 */ 3020 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3021 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 3022 VM_OBJECT_WLOCK(obj); 3023 vm_object_page_clean(obj, 0, 0, 0); 3024 VM_OBJECT_WUNLOCK(obj); 3025 } 3026 VOP_INACTIVE(vp, td); 3027 VI_LOCK(vp); 3028 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3029 ("vinactive: lost VI_DOINGINACT")); 3030 vp->v_iflag &= ~VI_DOINGINACT; 3031 } 3032 3033 /* 3034 * Remove any vnodes in the vnode table belonging to mount point mp. 3035 * 3036 * If FORCECLOSE is not specified, there should not be any active ones, 3037 * return error if any are found (nb: this is a user error, not a 3038 * system error). If FORCECLOSE is specified, detach any active vnodes 3039 * that are found. 3040 * 3041 * If WRITECLOSE is set, only flush out regular file vnodes open for 3042 * writing. 3043 * 3044 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3045 * 3046 * `rootrefs' specifies the base reference count for the root vnode 3047 * of this filesystem. The root vnode is considered busy if its 3048 * v_usecount exceeds this value. On a successful return, vflush(, td) 3049 * will call vrele() on the root vnode exactly rootrefs times. 3050 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3051 * be zero. 3052 */ 3053 #ifdef DIAGNOSTIC 3054 static int busyprt = 0; /* print out busy vnodes */ 3055 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3056 #endif 3057 3058 int 3059 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3060 { 3061 struct vnode *vp, *mvp, *rootvp = NULL; 3062 struct vattr vattr; 3063 int busy = 0, error; 3064 3065 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3066 rootrefs, flags); 3067 if (rootrefs > 0) { 3068 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3069 ("vflush: bad args")); 3070 /* 3071 * Get the filesystem root vnode. We can vput() it 3072 * immediately, since with rootrefs > 0, it won't go away. 
3073 */ 3074 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3075 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3076 __func__, error); 3077 return (error); 3078 } 3079 vput(rootvp); 3080 } 3081 loop: 3082 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3083 vholdl(vp); 3084 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3085 if (error) { 3086 vdrop(vp); 3087 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3088 goto loop; 3089 } 3090 /* 3091 * Skip over vnodes marked VV_SYSTEM. 3092 */ 3093 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3094 VOP_UNLOCK(vp, 0); 3095 vdrop(vp); 3096 continue; 3097 } 3098 /* 3099 * If WRITECLOSE is set, flush out unlinked but still open 3100 * files (even if open only for reading) and regular file 3101 * vnodes open for writing. 3102 */ 3103 if (flags & WRITECLOSE) { 3104 if (vp->v_object != NULL) { 3105 VM_OBJECT_WLOCK(vp->v_object); 3106 vm_object_page_clean(vp->v_object, 0, 0, 0); 3107 VM_OBJECT_WUNLOCK(vp->v_object); 3108 } 3109 error = VOP_FSYNC(vp, MNT_WAIT, td); 3110 if (error != 0) { 3111 VOP_UNLOCK(vp, 0); 3112 vdrop(vp); 3113 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3114 return (error); 3115 } 3116 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3117 VI_LOCK(vp); 3118 3119 if ((vp->v_type == VNON || 3120 (error == 0 && vattr.va_nlink > 0)) && 3121 (vp->v_writecount == 0 || vp->v_type != VREG)) { 3122 VOP_UNLOCK(vp, 0); 3123 vdropl(vp); 3124 continue; 3125 } 3126 } else 3127 VI_LOCK(vp); 3128 /* 3129 * With v_usecount == 0, all we need to do is clear out the 3130 * vnode data structures and we are done. 3131 * 3132 * If FORCECLOSE is set, forcibly close the vnode. 3133 */ 3134 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3135 vgonel(vp); 3136 } else { 3137 busy++; 3138 #ifdef DIAGNOSTIC 3139 if (busyprt) 3140 vn_printf(vp, "vflush: busy vnode "); 3141 #endif 3142 } 3143 VOP_UNLOCK(vp, 0); 3144 vdropl(vp); 3145 } 3146 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3147 /* 3148 * If just the root vnode is busy, and if its refcount 3149 * is equal to `rootrefs', then go ahead and kill it. 3150 */ 3151 VI_LOCK(rootvp); 3152 KASSERT(busy > 0, ("vflush: not busy")); 3153 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3154 ("vflush: usecount %d < rootrefs %d", 3155 rootvp->v_usecount, rootrefs)); 3156 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3157 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3158 vgone(rootvp); 3159 VOP_UNLOCK(rootvp, 0); 3160 busy = 0; 3161 } else 3162 VI_UNLOCK(rootvp); 3163 } 3164 if (busy) { 3165 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3166 busy); 3167 return (EBUSY); 3168 } 3169 for (; rootrefs > 0; rootrefs--) 3170 vrele(rootvp); 3171 return (0); 3172 } 3173 3174 /* 3175 * Recycle an unused vnode to the front of the free list. 3176 */ 3177 int 3178 vrecycle(struct vnode *vp) 3179 { 3180 int recycled; 3181 3182 VI_LOCK(vp); 3183 recycled = vrecyclel(vp); 3184 VI_UNLOCK(vp); 3185 return (recycled); 3186 } 3187 3188 /* 3189 * vrecycle, with the vp interlock held. 3190 */ 3191 int 3192 vrecyclel(struct vnode *vp) 3193 { 3194 int recycled; 3195 3196 ASSERT_VOP_ELOCKED(vp, __func__); 3197 ASSERT_VI_LOCKED(vp, __func__); 3198 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3199 recycled = 0; 3200 if (vp->v_usecount == 0) { 3201 recycled = 1; 3202 vgonel(vp); 3203 } 3204 return (recycled); 3205 } 3206 3207 /* 3208 * Eliminate all activity associated with a vnode 3209 * in preparation for reuse.
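* vgone() takes the vnode interlock and hands off to vgonel(); the caller must already hold the vnode lock exclusively.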
3210 */ 3211 void 3212 vgone(struct vnode *vp) 3213 { 3214 VI_LOCK(vp); 3215 vgonel(vp); 3216 VI_UNLOCK(vp); 3217 } 3218 3219 static void 3220 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3221 struct vnode *lowervp __unused) 3222 { 3223 } 3224 3225 /* 3226 * Notify upper mounts about reclaimed or unlinked vnode. 3227 */ 3228 void 3229 vfs_notify_upper(struct vnode *vp, int event) 3230 { 3231 static struct vfsops vgonel_vfsops = { 3232 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3233 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3234 }; 3235 struct mount *mp, *ump, *mmp; 3236 3237 mp = vp->v_mount; 3238 if (mp == NULL) 3239 return; 3240 3241 MNT_ILOCK(mp); 3242 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3243 goto unlock; 3244 MNT_IUNLOCK(mp); 3245 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3246 mmp->mnt_op = &vgonel_vfsops; 3247 mmp->mnt_kern_flag |= MNTK_MARKER; 3248 MNT_ILOCK(mp); 3249 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3250 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3251 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3252 ump = TAILQ_NEXT(ump, mnt_upper_link); 3253 continue; 3254 } 3255 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3256 MNT_IUNLOCK(mp); 3257 switch (event) { 3258 case VFS_NOTIFY_UPPER_RECLAIM: 3259 VFS_RECLAIM_LOWERVP(ump, vp); 3260 break; 3261 case VFS_NOTIFY_UPPER_UNLINK: 3262 VFS_UNLINK_LOWERVP(ump, vp); 3263 break; 3264 default: 3265 KASSERT(0, ("invalid event %d", event)); 3266 break; 3267 } 3268 MNT_ILOCK(mp); 3269 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3270 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3271 } 3272 free(mmp, M_TEMP); 3273 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3274 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3275 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3276 wakeup(&mp->mnt_uppers); 3277 } 3278 unlock: 3279 MNT_IUNLOCK(mp); 3280 } 3281 3282 /* 3283 * vgone, with the vp interlock held. 3284 */ 3285 static void 3286 vgonel(struct vnode *vp) 3287 { 3288 struct thread *td; 3289 int oweinact; 3290 int active; 3291 struct mount *mp; 3292 3293 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3294 ASSERT_VI_LOCKED(vp, "vgonel"); 3295 VNASSERT(vp->v_holdcnt, vp, 3296 ("vgonel: vp %p has no reference.", vp)); 3297 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3298 td = curthread; 3299 3300 /* 3301 * Don't vgonel if we're already doomed. 3302 */ 3303 if (vp->v_iflag & VI_DOOMED) 3304 return; 3305 vp->v_iflag |= VI_DOOMED; 3306 3307 /* 3308 * Check to see if the vnode is in use. If so, we have to call 3309 * VOP_CLOSE() and VOP_INACTIVE(). 3310 */ 3311 active = vp->v_usecount; 3312 oweinact = (vp->v_iflag & VI_OWEINACT); 3313 VI_UNLOCK(vp); 3314 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3315 3316 /* 3317 * If purging an active vnode, it must be closed and 3318 * deactivated before being reclaimed. 3319 */ 3320 if (active) 3321 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3322 if (oweinact || active) { 3323 VI_LOCK(vp); 3324 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3325 vinactive(vp, td); 3326 VI_UNLOCK(vp); 3327 } 3328 if (vp->v_type == VSOCK) 3329 vfs_unp_reclaim(vp); 3330 3331 /* 3332 * Clean out any buffers associated with the vnode. 3333 * If the flush fails, just toss the buffers. 
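* A secondary write reference is taken while dirty buffers remain so that filesystem suspension accounts for the writes issued here.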
3334 */ 3335 mp = NULL; 3336 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3337 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3338 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3339 while (vinvalbuf(vp, 0, 0, 0) != 0) 3340 ; 3341 } 3342 3343 BO_LOCK(&vp->v_bufobj); 3344 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3345 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3346 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3347 vp->v_bufobj.bo_clean.bv_cnt == 0, 3348 ("vp %p bufobj not invalidated", vp)); 3349 3350 /* 3351 * For VMIO bufobj, BO_DEAD is set in vm_object_terminate() 3352 * after the object's page queue is flushed. 3353 */ 3354 if (vp->v_bufobj.bo_object == NULL) 3355 vp->v_bufobj.bo_flag |= BO_DEAD; 3356 BO_UNLOCK(&vp->v_bufobj); 3357 3358 /* 3359 * Reclaim the vnode. 3360 */ 3361 if (VOP_RECLAIM(vp, td)) 3362 panic("vgone: cannot reclaim"); 3363 if (mp != NULL) 3364 vn_finished_secondary_write(mp); 3365 VNASSERT(vp->v_object == NULL, vp, 3366 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3367 /* 3368 * Clear the advisory locks and wake up waiting threads. 3369 */ 3370 (void)VOP_ADVLOCKPURGE(vp); 3371 vp->v_lockf = NULL; 3372 /* 3373 * Delete from old mount point vnode list. 3374 */ 3375 delmntque(vp); 3376 cache_purge(vp); 3377 /* 3378 * Done with purge, reset to the standard lock and invalidate 3379 * the vnode. 3380 */ 3381 VI_LOCK(vp); 3382 vp->v_vnlock = &vp->v_lock; 3383 vp->v_op = &dead_vnodeops; 3384 vp->v_tag = "none"; 3385 vp->v_type = VBAD; 3386 } 3387 3388 /* 3389 * Calculate the total number of references to a special device. 3390 */ 3391 int 3392 vcount(struct vnode *vp) 3393 { 3394 int count; 3395 3396 dev_lock(); 3397 count = vp->v_rdev->si_usecount; 3398 dev_unlock(); 3399 return (count); 3400 } 3401 3402 /* 3403 * Same as above, but using the struct cdev *as argument 3404 */ 3405 int 3406 count_dev(struct cdev *dev) 3407 { 3408 int count; 3409 3410 dev_lock(); 3411 count = dev->si_usecount; 3412 dev_unlock(); 3413 return(count); 3414 } 3415 3416 /* 3417 * Print out a description of a vnode. 3418 */ 3419 static char *typename[] = 3420 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3421 "VMARKER"}; 3422 3423 void 3424 vn_printf(struct vnode *vp, const char *fmt, ...) 
3425 { 3426 va_list ap; 3427 char buf[256], buf2[16]; 3428 u_long flags; 3429 3430 va_start(ap, fmt); 3431 vprintf(fmt, ap); 3432 va_end(ap); 3433 printf("%p: ", (void *)vp); 3434 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3435 printf(" usecount %d, writecount %d, refcount %d", 3436 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3437 switch (vp->v_type) { 3438 case VDIR: 3439 printf(" mountedhere %p\n", vp->v_mountedhere); 3440 break; 3441 case VCHR: 3442 printf(" rdev %p\n", vp->v_rdev); 3443 break; 3444 case VSOCK: 3445 printf(" socket %p\n", vp->v_unpcb); 3446 break; 3447 case VFIFO: 3448 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3449 break; 3450 default: 3451 printf("\n"); 3452 break; 3453 } 3454 buf[0] = '\0'; 3455 buf[1] = '\0'; 3456 if (vp->v_vflag & VV_ROOT) 3457 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3458 if (vp->v_vflag & VV_ISTTY) 3459 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3460 if (vp->v_vflag & VV_NOSYNC) 3461 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3462 if (vp->v_vflag & VV_ETERNALDEV) 3463 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3464 if (vp->v_vflag & VV_CACHEDLABEL) 3465 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3466 if (vp->v_vflag & VV_TEXT) 3467 strlcat(buf, "|VV_TEXT", sizeof(buf)); 3468 if (vp->v_vflag & VV_COPYONWRITE) 3469 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3470 if (vp->v_vflag & VV_SYSTEM) 3471 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3472 if (vp->v_vflag & VV_PROCDEP) 3473 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3474 if (vp->v_vflag & VV_NOKNOTE) 3475 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3476 if (vp->v_vflag & VV_DELETED) 3477 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3478 if (vp->v_vflag & VV_MD) 3479 strlcat(buf, "|VV_MD", sizeof(buf)); 3480 if (vp->v_vflag & VV_FORCEINSMQ) 3481 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3482 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3483 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3484 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3485 if (flags != 0) { 3486 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3487 strlcat(buf, buf2, sizeof(buf)); 3488 } 3489 if (vp->v_iflag & VI_MOUNT) 3490 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3491 if (vp->v_iflag & VI_DOOMED) 3492 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3493 if (vp->v_iflag & VI_FREE) 3494 strlcat(buf, "|VI_FREE", sizeof(buf)); 3495 if (vp->v_iflag & VI_ACTIVE) 3496 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3497 if (vp->v_iflag & VI_DOINGINACT) 3498 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3499 if (vp->v_iflag & VI_OWEINACT) 3500 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3501 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3502 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 3503 if (flags != 0) { 3504 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3505 strlcat(buf, buf2, sizeof(buf)); 3506 } 3507 printf(" flags (%s)\n", buf + 1); 3508 if (mtx_owned(VI_MTX(vp))) 3509 printf(" VI_LOCKed"); 3510 if (vp->v_object != NULL) 3511 printf(" v_object %p ref %d pages %d " 3512 "cleanbuf %d dirtybuf %d\n", 3513 vp->v_object, vp->v_object->ref_count, 3514 vp->v_object->resident_page_count, 3515 vp->v_bufobj.bo_clean.bv_cnt, 3516 vp->v_bufobj.bo_dirty.bv_cnt); 3517 printf(" "); 3518 lockmgr_printinfo(vp->v_vnlock); 3519 if (vp->v_data != NULL) 3520 VOP_PRINT(vp); 3521 } 3522 3523 #ifdef DDB 3524 /* 3525 * List all of the locked vnodes in the system. 3526 * Called when debugging the kernel. 
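* Invoked from the ddb prompt as "show lockedvnods".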
3527 */ 3528 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3529 { 3530 struct mount *mp; 3531 struct vnode *vp; 3532 3533 /* 3534 * Note: because this is DDB, we can't obey the locking semantics 3535 * for these structures, which means we could catch an inconsistent 3536 * state and dereference a nasty pointer. Not much to be done 3537 * about that. 3538 */ 3539 db_printf("Locked vnodes\n"); 3540 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3541 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3542 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3543 vn_printf(vp, "vnode "); 3544 } 3545 } 3546 } 3547 3548 /* 3549 * Show details about the given vnode. 3550 */ 3551 DB_SHOW_COMMAND(vnode, db_show_vnode) 3552 { 3553 struct vnode *vp; 3554 3555 if (!have_addr) 3556 return; 3557 vp = (struct vnode *)addr; 3558 vn_printf(vp, "vnode "); 3559 } 3560 3561 /* 3562 * Show details about the given mount point. 3563 */ 3564 DB_SHOW_COMMAND(mount, db_show_mount) 3565 { 3566 struct mount *mp; 3567 struct vfsopt *opt; 3568 struct statfs *sp; 3569 struct vnode *vp; 3570 char buf[512]; 3571 uint64_t mflags; 3572 u_int flags; 3573 3574 if (!have_addr) { 3575 /* No address given, print short info about all mount points. */ 3576 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3577 db_printf("%p %s on %s (%s)\n", mp, 3578 mp->mnt_stat.f_mntfromname, 3579 mp->mnt_stat.f_mntonname, 3580 mp->mnt_stat.f_fstypename); 3581 if (db_pager_quit) 3582 break; 3583 } 3584 db_printf("\nMore info: show mount <addr>\n"); 3585 return; 3586 } 3587 3588 mp = (struct mount *)addr; 3589 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3590 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3591 3592 buf[0] = '\0'; 3593 mflags = mp->mnt_flag; 3594 #define MNT_FLAG(flag) do { \ 3595 if (mflags & (flag)) { \ 3596 if (buf[0] != '\0') \ 3597 strlcat(buf, ", ", sizeof(buf)); \ 3598 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3599 mflags &= ~(flag); \ 3600 } \ 3601 } while (0) 3602 MNT_FLAG(MNT_RDONLY); 3603 MNT_FLAG(MNT_SYNCHRONOUS); 3604 MNT_FLAG(MNT_NOEXEC); 3605 MNT_FLAG(MNT_NOSUID); 3606 MNT_FLAG(MNT_NFS4ACLS); 3607 MNT_FLAG(MNT_UNION); 3608 MNT_FLAG(MNT_ASYNC); 3609 MNT_FLAG(MNT_SUIDDIR); 3610 MNT_FLAG(MNT_SOFTDEP); 3611 MNT_FLAG(MNT_NOSYMFOLLOW); 3612 MNT_FLAG(MNT_GJOURNAL); 3613 MNT_FLAG(MNT_MULTILABEL); 3614 MNT_FLAG(MNT_ACLS); 3615 MNT_FLAG(MNT_NOATIME); 3616 MNT_FLAG(MNT_NOCLUSTERR); 3617 MNT_FLAG(MNT_NOCLUSTERW); 3618 MNT_FLAG(MNT_SUJ); 3619 MNT_FLAG(MNT_EXRDONLY); 3620 MNT_FLAG(MNT_EXPORTED); 3621 MNT_FLAG(MNT_DEFEXPORTED); 3622 MNT_FLAG(MNT_EXPORTANON); 3623 MNT_FLAG(MNT_EXKERB); 3624 MNT_FLAG(MNT_EXPUBLIC); 3625 MNT_FLAG(MNT_LOCAL); 3626 MNT_FLAG(MNT_QUOTA); 3627 MNT_FLAG(MNT_ROOTFS); 3628 MNT_FLAG(MNT_USER); 3629 MNT_FLAG(MNT_IGNORE); 3630 MNT_FLAG(MNT_UPDATE); 3631 MNT_FLAG(MNT_DELEXPORT); 3632 MNT_FLAG(MNT_RELOAD); 3633 MNT_FLAG(MNT_FORCE); 3634 MNT_FLAG(MNT_SNAPSHOT); 3635 MNT_FLAG(MNT_BYFSID); 3636 #undef MNT_FLAG 3637 if (mflags != 0) { 3638 if (buf[0] != '\0') 3639 strlcat(buf, ", ", sizeof(buf)); 3640 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3641 "0x%016jx", mflags); 3642 } 3643 db_printf(" mnt_flag = %s\n", buf); 3644 3645 buf[0] = '\0'; 3646 flags = mp->mnt_kern_flag; 3647 #define MNT_KERN_FLAG(flag) do { \ 3648 if (flags & (flag)) { \ 3649 if (buf[0] != '\0') \ 3650 strlcat(buf, ", ", sizeof(buf)); \ 3651 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3652 flags &= ~(flag); \ 3653 } \ 3654 } while (0) 3655 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3656 MNT_KERN_FLAG(MNTK_ASYNC); 3657 
MNT_KERN_FLAG(MNTK_SOFTDEP); 3658 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3659 MNT_KERN_FLAG(MNTK_DRAINING); 3660 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3661 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3662 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3663 MNT_KERN_FLAG(MNTK_NO_IOPF); 3664 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3665 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3666 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3667 MNT_KERN_FLAG(MNTK_MARKER); 3668 MNT_KERN_FLAG(MNTK_USES_BCACHE); 3669 MNT_KERN_FLAG(MNTK_NOASYNC); 3670 MNT_KERN_FLAG(MNTK_UNMOUNT); 3671 MNT_KERN_FLAG(MNTK_MWAIT); 3672 MNT_KERN_FLAG(MNTK_SUSPEND); 3673 MNT_KERN_FLAG(MNTK_SUSPEND2); 3674 MNT_KERN_FLAG(MNTK_SUSPENDED); 3675 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3676 MNT_KERN_FLAG(MNTK_NOKNOTE); 3677 #undef MNT_KERN_FLAG 3678 if (flags != 0) { 3679 if (buf[0] != '\0') 3680 strlcat(buf, ", ", sizeof(buf)); 3681 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3682 "0x%08x", flags); 3683 } 3684 db_printf(" mnt_kern_flag = %s\n", buf); 3685 3686 db_printf(" mnt_opt = "); 3687 opt = TAILQ_FIRST(mp->mnt_opt); 3688 if (opt != NULL) { 3689 db_printf("%s", opt->name); 3690 opt = TAILQ_NEXT(opt, link); 3691 while (opt != NULL) { 3692 db_printf(", %s", opt->name); 3693 opt = TAILQ_NEXT(opt, link); 3694 } 3695 } 3696 db_printf("\n"); 3697 3698 sp = &mp->mnt_stat; 3699 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3700 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3701 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3702 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3703 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3704 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3705 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3706 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3707 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3708 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3709 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3710 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3711 3712 db_printf(" mnt_cred = { uid=%u ruid=%u", 3713 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3714 if (jailed(mp->mnt_cred)) 3715 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3716 db_printf(" }\n"); 3717 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3718 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3719 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3720 db_printf(" mnt_activevnodelistsize = %d\n", 3721 mp->mnt_activevnodelistsize); 3722 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3723 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3724 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3725 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3726 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); 3727 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3728 db_printf(" mnt_secondary_accwrites = %d\n", 3729 mp->mnt_secondary_accwrites); 3730 db_printf(" mnt_gjprovider = %s\n", 3731 mp->mnt_gjprovider != NULL ? 
mp->mnt_gjprovider : "NULL"); 3732 3733 db_printf("\n\nList of active vnodes\n"); 3734 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3735 if (vp->v_type != VMARKER) { 3736 vn_printf(vp, "vnode "); 3737 if (db_pager_quit) 3738 break; 3739 } 3740 } 3741 db_printf("\n\nList of inactive vnodes\n"); 3742 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3743 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3744 vn_printf(vp, "vnode "); 3745 if (db_pager_quit) 3746 break; 3747 } 3748 } 3749 } 3750 #endif /* DDB */ 3751 3752 /* 3753 * Fill in a struct xvfsconf based on a struct vfsconf. 3754 */ 3755 static int 3756 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3757 { 3758 struct xvfsconf xvfsp; 3759 3760 bzero(&xvfsp, sizeof(xvfsp)); 3761 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3762 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3763 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3764 xvfsp.vfc_flags = vfsp->vfc_flags; 3765 /* 3766 * These are unused in userland, we keep them 3767 * to not break binary compatibility. 3768 */ 3769 xvfsp.vfc_vfsops = NULL; 3770 xvfsp.vfc_next = NULL; 3771 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3772 } 3773 3774 #ifdef COMPAT_FREEBSD32 3775 struct xvfsconf32 { 3776 uint32_t vfc_vfsops; 3777 char vfc_name[MFSNAMELEN]; 3778 int32_t vfc_typenum; 3779 int32_t vfc_refcount; 3780 int32_t vfc_flags; 3781 uint32_t vfc_next; 3782 }; 3783 3784 static int 3785 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3786 { 3787 struct xvfsconf32 xvfsp; 3788 3789 bzero(&xvfsp, sizeof(xvfsp)); 3790 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3791 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3792 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3793 xvfsp.vfc_flags = vfsp->vfc_flags; 3794 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3795 } 3796 #endif 3797 3798 /* 3799 * Top level filesystem related information gathering. 3800 */ 3801 static int 3802 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3803 { 3804 struct vfsconf *vfsp; 3805 int error; 3806 3807 error = 0; 3808 vfsconf_slock(); 3809 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3810 #ifdef COMPAT_FREEBSD32 3811 if (req->flags & SCTL_MASK32) 3812 error = vfsconf2x32(req, vfsp); 3813 else 3814 #endif 3815 error = vfsconf2x(req, vfsp); 3816 if (error) 3817 break; 3818 } 3819 vfsconf_sunlock(); 3820 return (error); 3821 } 3822 3823 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3824 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3825 "S,xvfsconf", "List of all configured filesystems"); 3826 3827 #ifndef BURN_BRIDGES 3828 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3829 3830 static int 3831 vfs_sysctl(SYSCTL_HANDLER_ARGS) 3832 { 3833 int *name = (int *)arg1 - 1; /* XXX */ 3834 u_int namelen = arg2 + 1; /* XXX */ 3835 struct vfsconf *vfsp; 3836 3837 log(LOG_WARNING, "userland calling deprecated sysctl, " 3838 "please rebuild world\n"); 3839 3840 #if 1 || defined(COMPAT_PRELITE2) 3841 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
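A request that arrives with a single name component can only be the old VFS_VFSCONF query, so it is handed to sysctl_ovfs_conf().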
*/ 3842 if (namelen == 1) 3843 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3844 #endif 3845 3846 switch (name[1]) { 3847 case VFS_MAXTYPENUM: 3848 if (namelen != 2) 3849 return (ENOTDIR); 3850 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3851 case VFS_CONF: 3852 if (namelen != 3) 3853 return (ENOTDIR); /* overloaded */ 3854 vfsconf_slock(); 3855 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3856 if (vfsp->vfc_typenum == name[2]) 3857 break; 3858 } 3859 vfsconf_sunlock(); 3860 if (vfsp == NULL) 3861 return (EOPNOTSUPP); 3862 #ifdef COMPAT_FREEBSD32 3863 if (req->flags & SCTL_MASK32) 3864 return (vfsconf2x32(req, vfsp)); 3865 else 3866 #endif 3867 return (vfsconf2x(req, vfsp)); 3868 } 3869 return (EOPNOTSUPP); 3870 } 3871 3872 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3873 CTLFLAG_MPSAFE, vfs_sysctl, 3874 "Generic filesystem"); 3875 3876 #if 1 || defined(COMPAT_PRELITE2) 3877 3878 static int 3879 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3880 { 3881 int error; 3882 struct vfsconf *vfsp; 3883 struct ovfsconf ovfs; 3884 3885 vfsconf_slock(); 3886 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3887 bzero(&ovfs, sizeof(ovfs)); 3888 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3889 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3890 ovfs.vfc_index = vfsp->vfc_typenum; 3891 ovfs.vfc_refcount = vfsp->vfc_refcount; 3892 ovfs.vfc_flags = vfsp->vfc_flags; 3893 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3894 if (error != 0) { 3895 vfsconf_sunlock(); 3896 return (error); 3897 } 3898 } 3899 vfsconf_sunlock(); 3900 return (0); 3901 } 3902 3903 #endif /* 1 || COMPAT_PRELITE2 */ 3904 #endif /* !BURN_BRIDGES */ 3905 3906 #define KINFO_VNODESLOP 10 3907 #ifdef notyet 3908 /* 3909 * Dump vnode list (via sysctl). 3910 */ 3911 /* ARGSUSED */ 3912 static int 3913 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3914 { 3915 struct xvnode *xvn; 3916 struct mount *mp; 3917 struct vnode *vp; 3918 int error, len, n; 3919 3920 /* 3921 * Stale numvnodes access is not fatal here. 3922 */ 3923 req->lock = 0; 3924 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3925 if (!req->oldptr) 3926 /* Make an estimate */ 3927 return (SYSCTL_OUT(req, 0, len)); 3928 3929 error = sysctl_wire_old_buffer(req, 0); 3930 if (error != 0) 3931 return (error); 3932 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3933 n = 0; 3934 mtx_lock(&mountlist_mtx); 3935 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3936 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3937 continue; 3938 MNT_ILOCK(mp); 3939 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3940 if (n == len) 3941 break; 3942 vref(vp); 3943 xvn[n].xv_size = sizeof *xvn; 3944 xvn[n].xv_vnode = vp; 3945 xvn[n].xv_id = 0; /* XXX compat */ 3946 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3947 XV_COPY(usecount); 3948 XV_COPY(writecount); 3949 XV_COPY(holdcnt); 3950 XV_COPY(mount); 3951 XV_COPY(numoutput); 3952 XV_COPY(type); 3953 #undef XV_COPY 3954 xvn[n].xv_flag = vp->v_vflag; 3955 3956 switch (vp->v_type) { 3957 case VREG: 3958 case VDIR: 3959 case VLNK: 3960 break; 3961 case VBLK: 3962 case VCHR: 3963 if (vp->v_rdev == NULL) { 3964 vrele(vp); 3965 continue; 3966 } 3967 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3968 break; 3969 case VSOCK: 3970 xvn[n].xv_socket = vp->v_socket; 3971 break; 3972 case VFIFO: 3973 xvn[n].xv_fifo = vp->v_fifoinfo; 3974 break; 3975 case VNON: 3976 case VBAD: 3977 default: 3978 /* shouldn't happen? 
*/ 3979 vrele(vp); 3980 continue; 3981 } 3982 vrele(vp); 3983 ++n; 3984 } 3985 MNT_IUNLOCK(mp); 3986 mtx_lock(&mountlist_mtx); 3987 vfs_unbusy(mp); 3988 if (n == len) 3989 break; 3990 } 3991 mtx_unlock(&mountlist_mtx); 3992 3993 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3994 free(xvn, M_TEMP); 3995 return (error); 3996 } 3997 3998 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3999 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4000 ""); 4001 #endif 4002 4003 static void 4004 unmount_or_warn(struct mount *mp) 4005 { 4006 int error; 4007 4008 error = dounmount(mp, MNT_FORCE, curthread); 4009 if (error != 0) { 4010 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4011 if (error == EBUSY) 4012 printf("BUSY)\n"); 4013 else 4014 printf("%d)\n", error); 4015 } 4016 } 4017 4018 /* 4019 * Unmount all filesystems. The list is traversed in reverse order 4020 * of mounting to avoid dependencies. 4021 */ 4022 void 4023 vfs_unmountall(void) 4024 { 4025 struct mount *mp, *tmp; 4026 4027 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4028 4029 /* 4030 * Since this only runs when rebooting, it is not interlocked. 4031 */ 4032 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4033 vfs_ref(mp); 4034 4035 /* 4036 * Forcibly unmounting "/dev" before "/" would prevent clean 4037 * unmount of the latter. 4038 */ 4039 if (mp == rootdevmp) 4040 continue; 4041 4042 unmount_or_warn(mp); 4043 } 4044 4045 if (rootdevmp != NULL) 4046 unmount_or_warn(rootdevmp); 4047 } 4048 4049 /* 4050 * perform msync on all vnodes under a mount point 4051 * the mount point must be locked. 4052 */ 4053 void 4054 vfs_msync(struct mount *mp, int flags) 4055 { 4056 struct vnode *vp, *mvp; 4057 struct vm_object *obj; 4058 4059 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4060 4061 vnlru_return_batch(mp); 4062 4063 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4064 obj = vp->v_object; 4065 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 4066 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4067 if (!vget(vp, 4068 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4069 curthread)) { 4070 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4071 vput(vp); 4072 continue; 4073 } 4074 4075 obj = vp->v_object; 4076 if (obj != NULL) { 4077 VM_OBJECT_WLOCK(obj); 4078 vm_object_page_clean(obj, 0, 0, 4079 flags == MNT_WAIT ? 4080 OBJPC_SYNC : OBJPC_NOSYNC); 4081 VM_OBJECT_WUNLOCK(obj); 4082 } 4083 vput(vp); 4084 } 4085 } else 4086 VI_UNLOCK(vp); 4087 } 4088 } 4089 4090 static void 4091 destroy_vpollinfo_free(struct vpollinfo *vi) 4092 { 4093 4094 knlist_destroy(&vi->vpi_selinfo.si_note); 4095 mtx_destroy(&vi->vpi_lock); 4096 uma_zfree(vnodepoll_zone, vi); 4097 } 4098 4099 static void 4100 destroy_vpollinfo(struct vpollinfo *vi) 4101 { 4102 4103 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4104 seldrain(&vi->vpi_selinfo); 4105 destroy_vpollinfo_free(vi); 4106 } 4107 4108 /* 4109 * Initialize per-vnode helper structure to hold poll-related state. 
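
/*
 * v_addpollinfo(), which follows, allocates its vpollinfo before taking the
 * vnode interlock (the M_WAITOK allocation may sleep), then rechecks under
 * the lock and frees the allocation if it lost the race.  A hedged userland
 * analog of that double-checked pattern, with invented names:
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
        pthread_mutex_t  lock;          /* stands in for the vnode interlock */
        void            *helper;        /* stands in for v_pollinfo */
};

static void
attach_helper(struct obj *o)
{
        void *p;

        if (o->helper != NULL)                  /* unlocked fast path */
                return;
        if ((p = malloc(64)) == NULL)           /* may block; done unlocked */
                return;
        pthread_mutex_lock(&o->lock);
        if (o->helper != NULL) {                /* recheck under the lock */
                pthread_mutex_unlock(&o->lock);
                free(p);                        /* another thread won the race */
                return;
        }
        o->helper = p;
        pthread_mutex_unlock(&o->lock);
}
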
4110 */ 4111 void 4112 v_addpollinfo(struct vnode *vp) 4113 { 4114 struct vpollinfo *vi; 4115 4116 if (vp->v_pollinfo != NULL) 4117 return; 4118 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4119 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4120 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4121 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4122 VI_LOCK(vp); 4123 if (vp->v_pollinfo != NULL) { 4124 VI_UNLOCK(vp); 4125 destroy_vpollinfo_free(vi); 4126 return; 4127 } 4128 vp->v_pollinfo = vi; 4129 VI_UNLOCK(vp); 4130 } 4131 4132 /* 4133 * Record a process's interest in events which might happen to 4134 * a vnode. Because poll uses the historic select-style interface 4135 * internally, this routine serves as both the ``check for any 4136 * pending events'' and the ``record my interest in future events'' 4137 * functions. (These are done together, while the lock is held, 4138 * to avoid race conditions.) 4139 */ 4140 int 4141 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4142 { 4143 4144 v_addpollinfo(vp); 4145 mtx_lock(&vp->v_pollinfo->vpi_lock); 4146 if (vp->v_pollinfo->vpi_revents & events) { 4147 /* 4148 * This leaves events we are not interested 4149 * in available for the other process which 4150 * presumably had requested them 4151 * (otherwise they would never have been 4152 * recorded). 4153 */ 4154 events &= vp->v_pollinfo->vpi_revents; 4155 vp->v_pollinfo->vpi_revents &= ~events; 4156 4157 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4158 return (events); 4159 } 4160 vp->v_pollinfo->vpi_events |= events; 4161 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4162 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4163 return (0); 4164 } 4165 4166 /* 4167 * Routine to create and manage a filesystem syncer vnode. 4168 */ 4169 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4170 static int sync_fsync(struct vop_fsync_args *); 4171 static int sync_inactive(struct vop_inactive_args *); 4172 static int sync_reclaim(struct vop_reclaim_args *); 4173 4174 static struct vop_vector sync_vnodeops = { 4175 .vop_bypass = VOP_EOPNOTSUPP, 4176 .vop_close = sync_close, /* close */ 4177 .vop_fsync = sync_fsync, /* fsync */ 4178 .vop_inactive = sync_inactive, /* inactive */ 4179 .vop_reclaim = sync_reclaim, /* reclaim */ 4180 .vop_lock1 = vop_stdlock, /* lock */ 4181 .vop_unlock = vop_stdunlock, /* unlock */ 4182 .vop_islocked = vop_stdislocked, /* islocked */ 4183 }; 4184 4185 /* 4186 * Create a new filesystem syncer vnode for the specified mount point. 4187 */ 4188 void 4189 vfs_allocate_syncvnode(struct mount *mp) 4190 { 4191 struct vnode *vp; 4192 struct bufobj *bo; 4193 static long start, incr, next; 4194 int error; 4195 4196 /* Allocate a new vnode */ 4197 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4198 if (error != 0) 4199 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4200 vp->v_type = VNON; 4201 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4202 vp->v_vflag |= VV_FORCEINSMQ; 4203 error = insmntque(vp, mp); 4204 if (error != 0) 4205 panic("vfs_allocate_syncvnode: insmntque() failed"); 4206 vp->v_vflag &= ~VV_FORCEINSMQ; 4207 VOP_UNLOCK(vp, 0); 4208 /* 4209 * Place the vnode onto the syncer worklist. We attempt to 4210 * scatter them about on the list so that they will go off 4211 * at evenly distributed times even if all the filesystems 4212 * are mounted at once.
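
/*
 * Standalone userland sketch (not part of this file) replaying the slot
 * assignment performed by the code just below.  The constants 30 and 32
 * mirror the usual syncdelay / syncer_maxdelay defaults, but are assumptions
 * here.
 */
#include <stdio.h>

int
main(void)
{
        const long syncer_maxdelay = 32, syncdelay = 30;
        long start = 0, incr = 0, next = 0;
        int i;

        for (i = 0; i < 8; i++) {
                next += incr;
                if (next == 0 || next > syncer_maxdelay) {
                        start /= 2;
                        incr /= 2;
                        if (start == 0) {
                                start = syncer_maxdelay / 2;
                                incr = syncer_maxdelay;
                        }
                        next = start;
                }
                /* Successive mounts land on well separated worklist slots. */
                printf("mount %d -> slot %ld\n", i,
                    syncdelay > 0 ? next % syncdelay : 0);
        }
        return (0);
}
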
4213 */ 4214 next += incr; 4215 if (next == 0 || next > syncer_maxdelay) { 4216 start /= 2; 4217 incr /= 2; 4218 if (start == 0) { 4219 start = syncer_maxdelay / 2; 4220 incr = syncer_maxdelay; 4221 } 4222 next = start; 4223 } 4224 bo = &vp->v_bufobj; 4225 BO_LOCK(bo); 4226 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4227 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 4228 mtx_lock(&sync_mtx); 4229 sync_vnode_count++; 4230 if (mp->mnt_syncer == NULL) { 4231 mp->mnt_syncer = vp; 4232 vp = NULL; 4233 } 4234 mtx_unlock(&sync_mtx); 4235 BO_UNLOCK(bo); 4236 if (vp != NULL) { 4237 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4238 vgone(vp); 4239 vput(vp); 4240 } 4241 } 4242 4243 void 4244 vfs_deallocate_syncvnode(struct mount *mp) 4245 { 4246 struct vnode *vp; 4247 4248 mtx_lock(&sync_mtx); 4249 vp = mp->mnt_syncer; 4250 if (vp != NULL) 4251 mp->mnt_syncer = NULL; 4252 mtx_unlock(&sync_mtx); 4253 if (vp != NULL) 4254 vrele(vp); 4255 } 4256 4257 /* 4258 * Do a lazy sync of the filesystem. 4259 */ 4260 static int 4261 sync_fsync(struct vop_fsync_args *ap) 4262 { 4263 struct vnode *syncvp = ap->a_vp; 4264 struct mount *mp = syncvp->v_mount; 4265 int error, save; 4266 struct bufobj *bo; 4267 4268 /* 4269 * We only need to do something if this is a lazy evaluation. 4270 */ 4271 if (ap->a_waitfor != MNT_LAZY) 4272 return (0); 4273 4274 /* 4275 * Move ourselves to the back of the sync list. 4276 */ 4277 bo = &syncvp->v_bufobj; 4278 BO_LOCK(bo); 4279 vn_syncer_add_to_worklist(bo, syncdelay); 4280 BO_UNLOCK(bo); 4281 4282 /* 4283 * Walk the list of vnodes pushing all that are dirty and 4284 * not already on the sync list. 4285 */ 4286 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4287 return (0); 4288 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4289 vfs_unbusy(mp); 4290 return (0); 4291 } 4292 save = curthread_pflags_set(TDP_SYNCIO); 4293 vfs_msync(mp, MNT_NOWAIT); 4294 error = VFS_SYNC(mp, MNT_LAZY); 4295 curthread_pflags_restore(save); 4296 vn_finished_write(mp); 4297 vfs_unbusy(mp); 4298 return (error); 4299 } 4300 4301 /* 4302 * The syncer vnode is no longer referenced. 4303 */ 4304 static int 4305 sync_inactive(struct vop_inactive_args *ap) 4306 { 4307 4308 vgone(ap->a_vp); 4309 return (0); 4310 } 4311 4312 /* 4313 * The syncer vnode is no longer needed and is being decommissioned. 4314 * 4315 * Modifications to the worklist must be protected by sync_mtx.
4316 */ 4317 static int 4318 sync_reclaim(struct vop_reclaim_args *ap) 4319 { 4320 struct vnode *vp = ap->a_vp; 4321 struct bufobj *bo; 4322 4323 bo = &vp->v_bufobj; 4324 BO_LOCK(bo); 4325 mtx_lock(&sync_mtx); 4326 if (vp->v_mount->mnt_syncer == vp) 4327 vp->v_mount->mnt_syncer = NULL; 4328 if (bo->bo_flag & BO_ONWORKLST) { 4329 LIST_REMOVE(bo, bo_synclist); 4330 syncer_worklist_len--; 4331 sync_vnode_count--; 4332 bo->bo_flag &= ~BO_ONWORKLST; 4333 } 4334 mtx_unlock(&sync_mtx); 4335 BO_UNLOCK(bo); 4336 4337 return (0); 4338 } 4339 4340 /* 4341 * Check if vnode represents a disk device 4342 */ 4343 int 4344 vn_isdisk(struct vnode *vp, int *errp) 4345 { 4346 int error; 4347 4348 if (vp->v_type != VCHR) { 4349 error = ENOTBLK; 4350 goto out; 4351 } 4352 error = 0; 4353 dev_lock(); 4354 if (vp->v_rdev == NULL) 4355 error = ENXIO; 4356 else if (vp->v_rdev->si_devsw == NULL) 4357 error = ENXIO; 4358 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4359 error = ENOTBLK; 4360 dev_unlock(); 4361 out: 4362 if (errp != NULL) 4363 *errp = error; 4364 return (error == 0); 4365 } 4366 4367 /* 4368 * Common filesystem object access control check routine. Accepts a 4369 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4370 * and optional call-by-reference privused argument allowing vaccess() 4371 * to indicate to the caller whether privilege was used to satisfy the 4372 * request (obsoleted). Returns 0 on success, or an errno on failure. 4373 */ 4374 int 4375 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4376 accmode_t accmode, struct ucred *cred, int *privused) 4377 { 4378 accmode_t dac_granted; 4379 accmode_t priv_granted; 4380 4381 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4382 ("invalid bit in accmode")); 4383 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4384 ("VAPPEND without VWRITE")); 4385 4386 /* 4387 * Look for a normal, non-privileged way to access the file/directory 4388 * as requested. If it exists, go with that. 4389 */ 4390 4391 if (privused != NULL) 4392 *privused = 0; 4393 4394 dac_granted = 0; 4395 4396 /* Check the owner. */ 4397 if (cred->cr_uid == file_uid) { 4398 dac_granted |= VADMIN; 4399 if (file_mode & S_IXUSR) 4400 dac_granted |= VEXEC; 4401 if (file_mode & S_IRUSR) 4402 dac_granted |= VREAD; 4403 if (file_mode & S_IWUSR) 4404 dac_granted |= (VWRITE | VAPPEND); 4405 4406 if ((accmode & dac_granted) == accmode) 4407 return (0); 4408 4409 goto privcheck; 4410 } 4411 4412 /* Otherwise, check the groups (first match) */ 4413 if (groupmember(file_gid, cred)) { 4414 if (file_mode & S_IXGRP) 4415 dac_granted |= VEXEC; 4416 if (file_mode & S_IRGRP) 4417 dac_granted |= VREAD; 4418 if (file_mode & S_IWGRP) 4419 dac_granted |= (VWRITE | VAPPEND); 4420 4421 if ((accmode & dac_granted) == accmode) 4422 return (0); 4423 4424 goto privcheck; 4425 } 4426 4427 /* Otherwise, check everyone else. */ 4428 if (file_mode & S_IXOTH) 4429 dac_granted |= VEXEC; 4430 if (file_mode & S_IROTH) 4431 dac_granted |= VREAD; 4432 if (file_mode & S_IWOTH) 4433 dac_granted |= (VWRITE | VAPPEND); 4434 if ((accmode & dac_granted) == accmode) 4435 return (0); 4436 4437 privcheck: 4438 /* 4439 * Build a privilege mask to determine if the set of privileges 4440 * satisfies the requirements when combined with the granted mask 4441 * from above. For each privilege, if the privilege is required, 4442 * bitwise or the request type onto the priv_granted mask. 
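
/*
 * Illustrative userland model (not kernel code): the owner/group/other
 * branches above reduce to this classic mapping from mode bits to
 * read/write/execute rights.  Deliberately simplified: exactly one class is
 * consulted, there is no supplementary-group lookup, no VADMIN handling and
 * no privilege fallback.  All names below are invented for the example.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <stdio.h>

#define WANT_READ       0x1
#define WANT_WRITE      0x2
#define WANT_EXEC       0x4

static bool
dac_allows(mode_t mode, uid_t fuid, gid_t fgid, uid_t uid, gid_t gid, int want)
{
        int granted = 0;

        if (uid == fuid) {                      /* owner class */
                if (mode & S_IRUSR) granted |= WANT_READ;
                if (mode & S_IWUSR) granted |= WANT_WRITE;
                if (mode & S_IXUSR) granted |= WANT_EXEC;
        } else if (gid == fgid) {               /* group class (first match) */
                if (mode & S_IRGRP) granted |= WANT_READ;
                if (mode & S_IWGRP) granted |= WANT_WRITE;
                if (mode & S_IXGRP) granted |= WANT_EXEC;
        } else {                                /* everyone else */
                if (mode & S_IROTH) granted |= WANT_READ;
                if (mode & S_IWOTH) granted |= WANT_WRITE;
                if (mode & S_IXOTH) granted |= WANT_EXEC;
        }
        return ((want & granted) == want);
}

int
main(void)
{
        /* 0644 file owned by uid 1001, gid 0; uid 1002 asks for read+write. */
        printf("%s\n",
            dac_allows(0644, 1001, 0, 1002, 0, WANT_READ | WANT_WRITE) ?
            "granted" : "denied (would fall through to the privilege check)");
        return (0);
}
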
4443 */ 4444 priv_granted = 0; 4445 4446 if (type == VDIR) { 4447 /* 4448 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4449 * requests, instead of PRIV_VFS_EXEC. 4450 */ 4451 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4452 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 4453 priv_granted |= VEXEC; 4454 } else { 4455 /* 4456 * Ensure that at least one execute bit is on. Otherwise, 4457 * a privileged user will always succeed, and we don't want 4458 * this to happen unless the file really is executable. 4459 */ 4460 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4461 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4462 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 4463 priv_granted |= VEXEC; 4464 } 4465 4466 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4467 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 4468 priv_granted |= VREAD; 4469 4470 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4471 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 4472 priv_granted |= (VWRITE | VAPPEND); 4473 4474 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4475 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 4476 priv_granted |= VADMIN; 4477 4478 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4479 /* XXX audit: privilege used */ 4480 if (privused != NULL) 4481 *privused = 1; 4482 return (0); 4483 } 4484 4485 return ((accmode & VADMIN) ? EPERM : EACCES); 4486 } 4487 4488 /* 4489 * Credential check based on process requesting service, and per-attribute 4490 * permissions. 4491 */ 4492 int 4493 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4494 struct thread *td, accmode_t accmode) 4495 { 4496 4497 /* 4498 * Kernel-invoked always succeeds. 4499 */ 4500 if (cred == NOCRED) 4501 return (0); 4502 4503 /* 4504 * Do not allow privileged processes in jail to directly manipulate 4505 * system attributes. 4506 */ 4507 switch (attrnamespace) { 4508 case EXTATTR_NAMESPACE_SYSTEM: 4509 /* Potentially should be: return (EPERM); */ 4510 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 4511 case EXTATTR_NAMESPACE_USER: 4512 return (VOP_ACCESS(vp, accmode, cred, td)); 4513 default: 4514 return (EPERM); 4515 } 4516 } 4517 4518 #ifdef DEBUG_VFS_LOCKS 4519 /* 4520 * This only exists to suppress warnings from unlocked specfs accesses. It is 4521 * no longer ok to have an unlocked VFS. 4522 */ 4523 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4524 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4525 4526 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4527 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4528 "Drop into debugger on lock violation"); 4529 4530 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4531 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4532 0, "Check for interlock across VOPs"); 4533 4534 int vfs_badlock_print = 1; /* Print lock violations. */ 4535 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4536 0, "Print lock violations"); 4537 4538 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4539 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4540 0, "Print vnode details on lock violations"); 4541 4542 #ifdef KDB 4543 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 4544 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4545 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4546 #endif 4547 4548 static void 4549 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4550 { 4551 4552 #ifdef KDB 4553 if (vfs_badlock_backtrace) 4554 kdb_backtrace(); 4555 #endif 4556 if (vfs_badlock_vnode) 4557 vn_printf(vp, "vnode "); 4558 if (vfs_badlock_print) 4559 printf("%s: %p %s\n", str, (void *)vp, msg); 4560 if (vfs_badlock_ddb) 4561 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4562 } 4563 4564 void 4565 assert_vi_locked(struct vnode *vp, const char *str) 4566 { 4567 4568 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4569 vfs_badlock("interlock is not locked but should be", str, vp); 4570 } 4571 4572 void 4573 assert_vi_unlocked(struct vnode *vp, const char *str) 4574 { 4575 4576 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4577 vfs_badlock("interlock is locked but should not be", str, vp); 4578 } 4579 4580 void 4581 assert_vop_locked(struct vnode *vp, const char *str) 4582 { 4583 int locked; 4584 4585 if (!IGNORE_LOCK(vp)) { 4586 locked = VOP_ISLOCKED(vp); 4587 if (locked == 0 || locked == LK_EXCLOTHER) 4588 vfs_badlock("is not locked but should be", str, vp); 4589 } 4590 } 4591 4592 void 4593 assert_vop_unlocked(struct vnode *vp, const char *str) 4594 { 4595 4596 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4597 vfs_badlock("is locked but should not be", str, vp); 4598 } 4599 4600 void 4601 assert_vop_elocked(struct vnode *vp, const char *str) 4602 { 4603 4604 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4605 vfs_badlock("is not exclusive locked but should be", str, vp); 4606 } 4607 #endif /* DEBUG_VFS_LOCKS */ 4608 4609 void 4610 vop_rename_fail(struct vop_rename_args *ap) 4611 { 4612 4613 if (ap->a_tvp != NULL) 4614 vput(ap->a_tvp); 4615 if (ap->a_tdvp == ap->a_tvp) 4616 vrele(ap->a_tdvp); 4617 else 4618 vput(ap->a_tdvp); 4619 vrele(ap->a_fdvp); 4620 vrele(ap->a_fvp); 4621 } 4622 4623 void 4624 vop_rename_pre(void *ap) 4625 { 4626 struct vop_rename_args *a = ap; 4627 4628 #ifdef DEBUG_VFS_LOCKS 4629 if (a->a_tvp) 4630 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4631 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4632 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4633 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4634 4635 /* Check the source (from). */ 4636 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4637 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4638 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4639 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4640 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4641 4642 /* Check the target. */ 4643 if (a->a_tvp) 4644 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4645 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4646 #endif 4647 if (a->a_tdvp != a->a_fdvp) 4648 vhold(a->a_fdvp); 4649 if (a->a_tvp != a->a_fvp) 4650 vhold(a->a_fvp); 4651 vhold(a->a_tdvp); 4652 if (a->a_tvp) 4653 vhold(a->a_tvp); 4654 } 4655 4656 #ifdef DEBUG_VFS_LOCKS 4657 void 4658 vop_strategy_pre(void *ap) 4659 { 4660 struct vop_strategy_args *a; 4661 struct buf *bp; 4662 4663 a = ap; 4664 bp = a->a_bp; 4665 4666 /* 4667 * Cluster ops lock their component buffers but not the IO container. 
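
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file and
 * not buildable on its own): how a VOP implementation typically states its
 * locking contract through the ASSERT_VOP_* / ASSERT_VI_* macros that wrap
 * the helpers above when DEBUG_VFS_LOCKS is configured.  "examplefs" is
 * invented for the example.
 */
#include <sys/param.h>
#include <sys/vnode.h>

static int
examplefs_setattr(struct vop_setattr_args *ap)
{
        struct vnode *vp = ap->a_vp;

        /* setattr is called with the vnode exclusively locked... */
        ASSERT_VOP_ELOCKED(vp, "examplefs_setattr");
        /* ...and without the vnode interlock held. */
        ASSERT_VI_UNLOCKED(vp, "examplefs_setattr");

        /* Real attribute handling would go here. */
        return (0);
}
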
4668 */ 4669 if ((bp->b_flags & B_CLUSTER) != 0) 4670 return; 4671 4672 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4673 if (vfs_badlock_print) 4674 printf( 4675 "VOP_STRATEGY: bp is not locked but should be\n"); 4676 if (vfs_badlock_ddb) 4677 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4678 } 4679 } 4680 4681 void 4682 vop_lock_pre(void *ap) 4683 { 4684 struct vop_lock1_args *a = ap; 4685 4686 if ((a->a_flags & LK_INTERLOCK) == 0) 4687 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4688 else 4689 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4690 } 4691 4692 void 4693 vop_lock_post(void *ap, int rc) 4694 { 4695 struct vop_lock1_args *a = ap; 4696 4697 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4698 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4699 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4700 } 4701 4702 void 4703 vop_unlock_pre(void *ap) 4704 { 4705 struct vop_unlock_args *a = ap; 4706 4707 if (a->a_flags & LK_INTERLOCK) 4708 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4709 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4710 } 4711 4712 void 4713 vop_unlock_post(void *ap, int rc) 4714 { 4715 struct vop_unlock_args *a = ap; 4716 4717 if (a->a_flags & LK_INTERLOCK) 4718 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4719 } 4720 #endif 4721 4722 void 4723 vop_create_post(void *ap, int rc) 4724 { 4725 struct vop_create_args *a = ap; 4726 4727 if (!rc) 4728 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4729 } 4730 4731 void 4732 vop_deleteextattr_post(void *ap, int rc) 4733 { 4734 struct vop_deleteextattr_args *a = ap; 4735 4736 if (!rc) 4737 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4738 } 4739 4740 void 4741 vop_link_post(void *ap, int rc) 4742 { 4743 struct vop_link_args *a = ap; 4744 4745 if (!rc) { 4746 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4747 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4748 } 4749 } 4750 4751 void 4752 vop_mkdir_post(void *ap, int rc) 4753 { 4754 struct vop_mkdir_args *a = ap; 4755 4756 if (!rc) 4757 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4758 } 4759 4760 void 4761 vop_mknod_post(void *ap, int rc) 4762 { 4763 struct vop_mknod_args *a = ap; 4764 4765 if (!rc) 4766 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4767 } 4768 4769 void 4770 vop_reclaim_post(void *ap, int rc) 4771 { 4772 struct vop_reclaim_args *a = ap; 4773 4774 if (!rc) 4775 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 4776 } 4777 4778 void 4779 vop_remove_post(void *ap, int rc) 4780 { 4781 struct vop_remove_args *a = ap; 4782 4783 if (!rc) { 4784 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4785 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4786 } 4787 } 4788 4789 void 4790 vop_rename_post(void *ap, int rc) 4791 { 4792 struct vop_rename_args *a = ap; 4793 long hint; 4794 4795 if (!rc) { 4796 hint = NOTE_WRITE; 4797 if (a->a_fdvp == a->a_tdvp) { 4798 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 4799 hint |= NOTE_LINK; 4800 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4801 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4802 } else { 4803 hint |= NOTE_EXTEND; 4804 if (a->a_fvp->v_type == VDIR) 4805 hint |= NOTE_LINK; 4806 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4807 4808 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 4809 a->a_tvp->v_type == VDIR) 4810 hint &= ~NOTE_LINK; 4811 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4812 } 4813 4814 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4815 if (a->a_tvp) 4816 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4817 } 4818 if (a->a_tdvp != a->a_fdvp) 4819 vdrop(a->a_fdvp); 4820 if (a->a_tvp != a->a_fvp) 4821 vdrop(a->a_fvp); 4822 vdrop(a->a_tdvp); 4823 if (a->a_tvp) 4824 vdrop(a->a_tvp); 4825 } 4826 4827 void 4828 
vop_rmdir_post(void *ap, int rc) 4829 { 4830 struct vop_rmdir_args *a = ap; 4831 4832 if (!rc) { 4833 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4834 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4835 } 4836 } 4837 4838 void 4839 vop_setattr_post(void *ap, int rc) 4840 { 4841 struct vop_setattr_args *a = ap; 4842 4843 if (!rc) 4844 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4845 } 4846 4847 void 4848 vop_setextattr_post(void *ap, int rc) 4849 { 4850 struct vop_setextattr_args *a = ap; 4851 4852 if (!rc) 4853 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4854 } 4855 4856 void 4857 vop_symlink_post(void *ap, int rc) 4858 { 4859 struct vop_symlink_args *a = ap; 4860 4861 if (!rc) 4862 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4863 } 4864 4865 void 4866 vop_open_post(void *ap, int rc) 4867 { 4868 struct vop_open_args *a = ap; 4869 4870 if (!rc) 4871 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 4872 } 4873 4874 void 4875 vop_close_post(void *ap, int rc) 4876 { 4877 struct vop_close_args *a = ap; 4878 4879 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 4880 (a->a_vp->v_iflag & VI_DOOMED) == 0)) { 4881 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 4882 NOTE_CLOSE_WRITE : NOTE_CLOSE); 4883 } 4884 } 4885 4886 void 4887 vop_read_post(void *ap, int rc) 4888 { 4889 struct vop_read_args *a = ap; 4890 4891 if (!rc) 4892 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 4893 } 4894 4895 void 4896 vop_readdir_post(void *ap, int rc) 4897 { 4898 struct vop_readdir_args *a = ap; 4899 4900 if (!rc) 4901 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 4902 } 4903 4904 static struct knlist fs_knlist; 4905 4906 static void 4907 vfs_event_init(void *arg) 4908 { 4909 knlist_init_mtx(&fs_knlist, NULL); 4910 } 4911 /* XXX - correct order? */ 4912 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4913 4914 void 4915 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4916 { 4917 4918 KNOTE_UNLOCKED(&fs_knlist, event); 4919 } 4920 4921 static int filt_fsattach(struct knote *kn); 4922 static void filt_fsdetach(struct knote *kn); 4923 static int filt_fsevent(struct knote *kn, long hint); 4924 4925 struct filterops fs_filtops = { 4926 .f_isfd = 0, 4927 .f_attach = filt_fsattach, 4928 .f_detach = filt_fsdetach, 4929 .f_event = filt_fsevent 4930 }; 4931 4932 static int 4933 filt_fsattach(struct knote *kn) 4934 { 4935 4936 kn->kn_flags |= EV_CLEAR; 4937 knlist_add(&fs_knlist, kn, 0); 4938 return (0); 4939 } 4940 4941 static void 4942 filt_fsdetach(struct knote *kn) 4943 { 4944 4945 knlist_remove(&fs_knlist, kn, 0); 4946 } 4947 4948 static int 4949 filt_fsevent(struct knote *kn, long hint) 4950 { 4951 4952 kn->kn_fflags |= hint; 4953 return (kn->kn_fflags != 0); 4954 } 4955 4956 static int 4957 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4958 { 4959 struct vfsidctl vc; 4960 int error; 4961 struct mount *mp; 4962 4963 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4964 if (error) 4965 return (error); 4966 if (vc.vc_vers != VFS_CTL_VERS1) 4967 return (EINVAL); 4968 mp = vfs_getvfs(&vc.vc_fsid); 4969 if (mp == NULL) 4970 return (ENOENT); 4971 /* ensure that a specific sysctl goes to the right filesystem. 
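
/*
 * Illustrative userland sketch (not part of this file): the fs_knlist and
 * fs_filtops machinery above backs EVFILT_FS, so a process can hear about
 * filesystem-level events posted through vfs_event_signal().  The fflags
 * interpretation as VQ_* bits is the usual convention but is stated here as
 * an assumption.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
        struct kevent ev;
        int kq;

        if ((kq = kqueue()) == -1)
                err(1, "kqueue");
        EV_SET(&ev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent register");
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
                err(1, "kevent wait");
        /* fflags carries the posted hints, e.g. VQ_MOUNT / VQ_UNMOUNT. */
        printf("filesystem event, fflags 0x%x\n", (unsigned)ev.fflags);
        return (0);
}
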
*/ 4972 if (strcmp(vc.vc_fstypename, "*") != 0 && 4973 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4974 vfs_rel(mp); 4975 return (EINVAL); 4976 } 4977 VCTLTOREQ(&vc, req); 4978 error = VFS_SYSCTL(mp, vc.vc_op, req); 4979 vfs_rel(mp); 4980 return (error); 4981 } 4982 4983 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4984 NULL, 0, sysctl_vfs_ctl, "", 4985 "Sysctl by fsid"); 4986 4987 /* 4988 * Function to initialize a va_filerev field sensibly. 4989 * XXX: Wouldn't a random number make a lot more sense ?? 4990 */ 4991 u_quad_t 4992 init_va_filerev(void) 4993 { 4994 struct bintime bt; 4995 4996 getbinuptime(&bt); 4997 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4998 } 4999 5000 static int filt_vfsread(struct knote *kn, long hint); 5001 static int filt_vfswrite(struct knote *kn, long hint); 5002 static int filt_vfsvnode(struct knote *kn, long hint); 5003 static void filt_vfsdetach(struct knote *kn); 5004 static struct filterops vfsread_filtops = { 5005 .f_isfd = 1, 5006 .f_detach = filt_vfsdetach, 5007 .f_event = filt_vfsread 5008 }; 5009 static struct filterops vfswrite_filtops = { 5010 .f_isfd = 1, 5011 .f_detach = filt_vfsdetach, 5012 .f_event = filt_vfswrite 5013 }; 5014 static struct filterops vfsvnode_filtops = { 5015 .f_isfd = 1, 5016 .f_detach = filt_vfsdetach, 5017 .f_event = filt_vfsvnode 5018 }; 5019 5020 static void 5021 vfs_knllock(void *arg) 5022 { 5023 struct vnode *vp = arg; 5024 5025 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5026 } 5027 5028 static void 5029 vfs_knlunlock(void *arg) 5030 { 5031 struct vnode *vp = arg; 5032 5033 VOP_UNLOCK(vp, 0); 5034 } 5035 5036 static void 5037 vfs_knl_assert_locked(void *arg) 5038 { 5039 #ifdef DEBUG_VFS_LOCKS 5040 struct vnode *vp = arg; 5041 5042 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5043 #endif 5044 } 5045 5046 static void 5047 vfs_knl_assert_unlocked(void *arg) 5048 { 5049 #ifdef DEBUG_VFS_LOCKS 5050 struct vnode *vp = arg; 5051 5052 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5053 #endif 5054 } 5055 5056 int 5057 vfs_kqfilter(struct vop_kqfilter_args *ap) 5058 { 5059 struct vnode *vp = ap->a_vp; 5060 struct knote *kn = ap->a_kn; 5061 struct knlist *knl; 5062 5063 switch (kn->kn_filter) { 5064 case EVFILT_READ: 5065 kn->kn_fop = &vfsread_filtops; 5066 break; 5067 case EVFILT_WRITE: 5068 kn->kn_fop = &vfswrite_filtops; 5069 break; 5070 case EVFILT_VNODE: 5071 kn->kn_fop = &vfsvnode_filtops; 5072 break; 5073 default: 5074 return (EINVAL); 5075 } 5076 5077 kn->kn_hook = (caddr_t)vp; 5078 5079 v_addpollinfo(vp); 5080 if (vp->v_pollinfo == NULL) 5081 return (ENOMEM); 5082 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5083 vhold(vp); 5084 knlist_add(knl, kn, 0); 5085 5086 return (0); 5087 } 5088 5089 /* 5090 * Detach knote from vnode 5091 */ 5092 static void 5093 filt_vfsdetach(struct knote *kn) 5094 { 5095 struct vnode *vp = (struct vnode *)kn->kn_hook; 5096 5097 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5098 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5099 vdrop(vp); 5100 } 5101 5102 /*ARGSUSED*/ 5103 static int 5104 filt_vfsread(struct knote *kn, long hint) 5105 { 5106 struct vnode *vp = (struct vnode *)kn->kn_hook; 5107 struct vattr va; 5108 int res; 5109 5110 /* 5111 * filesystem is gone, so set the EOF flag and schedule 5112 * the knote for deletion. 
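
/*
 * Illustrative userland sketch (not part of this file): vfs_kqfilter()
 * above is what an EVFILT_VNODE registration on a file descriptor reaches,
 * and the NOTE_* hints it reports are the ones posted by the vop_*_post()
 * hooks earlier in this file.  The path "/tmp/watched" is only an example.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
        struct kevent ev;
        int fd, kq;

        if ((fd = open("/tmp/watched", O_RDONLY)) == -1)
                err(1, "open");
        if ((kq = kqueue()) == -1)
                err(1, "kqueue");
        EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
            NOTE_WRITE | NOTE_ATTRIB | NOTE_DELETE | NOTE_RENAME, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent register");
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
                err(1, "kevent wait");
        printf("vnode event on fd %d, fflags 0x%x\n", (int)ev.ident,
            (unsigned)ev.fflags);
        return (0);
}
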
5113 */ 5114 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5115 VI_LOCK(vp); 5116 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5117 VI_UNLOCK(vp); 5118 return (1); 5119 } 5120 5121 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5122 return (0); 5123 5124 VI_LOCK(vp); 5125 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5126 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5127 VI_UNLOCK(vp); 5128 return (res); 5129 } 5130 5131 /*ARGSUSED*/ 5132 static int 5133 filt_vfswrite(struct knote *kn, long hint) 5134 { 5135 struct vnode *vp = (struct vnode *)kn->kn_hook; 5136 5137 VI_LOCK(vp); 5138 5139 /* 5140 * filesystem is gone, so set the EOF flag and schedule 5141 * the knote for deletion. 5142 */ 5143 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5144 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5145 5146 kn->kn_data = 0; 5147 VI_UNLOCK(vp); 5148 return (1); 5149 } 5150 5151 static int 5152 filt_vfsvnode(struct knote *kn, long hint) 5153 { 5154 struct vnode *vp = (struct vnode *)kn->kn_hook; 5155 int res; 5156 5157 VI_LOCK(vp); 5158 if (kn->kn_sfflags & hint) 5159 kn->kn_fflags |= hint; 5160 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5161 kn->kn_flags |= EV_EOF; 5162 VI_UNLOCK(vp); 5163 return (1); 5164 } 5165 res = (kn->kn_fflags != 0); 5166 VI_UNLOCK(vp); 5167 return (res); 5168 } 5169 5170 int 5171 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5172 { 5173 int error; 5174 5175 if (dp->d_reclen > ap->a_uio->uio_resid) 5176 return (ENAMETOOLONG); 5177 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5178 if (error) { 5179 if (ap->a_ncookies != NULL) { 5180 if (ap->a_cookies != NULL) 5181 free(ap->a_cookies, M_TEMP); 5182 ap->a_cookies = NULL; 5183 *ap->a_ncookies = 0; 5184 } 5185 return (error); 5186 } 5187 if (ap->a_ncookies == NULL) 5188 return (0); 5189 5190 KASSERT(ap->a_cookies, 5191 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5192 5193 *ap->a_cookies = realloc(*ap->a_cookies, 5194 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5195 (*ap->a_cookies)[*ap->a_ncookies] = off; 5196 *ap->a_ncookies += 1; 5197 return (0); 5198 } 5199 5200 /* 5201 * Mark for update the access time of the file if the filesystem 5202 * supports VOP_MARKATIME. This functionality is used by execve and 5203 * mmap, so we want to avoid the I/O implied by directly setting 5204 * va_atime for the sake of efficiency. 5205 */ 5206 void 5207 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5208 { 5209 struct mount *mp; 5210 5211 mp = vp->v_mount; 5212 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5213 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5214 (void)VOP_MARKATIME(vp); 5215 } 5216 5217 /* 5218 * The purpose of this routine is to remove granularity from accmode_t, 5219 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5220 * VADMIN and VAPPEND. 5221 * 5222 * If it returns 0, the caller is supposed to continue with the usual 5223 * access checks using 'accmode' as modified by this routine. If it 5224 * returns nonzero value, the caller is supposed to return that value 5225 * as errno. 5226 * 5227 * Note that after this routine runs, accmode may be zero. 5228 */ 5229 int 5230 vfs_unixify_accmode(accmode_t *accmode) 5231 { 5232 /* 5233 * There is no way to specify explicit "deny" rule using 5234 * file mode or POSIX.1e ACLs. 
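
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file and
 * not buildable on its own): a filesystem that keeps NFSv4-style
 * permissions typically reduces the fine-grained accmode with
 * vfs_unixify_accmode() before falling back to the plain mode-bit check in
 * vaccess() above.  "examplefs_check_access" and its arguments are invented
 * for the example.
 */
#include <sys/param.h>
#include <sys/vnode.h>

static int
examplefs_check_access(struct vnode *vp, accmode_t accmode, mode_t mode,
    uid_t uid, gid_t gid, struct ucred *cred)
{
        int error;

        /* Collapse VADMIN_PERMS, VSTAT_PERMS, etc. into VREAD/VWRITE/... */
        error = vfs_unixify_accmode(&accmode);
        if (error != 0)
                return (error);
        if (accmode == 0)
                return (0);
        /* Now the usual owner/group/other evaluation applies. */
        return (vaccess(vp->v_type, mode, uid, gid, accmode, cred, NULL));
}
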
5235 */ 5236 if (*accmode & VEXPLICIT_DENY) { 5237 *accmode = 0; 5238 return (0); 5239 } 5240 5241 /* 5242 * None of these can be translated into usual access bits. 5243 * Also, the common case for NFSv4 ACLs is to not contain 5244 * either of these bits. Caller should check for VWRITE 5245 * on the containing directory instead. 5246 */ 5247 if (*accmode & (VDELETE_CHILD | VDELETE)) 5248 return (EPERM); 5249 5250 if (*accmode & VADMIN_PERMS) { 5251 *accmode &= ~VADMIN_PERMS; 5252 *accmode |= VADMIN; 5253 } 5254 5255 /* 5256 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5257 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5258 */ 5259 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5260 5261 return (0); 5262 } 5263 5264 /* 5265 * These are helper functions for filesystems to traverse all 5266 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5267 * 5268 * This interface replaces MNT_VNODE_FOREACH. 5269 */ 5270 5271 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5272 5273 struct vnode * 5274 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5275 { 5276 struct vnode *vp; 5277 5278 if (should_yield()) 5279 kern_yield(PRI_USER); 5280 MNT_ILOCK(mp); 5281 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5282 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5283 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5284 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5285 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5286 continue; 5287 VI_LOCK(vp); 5288 if ((vp->v_iflag & VI_DOOMED) != 0) { 5289 VI_UNLOCK(vp); 5290 continue; 5291 } 5292 break; 5293 } 5294 if (vp == NULL) { 5295 __mnt_vnode_markerfree_all(mvp, mp); 5296 /* MNT_IUNLOCK(mp); -- done in above function */ 5297 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5298 return (NULL); 5299 } 5300 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5301 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5302 MNT_IUNLOCK(mp); 5303 return (vp); 5304 } 5305 5306 struct vnode * 5307 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5308 { 5309 struct vnode *vp; 5310 5311 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5312 MNT_ILOCK(mp); 5313 MNT_REF(mp); 5314 (*mvp)->v_mount = mp; 5315 (*mvp)->v_type = VMARKER; 5316 5317 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5318 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5319 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5320 continue; 5321 VI_LOCK(vp); 5322 if ((vp->v_iflag & VI_DOOMED) != 0) { 5323 VI_UNLOCK(vp); 5324 continue; 5325 } 5326 break; 5327 } 5328 if (vp == NULL) { 5329 MNT_REL(mp); 5330 MNT_IUNLOCK(mp); 5331 free(*mvp, M_VNODE_MARKER); 5332 *mvp = NULL; 5333 return (NULL); 5334 } 5335 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5336 MNT_IUNLOCK(mp); 5337 return (vp); 5338 } 5339 5340 void 5341 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5342 { 5343 5344 if (*mvp == NULL) { 5345 MNT_IUNLOCK(mp); 5346 return; 5347 } 5348 5349 mtx_assert(MNT_MTX(mp), MA_OWNED); 5350 5351 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5352 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5353 MNT_REL(mp); 5354 MNT_IUNLOCK(mp); 5355 free(*mvp, M_VNODE_MARKER); 5356 *mvp = NULL; 5357 } 5358 5359 /* 5360 * These are helper functions for filesystems to traverse their 5361 * active vnodes. 
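
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file and
 * not buildable on its own): the __mnt_vnode_*_all() helpers above are
 * consumed through the MNT_VNODE_FOREACH_ALL() macro from sys/mount.h.  The
 * iterator hands back each vnode with its interlock held; leaving the loop
 * early must go through MNT_VNODE_FOREACH_ALL_ABORT() so the marker is
 * removed.  "examplefs_count_dirty" and the cutoff are invented.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

static int
examplefs_count_dirty(struct mount *mp)
{
        struct vnode *vp, *mvp;
        int ndirty;

        ndirty = 0;
        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                /* Racy peek at the dirty buffer count; fine for a heuristic. */
                if (vp->v_bufobj.bo_dirty.bv_cnt != 0)
                        ndirty++;
                VI_UNLOCK(vp);
                if (ndirty > 1000) {
                        /* Stop early; this also cleans up the marker. */
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        break;
                }
        }
        return (ndirty);
}
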
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5362 */ 5363 static void 5364 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5365 { 5366 5367 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5368 5369 MNT_ILOCK(mp); 5370 MNT_REL(mp); 5371 MNT_IUNLOCK(mp); 5372 free(*mvp, M_VNODE_MARKER); 5373 *mvp = NULL; 5374 } 5375 5376 /* 5377 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5378 * conventional lock order during mnt_vnode_next_active iteration. 5379 * 5380 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5381 * The list lock is dropped and reacquired. On success, both locks are held. 5382 * On failure, the mount vnode list lock is held but the vnode interlock is 5383 * not, and the procedure may have yielded. 5384 */ 5385 static bool 5386 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5387 struct vnode *vp) 5388 { 5389 const struct vnode *tmp; 5390 bool held, ret; 5391 5392 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5393 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5394 ("%s: bad marker", __func__)); 5395 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5396 ("%s: inappropriate vnode", __func__)); 5397 ASSERT_VI_UNLOCKED(vp, __func__); 5398 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5399 5400 ret = false; 5401 5402 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5403 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5404 5405 /* 5406 * Use a hold to prevent vp from disappearing while the mount vnode 5407 * list lock is dropped and reacquired. Normally a hold would be 5408 * acquired with vhold(), but that might try to acquire the vnode 5409 * interlock, which would be a LOR with the mount vnode list lock. 5410 */ 5411 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5412 mtx_unlock(&mp->mnt_listmtx); 5413 if (!held) 5414 goto abort; 5415 VI_LOCK(vp); 5416 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5417 vdropl(vp); 5418 goto abort; 5419 } 5420 mtx_lock(&mp->mnt_listmtx); 5421 5422 /* 5423 * Determine whether the vnode is still the next one after the marker, 5424 * excepting any other markers. If the vnode has not been doomed by 5425 * vgone() then the hold should have ensured that it remained on the 5426 * active list. If it has been doomed but is still on the active list, 5427 * don't abort, but rather skip over it (avoid spinning on doomed 5428 * vnodes). 5429 */ 5430 tmp = mvp; 5431 do { 5432 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5433 } while (tmp != NULL && tmp->v_type == VMARKER); 5434 if (tmp != vp) { 5435 mtx_unlock(&mp->mnt_listmtx); 5436 VI_UNLOCK(vp); 5437 goto abort; 5438 } 5439 5440 ret = true; 5441 goto out; 5442 abort: 5443 maybe_yield(); 5444 mtx_lock(&mp->mnt_listmtx); 5445 out: 5446 if (ret) 5447 ASSERT_VI_LOCKED(vp, __func__); 5448 else 5449 ASSERT_VI_UNLOCKED(vp, __func__); 5450 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5451 return (ret); 5452 } 5453 5454 static struct vnode * 5455 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5456 { 5457 struct vnode *vp, *nvp; 5458 5459 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5460 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5461 restart: 5462 vp = TAILQ_NEXT(*mvp, v_actfreelist); 5463 while (vp != NULL) { 5464 if (vp->v_type == VMARKER) { 5465 vp = TAILQ_NEXT(vp, v_actfreelist); 5466 continue; 5467 } 5468 /* 5469 * Try-lock because this is the wrong lock order. 
If that does 5470 * not succeed, drop the mount vnode list lock and try to 5471 * reacquire it and the vnode interlock in the right order. 5472 */ 5473 if (!VI_TRYLOCK(vp) && 5474 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 5475 goto restart; 5476 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 5477 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 5478 ("alien vnode on the active list %p %p", vp, mp)); 5479 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 5480 break; 5481 nvp = TAILQ_NEXT(vp, v_actfreelist); 5482 VI_UNLOCK(vp); 5483 vp = nvp; 5484 } 5485 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5486 5487 /* Check if we are done */ 5488 if (vp == NULL) { 5489 mtx_unlock(&mp->mnt_listmtx); 5490 mnt_vnode_markerfree_active(mvp, mp); 5491 return (NULL); 5492 } 5493 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 5494 mtx_unlock(&mp->mnt_listmtx); 5495 ASSERT_VI_LOCKED(vp, "active iter"); 5496 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 5497 return (vp); 5498 } 5499 5500 struct vnode * 5501 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5502 { 5503 5504 if (should_yield()) 5505 kern_yield(PRI_USER); 5506 mtx_lock(&mp->mnt_listmtx); 5507 return (mnt_vnode_next_active(mvp, mp)); 5508 } 5509 5510 struct vnode * 5511 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 5512 { 5513 struct vnode *vp; 5514 5515 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5516 MNT_ILOCK(mp); 5517 MNT_REF(mp); 5518 MNT_IUNLOCK(mp); 5519 (*mvp)->v_type = VMARKER; 5520 (*mvp)->v_mount = mp; 5521 5522 mtx_lock(&mp->mnt_listmtx); 5523 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 5524 if (vp == NULL) { 5525 mtx_unlock(&mp->mnt_listmtx); 5526 mnt_vnode_markerfree_active(mvp, mp); 5527 return (NULL); 5528 } 5529 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 5530 return (mnt_vnode_next_active(mvp, mp)); 5531 } 5532 5533 void 5534 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5535 { 5536 5537 if (*mvp == NULL) 5538 return; 5539 5540 mtx_lock(&mp->mnt_listmtx); 5541 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5542 mtx_unlock(&mp->mnt_listmtx); 5543 mnt_vnode_markerfree_active(mvp, mp); 5544 } 5545
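
/*
 * Illustrative sketch (hypothetical, not part of this file and not buildable
 * on its own): the active-list iterator above is consumed through
 * MNT_VNODE_FOREACH_ACTIVE(), in the same way vfs_msync() uses it earlier in
 * this file.  Vnodes come back with the interlock held; an early exit goes
 * through MNT_VNODE_FOREACH_ACTIVE_ABORT().  "examplefs_scan_active" is
 * invented for the example.
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

static void
examplefs_scan_active(struct mount *mp)
{
        struct vnode *vp, *mvp;

        MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
                /*
                 * Cheap, interlock-only work goes here; anything heavier
                 * should vget() the vnode with LK_INTERLOCK and release it
                 * with vput() afterwards.
                 */
                VI_UNLOCK(vp);
        }
}
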