1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/condvar.h> 54 #include <sys/conf.h> 55 #include <sys/counter.h> 56 #include <sys/dirent.h> 57 #include <sys/event.h> 58 #include <sys/eventhandler.h> 59 #include <sys/extattr.h> 60 #include <sys/file.h> 61 #include <sys/fcntl.h> 62 #include <sys/jail.h> 63 #include <sys/kdb.h> 64 #include <sys/kernel.h> 65 #include <sys/kthread.h> 66 #include <sys/lockf.h> 67 #include <sys/malloc.h> 68 #include <sys/mount.h> 69 #include <sys/namei.h> 70 #include <sys/pctrie.h> 71 #include <sys/priv.h> 72 #include <sys/reboot.h> 73 #include <sys/refcount.h> 74 #include <sys/rwlock.h> 75 #include <sys/sched.h> 76 #include <sys/sleepqueue.h> 77 #include <sys/smp.h> 78 #include <sys/stat.h> 79 #include <sys/sysctl.h> 80 #include <sys/syslog.h> 81 #include <sys/vmmeter.h> 82 #include <sys/vnode.h> 83 #include <sys/watchdog.h> 84 85 #include <machine/stdarg.h> 86 87 #include <security/mac/mac_framework.h> 88 89 #include <vm/vm.h> 90 #include <vm/vm_object.h> 91 #include <vm/vm_extern.h> 92 #include <vm/pmap.h> 93 #include <vm/vm_map.h> 94 #include <vm/vm_page.h> 95 #include <vm/vm_kern.h> 96 #include <vm/uma.h> 97 98 #ifdef DDB 99 #include <ddb/ddb.h> 100 #endif 101 102 static void delmntque(struct vnode *vp); 103 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 104 int slpflag, int slptimeo); 105 static void syncer_shutdown(void *arg, int howto); 106 static int vtryrecycle(struct vnode *vp); 107 static void v_init_counters(struct vnode *); 108 static void v_incr_usecount(struct vnode *); 109 static void v_incr_usecount_locked(struct vnode *); 110 static void v_incr_devcount(struct vnode *); 111 static void v_decr_devcount(struct vnode *); 112 static void vgonel(struct vnode *); 113 static void vfs_knllock(void *arg); 114 static void vfs_knlunlock(void *arg); 115 static void vfs_knl_assert_locked(void *arg); 116 static void vfs_knl_assert_unlocked(void *arg); 117 static void vnlru_return_batches(struct vfsops *mnt_op); 118 static void destroy_vpollinfo(struct vpollinfo *vi); 119 120 /* 121 * Number of vnodes in existence. Increased whenever getnewvnode() 122 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. 123 */ 124 static unsigned long numvnodes; 125 126 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 127 "Number of vnodes in existence"); 128 129 static counter_u64_t vnodes_created; 130 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 131 "Number of vnodes created by getnewvnode"); 132 133 static u_long mnt_free_list_batch = 128; 134 SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW, 135 &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list"); 136 137 /* 138 * Conversion tables for conversion from vnode types to inode formats 139 * and back. 140 */ 141 enum vtype iftovt_tab[16] = { 142 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 143 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 144 }; 145 int vttoif_tab[10] = { 146 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 147 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 148 }; 149 150 /* 151 * List of vnodes that are ready for recycling. 
152 */ 153 static TAILQ_HEAD(freelst, vnode) vnode_free_list; 154 155 /* 156 * "Free" vnode target. Free vnodes are rarely completely free, but are 157 * just ones that are cheap to recycle. Usually they are for files which 158 * have been stat'd but not read; these usually have inode and namecache 159 * data attached to them. This target is the preferred minimum size of a 160 * sub-cache consisting mostly of such files. The system balances the size 161 * of this sub-cache with its complement to try to prevent either from 162 * thrashing while the other is relatively inactive. The targets express 163 * a preference for the best balance. 164 * 165 * "Above" this target there are 2 further targets (watermarks) related 166 * to recyling of free vnodes. In the best-operating case, the cache is 167 * exactly full, the free list has size between vlowat and vhiwat above the 168 * free target, and recycling from it and normal use maintains this state. 169 * Sometimes the free list is below vlowat or even empty, but this state 170 * is even better for immediate use provided the cache is not full. 171 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 172 * ones) to reach one of these states. The watermarks are currently hard- 173 * coded as 4% and 9% of the available space higher. These and the default 174 * of 25% for wantfreevnodes are too large if the memory size is large. 175 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 176 * whenever vnlru_proc() becomes active. 177 */ 178 static u_long wantfreevnodes; 179 SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, 180 &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes"); 181 static u_long freevnodes; 182 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 183 &freevnodes, 0, "Number of \"free\" vnodes"); 184 185 static counter_u64_t recycles_count; 186 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 187 "Number of vnodes recycled to meet vnode cache targets"); 188 189 /* 190 * Various variables used for debugging the new implementation of 191 * reassignbuf(). 192 * XXX these are probably of (very) limited utility now. 193 */ 194 static int reassignbufcalls; 195 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, 196 "Number of calls to reassignbuf"); 197 198 static counter_u64_t free_owe_inact; 199 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact, 200 "Number of times free vnodes kept on active list due to VFS " 201 "owing inactivation"); 202 203 /* To keep more than one thread at a time from running vfs_getnewfsid */ 204 static struct mtx mntid_mtx; 205 206 /* 207 * Lock for any access to the following: 208 * vnode_free_list 209 * numvnodes 210 * freevnodes 211 */ 212 static struct mtx vnode_free_list_mtx; 213 214 /* Publicly exported FS */ 215 struct nfs_public nfs_pub; 216 217 static uma_zone_t buf_trie_zone; 218 219 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 220 static uma_zone_t vnode_zone; 221 static uma_zone_t vnodepoll_zone; 222 223 /* 224 * The workitem queue. 225 * 226 * It is useful to delay writes of file data and filesystem metadata 227 * for tens of seconds so that quickly created and deleted files need 228 * not waste disk bandwidth being created and removed. To realize this, 229 * we append vnodes to a "workitem" queue. 
When running with a soft 230 * updates implementation, most pending metadata dependencies should 231 * not wait for more than a few seconds. Thus, mounted on block devices 232 * are delayed only about a half the time that file data is delayed. 233 * Similarly, directory updates are more critical, so are only delayed 234 * about a third the time that file data is delayed. Thus, there are 235 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 236 * one each second (driven off the filesystem syncer process). The 237 * syncer_delayno variable indicates the next queue that is to be processed. 238 * Items that need to be processed soon are placed in this queue: 239 * 240 * syncer_workitem_pending[syncer_delayno] 241 * 242 * A delay of fifteen seconds is done by placing the request fifteen 243 * entries later in the queue: 244 * 245 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 246 * 247 */ 248 static int syncer_delayno; 249 static long syncer_mask; 250 LIST_HEAD(synclist, bufobj); 251 static struct synclist *syncer_workitem_pending; 252 /* 253 * The sync_mtx protects: 254 * bo->bo_synclist 255 * sync_vnode_count 256 * syncer_delayno 257 * syncer_state 258 * syncer_workitem_pending 259 * syncer_worklist_len 260 * rushjob 261 */ 262 static struct mtx sync_mtx; 263 static struct cv sync_wakeup; 264 265 #define SYNCER_MAXDELAY 32 266 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 267 static int syncdelay = 30; /* max time to delay syncing data */ 268 static int filedelay = 30; /* time to delay syncing files */ 269 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 270 "Time to delay syncing files (in seconds)"); 271 static int dirdelay = 29; /* time to delay syncing directories */ 272 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 273 "Time to delay syncing directories (in seconds)"); 274 static int metadelay = 28; /* time to delay syncing metadata */ 275 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 276 "Time to delay syncing metadata (in seconds)"); 277 static int rushjob; /* number of slots to run ASAP */ 278 static int stat_rush_requests; /* number of times I/O speeded up */ 279 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 280 "Number of times I/O speeded up (rush requests)"); 281 282 /* 283 * When shutting down the syncer, run it at four times normal speed. 284 */ 285 #define SYNCER_SHUTDOWN_SPEEDUP 4 286 static int sync_vnode_count; 287 static int syncer_worklist_len; 288 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 289 syncer_state; 290 291 /* Target for maximum number of vnodes. */ 292 int desiredvnodes; 293 static int gapvnodes; /* gap between wanted and desired */ 294 static int vhiwat; /* enough extras after expansion */ 295 static int vlowat; /* minimal extras before expansion */ 296 static int vstir; /* nonzero to stir non-free vnodes */ 297 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 298 299 static int 300 sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) 301 { 302 int error, old_desiredvnodes; 303 304 old_desiredvnodes = desiredvnodes; 305 if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0) 306 return (error); 307 if (old_desiredvnodes != desiredvnodes) { 308 wantfreevnodes = desiredvnodes / 4; 309 /* XXX locking seems to be incomplete. 
*/ 310 vfs_hash_changesize(desiredvnodes); 311 cache_changesize(desiredvnodes); 312 } 313 return (0); 314 } 315 316 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 317 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, 318 sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes"); 319 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 320 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 321 static int vnlru_nowhere; 322 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 323 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 324 325 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 326 static int vnsz2log; 327 328 /* 329 * Support for the bufobj clean & dirty pctrie. 330 */ 331 static void * 332 buf_trie_alloc(struct pctrie *ptree) 333 { 334 335 return uma_zalloc(buf_trie_zone, M_NOWAIT); 336 } 337 338 static void 339 buf_trie_free(struct pctrie *ptree, void *node) 340 { 341 342 uma_zfree(buf_trie_zone, node); 343 } 344 PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 345 346 /* 347 * Initialize the vnode management data structures. 348 * 349 * Reevaluate the following cap on the number of vnodes after the physical 350 * memory size exceeds 512GB. In the limit, as the physical memory size 351 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 352 */ 353 #ifndef MAXVNODES_MAX 354 #define MAXVNODES_MAX (512 * 1024 * 1024 / 64) /* 8M */ 355 #endif 356 357 /* 358 * Initialize a vnode as it first enters the zone. 359 */ 360 static int 361 vnode_init(void *mem, int size, int flags) 362 { 363 struct vnode *vp; 364 365 vp = mem; 366 bzero(vp, size); 367 /* 368 * Setup locks. 369 */ 370 vp->v_vnlock = &vp->v_lock; 371 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 372 /* 373 * By default, don't allow shared locks unless filesystems opt-in. 374 */ 375 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 376 LK_NOSHARE | LK_IS_VNODE); 377 /* 378 * Initialize bufobj. 379 */ 380 bufobj_init(&vp->v_bufobj, vp); 381 /* 382 * Initialize namecache. 383 */ 384 LIST_INIT(&vp->v_cache_src); 385 TAILQ_INIT(&vp->v_cache_dst); 386 /* 387 * Initialize rangelocks. 388 */ 389 rangelock_init(&vp->v_rl); 390 return (0); 391 } 392 393 /* 394 * Free a vnode when it is cleared from the zone. 395 */ 396 static void 397 vnode_fini(void *mem, int size) 398 { 399 struct vnode *vp; 400 struct bufobj *bo; 401 402 vp = mem; 403 rangelock_destroy(&vp->v_rl); 404 lockdestroy(vp->v_vnlock); 405 mtx_destroy(&vp->v_interlock); 406 bo = &vp->v_bufobj; 407 rw_destroy(BO_LOCKPTR(bo)); 408 } 409 410 /* 411 * Provide the size of NFS nclnode and NFS fh for calculation of the 412 * vnode memory consumption. The size is specified directly to 413 * eliminate dependency on NFS-private header. 414 * 415 * Other filesystems may use bigger or smaller (like UFS and ZFS) 416 * private inode data, but the NFS-based estimation is ample enough. 417 * Still, we care about differences in the size between 64- and 32-bit 418 * platforms. 419 * 420 * Namecache structure size is heuristically 421 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 422 */ 423 #ifdef _LP64 424 #define NFS_NCLNODE_SZ (528 + 64) 425 #define NC_SZ 148 426 #else 427 #define NFS_NCLNODE_SZ (360 + 32) 428 #define NC_SZ 92 429 #endif 430 431 static void 432 vntblinit(void *dummy __unused) 433 { 434 u_int i; 435 int physvnodes, virtvnodes; 436 437 /* 438 * Desiredvnodes is a function of the physical memory size and the 439 * kernel's heap size. 
Generally speaking, it scales with the 440 * physical memory size. The ratio of desiredvnodes to the physical 441 * memory size is 1:16 until desiredvnodes exceeds 98,304. 442 * Thereafter, the 443 * marginal ratio of desiredvnodes to the physical memory size is 444 * 1:64. However, desiredvnodes is limited by the kernel's heap 445 * size. The memory required by desiredvnodes vnodes and vm objects 446 * must not exceed 1/10th of the kernel's heap size. 447 */ 448 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 449 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 450 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 451 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 452 desiredvnodes = min(physvnodes, virtvnodes); 453 if (desiredvnodes > MAXVNODES_MAX) { 454 if (bootverbose) 455 printf("Reducing kern.maxvnodes %d -> %d\n", 456 desiredvnodes, MAXVNODES_MAX); 457 desiredvnodes = MAXVNODES_MAX; 458 } 459 wantfreevnodes = desiredvnodes / 4; 460 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 461 TAILQ_INIT(&vnode_free_list); 462 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 463 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 464 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 465 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 466 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 467 /* 468 * Preallocate enough nodes to support one-per buf so that 469 * we can not fail an insert. reassignbuf() callers can not 470 * tolerate the insertion failure. 471 */ 472 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 473 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 474 UMA_ZONE_NOFREE | UMA_ZONE_VM); 475 uma_prealloc(buf_trie_zone, nbuf); 476 477 vnodes_created = counter_u64_alloc(M_WAITOK); 478 recycles_count = counter_u64_alloc(M_WAITOK); 479 free_owe_inact = counter_u64_alloc(M_WAITOK); 480 481 /* 482 * Initialize the filesystem syncer. 483 */ 484 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 485 &syncer_mask); 486 syncer_maxdelay = syncer_mask + 1; 487 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 488 cv_init(&sync_wakeup, "syncer"); 489 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 490 vnsz2log++; 491 vnsz2log--; 492 } 493 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 494 495 496 /* 497 * Mark a mount point as busy. Used to synchronize access and to delay 498 * unmounting. Eventually, mountlist_mtx is not released on failure. 499 * 500 * vfs_busy() is a custom lock, it can block the caller. 501 * vfs_busy() only sleeps if the unmount is active on the mount point. 502 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 503 * vnode belonging to mp. 504 * 505 * Lookup uses vfs_busy() to traverse mount points. 506 * root fs var fs 507 * / vnode lock A / vnode lock (/var) D 508 * /var vnode lock B /log vnode lock(/var/log) E 509 * vfs_busy lock C vfs_busy lock F 510 * 511 * Within each file system, the lock order is C->A->B and F->D->E. 
512 * 513 * When traversing across mounts, the system follows that lock order: 514 * 515 * C->A->B 516 * | 517 * +->F->D->E 518 * 519 * The lookup() process for namei("/var") illustrates the process: 520 * VOP_LOOKUP() obtains B while A is held 521 * vfs_busy() obtains a shared lock on F while A and B are held 522 * vput() releases lock on B 523 * vput() releases lock on A 524 * VFS_ROOT() obtains lock on D while shared lock on F is held 525 * vfs_unbusy() releases shared lock on F 526 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 527 * Attempt to lock A (instead of vp_crossmp) while D is held would 528 * violate the global order, causing deadlocks. 529 * 530 * dounmount() locks B while F is drained. 531 */ 532 int 533 vfs_busy(struct mount *mp, int flags) 534 { 535 536 MPASS((flags & ~MBF_MASK) == 0); 537 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 538 539 MNT_ILOCK(mp); 540 MNT_REF(mp); 541 /* 542 * If mount point is currently being unmounted, sleep until the 543 * mount point fate is decided. If thread doing the unmounting fails, 544 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 545 * that this mount point has survived the unmount attempt and vfs_busy 546 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 547 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 548 * about to be really destroyed. vfs_busy needs to release its 549 * reference on the mount point in this case and return with ENOENT, 550 * telling the caller that mount mount it tried to busy is no longer 551 * valid. 552 */ 553 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 554 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 555 MNT_REL(mp); 556 MNT_IUNLOCK(mp); 557 CTR1(KTR_VFS, "%s: failed busying before sleeping", 558 __func__); 559 return (ENOENT); 560 } 561 if (flags & MBF_MNTLSTLOCK) 562 mtx_unlock(&mountlist_mtx); 563 mp->mnt_kern_flag |= MNTK_MWAIT; 564 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 565 if (flags & MBF_MNTLSTLOCK) 566 mtx_lock(&mountlist_mtx); 567 MNT_ILOCK(mp); 568 } 569 if (flags & MBF_MNTLSTLOCK) 570 mtx_unlock(&mountlist_mtx); 571 mp->mnt_lockref++; 572 MNT_IUNLOCK(mp); 573 return (0); 574 } 575 576 /* 577 * Free a busy filesystem. 578 */ 579 void 580 vfs_unbusy(struct mount *mp) 581 { 582 583 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 584 MNT_ILOCK(mp); 585 MNT_REL(mp); 586 KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref")); 587 mp->mnt_lockref--; 588 if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 589 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 590 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 591 mp->mnt_kern_flag &= ~MNTK_DRAINING; 592 wakeup(&mp->mnt_lockref); 593 } 594 MNT_IUNLOCK(mp); 595 } 596 597 /* 598 * Lookup a mount point by filesystem identifier. 599 */ 600 struct mount * 601 vfs_getvfs(fsid_t *fsid) 602 { 603 struct mount *mp; 604 605 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 606 mtx_lock(&mountlist_mtx); 607 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 608 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 609 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 610 vfs_ref(mp); 611 mtx_unlock(&mountlist_mtx); 612 return (mp); 613 } 614 } 615 mtx_unlock(&mountlist_mtx); 616 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 617 return ((struct mount *) 0); 618 } 619 620 /* 621 * Lookup a mount point by filesystem identifier, busying it before 622 * returning. 
623 * 624 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 625 * cache for popular filesystem identifiers. The cache is lockess, using 626 * the fact that struct mount's are never freed. In worst case we may 627 * get pointer to unmounted or even different filesystem, so we have to 628 * check what we got, and go slow way if so. 629 */ 630 struct mount * 631 vfs_busyfs(fsid_t *fsid) 632 { 633 #define FSID_CACHE_SIZE 256 634 typedef struct mount * volatile vmp_t; 635 static vmp_t cache[FSID_CACHE_SIZE]; 636 struct mount *mp; 637 int error; 638 uint32_t hash; 639 640 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 641 hash = fsid->val[0] ^ fsid->val[1]; 642 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 643 mp = cache[hash]; 644 if (mp == NULL || 645 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 646 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 647 goto slow; 648 if (vfs_busy(mp, 0) != 0) { 649 cache[hash] = NULL; 650 goto slow; 651 } 652 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 653 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 654 return (mp); 655 else 656 vfs_unbusy(mp); 657 658 slow: 659 mtx_lock(&mountlist_mtx); 660 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 661 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 662 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 663 error = vfs_busy(mp, MBF_MNTLSTLOCK); 664 if (error) { 665 cache[hash] = NULL; 666 mtx_unlock(&mountlist_mtx); 667 return (NULL); 668 } 669 cache[hash] = mp; 670 return (mp); 671 } 672 } 673 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 674 mtx_unlock(&mountlist_mtx); 675 return ((struct mount *) 0); 676 } 677 678 /* 679 * Check if a user can access privileged mount options. 680 */ 681 int 682 vfs_suser(struct mount *mp, struct thread *td) 683 { 684 int error; 685 686 /* 687 * If the thread is jailed, but this is not a jail-friendly file 688 * system, deny immediately. 689 */ 690 if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred)) 691 return (EPERM); 692 693 /* 694 * If the file system was mounted outside the jail of the calling 695 * thread, deny immediately. 696 */ 697 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 698 return (EPERM); 699 700 /* 701 * If file system supports delegated administration, we don't check 702 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 703 * by the file system itself. 704 * If this is not the user that did original mount, we check for 705 * the PRIV_VFS_MOUNT_OWNER privilege. 706 */ 707 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 708 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 709 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 710 return (error); 711 } 712 return (0); 713 } 714 715 /* 716 * Get a new unique fsid. Try to make its val[0] unique, since this value 717 * will be used to create fake device numbers for stat(). Also try (but 718 * not so hard) make its val[0] unique mod 2^16, since some emulators only 719 * support 16-bit device numbers. We end up with unique val[0]'s for the 720 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 721 * 722 * Keep in mind that several mounts may be running in parallel. Starting 723 * the search one past where the previous search terminated is both a 724 * micro-optimization and a defense against returning the same fsid to 725 * different mounts. 
726 */ 727 void 728 vfs_getnewfsid(struct mount *mp) 729 { 730 static uint16_t mntid_base; 731 struct mount *nmp; 732 fsid_t tfsid; 733 int mtype; 734 735 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 736 mtx_lock(&mntid_mtx); 737 mtype = mp->mnt_vfc->vfc_typenum; 738 tfsid.val[1] = mtype; 739 mtype = (mtype & 0xFF) << 24; 740 for (;;) { 741 tfsid.val[0] = makedev(255, 742 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 743 mntid_base++; 744 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 745 break; 746 vfs_rel(nmp); 747 } 748 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 749 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 750 mtx_unlock(&mntid_mtx); 751 } 752 753 /* 754 * Knob to control the precision of file timestamps: 755 * 756 * 0 = seconds only; nanoseconds zeroed. 757 * 1 = seconds and nanoseconds, accurate within 1/HZ. 758 * 2 = seconds and nanoseconds, truncated to microseconds. 759 * >=3 = seconds and nanoseconds, maximum precision. 760 */ 761 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 762 763 static int timestamp_precision = TSP_USEC; 764 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 765 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 766 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 767 "3+: sec + ns (max. precision))"); 768 769 /* 770 * Get a current timestamp. 771 */ 772 void 773 vfs_timestamp(struct timespec *tsp) 774 { 775 struct timeval tv; 776 777 switch (timestamp_precision) { 778 case TSP_SEC: 779 tsp->tv_sec = time_second; 780 tsp->tv_nsec = 0; 781 break; 782 case TSP_HZ: 783 getnanotime(tsp); 784 break; 785 case TSP_USEC: 786 microtime(&tv); 787 TIMEVAL_TO_TIMESPEC(&tv, tsp); 788 break; 789 case TSP_NSEC: 790 default: 791 nanotime(tsp); 792 break; 793 } 794 } 795 796 /* 797 * Set vnode attributes to VNOVAL 798 */ 799 void 800 vattr_null(struct vattr *vap) 801 { 802 803 vap->va_type = VNON; 804 vap->va_size = VNOVAL; 805 vap->va_bytes = VNOVAL; 806 vap->va_mode = VNOVAL; 807 vap->va_nlink = VNOVAL; 808 vap->va_uid = VNOVAL; 809 vap->va_gid = VNOVAL; 810 vap->va_fsid = VNOVAL; 811 vap->va_fileid = VNOVAL; 812 vap->va_blocksize = VNOVAL; 813 vap->va_rdev = VNOVAL; 814 vap->va_atime.tv_sec = VNOVAL; 815 vap->va_atime.tv_nsec = VNOVAL; 816 vap->va_mtime.tv_sec = VNOVAL; 817 vap->va_mtime.tv_nsec = VNOVAL; 818 vap->va_ctime.tv_sec = VNOVAL; 819 vap->va_ctime.tv_nsec = VNOVAL; 820 vap->va_birthtime.tv_sec = VNOVAL; 821 vap->va_birthtime.tv_nsec = VNOVAL; 822 vap->va_flags = VNOVAL; 823 vap->va_gen = VNOVAL; 824 vap->va_vaflags = 0; 825 } 826 827 /* 828 * This routine is called when we have too many vnodes. It attempts 829 * to free <count> vnodes and will potentially free vnodes that still 830 * have VM backing store (VM backing store is typically the cause 831 * of a vnode blowout so we want to do this). Therefore, this operation 832 * is not considered cheap. 833 * 834 * A number of conditions may prevent a vnode from being reclaimed. 835 * the buffer cache may have references on the vnode, a directory 836 * vnode may still have references due to the namei cache representing 837 * underlying files, or the vnode may be in active use. It is not 838 * desirable to reuse such vnodes. These conditions may cause the 839 * number of vnodes to reach some minimum value regardless of what 840 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
841 */ 842 static int 843 vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger) 844 { 845 struct vnode *vp; 846 int count, done, target; 847 848 done = 0; 849 vn_start_write(NULL, &mp, V_WAIT); 850 MNT_ILOCK(mp); 851 count = mp->mnt_nvnodelistsize; 852 target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1); 853 target = target / 10 + 1; 854 while (count != 0 && done < target) { 855 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 856 while (vp != NULL && vp->v_type == VMARKER) 857 vp = TAILQ_NEXT(vp, v_nmntvnodes); 858 if (vp == NULL) 859 break; 860 /* 861 * XXX LRU is completely broken for non-free vnodes. First 862 * by calling here in mountpoint order, then by moving 863 * unselected vnodes to the end here, and most grossly by 864 * removing the vlruvp() function that was supposed to 865 * maintain the order. (This function was born broken 866 * since syncer problems prevented it doing anything.) The 867 * order is closer to LRC (C = Created). 868 * 869 * LRU reclaiming of vnodes seems to have last worked in 870 * FreeBSD-3 where LRU wasn't mentioned under any spelling. 871 * Then there was no hold count, and inactive vnodes were 872 * simply put on the free list in LRU order. The separate 873 * lists also break LRU. We prefer to reclaim from the 874 * free list for technical reasons. This tends to thrash 875 * the free list to keep very unrecently used held vnodes. 876 * The problem is mitigated by keeping the free list large. 877 */ 878 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 879 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 880 --count; 881 if (!VI_TRYLOCK(vp)) 882 goto next_iter; 883 /* 884 * If it's been deconstructed already, it's still 885 * referenced, or it exceeds the trigger, skip it. 886 * Also skip free vnodes. We are trying to make space 887 * to expand the free list, not reduce it. 888 */ 889 if (vp->v_usecount || 890 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 891 ((vp->v_iflag & VI_FREE) != 0) || 892 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && 893 vp->v_object->resident_page_count > trigger)) { 894 VI_UNLOCK(vp); 895 goto next_iter; 896 } 897 MNT_IUNLOCK(mp); 898 vholdl(vp); 899 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { 900 vdrop(vp); 901 goto next_iter_mntunlocked; 902 } 903 VI_LOCK(vp); 904 /* 905 * v_usecount may have been bumped after VOP_LOCK() dropped 906 * the vnode interlock and before it was locked again. 907 * 908 * It is not necessary to recheck VI_DOOMED because it can 909 * only be set by another thread that holds both the vnode 910 * lock and vnode interlock. If another thread has the 911 * vnode lock before we get to VOP_LOCK() and obtains the 912 * vnode interlock after VOP_LOCK() drops the vnode 913 * interlock, the other thread will be unable to drop the 914 * vnode lock before our VOP_LOCK() call fails. 
915 */ 916 if (vp->v_usecount || 917 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 918 (vp->v_iflag & VI_FREE) != 0 || 919 (vp->v_object != NULL && 920 vp->v_object->resident_page_count > trigger)) { 921 VOP_UNLOCK(vp, LK_INTERLOCK); 922 vdrop(vp); 923 goto next_iter_mntunlocked; 924 } 925 KASSERT((vp->v_iflag & VI_DOOMED) == 0, 926 ("VI_DOOMED unexpectedly detected in vlrureclaim()")); 927 counter_u64_add(recycles_count, 1); 928 vgonel(vp); 929 VOP_UNLOCK(vp, 0); 930 vdropl(vp); 931 done++; 932 next_iter_mntunlocked: 933 if (!should_yield()) 934 goto relock_mnt; 935 goto yield; 936 next_iter: 937 if (!should_yield()) 938 continue; 939 MNT_IUNLOCK(mp); 940 yield: 941 kern_yield(PRI_USER); 942 relock_mnt: 943 MNT_ILOCK(mp); 944 } 945 MNT_IUNLOCK(mp); 946 vn_finished_write(mp); 947 return done; 948 } 949 950 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 951 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 952 0, 953 "limit on vnode free requests per call to the vnlru_free routine"); 954 955 /* 956 * Attempt to reduce the free list by the requested amount. 957 */ 958 static void 959 vnlru_free_locked(int count, struct vfsops *mnt_op) 960 { 961 struct vnode *vp; 962 struct mount *mp; 963 bool tried_batches; 964 965 tried_batches = false; 966 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 967 if (count > max_vnlru_free) 968 count = max_vnlru_free; 969 for (; count > 0; count--) { 970 vp = TAILQ_FIRST(&vnode_free_list); 971 /* 972 * The list can be modified while the free_list_mtx 973 * has been dropped and vp could be NULL here. 974 */ 975 if (vp == NULL) { 976 if (tried_batches) 977 break; 978 mtx_unlock(&vnode_free_list_mtx); 979 vnlru_return_batches(mnt_op); 980 tried_batches = true; 981 mtx_lock(&vnode_free_list_mtx); 982 continue; 983 } 984 985 VNASSERT(vp->v_op != NULL, vp, 986 ("vnlru_free: vnode already reclaimed.")); 987 KASSERT((vp->v_iflag & VI_FREE) != 0, 988 ("Removing vnode not on freelist")); 989 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 990 ("Mangling active vnode")); 991 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 992 993 /* 994 * Don't recycle if our vnode is from different type 995 * of mount point. Note that mp is type-safe, the 996 * check does not reach unmapped address even if 997 * vnode is reclaimed. 998 * Don't recycle if we can't get the interlock without 999 * blocking. 1000 */ 1001 if ((mnt_op != NULL && (mp = vp->v_mount) != NULL && 1002 mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) { 1003 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 1004 continue; 1005 } 1006 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 1007 vp, ("vp inconsistent on freelist")); 1008 1009 /* 1010 * The clear of VI_FREE prevents activation of the 1011 * vnode. There is no sense in putting the vnode on 1012 * the mount point active list, only to remove it 1013 * later during recycling. Inline the relevant part 1014 * of vholdl(), to avoid triggering assertions or 1015 * activating. 1016 */ 1017 freevnodes--; 1018 vp->v_iflag &= ~VI_FREE; 1019 refcount_acquire(&vp->v_holdcnt); 1020 1021 mtx_unlock(&vnode_free_list_mtx); 1022 VI_UNLOCK(vp); 1023 vtryrecycle(vp); 1024 /* 1025 * If the recycled succeeded this vdrop will actually free 1026 * the vnode. If not it will simply place it back on 1027 * the free list. 
1028 */ 1029 vdrop(vp); 1030 mtx_lock(&vnode_free_list_mtx); 1031 } 1032 } 1033 1034 void 1035 vnlru_free(int count, struct vfsops *mnt_op) 1036 { 1037 1038 mtx_lock(&vnode_free_list_mtx); 1039 vnlru_free_locked(count, mnt_op); 1040 mtx_unlock(&vnode_free_list_mtx); 1041 } 1042 1043 1044 /* XXX some names and initialization are bad for limits and watermarks. */ 1045 static int 1046 vspace(void) 1047 { 1048 int space; 1049 1050 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1051 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1052 vlowat = vhiwat / 2; 1053 if (numvnodes > desiredvnodes) 1054 return (0); 1055 space = desiredvnodes - numvnodes; 1056 if (freevnodes > wantfreevnodes) 1057 space += freevnodes - wantfreevnodes; 1058 return (space); 1059 } 1060 1061 static void 1062 vnlru_return_batch_locked(struct mount *mp) 1063 { 1064 struct vnode *vp; 1065 1066 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 1067 1068 if (mp->mnt_tmpfreevnodelistsize == 0) 1069 return; 1070 1071 TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) { 1072 VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp, 1073 ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist")); 1074 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 1075 } 1076 mtx_lock(&vnode_free_list_mtx); 1077 TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist); 1078 freevnodes += mp->mnt_tmpfreevnodelistsize; 1079 mtx_unlock(&vnode_free_list_mtx); 1080 mp->mnt_tmpfreevnodelistsize = 0; 1081 } 1082 1083 static void 1084 vnlru_return_batch(struct mount *mp) 1085 { 1086 1087 mtx_lock(&mp->mnt_listmtx); 1088 vnlru_return_batch_locked(mp); 1089 mtx_unlock(&mp->mnt_listmtx); 1090 } 1091 1092 static void 1093 vnlru_return_batches(struct vfsops *mnt_op) 1094 { 1095 struct mount *mp, *nmp; 1096 bool need_unbusy; 1097 1098 mtx_lock(&mountlist_mtx); 1099 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1100 need_unbusy = false; 1101 if (mnt_op != NULL && mp->mnt_op != mnt_op) 1102 goto next; 1103 if (mp->mnt_tmpfreevnodelistsize == 0) 1104 goto next; 1105 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) { 1106 vnlru_return_batch(mp); 1107 need_unbusy = true; 1108 mtx_lock(&mountlist_mtx); 1109 } 1110 next: 1111 nmp = TAILQ_NEXT(mp, mnt_list); 1112 if (need_unbusy) 1113 vfs_unbusy(mp); 1114 } 1115 mtx_unlock(&mountlist_mtx); 1116 } 1117 1118 /* 1119 * Attempt to recycle vnodes in a context that is always safe to block. 1120 * Calling vlrurecycle() from the bowels of filesystem code has some 1121 * interesting deadlock problems. 1122 */ 1123 static struct proc *vnlruproc; 1124 static int vnlruproc_sig; 1125 1126 static void 1127 vnlru_proc(void) 1128 { 1129 struct mount *mp, *nmp; 1130 unsigned long onumvnodes; 1131 int done, force, reclaim_nc_src, trigger, usevnodes; 1132 1133 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1134 SHUTDOWN_PRI_FIRST); 1135 1136 force = 0; 1137 for (;;) { 1138 kproc_suspend_check(vnlruproc); 1139 mtx_lock(&vnode_free_list_mtx); 1140 /* 1141 * If numvnodes is too large (due to desiredvnodes being 1142 * adjusted using its sysctl, or emergency growth), first 1143 * try to reduce it by discarding from the free list. 1144 */ 1145 if (numvnodes > desiredvnodes) 1146 vnlru_free_locked(numvnodes - desiredvnodes, NULL); 1147 /* 1148 * Sleep if the vnode cache is in a good state. This is 1149 * when it is not over-full and has space for about a 4% 1150 * or 9% expansion (by growing its size or inexcessively 1151 * reducing its free list). 
Otherwise, try to reclaim 1152 * space for a 10% expansion. 1153 */ 1154 if (vstir && force == 0) { 1155 force = 1; 1156 vstir = 0; 1157 } 1158 if (vspace() >= vlowat && force == 0) { 1159 vnlruproc_sig = 0; 1160 wakeup(&vnlruproc_sig); 1161 msleep(vnlruproc, &vnode_free_list_mtx, 1162 PVFS|PDROP, "vlruwt", hz); 1163 continue; 1164 } 1165 mtx_unlock(&vnode_free_list_mtx); 1166 done = 0; 1167 onumvnodes = numvnodes; 1168 /* 1169 * Calculate parameters for recycling. These are the same 1170 * throughout the loop to give some semblance of fairness. 1171 * The trigger point is to avoid recycling vnodes with lots 1172 * of resident pages. We aren't trying to free memory; we 1173 * are trying to recycle or at least free vnodes. 1174 */ 1175 if (numvnodes <= desiredvnodes) 1176 usevnodes = numvnodes - freevnodes; 1177 else 1178 usevnodes = numvnodes; 1179 if (usevnodes <= 0) 1180 usevnodes = 1; 1181 /* 1182 * The trigger value is is chosen to give a conservatively 1183 * large value to ensure that it alone doesn't prevent 1184 * making progress. The value can easily be so large that 1185 * it is effectively infinite in some congested and 1186 * misconfigured cases, and this is necessary. Normally 1187 * it is about 8 to 100 (pages), which is quite large. 1188 */ 1189 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1190 if (force < 2) 1191 trigger = vsmalltrigger; 1192 reclaim_nc_src = force >= 3; 1193 mtx_lock(&mountlist_mtx); 1194 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1195 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 1196 nmp = TAILQ_NEXT(mp, mnt_list); 1197 continue; 1198 } 1199 done += vlrureclaim(mp, reclaim_nc_src, trigger); 1200 mtx_lock(&mountlist_mtx); 1201 nmp = TAILQ_NEXT(mp, mnt_list); 1202 vfs_unbusy(mp); 1203 } 1204 mtx_unlock(&mountlist_mtx); 1205 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1206 uma_reclaim(); 1207 if (done == 0) { 1208 if (force == 0 || force == 1) { 1209 force = 2; 1210 continue; 1211 } 1212 if (force == 2) { 1213 force = 3; 1214 continue; 1215 } 1216 force = 0; 1217 vnlru_nowhere++; 1218 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1219 } else 1220 kern_yield(PRI_USER); 1221 /* 1222 * After becoming active to expand above low water, keep 1223 * active until above high water. 1224 */ 1225 force = vspace() < vhiwat; 1226 } 1227 } 1228 1229 static struct kproc_desc vnlru_kp = { 1230 "vnlru", 1231 vnlru_proc, 1232 &vnlruproc 1233 }; 1234 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1235 &vnlru_kp); 1236 1237 /* 1238 * Routines having to do with the management of the vnode table. 1239 */ 1240 1241 /* 1242 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1243 * before we actually vgone(). This function must be called with the vnode 1244 * held to prevent the vnode from being returned to the free list midway 1245 * through vgone(). 1246 */ 1247 static int 1248 vtryrecycle(struct vnode *vp) 1249 { 1250 struct mount *vnmp; 1251 1252 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1253 VNASSERT(vp->v_holdcnt, vp, 1254 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1255 /* 1256 * This vnode may found and locked via some other list, if so we 1257 * can't recycle it yet. 1258 */ 1259 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1260 CTR2(KTR_VFS, 1261 "%s: impossible to recycle, vp %p lock is already held", 1262 __func__, vp); 1263 return (EWOULDBLOCK); 1264 } 1265 /* 1266 * Don't recycle if its filesystem is being suspended. 
1267 */ 1268 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1269 VOP_UNLOCK(vp, 0); 1270 CTR2(KTR_VFS, 1271 "%s: impossible to recycle, cannot start the write for %p", 1272 __func__, vp); 1273 return (EBUSY); 1274 } 1275 /* 1276 * If we got this far, we need to acquire the interlock and see if 1277 * anyone picked up this vnode from another list. If not, we will 1278 * mark it with DOOMED via vgonel() so that anyone who does find it 1279 * will skip over it. 1280 */ 1281 VI_LOCK(vp); 1282 if (vp->v_usecount) { 1283 VOP_UNLOCK(vp, LK_INTERLOCK); 1284 vn_finished_write(vnmp); 1285 CTR2(KTR_VFS, 1286 "%s: impossible to recycle, %p is already referenced", 1287 __func__, vp); 1288 return (EBUSY); 1289 } 1290 if ((vp->v_iflag & VI_DOOMED) == 0) { 1291 counter_u64_add(recycles_count, 1); 1292 vgonel(vp); 1293 } 1294 VOP_UNLOCK(vp, LK_INTERLOCK); 1295 vn_finished_write(vnmp); 1296 return (0); 1297 } 1298 1299 static void 1300 vcheckspace(void) 1301 { 1302 1303 if (vspace() < vlowat && vnlruproc_sig == 0) { 1304 vnlruproc_sig = 1; 1305 wakeup(vnlruproc); 1306 } 1307 } 1308 1309 /* 1310 * Wait if necessary for space for a new vnode. 1311 */ 1312 static int 1313 getnewvnode_wait(int suspended) 1314 { 1315 1316 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1317 if (numvnodes >= desiredvnodes) { 1318 if (suspended) { 1319 /* 1320 * The file system is being suspended. We cannot 1321 * risk a deadlock here, so allow allocation of 1322 * another vnode even if this would give too many. 1323 */ 1324 return (0); 1325 } 1326 if (vnlruproc_sig == 0) { 1327 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1328 wakeup(vnlruproc); 1329 } 1330 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1331 "vlruwk", hz); 1332 } 1333 /* Post-adjust like the pre-adjust in getnewvnode(). */ 1334 if (numvnodes + 1 > desiredvnodes && freevnodes > 1) 1335 vnlru_free_locked(1, NULL); 1336 return (numvnodes >= desiredvnodes ? ENFILE : 0); 1337 } 1338 1339 /* 1340 * This hack is fragile, and probably not needed any more now that the 1341 * watermark handling works. 1342 */ 1343 void 1344 getnewvnode_reserve(u_int count) 1345 { 1346 struct thread *td; 1347 1348 /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */ 1349 /* XXX no longer so quick, but this part is not racy. */ 1350 mtx_lock(&vnode_free_list_mtx); 1351 if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes) 1352 vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes, 1353 freevnodes - wantfreevnodes), NULL); 1354 mtx_unlock(&vnode_free_list_mtx); 1355 1356 td = curthread; 1357 /* First try to be quick and racy. */ 1358 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1359 td->td_vp_reserv += count; 1360 vcheckspace(); /* XXX no longer so quick, but more racy */ 1361 return; 1362 } else 1363 atomic_subtract_long(&numvnodes, count); 1364 1365 mtx_lock(&vnode_free_list_mtx); 1366 while (count > 0) { 1367 if (getnewvnode_wait(0) == 0) { 1368 count--; 1369 td->td_vp_reserv++; 1370 atomic_add_long(&numvnodes, 1); 1371 } 1372 } 1373 vcheckspace(); 1374 mtx_unlock(&vnode_free_list_mtx); 1375 } 1376 1377 /* 1378 * This hack is fragile, especially if desiredvnodes or wantvnodes are 1379 * misconfgured or changed significantly. Reducing desiredvnodes below 1380 * the reserved amount should cause bizarre behaviour like reducing it 1381 * below the number of active vnodes -- the system will try to reduce 1382 * numvnodes to match, but should fail, so the subtraction below should 1383 * not overflow. 
1384 */ 1385 void 1386 getnewvnode_drop_reserve(void) 1387 { 1388 struct thread *td; 1389 1390 td = curthread; 1391 atomic_subtract_long(&numvnodes, td->td_vp_reserv); 1392 td->td_vp_reserv = 0; 1393 } 1394 1395 /* 1396 * Return the next vnode from the free list. 1397 */ 1398 int 1399 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1400 struct vnode **vpp) 1401 { 1402 struct vnode *vp; 1403 struct thread *td; 1404 struct lock_object *lo; 1405 static int cyclecount; 1406 int error; 1407 1408 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1409 vp = NULL; 1410 td = curthread; 1411 if (td->td_vp_reserv > 0) { 1412 td->td_vp_reserv -= 1; 1413 goto alloc; 1414 } 1415 mtx_lock(&vnode_free_list_mtx); 1416 if (numvnodes < desiredvnodes) 1417 cyclecount = 0; 1418 else if (cyclecount++ >= freevnodes) { 1419 cyclecount = 0; 1420 vstir = 1; 1421 } 1422 /* 1423 * Grow the vnode cache if it will not be above its target max 1424 * after growing. Otherwise, if the free list is nonempty, try 1425 * to reclaim 1 item from it before growing the cache (possibly 1426 * above its target max if the reclamation failed or is delayed). 1427 * Otherwise, wait for some space. In all cases, schedule 1428 * vnlru_proc() if we are getting short of space. The watermarks 1429 * should be chosen so that we never wait or even reclaim from 1430 * the free list to below its target minimum. 1431 */ 1432 if (numvnodes + 1 <= desiredvnodes) 1433 ; 1434 else if (freevnodes > 0) 1435 vnlru_free_locked(1, NULL); 1436 else { 1437 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & 1438 MNTK_SUSPEND)); 1439 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ 1440 if (error != 0) { 1441 mtx_unlock(&vnode_free_list_mtx); 1442 return (error); 1443 } 1444 #endif 1445 } 1446 vcheckspace(); 1447 atomic_add_long(&numvnodes, 1); 1448 mtx_unlock(&vnode_free_list_mtx); 1449 alloc: 1450 counter_u64_add(vnodes_created, 1); 1451 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); 1452 /* 1453 * Locks are given the generic name "vnode" when created. 1454 * Follow the historic practice of using the filesystem 1455 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1456 * 1457 * Locks live in a witness group keyed on their name. Thus, 1458 * when a lock is renamed, it must also move from the witness 1459 * group of its old name to the witness group of its new name. 1460 * 1461 * The change only needs to be made when the vnode moves 1462 * from one filesystem type to another. We ensure that each 1463 * filesystem use a single static name pointer for its tag so 1464 * that we can compare pointers rather than doing a strcmp(). 1465 */ 1466 lo = &vp->v_vnlock->lock_object; 1467 if (lo->lo_name != tag) { 1468 lo->lo_name = tag; 1469 WITNESS_DESTROY(lo); 1470 WITNESS_INIT(lo, tag); 1471 } 1472 /* 1473 * By default, don't allow shared locks unless filesystems opt-in. 1474 */ 1475 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1476 /* 1477 * Finalize various vnode identity bits. 
1478 */ 1479 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1480 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1481 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1482 vp->v_type = VNON; 1483 vp->v_tag = tag; 1484 vp->v_op = vops; 1485 v_init_counters(vp); 1486 vp->v_bufobj.bo_ops = &buf_ops_bio; 1487 #ifdef DIAGNOSTIC 1488 if (mp == NULL && vops != &dead_vnodeops) 1489 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1490 #endif 1491 #ifdef MAC 1492 mac_vnode_init(vp); 1493 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1494 mac_vnode_associate_singlelabel(mp, vp); 1495 #endif 1496 if (mp != NULL) { 1497 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1498 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1499 vp->v_vflag |= VV_NOKNOTE; 1500 } 1501 1502 /* 1503 * For the filesystems which do not use vfs_hash_insert(), 1504 * still initialize v_hash to have vfs_hash_index() useful. 1505 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1506 * its own hashing. 1507 */ 1508 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1509 1510 *vpp = vp; 1511 return (0); 1512 } 1513 1514 /* 1515 * Delete from old mount point vnode list, if on one. 1516 */ 1517 static void 1518 delmntque(struct vnode *vp) 1519 { 1520 struct mount *mp; 1521 int active; 1522 1523 mp = vp->v_mount; 1524 if (mp == NULL) 1525 return; 1526 MNT_ILOCK(mp); 1527 VI_LOCK(vp); 1528 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1529 ("Active vnode list size %d > Vnode list size %d", 1530 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1531 active = vp->v_iflag & VI_ACTIVE; 1532 vp->v_iflag &= ~VI_ACTIVE; 1533 if (active) { 1534 mtx_lock(&mp->mnt_listmtx); 1535 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1536 mp->mnt_activevnodelistsize--; 1537 mtx_unlock(&mp->mnt_listmtx); 1538 } 1539 vp->v_mount = NULL; 1540 VI_UNLOCK(vp); 1541 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1542 ("bad mount point vnode list size")); 1543 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1544 mp->mnt_nvnodelistsize--; 1545 MNT_REL(mp); 1546 MNT_IUNLOCK(mp); 1547 } 1548 1549 static void 1550 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1551 { 1552 1553 vp->v_data = NULL; 1554 vp->v_op = &dead_vnodeops; 1555 vgone(vp); 1556 vput(vp); 1557 } 1558 1559 /* 1560 * Insert into list of vnodes for the new mount point, if available. 1561 */ 1562 int 1563 insmntque1(struct vnode *vp, struct mount *mp, 1564 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1565 { 1566 1567 KASSERT(vp->v_mount == NULL, 1568 ("insmntque: vnode already on per mount vnode list")); 1569 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1570 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1571 1572 /* 1573 * We acquire the vnode interlock early to ensure that the 1574 * vnode cannot be recycled by another process releasing a 1575 * holdcnt on it before we get it on both the vnode list 1576 * and the active vnode list. The mount mutex protects only 1577 * manipulation of the vnode list and the vnode freelist 1578 * mutex protects only manipulation of the active vnode list. 1579 * Hence the need to hold the vnode interlock throughout. 
1580 */ 1581 MNT_ILOCK(mp); 1582 VI_LOCK(vp); 1583 if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && 1584 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1585 mp->mnt_nvnodelistsize == 0)) && 1586 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1587 VI_UNLOCK(vp); 1588 MNT_IUNLOCK(mp); 1589 if (dtr != NULL) 1590 dtr(vp, dtr_arg); 1591 return (EBUSY); 1592 } 1593 vp->v_mount = mp; 1594 MNT_REF(mp); 1595 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1596 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1597 ("neg mount point vnode list size")); 1598 mp->mnt_nvnodelistsize++; 1599 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1600 ("Activating already active vnode")); 1601 vp->v_iflag |= VI_ACTIVE; 1602 mtx_lock(&mp->mnt_listmtx); 1603 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1604 mp->mnt_activevnodelistsize++; 1605 mtx_unlock(&mp->mnt_listmtx); 1606 VI_UNLOCK(vp); 1607 MNT_IUNLOCK(mp); 1608 return (0); 1609 } 1610 1611 int 1612 insmntque(struct vnode *vp, struct mount *mp) 1613 { 1614 1615 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1616 } 1617 1618 /* 1619 * Flush out and invalidate all buffers associated with a bufobj 1620 * Called with the underlying object locked. 1621 */ 1622 int 1623 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1624 { 1625 int error; 1626 1627 BO_LOCK(bo); 1628 if (flags & V_SAVE) { 1629 error = bufobj_wwait(bo, slpflag, slptimeo); 1630 if (error) { 1631 BO_UNLOCK(bo); 1632 return (error); 1633 } 1634 if (bo->bo_dirty.bv_cnt > 0) { 1635 BO_UNLOCK(bo); 1636 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1637 return (error); 1638 /* 1639 * XXX We could save a lock/unlock if this was only 1640 * enabled under INVARIANTS 1641 */ 1642 BO_LOCK(bo); 1643 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1644 panic("vinvalbuf: dirty bufs"); 1645 } 1646 } 1647 /* 1648 * If you alter this loop please notice that interlock is dropped and 1649 * reacquired in flushbuflist. Special care is needed to ensure that 1650 * no race conditions occur from this. 1651 */ 1652 do { 1653 error = flushbuflist(&bo->bo_clean, 1654 flags, bo, slpflag, slptimeo); 1655 if (error == 0 && !(flags & V_CLEANONLY)) 1656 error = flushbuflist(&bo->bo_dirty, 1657 flags, bo, slpflag, slptimeo); 1658 if (error != 0 && error != EAGAIN) { 1659 BO_UNLOCK(bo); 1660 return (error); 1661 } 1662 } while (error != 0); 1663 1664 /* 1665 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1666 * have write I/O in-progress but if there is a VM object then the 1667 * VM object can also have read-I/O in-progress. 1668 */ 1669 do { 1670 bufobj_wwait(bo, 0, 0); 1671 if ((flags & V_VMIO) == 0) { 1672 BO_UNLOCK(bo); 1673 if (bo->bo_object != NULL) { 1674 VM_OBJECT_WLOCK(bo->bo_object); 1675 vm_object_pip_wait(bo->bo_object, "bovlbx"); 1676 VM_OBJECT_WUNLOCK(bo->bo_object); 1677 } 1678 BO_LOCK(bo); 1679 } 1680 } while (bo->bo_numoutput > 0); 1681 BO_UNLOCK(bo); 1682 1683 /* 1684 * Destroy the copy in the VM cache, too. 1685 */ 1686 if (bo->bo_object != NULL && 1687 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1688 VM_OBJECT_WLOCK(bo->bo_object); 1689 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 
1690 OBJPR_CLEANONLY : 0); 1691 VM_OBJECT_WUNLOCK(bo->bo_object); 1692 } 1693 1694 #ifdef INVARIANTS 1695 BO_LOCK(bo); 1696 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1697 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1698 bo->bo_clean.bv_cnt > 0)) 1699 panic("vinvalbuf: flush failed"); 1700 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1701 bo->bo_dirty.bv_cnt > 0) 1702 panic("vinvalbuf: flush dirty failed"); 1703 BO_UNLOCK(bo); 1704 #endif 1705 return (0); 1706 } 1707 1708 /* 1709 * Flush out and invalidate all buffers associated with a vnode. 1710 * Called with the underlying object locked. 1711 */ 1712 int 1713 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1714 { 1715 1716 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1717 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1718 if (vp->v_object != NULL && vp->v_object->handle != vp) 1719 return (0); 1720 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1721 } 1722 1723 /* 1724 * Flush out buffers on the specified list. 1725 * 1726 */ 1727 static int 1728 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1729 int slptimeo) 1730 { 1731 struct buf *bp, *nbp; 1732 int retval, error; 1733 daddr_t lblkno; 1734 b_xflags_t xflags; 1735 1736 ASSERT_BO_WLOCKED(bo); 1737 1738 retval = 0; 1739 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1740 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || 1741 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { 1742 continue; 1743 } 1744 if (nbp != NULL) { 1745 lblkno = nbp->b_lblkno; 1746 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1747 } 1748 retval = EAGAIN; 1749 error = BUF_TIMELOCK(bp, 1750 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1751 "flushbuf", slpflag, slptimeo); 1752 if (error) { 1753 BO_LOCK(bo); 1754 return (error != ENOLCK ? error : EAGAIN); 1755 } 1756 KASSERT(bp->b_bufobj == bo, 1757 ("bp %p wrong b_bufobj %p should be %p", 1758 bp, bp->b_bufobj, bo)); 1759 /* 1760 * XXX Since there are no node locks for NFS, I 1761 * believe there is a slight chance that a delayed 1762 * write will occur while sleeping just above, so 1763 * check for it. 1764 */ 1765 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1766 (flags & V_SAVE)) { 1767 bremfree(bp); 1768 bp->b_flags |= B_ASYNC; 1769 bwrite(bp); 1770 BO_LOCK(bo); 1771 return (EAGAIN); /* XXX: why not loop ? 
*/ 1772 } 1773 bremfree(bp); 1774 bp->b_flags |= (B_INVAL | B_RELBUF); 1775 bp->b_flags &= ~B_ASYNC; 1776 brelse(bp); 1777 BO_LOCK(bo); 1778 if (nbp == NULL) 1779 break; 1780 nbp = gbincore(bo, lblkno); 1781 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1782 != xflags) 1783 break; /* nbp invalid */ 1784 } 1785 return (retval); 1786 } 1787 1788 int 1789 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 1790 { 1791 struct buf *bp; 1792 int error; 1793 daddr_t lblkno; 1794 1795 ASSERT_BO_LOCKED(bo); 1796 1797 for (lblkno = startn;;) { 1798 again: 1799 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 1800 if (bp == NULL || bp->b_lblkno >= endn || 1801 bp->b_lblkno < startn) 1802 break; 1803 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 1804 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 1805 if (error != 0) { 1806 BO_RLOCK(bo); 1807 if (error == ENOLCK) 1808 goto again; 1809 return (error); 1810 } 1811 KASSERT(bp->b_bufobj == bo, 1812 ("bp %p wrong b_bufobj %p should be %p", 1813 bp, bp->b_bufobj, bo)); 1814 lblkno = bp->b_lblkno + 1; 1815 if ((bp->b_flags & B_MANAGED) == 0) 1816 bremfree(bp); 1817 bp->b_flags |= B_RELBUF; 1818 /* 1819 * In the VMIO case, use the B_NOREUSE flag to hint that the 1820 * pages backing each buffer in the range are unlikely to be 1821 * reused. Dirty buffers will have the hint applied once 1822 * they've been written. 1823 */ 1824 if (bp->b_vp->v_object != NULL) 1825 bp->b_flags |= B_NOREUSE; 1826 brelse(bp); 1827 BO_RLOCK(bo); 1828 } 1829 return (0); 1830 } 1831 1832 /* 1833 * Truncate a file's buffer and pages to a specified length. This 1834 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1835 * sync activity. 1836 */ 1837 int 1838 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1839 { 1840 struct buf *bp, *nbp; 1841 int anyfreed; 1842 int trunclbn; 1843 struct bufobj *bo; 1844 1845 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1846 vp, cred, blksize, (uintmax_t)length); 1847 1848 /* 1849 * Round up to the *next* lbn. 
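 * howmany() rounds up, so the block containing the last valid byte of the
 * new length is preserved; for example, with blksize 16384 a length of
 * 16385 yields trunclbn 2, keeping lbns 0 and 1.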
1850 */ 1851 trunclbn = howmany(length, blksize); 1852 1853 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1854 restart: 1855 bo = &vp->v_bufobj; 1856 BO_LOCK(bo); 1857 anyfreed = 1; 1858 for (;anyfreed;) { 1859 anyfreed = 0; 1860 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1861 if (bp->b_lblkno < trunclbn) 1862 continue; 1863 if (BUF_LOCK(bp, 1864 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1865 BO_LOCKPTR(bo)) == ENOLCK) 1866 goto restart; 1867 1868 bremfree(bp); 1869 bp->b_flags |= (B_INVAL | B_RELBUF); 1870 bp->b_flags &= ~B_ASYNC; 1871 brelse(bp); 1872 anyfreed = 1; 1873 1874 BO_LOCK(bo); 1875 if (nbp != NULL && 1876 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1877 (nbp->b_vp != vp) || 1878 (nbp->b_flags & B_DELWRI))) { 1879 BO_UNLOCK(bo); 1880 goto restart; 1881 } 1882 } 1883 1884 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1885 if (bp->b_lblkno < trunclbn) 1886 continue; 1887 if (BUF_LOCK(bp, 1888 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1889 BO_LOCKPTR(bo)) == ENOLCK) 1890 goto restart; 1891 bremfree(bp); 1892 bp->b_flags |= (B_INVAL | B_RELBUF); 1893 bp->b_flags &= ~B_ASYNC; 1894 brelse(bp); 1895 anyfreed = 1; 1896 1897 BO_LOCK(bo); 1898 if (nbp != NULL && 1899 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1900 (nbp->b_vp != vp) || 1901 (nbp->b_flags & B_DELWRI) == 0)) { 1902 BO_UNLOCK(bo); 1903 goto restart; 1904 } 1905 } 1906 } 1907 1908 if (length > 0) { 1909 restartsync: 1910 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1911 if (bp->b_lblkno > 0) 1912 continue; 1913 /* 1914 * Since we hold the vnode lock this should only 1915 * fail if we're racing with the buf daemon. 1916 */ 1917 if (BUF_LOCK(bp, 1918 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1919 BO_LOCKPTR(bo)) == ENOLCK) { 1920 goto restart; 1921 } 1922 VNASSERT((bp->b_flags & B_DELWRI), vp, 1923 ("buf(%p) on dirty queue without DELWRI", bp)); 1924 1925 bremfree(bp); 1926 bawrite(bp); 1927 BO_LOCK(bo); 1928 goto restartsync; 1929 } 1930 } 1931 1932 bufobj_wwait(bo, 0, 0); 1933 BO_UNLOCK(bo); 1934 vnode_pager_setsize(vp, length); 1935 1936 return (0); 1937 } 1938 1939 static void 1940 buf_vlist_remove(struct buf *bp) 1941 { 1942 struct bufv *bv; 1943 1944 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1945 ASSERT_BO_WLOCKED(bp->b_bufobj); 1946 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1947 (BX_VNDIRTY|BX_VNCLEAN), 1948 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1949 if (bp->b_xflags & BX_VNDIRTY) 1950 bv = &bp->b_bufobj->bo_dirty; 1951 else 1952 bv = &bp->b_bufobj->bo_clean; 1953 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1954 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1955 bv->bv_cnt--; 1956 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1957 } 1958 1959 /* 1960 * Add the buffer to the sorted clean or dirty block list. 1961 * 1962 * NOTE: xflags is passed as a constant, optimizing this inline function! 1963 */ 1964 static void 1965 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1966 { 1967 struct bufv *bv; 1968 struct buf *n; 1969 int error; 1970 1971 ASSERT_BO_WLOCKED(bo); 1972 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 1973 ("dead bo %p", bo)); 1974 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1975 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1976 bp->b_xflags |= xflags; 1977 if (xflags & BX_VNDIRTY) 1978 bv = &bo->bo_dirty; 1979 else 1980 bv = &bo->bo_clean; 1981 1982 /* 1983 * Keep the list ordered. Optimize empty list insertion. 
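 * A new buffer is placed immediately after the existing buffer with the
 * largest lblkno not exceeding its own, or at the head if there is none.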
Assume 1984 * we tend to grow at the tail so lookup_le should usually be cheaper 1985 * than _ge. 1986 */ 1987 if (bv->bv_cnt == 0 || 1988 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 1989 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1990 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 1991 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 1992 else 1993 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 1994 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 1995 if (error) 1996 panic("buf_vlist_add: Preallocated nodes insufficient."); 1997 bv->bv_cnt++; 1998 } 1999 2000 /* 2001 * Look up a buffer using the buffer tries. 2002 */ 2003 struct buf * 2004 gbincore(struct bufobj *bo, daddr_t lblkno) 2005 { 2006 struct buf *bp; 2007 2008 ASSERT_BO_LOCKED(bo); 2009 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2010 if (bp != NULL) 2011 return (bp); 2012 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2013 } 2014 2015 /* 2016 * Associate a buffer with a vnode. 2017 */ 2018 void 2019 bgetvp(struct vnode *vp, struct buf *bp) 2020 { 2021 struct bufobj *bo; 2022 2023 bo = &vp->v_bufobj; 2024 ASSERT_BO_WLOCKED(bo); 2025 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2026 2027 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2028 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2029 ("bgetvp: bp already attached! %p", bp)); 2030 2031 vhold(vp); 2032 bp->b_vp = vp; 2033 bp->b_bufobj = bo; 2034 /* 2035 * Insert onto list for new vnode. 2036 */ 2037 buf_vlist_add(bp, bo, BX_VNCLEAN); 2038 } 2039 2040 /* 2041 * Disassociate a buffer from a vnode. 2042 */ 2043 void 2044 brelvp(struct buf *bp) 2045 { 2046 struct bufobj *bo; 2047 struct vnode *vp; 2048 2049 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2050 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2051 2052 /* 2053 * Delete from old vnode list, if on one. 2054 */ 2055 vp = bp->b_vp; /* XXX */ 2056 bo = bp->b_bufobj; 2057 BO_LOCK(bo); 2058 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2059 buf_vlist_remove(bp); 2060 else 2061 panic("brelvp: Buffer %p not on queue.", bp); 2062 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2063 bo->bo_flag &= ~BO_ONWORKLST; 2064 mtx_lock(&sync_mtx); 2065 LIST_REMOVE(bo, bo_synclist); 2066 syncer_worklist_len--; 2067 mtx_unlock(&sync_mtx); 2068 } 2069 bp->b_vp = NULL; 2070 bp->b_bufobj = NULL; 2071 BO_UNLOCK(bo); 2072 vdrop(vp); 2073 } 2074 2075 /* 2076 * Add an item to the syncer work queue. 
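 * The bufobj is (re)inserted into the slot the syncer will reach roughly
 * "delay" seconds from now, i.e. (syncer_delayno + delay) & syncer_mask,
 * with delay clamped to syncer_maxdelay - 2.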
2077 */ 2078 static void 2079 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2080 { 2081 int slot; 2082 2083 ASSERT_BO_WLOCKED(bo); 2084 2085 mtx_lock(&sync_mtx); 2086 if (bo->bo_flag & BO_ONWORKLST) 2087 LIST_REMOVE(bo, bo_synclist); 2088 else { 2089 bo->bo_flag |= BO_ONWORKLST; 2090 syncer_worklist_len++; 2091 } 2092 2093 if (delay > syncer_maxdelay - 2) 2094 delay = syncer_maxdelay - 2; 2095 slot = (syncer_delayno + delay) & syncer_mask; 2096 2097 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2098 mtx_unlock(&sync_mtx); 2099 } 2100 2101 static int 2102 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2103 { 2104 int error, len; 2105 2106 mtx_lock(&sync_mtx); 2107 len = syncer_worklist_len - sync_vnode_count; 2108 mtx_unlock(&sync_mtx); 2109 error = SYSCTL_OUT(req, &len, sizeof(len)); 2110 return (error); 2111 } 2112 2113 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2114 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2115 2116 static struct proc *updateproc; 2117 static void sched_sync(void); 2118 static struct kproc_desc up_kp = { 2119 "syncer", 2120 sched_sync, 2121 &updateproc 2122 }; 2123 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2124 2125 static int 2126 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2127 { 2128 struct vnode *vp; 2129 struct mount *mp; 2130 2131 *bo = LIST_FIRST(slp); 2132 if (*bo == NULL) 2133 return (0); 2134 vp = bo2vnode(*bo); 2135 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2136 return (1); 2137 /* 2138 * We use vhold in case the vnode does not 2139 * successfully sync. vhold prevents the vnode from 2140 * going away when we unlock the sync_mtx so that 2141 * we can acquire the vnode interlock. 2142 */ 2143 vholdl(vp); 2144 mtx_unlock(&sync_mtx); 2145 VI_UNLOCK(vp); 2146 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2147 vdrop(vp); 2148 mtx_lock(&sync_mtx); 2149 return (*bo == LIST_FIRST(slp)); 2150 } 2151 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2152 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2153 VOP_UNLOCK(vp, 0); 2154 vn_finished_write(mp); 2155 BO_LOCK(*bo); 2156 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2157 /* 2158 * Put us back on the worklist. The worklist 2159 * routine will remove us from our current 2160 * position and then add us back in at a later 2161 * position. 2162 */ 2163 vn_syncer_add_to_worklist(*bo, syncdelay); 2164 } 2165 BO_UNLOCK(*bo); 2166 vdrop(vp); 2167 mtx_lock(&sync_mtx); 2168 return (0); 2169 } 2170 2171 static int first_printf = 1; 2172 2173 /* 2174 * System filesystem synchronizer daemon. 
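 * Once a second the daemon walks the worklist slot for the current second
 * and calls VOP_FSYNC(..., MNT_LAZY, ...) on each vnode found there;
 * vnodes it cannot lock right away are pushed into the next slot and
 * retried later.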
2175 */ 2176 static void 2177 sched_sync(void) 2178 { 2179 struct synclist *next, *slp; 2180 struct bufobj *bo; 2181 long starttime; 2182 struct thread *td = curthread; 2183 int last_work_seen; 2184 int net_worklist_len; 2185 int syncer_final_iter; 2186 int error; 2187 2188 last_work_seen = 0; 2189 syncer_final_iter = 0; 2190 syncer_state = SYNCER_RUNNING; 2191 starttime = time_uptime; 2192 td->td_pflags |= TDP_NORUNNINGBUF; 2193 2194 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2195 SHUTDOWN_PRI_LAST); 2196 2197 mtx_lock(&sync_mtx); 2198 for (;;) { 2199 if (syncer_state == SYNCER_FINAL_DELAY && 2200 syncer_final_iter == 0) { 2201 mtx_unlock(&sync_mtx); 2202 kproc_suspend_check(td->td_proc); 2203 mtx_lock(&sync_mtx); 2204 } 2205 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2206 if (syncer_state != SYNCER_RUNNING && 2207 starttime != time_uptime) { 2208 if (first_printf) { 2209 printf("\nSyncing disks, vnodes remaining... "); 2210 first_printf = 0; 2211 } 2212 printf("%d ", net_worklist_len); 2213 } 2214 starttime = time_uptime; 2215 2216 /* 2217 * Push files whose dirty time has expired. Be careful 2218 * of interrupt race on slp queue. 2219 * 2220 * Skip over empty worklist slots when shutting down. 2221 */ 2222 do { 2223 slp = &syncer_workitem_pending[syncer_delayno]; 2224 syncer_delayno += 1; 2225 if (syncer_delayno == syncer_maxdelay) 2226 syncer_delayno = 0; 2227 next = &syncer_workitem_pending[syncer_delayno]; 2228 /* 2229 * If the worklist has wrapped since the 2230 * it was emptied of all but syncer vnodes, 2231 * switch to the FINAL_DELAY state and run 2232 * for one more second. 2233 */ 2234 if (syncer_state == SYNCER_SHUTTING_DOWN && 2235 net_worklist_len == 0 && 2236 last_work_seen == syncer_delayno) { 2237 syncer_state = SYNCER_FINAL_DELAY; 2238 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2239 } 2240 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2241 syncer_worklist_len > 0); 2242 2243 /* 2244 * Keep track of the last time there was anything 2245 * on the worklist other than syncer vnodes. 2246 * Return to the SHUTTING_DOWN state if any 2247 * new work appears. 2248 */ 2249 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2250 last_work_seen = syncer_delayno; 2251 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2252 syncer_state = SYNCER_SHUTTING_DOWN; 2253 while (!LIST_EMPTY(slp)) { 2254 error = sync_vnode(slp, &bo, td); 2255 if (error == 1) { 2256 LIST_REMOVE(bo, bo_synclist); 2257 LIST_INSERT_HEAD(next, bo, bo_synclist); 2258 continue; 2259 } 2260 2261 if (first_printf == 0) { 2262 /* 2263 * Drop the sync mutex, because some watchdog 2264 * drivers need to sleep while patting 2265 */ 2266 mtx_unlock(&sync_mtx); 2267 wdog_kern_pat(WD_LASTVAL); 2268 mtx_lock(&sync_mtx); 2269 } 2270 2271 } 2272 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2273 syncer_final_iter--; 2274 /* 2275 * The variable rushjob allows the kernel to speed up the 2276 * processing of the filesystem syncer process. A rushjob 2277 * value of N tells the filesystem syncer to process the next 2278 * N seconds worth of work on its queue ASAP. Currently rushjob 2279 * is used by the soft update code to speed up the filesystem 2280 * syncer process when the incore state is getting so far 2281 * ahead of the disk that the kernel memory pool is being 2282 * threatened with exhaustion. 
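 * While rushjob is nonzero the syncer skips its normal one-second sleep,
 * consuming one unit of rushjob per pass, so the backlog drains as fast
 * as the underlying I/O allows.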
2283 */ 2284 if (rushjob > 0) { 2285 rushjob -= 1; 2286 continue; 2287 } 2288 /* 2289 * Just sleep for a short period of time between 2290 * iterations when shutting down to allow some I/O 2291 * to happen. 2292 * 2293 * If it has taken us less than a second to process the 2294 * current work, then wait. Otherwise start right over 2295 * again. We can still lose time if any single round 2296 * takes more than two seconds, but it does not really 2297 * matter as we are just trying to generally pace the 2298 * filesystem activity. 2299 */ 2300 if (syncer_state != SYNCER_RUNNING || 2301 time_uptime == starttime) { 2302 thread_lock(td); 2303 sched_prio(td, PPAUSE); 2304 thread_unlock(td); 2305 } 2306 if (syncer_state != SYNCER_RUNNING) 2307 cv_timedwait(&sync_wakeup, &sync_mtx, 2308 hz / SYNCER_SHUTDOWN_SPEEDUP); 2309 else if (time_uptime == starttime) 2310 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2311 } 2312 } 2313 2314 /* 2315 * Request the syncer daemon to speed up its work. 2316 * We never push it to speed up more than half of its 2317 * normal turn time, otherwise it could take over the cpu. 2318 */ 2319 int 2320 speedup_syncer(void) 2321 { 2322 int ret = 0; 2323 2324 mtx_lock(&sync_mtx); 2325 if (rushjob < syncdelay / 2) { 2326 rushjob += 1; 2327 stat_rush_requests += 1; 2328 ret = 1; 2329 } 2330 mtx_unlock(&sync_mtx); 2331 cv_broadcast(&sync_wakeup); 2332 return (ret); 2333 } 2334 2335 /* 2336 * Tell the syncer to speed up its work and run though its work 2337 * list several times, then tell it to shut down. 2338 */ 2339 static void 2340 syncer_shutdown(void *arg, int howto) 2341 { 2342 2343 if (howto & RB_NOSYNC) 2344 return; 2345 mtx_lock(&sync_mtx); 2346 syncer_state = SYNCER_SHUTTING_DOWN; 2347 rushjob = 0; 2348 mtx_unlock(&sync_mtx); 2349 cv_broadcast(&sync_wakeup); 2350 kproc_shutdown(arg, howto); 2351 } 2352 2353 void 2354 syncer_suspend(void) 2355 { 2356 2357 syncer_shutdown(updateproc, 0); 2358 } 2359 2360 void 2361 syncer_resume(void) 2362 { 2363 2364 mtx_lock(&sync_mtx); 2365 first_printf = 1; 2366 syncer_state = SYNCER_RUNNING; 2367 mtx_unlock(&sync_mtx); 2368 cv_broadcast(&sync_wakeup); 2369 kproc_resume(updateproc); 2370 } 2371 2372 /* 2373 * Reassign a buffer from one vnode to another. 2374 * Used to assign file specific control information 2375 * (indirect blocks) to the vnode to which they belong. 2376 */ 2377 void 2378 reassignbuf(struct buf *bp) 2379 { 2380 struct vnode *vp; 2381 struct bufobj *bo; 2382 int delay; 2383 #ifdef INVARIANTS 2384 struct bufv *bv; 2385 #endif 2386 2387 vp = bp->b_vp; 2388 bo = bp->b_bufobj; 2389 ++reassignbufcalls; 2390 2391 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2392 bp, bp->b_vp, bp->b_flags); 2393 /* 2394 * B_PAGING flagged buffers cannot be reassigned because their vp 2395 * is not fully linked in. 2396 */ 2397 if (bp->b_flags & B_PAGING) 2398 panic("cannot reassign paging buffer"); 2399 2400 /* 2401 * Delete from old vnode list, if on one. 2402 */ 2403 BO_LOCK(bo); 2404 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2405 buf_vlist_remove(bp); 2406 else 2407 panic("reassignbuf: Buffer %p not on queue.", bp); 2408 /* 2409 * If dirty, put on list of dirty buffers; otherwise insert onto list 2410 * of clean buffers. 
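 * Adding the first dirty buffer also places the bufobj on the syncer
 * worklist with a delay chosen by vnode type (dirdelay for VDIR,
 * metadelay for VCHR, filedelay otherwise); removing the last dirty
 * buffer takes it back off.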
2411 */ 2412 if (bp->b_flags & B_DELWRI) { 2413 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2414 switch (vp->v_type) { 2415 case VDIR: 2416 delay = dirdelay; 2417 break; 2418 case VCHR: 2419 delay = metadelay; 2420 break; 2421 default: 2422 delay = filedelay; 2423 } 2424 vn_syncer_add_to_worklist(bo, delay); 2425 } 2426 buf_vlist_add(bp, bo, BX_VNDIRTY); 2427 } else { 2428 buf_vlist_add(bp, bo, BX_VNCLEAN); 2429 2430 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2431 mtx_lock(&sync_mtx); 2432 LIST_REMOVE(bo, bo_synclist); 2433 syncer_worklist_len--; 2434 mtx_unlock(&sync_mtx); 2435 bo->bo_flag &= ~BO_ONWORKLST; 2436 } 2437 } 2438 #ifdef INVARIANTS 2439 bv = &bo->bo_clean; 2440 bp = TAILQ_FIRST(&bv->bv_hd); 2441 KASSERT(bp == NULL || bp->b_bufobj == bo, 2442 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2443 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2444 KASSERT(bp == NULL || bp->b_bufobj == bo, 2445 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2446 bv = &bo->bo_dirty; 2447 bp = TAILQ_FIRST(&bv->bv_hd); 2448 KASSERT(bp == NULL || bp->b_bufobj == bo, 2449 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2450 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2451 KASSERT(bp == NULL || bp->b_bufobj == bo, 2452 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2453 #endif 2454 BO_UNLOCK(bo); 2455 } 2456 2457 static void 2458 v_init_counters(struct vnode *vp) 2459 { 2460 2461 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2462 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2463 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2464 2465 refcount_init(&vp->v_holdcnt, 1); 2466 refcount_init(&vp->v_usecount, 1); 2467 } 2468 2469 static void 2470 v_incr_usecount_locked(struct vnode *vp) 2471 { 2472 2473 ASSERT_VI_LOCKED(vp, __func__); 2474 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2475 VNASSERT(vp->v_usecount == 0, vp, 2476 ("vnode with usecount and VI_OWEINACT set")); 2477 vp->v_iflag &= ~VI_OWEINACT; 2478 } 2479 refcount_acquire(&vp->v_usecount); 2480 v_incr_devcount(vp); 2481 } 2482 2483 /* 2484 * Increment the use count on the vnode, taking care to reference 2485 * the driver's usecount if this is a chardev. 2486 */ 2487 static void 2488 v_incr_usecount(struct vnode *vp) 2489 { 2490 2491 ASSERT_VI_UNLOCKED(vp, __func__); 2492 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2493 2494 if (vp->v_type != VCHR && 2495 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2496 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2497 ("vnode with usecount and VI_OWEINACT set")); 2498 } else { 2499 VI_LOCK(vp); 2500 v_incr_usecount_locked(vp); 2501 VI_UNLOCK(vp); 2502 } 2503 } 2504 2505 /* 2506 * Increment si_usecount of the associated device, if any. 2507 */ 2508 static void 2509 v_incr_devcount(struct vnode *vp) 2510 { 2511 2512 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2513 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2514 dev_lock(); 2515 vp->v_rdev->si_usecount++; 2516 dev_unlock(); 2517 } 2518 } 2519 2520 /* 2521 * Decrement si_usecount of the associated device, if any. 2522 */ 2523 static void 2524 v_decr_devcount(struct vnode *vp) 2525 { 2526 2527 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2528 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2529 dev_lock(); 2530 vp->v_rdev->si_usecount--; 2531 dev_unlock(); 2532 } 2533 } 2534 2535 /* 2536 * Grab a particular vnode from the free list, increment its 2537 * reference count and lock it. VI_DOOMED is set if the vnode 2538 * is being destroyed. 
Only callers who specify LK_RETRY will 2539 * see doomed vnodes. If inactive processing was delayed in 2540 * vput try to do it here. 2541 * 2542 * Notes on lockless counter manipulation: 2543 * _vhold, vputx and other routines make various decisions based 2544 * on either holdcnt or usecount being 0. As long as either counter 2545 * is not transitioning 0->1 nor 1->0, the manipulation can be done 2546 * with atomic operations. Otherwise the interlock is taken covering 2547 * both the atomic and additional actions. 2548 */ 2549 int 2550 vget(struct vnode *vp, int flags, struct thread *td) 2551 { 2552 int error, oweinact; 2553 2554 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2555 ("vget: invalid lock operation")); 2556 2557 if ((flags & LK_INTERLOCK) != 0) 2558 ASSERT_VI_LOCKED(vp, __func__); 2559 else 2560 ASSERT_VI_UNLOCKED(vp, __func__); 2561 if ((flags & LK_VNHELD) != 0) 2562 VNASSERT((vp->v_holdcnt > 0), vp, 2563 ("vget: LK_VNHELD passed but vnode not held")); 2564 2565 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2566 2567 if ((flags & LK_VNHELD) == 0) 2568 _vhold(vp, (flags & LK_INTERLOCK) != 0); 2569 2570 if ((error = vn_lock(vp, flags)) != 0) { 2571 vdrop(vp); 2572 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2573 vp); 2574 return (error); 2575 } 2576 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2577 panic("vget: vn_lock failed to return ENOENT\n"); 2578 /* 2579 * We don't guarantee that any particular close will 2580 * trigger inactive processing so just make a best effort 2581 * here at preventing a reference to a removed file. If 2582 * we don't succeed no harm is done. 2583 * 2584 * Upgrade our holdcnt to a usecount. 2585 */ 2586 if (vp->v_type == VCHR || 2587 !refcount_acquire_if_not_zero(&vp->v_usecount)) { 2588 VI_LOCK(vp); 2589 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2590 oweinact = 0; 2591 } else { 2592 oweinact = 1; 2593 vp->v_iflag &= ~VI_OWEINACT; 2594 } 2595 refcount_acquire(&vp->v_usecount); 2596 v_incr_devcount(vp); 2597 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2598 (flags & LK_NOWAIT) == 0) 2599 vinactive(vp, td); 2600 VI_UNLOCK(vp); 2601 } 2602 return (0); 2603 } 2604 2605 /* 2606 * Increase the reference (use) and hold count of a vnode. 2607 * This will also remove the vnode from the free list if it is presently free. 2608 */ 2609 void 2610 vref(struct vnode *vp) 2611 { 2612 2613 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2614 _vhold(vp, false); 2615 v_incr_usecount(vp); 2616 } 2617 2618 void 2619 vrefl(struct vnode *vp) 2620 { 2621 2622 ASSERT_VI_LOCKED(vp, __func__); 2623 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2624 _vhold(vp, true); 2625 v_incr_usecount_locked(vp); 2626 } 2627 2628 void 2629 vrefact(struct vnode *vp) 2630 { 2631 2632 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2633 if (__predict_false(vp->v_type == VCHR)) { 2634 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2635 ("%s: wrong ref counts", __func__)); 2636 vref(vp); 2637 return; 2638 } 2639 #ifdef INVARIANTS 2640 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 2641 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2642 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2643 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2644 #else 2645 refcount_acquire(&vp->v_holdcnt); 2646 refcount_acquire(&vp->v_usecount); 2647 #endif 2648 } 2649 2650 /* 2651 * Return reference count of a vnode. 
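 * (v_usecount is returned without taking the vnode interlock.)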
2652 * 2653 * The results of this call are only guaranteed when some mechanism is used to 2654 * stop other processes from gaining references to the vnode. This may be the 2655 * case if the caller holds the only reference. This is also useful when stale 2656 * data is acceptable as race conditions may be accounted for by some other 2657 * means. 2658 */ 2659 int 2660 vrefcnt(struct vnode *vp) 2661 { 2662 2663 return (vp->v_usecount); 2664 } 2665 2666 #define VPUTX_VRELE 1 2667 #define VPUTX_VPUT 2 2668 #define VPUTX_VUNREF 3 2669 2670 /* 2671 * Decrement the use and hold counts for a vnode. 2672 * 2673 * See an explanation near vget() as to why atomic operation is safe. 2674 */ 2675 static void 2676 vputx(struct vnode *vp, int func) 2677 { 2678 int error; 2679 2680 KASSERT(vp != NULL, ("vputx: null vp")); 2681 if (func == VPUTX_VUNREF) 2682 ASSERT_VOP_LOCKED(vp, "vunref"); 2683 else if (func == VPUTX_VPUT) 2684 ASSERT_VOP_LOCKED(vp, "vput"); 2685 else 2686 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2687 ASSERT_VI_UNLOCKED(vp, __func__); 2688 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2689 2690 if (vp->v_type != VCHR && 2691 refcount_release_if_not_last(&vp->v_usecount)) { 2692 if (func == VPUTX_VPUT) 2693 VOP_UNLOCK(vp, 0); 2694 vdrop(vp); 2695 return; 2696 } 2697 2698 VI_LOCK(vp); 2699 2700 /* 2701 * We want to hold the vnode until the inactive finishes to 2702 * prevent vgone() races. We drop the use count here and the 2703 * hold count below when we're done. 2704 */ 2705 if (!refcount_release(&vp->v_usecount) || 2706 (vp->v_iflag & VI_DOINGINACT)) { 2707 if (func == VPUTX_VPUT) 2708 VOP_UNLOCK(vp, 0); 2709 v_decr_devcount(vp); 2710 vdropl(vp); 2711 return; 2712 } 2713 2714 v_decr_devcount(vp); 2715 2716 error = 0; 2717 2718 if (vp->v_usecount != 0) { 2719 vn_printf(vp, "vputx: usecount not zero for vnode "); 2720 panic("vputx: usecount not zero"); 2721 } 2722 2723 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2724 2725 /* 2726 * We must call VOP_INACTIVE with the node locked. Mark 2727 * as VI_DOINGINACT to avoid recursion. 2728 */ 2729 vp->v_iflag |= VI_OWEINACT; 2730 switch (func) { 2731 case VPUTX_VRELE: 2732 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2733 VI_LOCK(vp); 2734 break; 2735 case VPUTX_VPUT: 2736 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2737 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2738 LK_NOWAIT); 2739 VI_LOCK(vp); 2740 } 2741 break; 2742 case VPUTX_VUNREF: 2743 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2744 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2745 VI_LOCK(vp); 2746 } 2747 break; 2748 } 2749 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 2750 ("vnode with usecount and VI_OWEINACT set")); 2751 if (error == 0) { 2752 if (vp->v_iflag & VI_OWEINACT) 2753 vinactive(vp, curthread); 2754 if (func != VPUTX_VUNREF) 2755 VOP_UNLOCK(vp, 0); 2756 } 2757 vdropl(vp); 2758 } 2759 2760 /* 2761 * Vnode put/release. 2762 * If count drops to zero, call inactive routine and return to freelist. 2763 */ 2764 void 2765 vrele(struct vnode *vp) 2766 { 2767 2768 vputx(vp, VPUTX_VRELE); 2769 } 2770 2771 /* 2772 * Release an already locked vnode. This give the same effects as 2773 * unlock+vrele(), but takes less time and avoids releasing and 2774 * re-aquiring the lock (as vrele() acquires the lock internally.) 2775 */ 2776 void 2777 vput(struct vnode *vp) 2778 { 2779 2780 vputx(vp, VPUTX_VPUT); 2781 } 2782 2783 /* 2784 * Release an exclusively locked vnode. Do not unlock the vnode lock. 
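 * The caller keeps the vnode lock; only the use and hold references are
 * dropped.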
2785 */ 2786 void 2787 vunref(struct vnode *vp) 2788 { 2789 2790 vputx(vp, VPUTX_VUNREF); 2791 } 2792 2793 /* 2794 * Increase the hold count and activate if this is the first reference. 2795 */ 2796 void 2797 _vhold(struct vnode *vp, bool locked) 2798 { 2799 struct mount *mp; 2800 2801 if (locked) 2802 ASSERT_VI_LOCKED(vp, __func__); 2803 else 2804 ASSERT_VI_UNLOCKED(vp, __func__); 2805 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2806 if (!locked) { 2807 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 2808 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2809 ("_vhold: vnode with holdcnt is free")); 2810 return; 2811 } 2812 VI_LOCK(vp); 2813 } 2814 if ((vp->v_iflag & VI_FREE) == 0) { 2815 refcount_acquire(&vp->v_holdcnt); 2816 if (!locked) 2817 VI_UNLOCK(vp); 2818 return; 2819 } 2820 VNASSERT(vp->v_holdcnt == 0, vp, 2821 ("%s: wrong hold count", __func__)); 2822 VNASSERT(vp->v_op != NULL, vp, 2823 ("%s: vnode already reclaimed.", __func__)); 2824 /* 2825 * Remove a vnode from the free list, mark it as in use, 2826 * and put it on the active list. 2827 */ 2828 VNASSERT(vp->v_mount != NULL, vp, 2829 ("_vhold: vnode not on per mount vnode list")); 2830 mp = vp->v_mount; 2831 mtx_lock(&mp->mnt_listmtx); 2832 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 2833 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 2834 mp->mnt_tmpfreevnodelistsize--; 2835 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 2836 } else { 2837 mtx_lock(&vnode_free_list_mtx); 2838 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2839 freevnodes--; 2840 mtx_unlock(&vnode_free_list_mtx); 2841 } 2842 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2843 ("Activating already active vnode")); 2844 vp->v_iflag &= ~VI_FREE; 2845 vp->v_iflag |= VI_ACTIVE; 2846 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2847 mp->mnt_activevnodelistsize++; 2848 mtx_unlock(&mp->mnt_listmtx); 2849 refcount_acquire(&vp->v_holdcnt); 2850 if (!locked) 2851 VI_UNLOCK(vp); 2852 } 2853 2854 /* 2855 * Drop the hold count of the vnode. If this is the last reference to 2856 * the vnode we place it on the free list unless it has been vgone'd 2857 * (marked VI_DOOMED) in which case we will free it. 2858 * 2859 * Because the vnode vm object keeps a hold reference on the vnode if 2860 * there is at least one resident non-cached page, the vnode cannot 2861 * leave the active list without the page cleanup done. 2862 */ 2863 void 2864 _vdrop(struct vnode *vp, bool locked) 2865 { 2866 struct bufobj *bo; 2867 struct mount *mp; 2868 int active; 2869 2870 if (locked) 2871 ASSERT_VI_LOCKED(vp, __func__); 2872 else 2873 ASSERT_VI_UNLOCKED(vp, __func__); 2874 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2875 if ((int)vp->v_holdcnt <= 0) 2876 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2877 if (!locked) { 2878 if (refcount_release_if_not_last(&vp->v_holdcnt)) 2879 return; 2880 VI_LOCK(vp); 2881 } 2882 if (refcount_release(&vp->v_holdcnt) == 0) { 2883 VI_UNLOCK(vp); 2884 return; 2885 } 2886 if ((vp->v_iflag & VI_DOOMED) == 0) { 2887 /* 2888 * Mark a vnode as free: remove it from its active list 2889 * and put it up for recycling on the freelist. 
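 * Vnodes that belong to a mount are parked on that mount's tmpfree list
 * and migrated to the global free list in batches, which keeps
 * vnode_free_list_mtx off the common path.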
2890 */ 2891 VNASSERT(vp->v_op != NULL, vp, 2892 ("vdropl: vnode already reclaimed.")); 2893 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2894 ("vnode already free")); 2895 VNASSERT(vp->v_holdcnt == 0, vp, 2896 ("vdropl: freeing when we shouldn't")); 2897 active = vp->v_iflag & VI_ACTIVE; 2898 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2899 vp->v_iflag &= ~VI_ACTIVE; 2900 mp = vp->v_mount; 2901 if (mp != NULL) { 2902 mtx_lock(&mp->mnt_listmtx); 2903 if (active) { 2904 TAILQ_REMOVE(&mp->mnt_activevnodelist, 2905 vp, v_actfreelist); 2906 mp->mnt_activevnodelistsize--; 2907 } 2908 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, 2909 vp, v_actfreelist); 2910 mp->mnt_tmpfreevnodelistsize++; 2911 vp->v_iflag |= VI_FREE; 2912 vp->v_mflag |= VMP_TMPMNTFREELIST; 2913 VI_UNLOCK(vp); 2914 if (mp->mnt_tmpfreevnodelistsize >= 2915 mnt_free_list_batch) 2916 vnlru_return_batch_locked(mp); 2917 mtx_unlock(&mp->mnt_listmtx); 2918 } else { 2919 VNASSERT(active == 0, vp, 2920 ("vdropl: active vnode not on per mount " 2921 "vnode list")); 2922 mtx_lock(&vnode_free_list_mtx); 2923 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 2924 v_actfreelist); 2925 freevnodes++; 2926 vp->v_iflag |= VI_FREE; 2927 VI_UNLOCK(vp); 2928 mtx_unlock(&vnode_free_list_mtx); 2929 } 2930 } else { 2931 VI_UNLOCK(vp); 2932 counter_u64_add(free_owe_inact, 1); 2933 } 2934 return; 2935 } 2936 /* 2937 * The vnode has been marked for destruction, so free it. 2938 * 2939 * The vnode will be returned to the zone where it will 2940 * normally remain until it is needed for another vnode. We 2941 * need to cleanup (or verify that the cleanup has already 2942 * been done) any residual data left from its current use 2943 * so as not to contaminate the freshly allocated vnode. 2944 */ 2945 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2946 atomic_subtract_long(&numvnodes, 1); 2947 bo = &vp->v_bufobj; 2948 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2949 ("cleaned vnode still on the free list.")); 2950 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2951 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2952 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2953 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2954 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2955 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2956 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2957 ("clean blk trie not empty")); 2958 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2959 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2960 ("dirty blk trie not empty")); 2961 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2962 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2963 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2964 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2965 ("Dangling rangelock waiters")); 2966 VI_UNLOCK(vp); 2967 #ifdef MAC 2968 mac_vnode_destroy(vp); 2969 #endif 2970 if (vp->v_pollinfo != NULL) { 2971 destroy_vpollinfo(vp->v_pollinfo); 2972 vp->v_pollinfo = NULL; 2973 } 2974 #ifdef INVARIANTS 2975 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. 
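 * Poison it here so stale references trip those checks.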
*/ 2976 vp->v_op = NULL; 2977 #endif 2978 vp->v_mountedhere = NULL; 2979 vp->v_unpcb = NULL; 2980 vp->v_rdev = NULL; 2981 vp->v_fifoinfo = NULL; 2982 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 2983 vp->v_iflag = 0; 2984 vp->v_vflag = 0; 2985 bo->bo_flag = 0; 2986 uma_zfree(vnode_zone, vp); 2987 } 2988 2989 /* 2990 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2991 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2992 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2993 * failed lock upgrade. 2994 */ 2995 void 2996 vinactive(struct vnode *vp, struct thread *td) 2997 { 2998 struct vm_object *obj; 2999 3000 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3001 ASSERT_VI_LOCKED(vp, "vinactive"); 3002 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3003 ("vinactive: recursed on VI_DOINGINACT")); 3004 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3005 vp->v_iflag |= VI_DOINGINACT; 3006 vp->v_iflag &= ~VI_OWEINACT; 3007 VI_UNLOCK(vp); 3008 /* 3009 * Before moving off the active list, we must be sure that any 3010 * modified pages are converted into the vnode's dirty 3011 * buffers, since these will no longer be checked once the 3012 * vnode is on the inactive list. 3013 * 3014 * The write-out of the dirty pages is asynchronous. At the 3015 * point that VOP_INACTIVE() is called, there could still be 3016 * pending I/O and dirty pages in the object. 3017 */ 3018 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3019 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 3020 VM_OBJECT_WLOCK(obj); 3021 vm_object_page_clean(obj, 0, 0, 0); 3022 VM_OBJECT_WUNLOCK(obj); 3023 } 3024 VOP_INACTIVE(vp, td); 3025 VI_LOCK(vp); 3026 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3027 ("vinactive: lost VI_DOINGINACT")); 3028 vp->v_iflag &= ~VI_DOINGINACT; 3029 } 3030 3031 /* 3032 * Remove any vnodes in the vnode table belonging to mount point mp. 3033 * 3034 * If FORCECLOSE is not specified, there should not be any active ones, 3035 * return error if any are found (nb: this is a user error, not a 3036 * system error). If FORCECLOSE is specified, detach any active vnodes 3037 * that are found. 3038 * 3039 * If WRITECLOSE is set, only flush out regular file vnodes open for 3040 * writing. 3041 * 3042 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3043 * 3044 * `rootrefs' specifies the base reference count for the root vnode 3045 * of this filesystem. The root vnode is considered busy if its 3046 * v_usecount exceeds this value. On a successful return, vflush(, td) 3047 * will call vrele() on the root vnode exactly rootrefs times. 3048 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3049 * be zero. 3050 */ 3051 #ifdef DIAGNOSTIC 3052 static int busyprt = 0; /* print out busy vnodes */ 3053 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3054 #endif 3055 3056 int 3057 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3058 { 3059 struct vnode *vp, *mvp, *rootvp = NULL; 3060 struct vattr vattr; 3061 int busy = 0, error; 3062 3063 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3064 rootrefs, flags); 3065 if (rootrefs > 0) { 3066 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3067 ("vflush: bad args")); 3068 /* 3069 * Get the filesystem root vnode. We can vput() it 3070 * immediately, since with rootrefs > 0, it won't go away. 
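 * We only need the pointer here; the caller's rootrefs references keep
 * the vnode alive and are released with vrele() at the end on success.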
3071 */ 3072 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3073 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3074 __func__, error); 3075 return (error); 3076 } 3077 vput(rootvp); 3078 } 3079 loop: 3080 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3081 vholdl(vp); 3082 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3083 if (error) { 3084 vdrop(vp); 3085 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3086 goto loop; 3087 } 3088 /* 3089 * Skip over a vnodes marked VV_SYSTEM. 3090 */ 3091 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3092 VOP_UNLOCK(vp, 0); 3093 vdrop(vp); 3094 continue; 3095 } 3096 /* 3097 * If WRITECLOSE is set, flush out unlinked but still open 3098 * files (even if open only for reading) and regular file 3099 * vnodes open for writing. 3100 */ 3101 if (flags & WRITECLOSE) { 3102 if (vp->v_object != NULL) { 3103 VM_OBJECT_WLOCK(vp->v_object); 3104 vm_object_page_clean(vp->v_object, 0, 0, 0); 3105 VM_OBJECT_WUNLOCK(vp->v_object); 3106 } 3107 error = VOP_FSYNC(vp, MNT_WAIT, td); 3108 if (error != 0) { 3109 VOP_UNLOCK(vp, 0); 3110 vdrop(vp); 3111 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3112 return (error); 3113 } 3114 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3115 VI_LOCK(vp); 3116 3117 if ((vp->v_type == VNON || 3118 (error == 0 && vattr.va_nlink > 0)) && 3119 (vp->v_writecount == 0 || vp->v_type != VREG)) { 3120 VOP_UNLOCK(vp, 0); 3121 vdropl(vp); 3122 continue; 3123 } 3124 } else 3125 VI_LOCK(vp); 3126 /* 3127 * With v_usecount == 0, all we need to do is clear out the 3128 * vnode data structures and we are done. 3129 * 3130 * If FORCECLOSE is set, forcibly close the vnode. 3131 */ 3132 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3133 vgonel(vp); 3134 } else { 3135 busy++; 3136 #ifdef DIAGNOSTIC 3137 if (busyprt) 3138 vn_printf(vp, "vflush: busy vnode "); 3139 #endif 3140 } 3141 VOP_UNLOCK(vp, 0); 3142 vdropl(vp); 3143 } 3144 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3145 /* 3146 * If just the root vnode is busy, and if its refcount 3147 * is equal to `rootrefs', then go ahead and kill it. 3148 */ 3149 VI_LOCK(rootvp); 3150 KASSERT(busy > 0, ("vflush: not busy")); 3151 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3152 ("vflush: usecount %d < rootrefs %d", 3153 rootvp->v_usecount, rootrefs)); 3154 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3155 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3156 vgone(rootvp); 3157 VOP_UNLOCK(rootvp, 0); 3158 busy = 0; 3159 } else 3160 VI_UNLOCK(rootvp); 3161 } 3162 if (busy) { 3163 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3164 busy); 3165 return (EBUSY); 3166 } 3167 for (; rootrefs > 0; rootrefs--) 3168 vrele(rootvp); 3169 return (0); 3170 } 3171 3172 /* 3173 * Recycle an unused vnode to the front of the free list. 3174 */ 3175 int 3176 vrecycle(struct vnode *vp) 3177 { 3178 int recycled; 3179 3180 VI_LOCK(vp); 3181 recycled = vrecyclel(vp); 3182 VI_UNLOCK(vp); 3183 return (recycled); 3184 } 3185 3186 /* 3187 * vrecycle, with the vp interlock held. 3188 */ 3189 int 3190 vrecyclel(struct vnode *vp) 3191 { 3192 int recycled; 3193 3194 ASSERT_VOP_ELOCKED(vp, __func__); 3195 ASSERT_VI_LOCKED(vp, __func__); 3196 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3197 recycled = 0; 3198 if (vp->v_usecount == 0) { 3199 recycled = 1; 3200 vgonel(vp); 3201 } 3202 return (recycled); 3203 } 3204 3205 /* 3206 * Eliminate all activity associated with a vnode 3207 * in preparation for reuse. 
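 * The vnode must be exclusively locked by the caller; on return it has
 * been reclaimed, switched to dead_vnodeops and marked VBAD.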
3208 */ 3209 void 3210 vgone(struct vnode *vp) 3211 { 3212 VI_LOCK(vp); 3213 vgonel(vp); 3214 VI_UNLOCK(vp); 3215 } 3216 3217 static void 3218 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3219 struct vnode *lowervp __unused) 3220 { 3221 } 3222 3223 /* 3224 * Notify upper mounts about reclaimed or unlinked vnode. 3225 */ 3226 void 3227 vfs_notify_upper(struct vnode *vp, int event) 3228 { 3229 static struct vfsops vgonel_vfsops = { 3230 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3231 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3232 }; 3233 struct mount *mp, *ump, *mmp; 3234 3235 mp = vp->v_mount; 3236 if (mp == NULL) 3237 return; 3238 3239 MNT_ILOCK(mp); 3240 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3241 goto unlock; 3242 MNT_IUNLOCK(mp); 3243 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3244 mmp->mnt_op = &vgonel_vfsops; 3245 mmp->mnt_kern_flag |= MNTK_MARKER; 3246 MNT_ILOCK(mp); 3247 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3248 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3249 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3250 ump = TAILQ_NEXT(ump, mnt_upper_link); 3251 continue; 3252 } 3253 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3254 MNT_IUNLOCK(mp); 3255 switch (event) { 3256 case VFS_NOTIFY_UPPER_RECLAIM: 3257 VFS_RECLAIM_LOWERVP(ump, vp); 3258 break; 3259 case VFS_NOTIFY_UPPER_UNLINK: 3260 VFS_UNLINK_LOWERVP(ump, vp); 3261 break; 3262 default: 3263 KASSERT(0, ("invalid event %d", event)); 3264 break; 3265 } 3266 MNT_ILOCK(mp); 3267 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3268 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3269 } 3270 free(mmp, M_TEMP); 3271 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3272 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3273 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3274 wakeup(&mp->mnt_uppers); 3275 } 3276 unlock: 3277 MNT_IUNLOCK(mp); 3278 } 3279 3280 /* 3281 * vgone, with the vp interlock held. 3282 */ 3283 static void 3284 vgonel(struct vnode *vp) 3285 { 3286 struct thread *td; 3287 int oweinact; 3288 int active; 3289 struct mount *mp; 3290 3291 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3292 ASSERT_VI_LOCKED(vp, "vgonel"); 3293 VNASSERT(vp->v_holdcnt, vp, 3294 ("vgonel: vp %p has no reference.", vp)); 3295 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3296 td = curthread; 3297 3298 /* 3299 * Don't vgonel if we're already doomed. 3300 */ 3301 if (vp->v_iflag & VI_DOOMED) 3302 return; 3303 vp->v_iflag |= VI_DOOMED; 3304 3305 /* 3306 * Check to see if the vnode is in use. If so, we have to call 3307 * VOP_CLOSE() and VOP_INACTIVE(). 3308 */ 3309 active = vp->v_usecount; 3310 oweinact = (vp->v_iflag & VI_OWEINACT); 3311 VI_UNLOCK(vp); 3312 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3313 3314 /* 3315 * If purging an active vnode, it must be closed and 3316 * deactivated before being reclaimed. 3317 */ 3318 if (active) 3319 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3320 if (oweinact || active) { 3321 VI_LOCK(vp); 3322 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3323 vinactive(vp, td); 3324 VI_UNLOCK(vp); 3325 } 3326 if (vp->v_type == VSOCK) 3327 vfs_unp_reclaim(vp); 3328 3329 /* 3330 * Clean out any buffers associated with the vnode. 3331 * If the flush fails, just toss the buffers. 
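 * (vinvalbuf() with V_SAVE writes dirty buffers out first; if that fails,
 * the retry with no flags discards them unconditionally.)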
3332 */ 3333 mp = NULL; 3334 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3335 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3336 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3337 while (vinvalbuf(vp, 0, 0, 0) != 0) 3338 ; 3339 } 3340 3341 BO_LOCK(&vp->v_bufobj); 3342 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3343 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3344 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3345 vp->v_bufobj.bo_clean.bv_cnt == 0, 3346 ("vp %p bufobj not invalidated", vp)); 3347 3348 /* 3349 * For VMIO bufobj, BO_DEAD is set in vm_object_terminate() 3350 * after the object's page queue is flushed. 3351 */ 3352 if (vp->v_bufobj.bo_object == NULL) 3353 vp->v_bufobj.bo_flag |= BO_DEAD; 3354 BO_UNLOCK(&vp->v_bufobj); 3355 3356 /* 3357 * Reclaim the vnode. 3358 */ 3359 if (VOP_RECLAIM(vp, td)) 3360 panic("vgone: cannot reclaim"); 3361 if (mp != NULL) 3362 vn_finished_secondary_write(mp); 3363 VNASSERT(vp->v_object == NULL, vp, 3364 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3365 /* 3366 * Clear the advisory locks and wake up waiting threads. 3367 */ 3368 (void)VOP_ADVLOCKPURGE(vp); 3369 vp->v_lockf = NULL; 3370 /* 3371 * Delete from old mount point vnode list. 3372 */ 3373 delmntque(vp); 3374 cache_purge(vp); 3375 /* 3376 * Done with purge, reset to the standard lock and invalidate 3377 * the vnode. 3378 */ 3379 VI_LOCK(vp); 3380 vp->v_vnlock = &vp->v_lock; 3381 vp->v_op = &dead_vnodeops; 3382 vp->v_tag = "none"; 3383 vp->v_type = VBAD; 3384 } 3385 3386 /* 3387 * Calculate the total number of references to a special device. 3388 */ 3389 int 3390 vcount(struct vnode *vp) 3391 { 3392 int count; 3393 3394 dev_lock(); 3395 count = vp->v_rdev->si_usecount; 3396 dev_unlock(); 3397 return (count); 3398 } 3399 3400 /* 3401 * Same as above, but using the struct cdev *as argument 3402 */ 3403 int 3404 count_dev(struct cdev *dev) 3405 { 3406 int count; 3407 3408 dev_lock(); 3409 count = dev->si_usecount; 3410 dev_unlock(); 3411 return(count); 3412 } 3413 3414 /* 3415 * Print out a description of a vnode. 3416 */ 3417 static char *typename[] = 3418 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3419 "VMARKER"}; 3420 3421 void 3422 vn_printf(struct vnode *vp, const char *fmt, ...) 
3423 { 3424 va_list ap; 3425 char buf[256], buf2[16]; 3426 u_long flags; 3427 3428 va_start(ap, fmt); 3429 vprintf(fmt, ap); 3430 va_end(ap); 3431 printf("%p: ", (void *)vp); 3432 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3433 printf(" usecount %d, writecount %d, refcount %d", 3434 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3435 switch (vp->v_type) { 3436 case VDIR: 3437 printf(" mountedhere %p\n", vp->v_mountedhere); 3438 break; 3439 case VCHR: 3440 printf(" rdev %p\n", vp->v_rdev); 3441 break; 3442 case VSOCK: 3443 printf(" socket %p\n", vp->v_unpcb); 3444 break; 3445 case VFIFO: 3446 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3447 break; 3448 default: 3449 printf("\n"); 3450 break; 3451 } 3452 buf[0] = '\0'; 3453 buf[1] = '\0'; 3454 if (vp->v_vflag & VV_ROOT) 3455 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3456 if (vp->v_vflag & VV_ISTTY) 3457 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3458 if (vp->v_vflag & VV_NOSYNC) 3459 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3460 if (vp->v_vflag & VV_ETERNALDEV) 3461 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3462 if (vp->v_vflag & VV_CACHEDLABEL) 3463 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3464 if (vp->v_vflag & VV_TEXT) 3465 strlcat(buf, "|VV_TEXT", sizeof(buf)); 3466 if (vp->v_vflag & VV_COPYONWRITE) 3467 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3468 if (vp->v_vflag & VV_SYSTEM) 3469 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3470 if (vp->v_vflag & VV_PROCDEP) 3471 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3472 if (vp->v_vflag & VV_NOKNOTE) 3473 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3474 if (vp->v_vflag & VV_DELETED) 3475 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3476 if (vp->v_vflag & VV_MD) 3477 strlcat(buf, "|VV_MD", sizeof(buf)); 3478 if (vp->v_vflag & VV_FORCEINSMQ) 3479 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3480 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3481 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3482 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3483 if (flags != 0) { 3484 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3485 strlcat(buf, buf2, sizeof(buf)); 3486 } 3487 if (vp->v_iflag & VI_MOUNT) 3488 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3489 if (vp->v_iflag & VI_DOOMED) 3490 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3491 if (vp->v_iflag & VI_FREE) 3492 strlcat(buf, "|VI_FREE", sizeof(buf)); 3493 if (vp->v_iflag & VI_ACTIVE) 3494 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3495 if (vp->v_iflag & VI_DOINGINACT) 3496 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3497 if (vp->v_iflag & VI_OWEINACT) 3498 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3499 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3500 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 3501 if (flags != 0) { 3502 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3503 strlcat(buf, buf2, sizeof(buf)); 3504 } 3505 printf(" flags (%s)\n", buf + 1); 3506 if (mtx_owned(VI_MTX(vp))) 3507 printf(" VI_LOCKed"); 3508 if (vp->v_object != NULL) 3509 printf(" v_object %p ref %d pages %d " 3510 "cleanbuf %d dirtybuf %d\n", 3511 vp->v_object, vp->v_object->ref_count, 3512 vp->v_object->resident_page_count, 3513 vp->v_bufobj.bo_clean.bv_cnt, 3514 vp->v_bufobj.bo_dirty.bv_cnt); 3515 printf(" "); 3516 lockmgr_printinfo(vp->v_vnlock); 3517 if (vp->v_data != NULL) 3518 VOP_PRINT(vp); 3519 } 3520 3521 #ifdef DDB 3522 /* 3523 * List all of the locked vnodes in the system. 3524 * Called when debugging the kernel. 
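 * Invoked from the ddb prompt as "show lockedvnods".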
3525 */ 3526 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3527 { 3528 struct mount *mp; 3529 struct vnode *vp; 3530 3531 /* 3532 * Note: because this is DDB, we can't obey the locking semantics 3533 * for these structures, which means we could catch an inconsistent 3534 * state and dereference a nasty pointer. Not much to be done 3535 * about that. 3536 */ 3537 db_printf("Locked vnodes\n"); 3538 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3539 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3540 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3541 vn_printf(vp, "vnode "); 3542 } 3543 } 3544 } 3545 3546 /* 3547 * Show details about the given vnode. 3548 */ 3549 DB_SHOW_COMMAND(vnode, db_show_vnode) 3550 { 3551 struct vnode *vp; 3552 3553 if (!have_addr) 3554 return; 3555 vp = (struct vnode *)addr; 3556 vn_printf(vp, "vnode "); 3557 } 3558 3559 /* 3560 * Show details about the given mount point. 3561 */ 3562 DB_SHOW_COMMAND(mount, db_show_mount) 3563 { 3564 struct mount *mp; 3565 struct vfsopt *opt; 3566 struct statfs *sp; 3567 struct vnode *vp; 3568 char buf[512]; 3569 uint64_t mflags; 3570 u_int flags; 3571 3572 if (!have_addr) { 3573 /* No address given, print short info about all mount points. */ 3574 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3575 db_printf("%p %s on %s (%s)\n", mp, 3576 mp->mnt_stat.f_mntfromname, 3577 mp->mnt_stat.f_mntonname, 3578 mp->mnt_stat.f_fstypename); 3579 if (db_pager_quit) 3580 break; 3581 } 3582 db_printf("\nMore info: show mount <addr>\n"); 3583 return; 3584 } 3585 3586 mp = (struct mount *)addr; 3587 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3588 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3589 3590 buf[0] = '\0'; 3591 mflags = mp->mnt_flag; 3592 #define MNT_FLAG(flag) do { \ 3593 if (mflags & (flag)) { \ 3594 if (buf[0] != '\0') \ 3595 strlcat(buf, ", ", sizeof(buf)); \ 3596 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3597 mflags &= ~(flag); \ 3598 } \ 3599 } while (0) 3600 MNT_FLAG(MNT_RDONLY); 3601 MNT_FLAG(MNT_SYNCHRONOUS); 3602 MNT_FLAG(MNT_NOEXEC); 3603 MNT_FLAG(MNT_NOSUID); 3604 MNT_FLAG(MNT_NFS4ACLS); 3605 MNT_FLAG(MNT_UNION); 3606 MNT_FLAG(MNT_ASYNC); 3607 MNT_FLAG(MNT_SUIDDIR); 3608 MNT_FLAG(MNT_SOFTDEP); 3609 MNT_FLAG(MNT_NOSYMFOLLOW); 3610 MNT_FLAG(MNT_GJOURNAL); 3611 MNT_FLAG(MNT_MULTILABEL); 3612 MNT_FLAG(MNT_ACLS); 3613 MNT_FLAG(MNT_NOATIME); 3614 MNT_FLAG(MNT_NOCLUSTERR); 3615 MNT_FLAG(MNT_NOCLUSTERW); 3616 MNT_FLAG(MNT_SUJ); 3617 MNT_FLAG(MNT_EXRDONLY); 3618 MNT_FLAG(MNT_EXPORTED); 3619 MNT_FLAG(MNT_DEFEXPORTED); 3620 MNT_FLAG(MNT_EXPORTANON); 3621 MNT_FLAG(MNT_EXKERB); 3622 MNT_FLAG(MNT_EXPUBLIC); 3623 MNT_FLAG(MNT_LOCAL); 3624 MNT_FLAG(MNT_QUOTA); 3625 MNT_FLAG(MNT_ROOTFS); 3626 MNT_FLAG(MNT_USER); 3627 MNT_FLAG(MNT_IGNORE); 3628 MNT_FLAG(MNT_UPDATE); 3629 MNT_FLAG(MNT_DELEXPORT); 3630 MNT_FLAG(MNT_RELOAD); 3631 MNT_FLAG(MNT_FORCE); 3632 MNT_FLAG(MNT_SNAPSHOT); 3633 MNT_FLAG(MNT_BYFSID); 3634 #undef MNT_FLAG 3635 if (mflags != 0) { 3636 if (buf[0] != '\0') 3637 strlcat(buf, ", ", sizeof(buf)); 3638 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3639 "0x%016jx", mflags); 3640 } 3641 db_printf(" mnt_flag = %s\n", buf); 3642 3643 buf[0] = '\0'; 3644 flags = mp->mnt_kern_flag; 3645 #define MNT_KERN_FLAG(flag) do { \ 3646 if (flags & (flag)) { \ 3647 if (buf[0] != '\0') \ 3648 strlcat(buf, ", ", sizeof(buf)); \ 3649 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3650 flags &= ~(flag); \ 3651 } \ 3652 } while (0) 3653 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3654 MNT_KERN_FLAG(MNTK_ASYNC); 3655 
MNT_KERN_FLAG(MNTK_SOFTDEP); 3656 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3657 MNT_KERN_FLAG(MNTK_DRAINING); 3658 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3659 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3660 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3661 MNT_KERN_FLAG(MNTK_NO_IOPF); 3662 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3663 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3664 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3665 MNT_KERN_FLAG(MNTK_MARKER); 3666 MNT_KERN_FLAG(MNTK_USES_BCACHE); 3667 MNT_KERN_FLAG(MNTK_NOASYNC); 3668 MNT_KERN_FLAG(MNTK_UNMOUNT); 3669 MNT_KERN_FLAG(MNTK_MWAIT); 3670 MNT_KERN_FLAG(MNTK_SUSPEND); 3671 MNT_KERN_FLAG(MNTK_SUSPEND2); 3672 MNT_KERN_FLAG(MNTK_SUSPENDED); 3673 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3674 MNT_KERN_FLAG(MNTK_NOKNOTE); 3675 #undef MNT_KERN_FLAG 3676 if (flags != 0) { 3677 if (buf[0] != '\0') 3678 strlcat(buf, ", ", sizeof(buf)); 3679 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3680 "0x%08x", flags); 3681 } 3682 db_printf(" mnt_kern_flag = %s\n", buf); 3683 3684 db_printf(" mnt_opt = "); 3685 opt = TAILQ_FIRST(mp->mnt_opt); 3686 if (opt != NULL) { 3687 db_printf("%s", opt->name); 3688 opt = TAILQ_NEXT(opt, link); 3689 while (opt != NULL) { 3690 db_printf(", %s", opt->name); 3691 opt = TAILQ_NEXT(opt, link); 3692 } 3693 } 3694 db_printf("\n"); 3695 3696 sp = &mp->mnt_stat; 3697 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3698 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3699 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3700 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3701 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3702 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3703 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3704 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3705 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3706 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3707 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3708 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3709 3710 db_printf(" mnt_cred = { uid=%u ruid=%u", 3711 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3712 if (jailed(mp->mnt_cred)) 3713 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3714 db_printf(" }\n"); 3715 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3716 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3717 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3718 db_printf(" mnt_activevnodelistsize = %d\n", 3719 mp->mnt_activevnodelistsize); 3720 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3721 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3722 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3723 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3724 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); 3725 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3726 db_printf(" mnt_secondary_accwrites = %d\n", 3727 mp->mnt_secondary_accwrites); 3728 db_printf(" mnt_gjprovider = %s\n", 3729 mp->mnt_gjprovider != NULL ? 
mp->mnt_gjprovider : "NULL"); 3730 3731 db_printf("\n\nList of active vnodes\n"); 3732 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3733 if (vp->v_type != VMARKER) { 3734 vn_printf(vp, "vnode "); 3735 if (db_pager_quit) 3736 break; 3737 } 3738 } 3739 db_printf("\n\nList of inactive vnodes\n"); 3740 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3741 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3742 vn_printf(vp, "vnode "); 3743 if (db_pager_quit) 3744 break; 3745 } 3746 } 3747 } 3748 #endif /* DDB */ 3749 3750 /* 3751 * Fill in a struct xvfsconf based on a struct vfsconf. 3752 */ 3753 static int 3754 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3755 { 3756 struct xvfsconf xvfsp; 3757 3758 bzero(&xvfsp, sizeof(xvfsp)); 3759 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3760 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3761 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3762 xvfsp.vfc_flags = vfsp->vfc_flags; 3763 /* 3764 * These are unused in userland, we keep them 3765 * to not break binary compatibility. 3766 */ 3767 xvfsp.vfc_vfsops = NULL; 3768 xvfsp.vfc_next = NULL; 3769 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3770 } 3771 3772 #ifdef COMPAT_FREEBSD32 3773 struct xvfsconf32 { 3774 uint32_t vfc_vfsops; 3775 char vfc_name[MFSNAMELEN]; 3776 int32_t vfc_typenum; 3777 int32_t vfc_refcount; 3778 int32_t vfc_flags; 3779 uint32_t vfc_next; 3780 }; 3781 3782 static int 3783 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3784 { 3785 struct xvfsconf32 xvfsp; 3786 3787 bzero(&xvfsp, sizeof(xvfsp)); 3788 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3789 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3790 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3791 xvfsp.vfc_flags = vfsp->vfc_flags; 3792 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3793 } 3794 #endif 3795 3796 /* 3797 * Top level filesystem related information gathering. 3798 */ 3799 static int 3800 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3801 { 3802 struct vfsconf *vfsp; 3803 int error; 3804 3805 error = 0; 3806 vfsconf_slock(); 3807 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3808 #ifdef COMPAT_FREEBSD32 3809 if (req->flags & SCTL_MASK32) 3810 error = vfsconf2x32(req, vfsp); 3811 else 3812 #endif 3813 error = vfsconf2x(req, vfsp); 3814 if (error) 3815 break; 3816 } 3817 vfsconf_sunlock(); 3818 return (error); 3819 } 3820 3821 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3822 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3823 "S,xvfsconf", "List of all configured filesystems"); 3824 3825 #ifndef BURN_BRIDGES 3826 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3827 3828 static int 3829 vfs_sysctl(SYSCTL_HANDLER_ARGS) 3830 { 3831 int *name = (int *)arg1 - 1; /* XXX */ 3832 u_int namelen = arg2 + 1; /* XXX */ 3833 struct vfsconf *vfsp; 3834 3835 log(LOG_WARNING, "userland calling deprecated sysctl, " 3836 "please rebuild world\n"); 3837 3838 #if 1 || defined(COMPAT_PRELITE2) 3839 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
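 * A one-element name can only be the old VFS_VFSCONF request, so hand it
 * to sysctl_ovfs_conf().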
*/ 3840 if (namelen == 1) 3841 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3842 #endif 3843 3844 switch (name[1]) { 3845 case VFS_MAXTYPENUM: 3846 if (namelen != 2) 3847 return (ENOTDIR); 3848 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3849 case VFS_CONF: 3850 if (namelen != 3) 3851 return (ENOTDIR); /* overloaded */ 3852 vfsconf_slock(); 3853 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3854 if (vfsp->vfc_typenum == name[2]) 3855 break; 3856 } 3857 vfsconf_sunlock(); 3858 if (vfsp == NULL) 3859 return (EOPNOTSUPP); 3860 #ifdef COMPAT_FREEBSD32 3861 if (req->flags & SCTL_MASK32) 3862 return (vfsconf2x32(req, vfsp)); 3863 else 3864 #endif 3865 return (vfsconf2x(req, vfsp)); 3866 } 3867 return (EOPNOTSUPP); 3868 } 3869 3870 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3871 CTLFLAG_MPSAFE, vfs_sysctl, 3872 "Generic filesystem"); 3873 3874 #if 1 || defined(COMPAT_PRELITE2) 3875 3876 static int 3877 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3878 { 3879 int error; 3880 struct vfsconf *vfsp; 3881 struct ovfsconf ovfs; 3882 3883 vfsconf_slock(); 3884 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3885 bzero(&ovfs, sizeof(ovfs)); 3886 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3887 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3888 ovfs.vfc_index = vfsp->vfc_typenum; 3889 ovfs.vfc_refcount = vfsp->vfc_refcount; 3890 ovfs.vfc_flags = vfsp->vfc_flags; 3891 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3892 if (error != 0) { 3893 vfsconf_sunlock(); 3894 return (error); 3895 } 3896 } 3897 vfsconf_sunlock(); 3898 return (0); 3899 } 3900 3901 #endif /* 1 || COMPAT_PRELITE2 */ 3902 #endif /* !BURN_BRIDGES */ 3903 3904 #define KINFO_VNODESLOP 10 3905 #ifdef notyet 3906 /* 3907 * Dump vnode list (via sysctl). 3908 */ 3909 /* ARGSUSED */ 3910 static int 3911 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3912 { 3913 struct xvnode *xvn; 3914 struct mount *mp; 3915 struct vnode *vp; 3916 int error, len, n; 3917 3918 /* 3919 * Stale numvnodes access is not fatal here. 3920 */ 3921 req->lock = 0; 3922 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3923 if (!req->oldptr) 3924 /* Make an estimate */ 3925 return (SYSCTL_OUT(req, 0, len)); 3926 3927 error = sysctl_wire_old_buffer(req, 0); 3928 if (error != 0) 3929 return (error); 3930 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3931 n = 0; 3932 mtx_lock(&mountlist_mtx); 3933 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3934 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3935 continue; 3936 MNT_ILOCK(mp); 3937 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3938 if (n == len) 3939 break; 3940 vref(vp); 3941 xvn[n].xv_size = sizeof *xvn; 3942 xvn[n].xv_vnode = vp; 3943 xvn[n].xv_id = 0; /* XXX compat */ 3944 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3945 XV_COPY(usecount); 3946 XV_COPY(writecount); 3947 XV_COPY(holdcnt); 3948 XV_COPY(mount); 3949 XV_COPY(numoutput); 3950 XV_COPY(type); 3951 #undef XV_COPY 3952 xvn[n].xv_flag = vp->v_vflag; 3953 3954 switch (vp->v_type) { 3955 case VREG: 3956 case VDIR: 3957 case VLNK: 3958 break; 3959 case VBLK: 3960 case VCHR: 3961 if (vp->v_rdev == NULL) { 3962 vrele(vp); 3963 continue; 3964 } 3965 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3966 break; 3967 case VSOCK: 3968 xvn[n].xv_socket = vp->v_socket; 3969 break; 3970 case VFIFO: 3971 xvn[n].xv_fifo = vp->v_fifoinfo; 3972 break; 3973 case VNON: 3974 case VBAD: 3975 default: 3976 /* shouldn't happen? 
*/ 3977 vrele(vp); 3978 continue; 3979 } 3980 vrele(vp); 3981 ++n; 3982 } 3983 MNT_IUNLOCK(mp); 3984 mtx_lock(&mountlist_mtx); 3985 vfs_unbusy(mp); 3986 if (n == len) 3987 break; 3988 } 3989 mtx_unlock(&mountlist_mtx); 3990 3991 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3992 free(xvn, M_TEMP); 3993 return (error); 3994 } 3995 3996 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3997 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 3998 ""); 3999 #endif 4000 4001 static void 4002 unmount_or_warn(struct mount *mp) 4003 { 4004 int error; 4005 4006 error = dounmount(mp, MNT_FORCE, curthread); 4007 if (error != 0) { 4008 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4009 if (error == EBUSY) 4010 printf("BUSY)\n"); 4011 else 4012 printf("%d)\n", error); 4013 } 4014 } 4015 4016 /* 4017 * Unmount all filesystems. The list is traversed in reverse order 4018 * of mounting to avoid dependencies. 4019 */ 4020 void 4021 vfs_unmountall(void) 4022 { 4023 struct mount *mp, *tmp; 4024 4025 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4026 4027 /* 4028 * Since this only runs when rebooting, it is not interlocked. 4029 */ 4030 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4031 vfs_ref(mp); 4032 4033 /* 4034 * Forcibly unmounting "/dev" before "/" would prevent clean 4035 * unmount of the latter. 4036 */ 4037 if (mp == rootdevmp) 4038 continue; 4039 4040 unmount_or_warn(mp); 4041 } 4042 4043 if (rootdevmp != NULL) 4044 unmount_or_warn(rootdevmp); 4045 } 4046 4047 /* 4048 * perform msync on all vnodes under a mount point 4049 * the mount point must be locked. 4050 */ 4051 void 4052 vfs_msync(struct mount *mp, int flags) 4053 { 4054 struct vnode *vp, *mvp; 4055 struct vm_object *obj; 4056 4057 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4058 4059 vnlru_return_batch(mp); 4060 4061 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4062 obj = vp->v_object; 4063 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 4064 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4065 if (!vget(vp, 4066 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4067 curthread)) { 4068 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4069 vput(vp); 4070 continue; 4071 } 4072 4073 obj = vp->v_object; 4074 if (obj != NULL) { 4075 VM_OBJECT_WLOCK(obj); 4076 vm_object_page_clean(obj, 0, 0, 4077 flags == MNT_WAIT ? 4078 OBJPC_SYNC : OBJPC_NOSYNC); 4079 VM_OBJECT_WUNLOCK(obj); 4080 } 4081 vput(vp); 4082 } 4083 } else 4084 VI_UNLOCK(vp); 4085 } 4086 } 4087 4088 static void 4089 destroy_vpollinfo_free(struct vpollinfo *vi) 4090 { 4091 4092 knlist_destroy(&vi->vpi_selinfo.si_note); 4093 mtx_destroy(&vi->vpi_lock); 4094 uma_zfree(vnodepoll_zone, vi); 4095 } 4096 4097 static void 4098 destroy_vpollinfo(struct vpollinfo *vi) 4099 { 4100 4101 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4102 seldrain(&vi->vpi_selinfo); 4103 destroy_vpollinfo_free(vi); 4104 } 4105 4106 /* 4107 * Initialize per-vnode helper structure to hold poll-related state. 
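 *
 * The structure is allocated and initialized before the vnode interlock
 * is taken, so two threads may race to install it; the loser notices an
 * already-present v_pollinfo under VI_LOCK() and releases its own copy
 * with destroy_vpollinfo_free().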
4108 */
4109 void
4110 v_addpollinfo(struct vnode *vp)
4111 {
4112 struct vpollinfo *vi;
4113 
4114 if (vp->v_pollinfo != NULL)
4115 return;
4116 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO);
4117 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
4118 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
4119 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
4120 VI_LOCK(vp);
4121 if (vp->v_pollinfo != NULL) {
4122 VI_UNLOCK(vp);
4123 destroy_vpollinfo_free(vi);
4124 return;
4125 }
4126 vp->v_pollinfo = vi;
4127 VI_UNLOCK(vp);
4128 }
4129 
4130 /*
4131 * Record a process's interest in events which might happen to
4132 * a vnode. Because poll uses the historic select-style interface
4133 * internally, this routine serves as both the ``check for any
4134 * pending events'' and the ``record my interest in future events''
4135 * functions. (These are done together, while the lock is held,
4136 * to avoid race conditions.)
4137 */
4138 int
4139 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4140 {
4141 
4142 v_addpollinfo(vp);
4143 mtx_lock(&vp->v_pollinfo->vpi_lock);
4144 if (vp->v_pollinfo->vpi_revents & events) {
4145 /*
4146 * This leaves events we are not interested
4147 * in available for the other process which
4148 * presumably had requested them
4149 * (otherwise they would never have been
4150 * recorded).
4151 */
4152 events &= vp->v_pollinfo->vpi_revents;
4153 vp->v_pollinfo->vpi_revents &= ~events;
4154 
4155 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4156 return (events);
4157 }
4158 vp->v_pollinfo->vpi_events |= events;
4159 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
4160 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4161 return (0);
4162 }
4163 
4164 /*
4165 * Routine to create and manage a filesystem syncer vnode.
4166 */
4167 #define sync_close ((int (*)(struct vop_close_args *))nullop)
4168 static int sync_fsync(struct vop_fsync_args *);
4169 static int sync_inactive(struct vop_inactive_args *);
4170 static int sync_reclaim(struct vop_reclaim_args *);
4171 
4172 static struct vop_vector sync_vnodeops = {
4173 .vop_bypass = VOP_EOPNOTSUPP,
4174 .vop_close = sync_close, /* close */
4175 .vop_fsync = sync_fsync, /* fsync */
4176 .vop_inactive = sync_inactive, /* inactive */
4177 .vop_reclaim = sync_reclaim, /* reclaim */
4178 .vop_lock1 = vop_stdlock, /* lock */
4179 .vop_unlock = vop_stdunlock, /* unlock */
4180 .vop_islocked = vop_stdislocked, /* islocked */
4181 };
4182 
4183 /*
4184 * Create a new filesystem syncer vnode for the specified mount point.
4185 */
4186 void
4187 vfs_allocate_syncvnode(struct mount *mp)
4188 {
4189 struct vnode *vp;
4190 struct bufobj *bo;
4191 static long start, incr, next;
4192 int error;
4193 
4194 /* Allocate a new vnode */
4195 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
4196 if (error != 0)
4197 panic("vfs_allocate_syncvnode: getnewvnode() failed");
4198 vp->v_type = VNON;
4199 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4200 vp->v_vflag |= VV_FORCEINSMQ;
4201 error = insmntque(vp, mp);
4202 if (error != 0)
4203 panic("vfs_allocate_syncvnode: insmntque() failed");
4204 vp->v_vflag &= ~VV_FORCEINSMQ;
4205 VOP_UNLOCK(vp, 0);
4206 /*
4207 * Place the vnode onto the syncer worklist. We attempt to
4208 * scatter them about on the list so that they will go off
4209 * at evenly distributed times even if all the filesystems
4210 * are mounted at once.
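 *
 * The static start/incr/next triple below produces a bit-reversal style
 * low-discrepancy sequence of slots.  For illustration (assuming the
 * default syncer_maxdelay of 32), successive calls compute next = 16, 8,
 * 24, 4, 12, 20, 28, 2, ... so syncer vnodes spread across the syncer
 * wheel instead of piling into one bucket; the slot actually used below
 * is next % syncdelay.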
4211 */
4212 next += incr;
4213 if (next == 0 || next > syncer_maxdelay) {
4214 start /= 2;
4215 incr /= 2;
4216 if (start == 0) {
4217 start = syncer_maxdelay / 2;
4218 incr = syncer_maxdelay;
4219 }
4220 next = start;
4221 }
4222 bo = &vp->v_bufobj;
4223 BO_LOCK(bo);
4224 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
4225 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
4226 mtx_lock(&sync_mtx);
4227 sync_vnode_count++;
4228 if (mp->mnt_syncer == NULL) {
4229 mp->mnt_syncer = vp;
4230 vp = NULL;
4231 }
4232 mtx_unlock(&sync_mtx);
4233 BO_UNLOCK(bo);
4234 if (vp != NULL) {
4235 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4236 vgone(vp);
4237 vput(vp);
4238 }
4239 }
4240 
4241 void
4242 vfs_deallocate_syncvnode(struct mount *mp)
4243 {
4244 struct vnode *vp;
4245 
4246 mtx_lock(&sync_mtx);
4247 vp = mp->mnt_syncer;
4248 if (vp != NULL)
4249 mp->mnt_syncer = NULL;
4250 mtx_unlock(&sync_mtx);
4251 if (vp != NULL)
4252 vrele(vp);
4253 }
4254 
4255 /*
4256 * Do a lazy sync of the filesystem.
4257 */
4258 static int
4259 sync_fsync(struct vop_fsync_args *ap)
4260 {
4261 struct vnode *syncvp = ap->a_vp;
4262 struct mount *mp = syncvp->v_mount;
4263 int error, save;
4264 struct bufobj *bo;
4265 
4266 /*
4267 * We only need to do something if this is a lazy evaluation.
4268 */
4269 if (ap->a_waitfor != MNT_LAZY)
4270 return (0);
4271 
4272 /*
4273 * Move ourselves to the back of the sync list.
4274 */
4275 bo = &syncvp->v_bufobj;
4276 BO_LOCK(bo);
4277 vn_syncer_add_to_worklist(bo, syncdelay);
4278 BO_UNLOCK(bo);
4279 
4280 /*
4281 * Walk the list of vnodes pushing all that are dirty and
4282 * not already on the sync list.
4283 */
4284 if (vfs_busy(mp, MBF_NOWAIT) != 0)
4285 return (0);
4286 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
4287 vfs_unbusy(mp);
4288 return (0);
4289 }
4290 save = curthread_pflags_set(TDP_SYNCIO);
4291 vfs_msync(mp, MNT_NOWAIT);
4292 error = VFS_SYNC(mp, MNT_LAZY);
4293 curthread_pflags_restore(save);
4294 vn_finished_write(mp);
4295 vfs_unbusy(mp);
4296 return (error);
4297 }
4298 
4299 /*
4300 * The syncer vnode is no longer referenced.
4301 */
4302 static int
4303 sync_inactive(struct vop_inactive_args *ap)
4304 {
4305 
4306 vgone(ap->a_vp);
4307 return (0);
4308 }
4309 
4310 /*
4311 * The syncer vnode is no longer needed and is being decommissioned.
4312 *
4313 * Modifications to the worklist must be protected by sync_mtx.
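 *
 * The teardown below therefore takes the bufobj lock first and sync_mtx
 * second, matching the order used when the syncer vnode was put on the
 * worklist, clears mnt_syncer if this vnode is still installed there,
 * and removes the bufobj from the syncer worklist.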
4314 */ 4315 static int 4316 sync_reclaim(struct vop_reclaim_args *ap) 4317 { 4318 struct vnode *vp = ap->a_vp; 4319 struct bufobj *bo; 4320 4321 bo = &vp->v_bufobj; 4322 BO_LOCK(bo); 4323 mtx_lock(&sync_mtx); 4324 if (vp->v_mount->mnt_syncer == vp) 4325 vp->v_mount->mnt_syncer = NULL; 4326 if (bo->bo_flag & BO_ONWORKLST) { 4327 LIST_REMOVE(bo, bo_synclist); 4328 syncer_worklist_len--; 4329 sync_vnode_count--; 4330 bo->bo_flag &= ~BO_ONWORKLST; 4331 } 4332 mtx_unlock(&sync_mtx); 4333 BO_UNLOCK(bo); 4334 4335 return (0); 4336 } 4337 4338 /* 4339 * Check if vnode represents a disk device 4340 */ 4341 int 4342 vn_isdisk(struct vnode *vp, int *errp) 4343 { 4344 int error; 4345 4346 if (vp->v_type != VCHR) { 4347 error = ENOTBLK; 4348 goto out; 4349 } 4350 error = 0; 4351 dev_lock(); 4352 if (vp->v_rdev == NULL) 4353 error = ENXIO; 4354 else if (vp->v_rdev->si_devsw == NULL) 4355 error = ENXIO; 4356 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4357 error = ENOTBLK; 4358 dev_unlock(); 4359 out: 4360 if (errp != NULL) 4361 *errp = error; 4362 return (error == 0); 4363 } 4364 4365 /* 4366 * Common filesystem object access control check routine. Accepts a 4367 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4368 * and optional call-by-reference privused argument allowing vaccess() 4369 * to indicate to the caller whether privilege was used to satisfy the 4370 * request (obsoleted). Returns 0 on success, or an errno on failure. 4371 */ 4372 int 4373 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4374 accmode_t accmode, struct ucred *cred, int *privused) 4375 { 4376 accmode_t dac_granted; 4377 accmode_t priv_granted; 4378 4379 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4380 ("invalid bit in accmode")); 4381 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4382 ("VAPPEND without VWRITE")); 4383 4384 /* 4385 * Look for a normal, non-privileged way to access the file/directory 4386 * as requested. If it exists, go with that. 4387 */ 4388 4389 if (privused != NULL) 4390 *privused = 0; 4391 4392 dac_granted = 0; 4393 4394 /* Check the owner. */ 4395 if (cred->cr_uid == file_uid) { 4396 dac_granted |= VADMIN; 4397 if (file_mode & S_IXUSR) 4398 dac_granted |= VEXEC; 4399 if (file_mode & S_IRUSR) 4400 dac_granted |= VREAD; 4401 if (file_mode & S_IWUSR) 4402 dac_granted |= (VWRITE | VAPPEND); 4403 4404 if ((accmode & dac_granted) == accmode) 4405 return (0); 4406 4407 goto privcheck; 4408 } 4409 4410 /* Otherwise, check the groups (first match) */ 4411 if (groupmember(file_gid, cred)) { 4412 if (file_mode & S_IXGRP) 4413 dac_granted |= VEXEC; 4414 if (file_mode & S_IRGRP) 4415 dac_granted |= VREAD; 4416 if (file_mode & S_IWGRP) 4417 dac_granted |= (VWRITE | VAPPEND); 4418 4419 if ((accmode & dac_granted) == accmode) 4420 return (0); 4421 4422 goto privcheck; 4423 } 4424 4425 /* Otherwise, check everyone else. */ 4426 if (file_mode & S_IXOTH) 4427 dac_granted |= VEXEC; 4428 if (file_mode & S_IROTH) 4429 dac_granted |= VREAD; 4430 if (file_mode & S_IWOTH) 4431 dac_granted |= (VWRITE | VAPPEND); 4432 if ((accmode & dac_granted) == accmode) 4433 return (0); 4434 4435 privcheck: 4436 /* 4437 * Build a privilege mask to determine if the set of privileges 4438 * satisfies the requirements when combined with the granted mask 4439 * from above. For each privilege, if the privilege is required, 4440 * bitwise or the request type onto the priv_granted mask. 
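 *
 * Worked example (hypothetical): a credential that is neither the file
 * owner nor a member of its group asks for VREAD on a mode 0640 file and
 * arrives here with dac_granted == 0.  If it holds PRIV_VFS_READ,
 * priv_granted gains VREAD, the combined masks cover the request,
 * *privused is set and 0 is returned; without the privilege the function
 * falls through to the EACCES/EPERM return at the bottom.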
4441 */ 4442 priv_granted = 0; 4443 4444 if (type == VDIR) { 4445 /* 4446 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4447 * requests, instead of PRIV_VFS_EXEC. 4448 */ 4449 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4450 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 4451 priv_granted |= VEXEC; 4452 } else { 4453 /* 4454 * Ensure that at least one execute bit is on. Otherwise, 4455 * a privileged user will always succeed, and we don't want 4456 * this to happen unless the file really is executable. 4457 */ 4458 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4459 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4460 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 4461 priv_granted |= VEXEC; 4462 } 4463 4464 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4465 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 4466 priv_granted |= VREAD; 4467 4468 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4469 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 4470 priv_granted |= (VWRITE | VAPPEND); 4471 4472 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4473 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 4474 priv_granted |= VADMIN; 4475 4476 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4477 /* XXX audit: privilege used */ 4478 if (privused != NULL) 4479 *privused = 1; 4480 return (0); 4481 } 4482 4483 return ((accmode & VADMIN) ? EPERM : EACCES); 4484 } 4485 4486 /* 4487 * Credential check based on process requesting service, and per-attribute 4488 * permissions. 4489 */ 4490 int 4491 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4492 struct thread *td, accmode_t accmode) 4493 { 4494 4495 /* 4496 * Kernel-invoked always succeeds. 4497 */ 4498 if (cred == NOCRED) 4499 return (0); 4500 4501 /* 4502 * Do not allow privileged processes in jail to directly manipulate 4503 * system attributes. 4504 */ 4505 switch (attrnamespace) { 4506 case EXTATTR_NAMESPACE_SYSTEM: 4507 /* Potentially should be: return (EPERM); */ 4508 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 4509 case EXTATTR_NAMESPACE_USER: 4510 return (VOP_ACCESS(vp, accmode, cred, td)); 4511 default: 4512 return (EPERM); 4513 } 4514 } 4515 4516 #ifdef DEBUG_VFS_LOCKS 4517 /* 4518 * This only exists to suppress warnings from unlocked specfs accesses. It is 4519 * no longer ok to have an unlocked VFS. 4520 */ 4521 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4522 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4523 4524 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4525 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4526 "Drop into debugger on lock violation"); 4527 4528 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4529 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4530 0, "Check for interlock across VOPs"); 4531 4532 int vfs_badlock_print = 1; /* Print lock violations. */ 4533 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4534 0, "Print lock violations"); 4535 4536 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4537 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4538 0, "Print vnode details on lock violations"); 4539 4540 #ifdef KDB 4541 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 4542 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4543 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4544 #endif 4545 4546 static void 4547 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4548 { 4549 4550 #ifdef KDB 4551 if (vfs_badlock_backtrace) 4552 kdb_backtrace(); 4553 #endif 4554 if (vfs_badlock_vnode) 4555 vn_printf(vp, "vnode "); 4556 if (vfs_badlock_print) 4557 printf("%s: %p %s\n", str, (void *)vp, msg); 4558 if (vfs_badlock_ddb) 4559 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4560 } 4561 4562 void 4563 assert_vi_locked(struct vnode *vp, const char *str) 4564 { 4565 4566 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4567 vfs_badlock("interlock is not locked but should be", str, vp); 4568 } 4569 4570 void 4571 assert_vi_unlocked(struct vnode *vp, const char *str) 4572 { 4573 4574 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4575 vfs_badlock("interlock is locked but should not be", str, vp); 4576 } 4577 4578 void 4579 assert_vop_locked(struct vnode *vp, const char *str) 4580 { 4581 int locked; 4582 4583 if (!IGNORE_LOCK(vp)) { 4584 locked = VOP_ISLOCKED(vp); 4585 if (locked == 0 || locked == LK_EXCLOTHER) 4586 vfs_badlock("is not locked but should be", str, vp); 4587 } 4588 } 4589 4590 void 4591 assert_vop_unlocked(struct vnode *vp, const char *str) 4592 { 4593 4594 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4595 vfs_badlock("is locked but should not be", str, vp); 4596 } 4597 4598 void 4599 assert_vop_elocked(struct vnode *vp, const char *str) 4600 { 4601 4602 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4603 vfs_badlock("is not exclusive locked but should be", str, vp); 4604 } 4605 #endif /* DEBUG_VFS_LOCKS */ 4606 4607 void 4608 vop_rename_fail(struct vop_rename_args *ap) 4609 { 4610 4611 if (ap->a_tvp != NULL) 4612 vput(ap->a_tvp); 4613 if (ap->a_tdvp == ap->a_tvp) 4614 vrele(ap->a_tdvp); 4615 else 4616 vput(ap->a_tdvp); 4617 vrele(ap->a_fdvp); 4618 vrele(ap->a_fvp); 4619 } 4620 4621 void 4622 vop_rename_pre(void *ap) 4623 { 4624 struct vop_rename_args *a = ap; 4625 4626 #ifdef DEBUG_VFS_LOCKS 4627 if (a->a_tvp) 4628 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4629 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4630 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4631 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4632 4633 /* Check the source (from). */ 4634 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4635 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4636 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4637 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4638 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4639 4640 /* Check the target. */ 4641 if (a->a_tvp) 4642 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4643 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4644 #endif 4645 if (a->a_tdvp != a->a_fdvp) 4646 vhold(a->a_fdvp); 4647 if (a->a_tvp != a->a_fvp) 4648 vhold(a->a_fvp); 4649 vhold(a->a_tdvp); 4650 if (a->a_tvp) 4651 vhold(a->a_tvp); 4652 } 4653 4654 #ifdef DEBUG_VFS_LOCKS 4655 void 4656 vop_strategy_pre(void *ap) 4657 { 4658 struct vop_strategy_args *a; 4659 struct buf *bp; 4660 4661 a = ap; 4662 bp = a->a_bp; 4663 4664 /* 4665 * Cluster ops lock their component buffers but not the IO container. 
4666 */ 4667 if ((bp->b_flags & B_CLUSTER) != 0) 4668 return; 4669 4670 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4671 if (vfs_badlock_print) 4672 printf( 4673 "VOP_STRATEGY: bp is not locked but should be\n"); 4674 if (vfs_badlock_ddb) 4675 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4676 } 4677 } 4678 4679 void 4680 vop_lock_pre(void *ap) 4681 { 4682 struct vop_lock1_args *a = ap; 4683 4684 if ((a->a_flags & LK_INTERLOCK) == 0) 4685 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4686 else 4687 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4688 } 4689 4690 void 4691 vop_lock_post(void *ap, int rc) 4692 { 4693 struct vop_lock1_args *a = ap; 4694 4695 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4696 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4697 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4698 } 4699 4700 void 4701 vop_unlock_pre(void *ap) 4702 { 4703 struct vop_unlock_args *a = ap; 4704 4705 if (a->a_flags & LK_INTERLOCK) 4706 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4707 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4708 } 4709 4710 void 4711 vop_unlock_post(void *ap, int rc) 4712 { 4713 struct vop_unlock_args *a = ap; 4714 4715 if (a->a_flags & LK_INTERLOCK) 4716 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4717 } 4718 #endif 4719 4720 void 4721 vop_create_post(void *ap, int rc) 4722 { 4723 struct vop_create_args *a = ap; 4724 4725 if (!rc) 4726 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4727 } 4728 4729 void 4730 vop_deleteextattr_post(void *ap, int rc) 4731 { 4732 struct vop_deleteextattr_args *a = ap; 4733 4734 if (!rc) 4735 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4736 } 4737 4738 void 4739 vop_link_post(void *ap, int rc) 4740 { 4741 struct vop_link_args *a = ap; 4742 4743 if (!rc) { 4744 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4745 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4746 } 4747 } 4748 4749 void 4750 vop_mkdir_post(void *ap, int rc) 4751 { 4752 struct vop_mkdir_args *a = ap; 4753 4754 if (!rc) 4755 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4756 } 4757 4758 void 4759 vop_mknod_post(void *ap, int rc) 4760 { 4761 struct vop_mknod_args *a = ap; 4762 4763 if (!rc) 4764 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4765 } 4766 4767 void 4768 vop_reclaim_post(void *ap, int rc) 4769 { 4770 struct vop_reclaim_args *a = ap; 4771 4772 if (!rc) 4773 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 4774 } 4775 4776 void 4777 vop_remove_post(void *ap, int rc) 4778 { 4779 struct vop_remove_args *a = ap; 4780 4781 if (!rc) { 4782 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4783 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4784 } 4785 } 4786 4787 void 4788 vop_rename_post(void *ap, int rc) 4789 { 4790 struct vop_rename_args *a = ap; 4791 long hint; 4792 4793 if (!rc) { 4794 hint = NOTE_WRITE; 4795 if (a->a_fdvp == a->a_tdvp) { 4796 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 4797 hint |= NOTE_LINK; 4798 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4799 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4800 } else { 4801 hint |= NOTE_EXTEND; 4802 if (a->a_fvp->v_type == VDIR) 4803 hint |= NOTE_LINK; 4804 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4805 4806 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 4807 a->a_tvp->v_type == VDIR) 4808 hint &= ~NOTE_LINK; 4809 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4810 } 4811 4812 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4813 if (a->a_tvp) 4814 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4815 } 4816 if (a->a_tdvp != a->a_fdvp) 4817 vdrop(a->a_fdvp); 4818 if (a->a_tvp != a->a_fvp) 4819 vdrop(a->a_fvp); 4820 vdrop(a->a_tdvp); 4821 if (a->a_tvp) 4822 vdrop(a->a_tvp); 4823 } 4824 4825 void 4826 
vop_rmdir_post(void *ap, int rc) 4827 { 4828 struct vop_rmdir_args *a = ap; 4829 4830 if (!rc) { 4831 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4832 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4833 } 4834 } 4835 4836 void 4837 vop_setattr_post(void *ap, int rc) 4838 { 4839 struct vop_setattr_args *a = ap; 4840 4841 if (!rc) 4842 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4843 } 4844 4845 void 4846 vop_setextattr_post(void *ap, int rc) 4847 { 4848 struct vop_setextattr_args *a = ap; 4849 4850 if (!rc) 4851 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4852 } 4853 4854 void 4855 vop_symlink_post(void *ap, int rc) 4856 { 4857 struct vop_symlink_args *a = ap; 4858 4859 if (!rc) 4860 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4861 } 4862 4863 void 4864 vop_open_post(void *ap, int rc) 4865 { 4866 struct vop_open_args *a = ap; 4867 4868 if (!rc) 4869 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 4870 } 4871 4872 void 4873 vop_close_post(void *ap, int rc) 4874 { 4875 struct vop_close_args *a = ap; 4876 4877 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 4878 (a->a_vp->v_iflag & VI_DOOMED) == 0)) { 4879 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 4880 NOTE_CLOSE_WRITE : NOTE_CLOSE); 4881 } 4882 } 4883 4884 void 4885 vop_read_post(void *ap, int rc) 4886 { 4887 struct vop_read_args *a = ap; 4888 4889 if (!rc) 4890 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 4891 } 4892 4893 void 4894 vop_readdir_post(void *ap, int rc) 4895 { 4896 struct vop_readdir_args *a = ap; 4897 4898 if (!rc) 4899 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 4900 } 4901 4902 static struct knlist fs_knlist; 4903 4904 static void 4905 vfs_event_init(void *arg) 4906 { 4907 knlist_init_mtx(&fs_knlist, NULL); 4908 } 4909 /* XXX - correct order? */ 4910 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4911 4912 void 4913 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4914 { 4915 4916 KNOTE_UNLOCKED(&fs_knlist, event); 4917 } 4918 4919 static int filt_fsattach(struct knote *kn); 4920 static void filt_fsdetach(struct knote *kn); 4921 static int filt_fsevent(struct knote *kn, long hint); 4922 4923 struct filterops fs_filtops = { 4924 .f_isfd = 0, 4925 .f_attach = filt_fsattach, 4926 .f_detach = filt_fsdetach, 4927 .f_event = filt_fsevent 4928 }; 4929 4930 static int 4931 filt_fsattach(struct knote *kn) 4932 { 4933 4934 kn->kn_flags |= EV_CLEAR; 4935 knlist_add(&fs_knlist, kn, 0); 4936 return (0); 4937 } 4938 4939 static void 4940 filt_fsdetach(struct knote *kn) 4941 { 4942 4943 knlist_remove(&fs_knlist, kn, 0); 4944 } 4945 4946 static int 4947 filt_fsevent(struct knote *kn, long hint) 4948 { 4949 4950 kn->kn_fflags |= hint; 4951 return (kn->kn_fflags != 0); 4952 } 4953 4954 static int 4955 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4956 { 4957 struct vfsidctl vc; 4958 int error; 4959 struct mount *mp; 4960 4961 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4962 if (error) 4963 return (error); 4964 if (vc.vc_vers != VFS_CTL_VERS1) 4965 return (EINVAL); 4966 mp = vfs_getvfs(&vc.vc_fsid); 4967 if (mp == NULL) 4968 return (ENOENT); 4969 /* ensure that a specific sysctl goes to the right filesystem. 
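 * A caller is expected to have filled in vc_vers (VFS_CTL_VERS1), the
 * fsid of the target mount (typically taken from statfs(2)), either a
 * filesystem type name or "*" as a wildcard, and a filesystem-specific
 * vc_op before writing the structure to this node; the type name is
 * cross-checked against the mount here so a request aimed at one
 * filesystem type is not applied to a mount of another type.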
*/ 4970 if (strcmp(vc.vc_fstypename, "*") != 0 && 4971 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4972 vfs_rel(mp); 4973 return (EINVAL); 4974 } 4975 VCTLTOREQ(&vc, req); 4976 error = VFS_SYSCTL(mp, vc.vc_op, req); 4977 vfs_rel(mp); 4978 return (error); 4979 } 4980 4981 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4982 NULL, 0, sysctl_vfs_ctl, "", 4983 "Sysctl by fsid"); 4984 4985 /* 4986 * Function to initialize a va_filerev field sensibly. 4987 * XXX: Wouldn't a random number make a lot more sense ?? 4988 */ 4989 u_quad_t 4990 init_va_filerev(void) 4991 { 4992 struct bintime bt; 4993 4994 getbinuptime(&bt); 4995 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4996 } 4997 4998 static int filt_vfsread(struct knote *kn, long hint); 4999 static int filt_vfswrite(struct knote *kn, long hint); 5000 static int filt_vfsvnode(struct knote *kn, long hint); 5001 static void filt_vfsdetach(struct knote *kn); 5002 static struct filterops vfsread_filtops = { 5003 .f_isfd = 1, 5004 .f_detach = filt_vfsdetach, 5005 .f_event = filt_vfsread 5006 }; 5007 static struct filterops vfswrite_filtops = { 5008 .f_isfd = 1, 5009 .f_detach = filt_vfsdetach, 5010 .f_event = filt_vfswrite 5011 }; 5012 static struct filterops vfsvnode_filtops = { 5013 .f_isfd = 1, 5014 .f_detach = filt_vfsdetach, 5015 .f_event = filt_vfsvnode 5016 }; 5017 5018 static void 5019 vfs_knllock(void *arg) 5020 { 5021 struct vnode *vp = arg; 5022 5023 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5024 } 5025 5026 static void 5027 vfs_knlunlock(void *arg) 5028 { 5029 struct vnode *vp = arg; 5030 5031 VOP_UNLOCK(vp, 0); 5032 } 5033 5034 static void 5035 vfs_knl_assert_locked(void *arg) 5036 { 5037 #ifdef DEBUG_VFS_LOCKS 5038 struct vnode *vp = arg; 5039 5040 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5041 #endif 5042 } 5043 5044 static void 5045 vfs_knl_assert_unlocked(void *arg) 5046 { 5047 #ifdef DEBUG_VFS_LOCKS 5048 struct vnode *vp = arg; 5049 5050 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5051 #endif 5052 } 5053 5054 int 5055 vfs_kqfilter(struct vop_kqfilter_args *ap) 5056 { 5057 struct vnode *vp = ap->a_vp; 5058 struct knote *kn = ap->a_kn; 5059 struct knlist *knl; 5060 5061 switch (kn->kn_filter) { 5062 case EVFILT_READ: 5063 kn->kn_fop = &vfsread_filtops; 5064 break; 5065 case EVFILT_WRITE: 5066 kn->kn_fop = &vfswrite_filtops; 5067 break; 5068 case EVFILT_VNODE: 5069 kn->kn_fop = &vfsvnode_filtops; 5070 break; 5071 default: 5072 return (EINVAL); 5073 } 5074 5075 kn->kn_hook = (caddr_t)vp; 5076 5077 v_addpollinfo(vp); 5078 if (vp->v_pollinfo == NULL) 5079 return (ENOMEM); 5080 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5081 vhold(vp); 5082 knlist_add(knl, kn, 0); 5083 5084 return (0); 5085 } 5086 5087 /* 5088 * Detach knote from vnode 5089 */ 5090 static void 5091 filt_vfsdetach(struct knote *kn) 5092 { 5093 struct vnode *vp = (struct vnode *)kn->kn_hook; 5094 5095 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5096 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5097 vdrop(vp); 5098 } 5099 5100 /*ARGSUSED*/ 5101 static int 5102 filt_vfsread(struct knote *kn, long hint) 5103 { 5104 struct vnode *vp = (struct vnode *)kn->kn_hook; 5105 struct vattr va; 5106 int res; 5107 5108 /* 5109 * filesystem is gone, so set the EOF flag and schedule 5110 * the knote for deletion. 
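 * Otherwise the vnode's attributes are fetched below and kn_data is set
 * to the distance from the current file offset to end of file; with
 * purely illustrative numbers, an 8192-byte file read up to offset 4096
 * reports kn_data = 4096, and the filter fires when kn_data is non-zero
 * or NOTE_FILE_POLL was requested.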
5111 */ 5112 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5113 VI_LOCK(vp); 5114 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5115 VI_UNLOCK(vp); 5116 return (1); 5117 } 5118 5119 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5120 return (0); 5121 5122 VI_LOCK(vp); 5123 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5124 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5125 VI_UNLOCK(vp); 5126 return (res); 5127 } 5128 5129 /*ARGSUSED*/ 5130 static int 5131 filt_vfswrite(struct knote *kn, long hint) 5132 { 5133 struct vnode *vp = (struct vnode *)kn->kn_hook; 5134 5135 VI_LOCK(vp); 5136 5137 /* 5138 * filesystem is gone, so set the EOF flag and schedule 5139 * the knote for deletion. 5140 */ 5141 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5142 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5143 5144 kn->kn_data = 0; 5145 VI_UNLOCK(vp); 5146 return (1); 5147 } 5148 5149 static int 5150 filt_vfsvnode(struct knote *kn, long hint) 5151 { 5152 struct vnode *vp = (struct vnode *)kn->kn_hook; 5153 int res; 5154 5155 VI_LOCK(vp); 5156 if (kn->kn_sfflags & hint) 5157 kn->kn_fflags |= hint; 5158 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5159 kn->kn_flags |= EV_EOF; 5160 VI_UNLOCK(vp); 5161 return (1); 5162 } 5163 res = (kn->kn_fflags != 0); 5164 VI_UNLOCK(vp); 5165 return (res); 5166 } 5167 5168 int 5169 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5170 { 5171 int error; 5172 5173 if (dp->d_reclen > ap->a_uio->uio_resid) 5174 return (ENAMETOOLONG); 5175 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5176 if (error) { 5177 if (ap->a_ncookies != NULL) { 5178 if (ap->a_cookies != NULL) 5179 free(ap->a_cookies, M_TEMP); 5180 ap->a_cookies = NULL; 5181 *ap->a_ncookies = 0; 5182 } 5183 return (error); 5184 } 5185 if (ap->a_ncookies == NULL) 5186 return (0); 5187 5188 KASSERT(ap->a_cookies, 5189 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5190 5191 *ap->a_cookies = realloc(*ap->a_cookies, 5192 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5193 (*ap->a_cookies)[*ap->a_ncookies] = off; 5194 *ap->a_ncookies += 1; 5195 return (0); 5196 } 5197 5198 /* 5199 * Mark for update the access time of the file if the filesystem 5200 * supports VOP_MARKATIME. This functionality is used by execve and 5201 * mmap, so we want to avoid the I/O implied by directly setting 5202 * va_atime for the sake of efficiency. 5203 */ 5204 void 5205 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5206 { 5207 struct mount *mp; 5208 5209 mp = vp->v_mount; 5210 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5211 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5212 (void)VOP_MARKATIME(vp); 5213 } 5214 5215 /* 5216 * The purpose of this routine is to remove granularity from accmode_t, 5217 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5218 * VADMIN and VAPPEND. 5219 * 5220 * If it returns 0, the caller is supposed to continue with the usual 5221 * access checks using 'accmode' as modified by this routine. If it 5222 * returns nonzero value, the caller is supposed to return that value 5223 * as errno. 5224 * 5225 * Note that after this routine runs, accmode may be zero. 5226 */ 5227 int 5228 vfs_unixify_accmode(accmode_t *accmode) 5229 { 5230 /* 5231 * There is no way to specify explicit "deny" rule using 5232 * file mode or POSIX.1e ACLs. 
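 * Such a request is simply cleared below and reported as success,
 * leaving nothing for the caller to check.  As an illustration of the
 * rest of the mapping (hypothetical request): VREAD_ATTRIBUTES | VWRITE
 * reduces to plain VWRITE once VSTAT_PERMS is stripped, while any
 * request containing VDELETE or VDELETE_CHILD is rejected with EPERM
 * before the mapping is applied.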
5233 */ 5234 if (*accmode & VEXPLICIT_DENY) { 5235 *accmode = 0; 5236 return (0); 5237 } 5238 5239 /* 5240 * None of these can be translated into usual access bits. 5241 * Also, the common case for NFSv4 ACLs is to not contain 5242 * either of these bits. Caller should check for VWRITE 5243 * on the containing directory instead. 5244 */ 5245 if (*accmode & (VDELETE_CHILD | VDELETE)) 5246 return (EPERM); 5247 5248 if (*accmode & VADMIN_PERMS) { 5249 *accmode &= ~VADMIN_PERMS; 5250 *accmode |= VADMIN; 5251 } 5252 5253 /* 5254 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5255 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5256 */ 5257 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5258 5259 return (0); 5260 } 5261 5262 /* 5263 * These are helper functions for filesystems to traverse all 5264 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5265 * 5266 * This interface replaces MNT_VNODE_FOREACH. 5267 */ 5268 5269 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5270 5271 struct vnode * 5272 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5273 { 5274 struct vnode *vp; 5275 5276 if (should_yield()) 5277 kern_yield(PRI_USER); 5278 MNT_ILOCK(mp); 5279 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5280 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5281 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5282 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5283 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5284 continue; 5285 VI_LOCK(vp); 5286 if ((vp->v_iflag & VI_DOOMED) != 0) { 5287 VI_UNLOCK(vp); 5288 continue; 5289 } 5290 break; 5291 } 5292 if (vp == NULL) { 5293 __mnt_vnode_markerfree_all(mvp, mp); 5294 /* MNT_IUNLOCK(mp); -- done in above function */ 5295 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5296 return (NULL); 5297 } 5298 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5299 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5300 MNT_IUNLOCK(mp); 5301 return (vp); 5302 } 5303 5304 struct vnode * 5305 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5306 { 5307 struct vnode *vp; 5308 5309 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5310 MNT_ILOCK(mp); 5311 MNT_REF(mp); 5312 (*mvp)->v_mount = mp; 5313 (*mvp)->v_type = VMARKER; 5314 5315 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5316 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5317 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5318 continue; 5319 VI_LOCK(vp); 5320 if ((vp->v_iflag & VI_DOOMED) != 0) { 5321 VI_UNLOCK(vp); 5322 continue; 5323 } 5324 break; 5325 } 5326 if (vp == NULL) { 5327 MNT_REL(mp); 5328 MNT_IUNLOCK(mp); 5329 free(*mvp, M_VNODE_MARKER); 5330 *mvp = NULL; 5331 return (NULL); 5332 } 5333 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5334 MNT_IUNLOCK(mp); 5335 return (vp); 5336 } 5337 5338 void 5339 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5340 { 5341 5342 if (*mvp == NULL) { 5343 MNT_IUNLOCK(mp); 5344 return; 5345 } 5346 5347 mtx_assert(MNT_MTX(mp), MA_OWNED); 5348 5349 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5350 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5351 MNT_REL(mp); 5352 MNT_IUNLOCK(mp); 5353 free(*mvp, M_VNODE_MARKER); 5354 *mvp = NULL; 5355 } 5356 5357 /* 5358 * These are helper functions for filesystems to traverse their 5359 * active vnodes. 
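 * The iterator returns each vnode with its interlock held; a caller
 * loops much like vfs_msync() above does, e.g. (a sketch with a purely
 * hypothetical interesting() predicate):
 *
 *	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
 *		if (!interesting(vp)) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		... e.g. vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
 *		    curthread) and then work on the locked vnode ...
 *	}
 *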
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5360 */ 5361 static void 5362 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5363 { 5364 5365 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5366 5367 MNT_ILOCK(mp); 5368 MNT_REL(mp); 5369 MNT_IUNLOCK(mp); 5370 free(*mvp, M_VNODE_MARKER); 5371 *mvp = NULL; 5372 } 5373 5374 /* 5375 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5376 * conventional lock order during mnt_vnode_next_active iteration. 5377 * 5378 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5379 * The list lock is dropped and reacquired. On success, both locks are held. 5380 * On failure, the mount vnode list lock is held but the vnode interlock is 5381 * not, and the procedure may have yielded. 5382 */ 5383 static bool 5384 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5385 struct vnode *vp) 5386 { 5387 const struct vnode *tmp; 5388 bool held, ret; 5389 5390 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5391 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5392 ("%s: bad marker", __func__)); 5393 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5394 ("%s: inappropriate vnode", __func__)); 5395 ASSERT_VI_UNLOCKED(vp, __func__); 5396 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5397 5398 ret = false; 5399 5400 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5401 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5402 5403 /* 5404 * Use a hold to prevent vp from disappearing while the mount vnode 5405 * list lock is dropped and reacquired. Normally a hold would be 5406 * acquired with vhold(), but that might try to acquire the vnode 5407 * interlock, which would be a LOR with the mount vnode list lock. 5408 */ 5409 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5410 mtx_unlock(&mp->mnt_listmtx); 5411 if (!held) 5412 goto abort; 5413 VI_LOCK(vp); 5414 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5415 vdropl(vp); 5416 goto abort; 5417 } 5418 mtx_lock(&mp->mnt_listmtx); 5419 5420 /* 5421 * Determine whether the vnode is still the next one after the marker, 5422 * excepting any other markers. If the vnode has not been doomed by 5423 * vgone() then the hold should have ensured that it remained on the 5424 * active list. If it has been doomed but is still on the active list, 5425 * don't abort, but rather skip over it (avoid spinning on doomed 5426 * vnodes). 5427 */ 5428 tmp = mvp; 5429 do { 5430 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5431 } while (tmp != NULL && tmp->v_type == VMARKER); 5432 if (tmp != vp) { 5433 mtx_unlock(&mp->mnt_listmtx); 5434 VI_UNLOCK(vp); 5435 goto abort; 5436 } 5437 5438 ret = true; 5439 goto out; 5440 abort: 5441 maybe_yield(); 5442 mtx_lock(&mp->mnt_listmtx); 5443 out: 5444 if (ret) 5445 ASSERT_VI_LOCKED(vp, __func__); 5446 else 5447 ASSERT_VI_UNLOCKED(vp, __func__); 5448 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5449 return (ret); 5450 } 5451 5452 static struct vnode * 5453 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5454 { 5455 struct vnode *vp, *nvp; 5456 5457 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5458 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5459 restart: 5460 vp = TAILQ_NEXT(*mvp, v_actfreelist); 5461 while (vp != NULL) { 5462 if (vp->v_type == VMARKER) { 5463 vp = TAILQ_NEXT(vp, v_actfreelist); 5464 continue; 5465 } 5466 /* 5467 * Try-lock because this is the wrong lock order. 
If that does 5468 * not succeed, drop the mount vnode list lock and try to 5469 * reacquire it and the vnode interlock in the right order. 5470 */ 5471 if (!VI_TRYLOCK(vp) && 5472 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 5473 goto restart; 5474 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 5475 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 5476 ("alien vnode on the active list %p %p", vp, mp)); 5477 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 5478 break; 5479 nvp = TAILQ_NEXT(vp, v_actfreelist); 5480 VI_UNLOCK(vp); 5481 vp = nvp; 5482 } 5483 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5484 5485 /* Check if we are done */ 5486 if (vp == NULL) { 5487 mtx_unlock(&mp->mnt_listmtx); 5488 mnt_vnode_markerfree_active(mvp, mp); 5489 return (NULL); 5490 } 5491 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 5492 mtx_unlock(&mp->mnt_listmtx); 5493 ASSERT_VI_LOCKED(vp, "active iter"); 5494 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 5495 return (vp); 5496 } 5497 5498 struct vnode * 5499 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5500 { 5501 5502 if (should_yield()) 5503 kern_yield(PRI_USER); 5504 mtx_lock(&mp->mnt_listmtx); 5505 return (mnt_vnode_next_active(mvp, mp)); 5506 } 5507 5508 struct vnode * 5509 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 5510 { 5511 struct vnode *vp; 5512 5513 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5514 MNT_ILOCK(mp); 5515 MNT_REF(mp); 5516 MNT_IUNLOCK(mp); 5517 (*mvp)->v_type = VMARKER; 5518 (*mvp)->v_mount = mp; 5519 5520 mtx_lock(&mp->mnt_listmtx); 5521 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 5522 if (vp == NULL) { 5523 mtx_unlock(&mp->mnt_listmtx); 5524 mnt_vnode_markerfree_active(mvp, mp); 5525 return (NULL); 5526 } 5527 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 5528 return (mnt_vnode_next_active(mvp, mp)); 5529 } 5530 5531 void 5532 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5533 { 5534 5535 if (*mvp == NULL) 5536 return; 5537 5538 mtx_lock(&mp->mnt_listmtx); 5539 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5540 mtx_unlock(&mp->mnt_listmtx); 5541 mnt_vnode_markerfree_active(mvp, mp); 5542 } 5543
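/*
 * Illustrative use of the "all vnodes" iterator above (a sketch, not a
 * caller in this file): it walks mnt_nvnodelist, so it also visits
 * vnodes that are not on the active list, and it likewise hands back
 * each vnode with the interlock held.
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {	(hypothetical filter)
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		... work on vp ...
 *	}
 *
 * A caller that terminates the loop early is expected to clean up the
 * marker, e.g. via MNT_VNODE_FOREACH_ALL_ABORT(), instead of leaving it
 * on the list.
 */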