/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_usecount(struct vnode *);
static void	v_incr_usecount_locked(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	vnlru_return_batches(struct vfsops *mnt_op);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
 */
static unsigned long	numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

static u_long mnt_free_list_batch = 128;
SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
    &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
    "Number of calls to reassignbuf");

static counter_u64_t free_owe_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
    "Number of times free vnodes kept on active list due to VFS "
    "owing inactivation");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define	SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define	SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
int desiredvnodes;
static int gapvnodes;		/* gap between wanted and desired */
static int vhiwat;		/* enough extras after expansion */
static int vlowat;		/* minimal extras before expansion */
static int vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static int
sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
{
	int error, old_desiredvnodes;

	old_desiredvnodes = desiredvnodes;
	if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
		return (error);
	if (old_desiredvnodes != desiredvnodes) {
		wantfreevnodes = desiredvnodes / 4;
		/* XXX locking seems to be incomplete. */
		vfs_hash_changesize(desiredvnodes);
		cache_changesize(desiredvnodes);
	}
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
    sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen > PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX + 1, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if ((vp->v_iflag & VI_DOOMED) != 0) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary).
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(td, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp, 0);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.
 * In the limit, as the physical memory size grows, the ratio of the
 * memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * 1024 * 1024 / 64)	/* 8M */
#endif

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	u_int i;
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
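	 *
	 * As an illustrative sketch (the machine size here is hypothetical):
	 * with 4GB of RAM, pgtok(vm_cnt.v_page_count) is roughly 4194304 KB,
	 * so the computation below yields physvnodes ~= maxproc + 4194304/64
	 * + 3 * min(98304 * 16, 4194304)/64 ~= maxproc + 65536 + 73728,
	 * i.e. about 140000 vnodes, before the virtvnodes cap is applied.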
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one per buf so that
	 * we cannot fail an insert.  reassignbuf() callers cannot
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	free_owe_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *        |
 *        +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	MNT_ILOCK(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point's fate is decided.  If the thread doing the unmounting
	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
	 * indicating that this mount point has survived the unmount attempt
	 * and vfs_busy should retry.  Otherwise the unmounter thread will set
	 * the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating
	 * that the mount point is about to be really destroyed.  vfs_busy
	 * needs to release its reference on the mount point in this case and
	 * return with ENOENT, telling the caller that the mount point it
	 * tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
	mp->mnt_lockref--;
	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we
 * have to check what we got, and fall back to the slow path if so.
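 *
 * As an illustration (the fsid values here are made up), the cache index
 * folds the 64-bit fsid into FSID_CACHE_SIZE buckets: for val[0] =
 * 0x12345678 and val[1] = 0x9, hash = 0x12345671 and
 * (hash >> 16 ^ hash) & (256 - 1) = 0x45.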
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
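 *
 * As an illustrative sketch (the numbers are hypothetical): for a
 * filesystem type with vfc_typenum = 1 and mntid_base = 0x1234, the loop
 * below packs the minor number as
 * (1 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF) = 0x01120034,
 * and sets val[0] = makedev(255, 0x01120034).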
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param mp		 Try to reclaim vnodes from this mountpoint
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it from doing anything.)
		 * The order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    ((vp->v_iflag & VI_FREE) != 0) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_iflag & VI_FREE) != 0 ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_USER);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0, "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static void
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp;
	struct mount *mp;
	bool tried_batches;

	tried_batches = false;
	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (vp == NULL) {
			if (tried_batches)
				break;
			mtx_unlock(&vnode_free_list_mtx);
			vnlru_return_batches(mnt_op);
			tried_batches = true;
			mtx_lock(&vnode_free_list_mtx);
			continue;
		}

		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		KASSERT((vp->v_iflag & VI_FREE) != 0,
		    ("Removing vnode not on freelist"));
		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
		    ("Mangling active vnode"));
		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);

		/*
		 * Don't recycle if our vnode is from a different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if ((mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
			continue;
		}
		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
		    vp, ("vp inconsistent on freelist"));

		/*
		 * The clear of VI_FREE prevents activation of the
		 * vnode.  There is no sense in putting the vnode on
		 * the mount point active list, only to remove it
		 * later during recycling.  Inline the relevant part
		 * of vholdl(), to avoid triggering assertions or
		 * activating.
		 */
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		VNODE_REFCOUNT_FENCE_REL();
		refcount_acquire(&vp->v_holdcnt);

		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		/*
		 * If the recycling succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_free_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_free_list_mtx);
}


/* XXX some names and initialization are bad for limits and watermarks. */
static int
vspace(void)
{
	int space;

	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
	if (numvnodes > desiredvnodes)
		return (0);
	space = desiredvnodes - numvnodes;
	if (freevnodes > wantfreevnodes)
		space += freevnodes - wantfreevnodes;
	return (space);
}

static void
vnlru_return_batch_locked(struct mount *mp)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	if (mp->mnt_tmpfreevnodelistsize == 0)
		return;

	TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) {
		VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp,
		    ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist"));
		vp->v_mflag &= ~VMP_TMPMNTFREELIST;
	}
	mtx_lock(&vnode_free_list_mtx);
	TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist);
	freevnodes += mp->mnt_tmpfreevnodelistsize;
	mtx_unlock(&vnode_free_list_mtx);
	mp->mnt_tmpfreevnodelistsize = 0;
}

static void
vnlru_return_batch(struct mount *mp)
{

	mtx_lock(&mp->mnt_listmtx);
	vnlru_return_batch_locked(mp);
	mtx_unlock(&mp->mnt_listmtx);
}

static void
vnlru_return_batches(struct vfsops *mnt_op)
{
	struct mount *mp, *nmp;
	bool need_unbusy;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		need_unbusy = false;
		if (mnt_op != NULL && mp->mnt_op != mnt_op)
			goto next;
		if (mp->mnt_tmpfreevnodelistsize == 0)
			goto next;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) {
			vnlru_return_batch(mp);
			need_unbusy = true;
			mtx_lock(&mountlist_mtx);
		}
next:
		nmp = TAILQ_NEXT(mp, mnt_list);
		if (need_unbusy)
			vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_free_list_mtx);
		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (numvnodes > desiredvnodes)
			vnlru_free_locked(numvnodes - desiredvnodes, NULL);
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or modestly
		 * reducing its free list).
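		 * As a rough illustration (hypothetical tuning): with
		 * desiredvnodes = 100000 and wantfreevnodes = 25000,
		 * vspace() computes gapvnodes = 75000, vhiwat = 6818
		 * (about 9%) and vlowat = 3409 (about 4.5%).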
		 * Otherwise, try to reclaim space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (vspace() >= vlowat && force == 0) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		onumvnodes = numvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (numvnodes <= desiredvnodes)
			usevnodes = numvnodes - freevnodes;
		else
			usevnodes = numvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to be conservatively large
		 * to ensure that it alone doesn't prevent making progress.
		 * The value can easily be so large that it is effectively
		 * infinite in some congested and misconfigured cases, and
		 * this is necessary.  Normally it is about 8 to 100 (pages),
		 * which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, reclaim_nc_src, trigger);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim();
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
		/*
		 * After becoming active to expand above low water, keep
		 * active until above high water.
		 */
		force = vspace() < vhiwat;
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with VI_DOOMED via vgonel() so that anyone who does find
	 * it will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0) {
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

static void
vcheckspace(void)
{

	if (vspace() < vlowat && vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

/*
 * Wait if necessary for space for a new vnode.
 */
static int
getnewvnode_wait(int suspended)
{

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (numvnodes >= desiredvnodes) {
		if (suspended) {
			/*
			 * The file system is being suspended.  We cannot
			 * risk a deadlock here, so allow allocation of
			 * another vnode even if this would give too many.
			 */
			return (0);
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
	}
	/* Post-adjust like the pre-adjust in getnewvnode(). */
	if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
		vnlru_free_locked(1, NULL);
	return (numvnodes >= desiredvnodes ? ENFILE : 0);
}

/*
 * This hack is fragile, and probably not needed any more now that the
 * watermark handling works.
 */
void
getnewvnode_reserve(u_int count)
{
	struct thread *td;

	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
	/* XXX no longer so quick, but this part is not racy. */
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
		vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes,
		    freevnodes - wantfreevnodes), NULL);
	mtx_unlock(&vnode_free_list_mtx);

	td = curthread;
	/* First try to be quick and racy. */
	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
		td->td_vp_reserv += count;
		vcheckspace();	/* XXX no longer so quick, but more racy */
		return;
	} else
		atomic_subtract_long(&numvnodes, count);

	mtx_lock(&vnode_free_list_mtx);
	while (count > 0) {
		if (getnewvnode_wait(0) == 0) {
			count--;
			td->td_vp_reserv++;
			atomic_add_long(&numvnodes, 1);
		}
	}
	vcheckspace();
	mtx_unlock(&vnode_free_list_mtx);
}

/*
 * This hack is fragile, especially if desiredvnodes or wantvnodes are
 * misconfigured or changed significantly.  Reducing desiredvnodes below
 * the reserved amount should cause bizarre behaviour like reducing it
 * below the number of active vnodes -- the system will try to reduce
 * numvnodes to match, but should fail, so the subtraction below should
 * not overflow.
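 *
 * A hedged usage sketch (not taken from any particular caller): a
 * filesystem that must not block on vnode allocation inside a critical
 * section would bracket it as
 *
 *	getnewvnode_reserve(1);
 *	... getnewvnode() consumes the reserve instead of waiting ...
 *	getnewvnode_drop_reserve();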
 */
void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;
	static int cyclecount;
	int error __unused;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_free_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	counter_u64_add(vnodes_created, 1);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash so that vfs_hash_index() is useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;
	int active;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
	    ("Active vnode list size %d > Vnode list size %d",
	    mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
	active = vp->v_iflag & VI_ACTIVE;
	vp->v_iflag &= ~VI_ACTIVE;
	if (active) {
		mtx_lock(&mp->mnt_listmtx);
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
	 * Hence the need to hold the vnode interlock throughout.
	 */
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) &&
	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		if (dtr != NULL)
			dtr(vp, dtr_arg);
		return (EBUSY);
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
	    ("Activating already active vnode"));
	vp->v_iflag |= VI_ACTIVE;
	mtx_lock(&mp->mnt_listmtx);
	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
	mp->mnt_activevnodelistsize++;
	mtx_unlock(&mp->mnt_listmtx);
	VI_UNLOCK(vp);
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0 && !(flags & V_CLEANONLY))
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
			BO_UNLOCK(bo);
			vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
			BO_LOCK(bo);
		}
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL &&
	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
		VM_OBJECT_WLOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
1804 OBJPR_CLEANONLY : 0); 1805 VM_OBJECT_WUNLOCK(bo->bo_object); 1806 } 1807 1808 #ifdef INVARIANTS 1809 BO_LOCK(bo); 1810 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1811 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1812 bo->bo_clean.bv_cnt > 0)) 1813 panic("vinvalbuf: flush failed"); 1814 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1815 bo->bo_dirty.bv_cnt > 0) 1816 panic("vinvalbuf: flush dirty failed"); 1817 BO_UNLOCK(bo); 1818 #endif 1819 return (0); 1820 } 1821 1822 /* 1823 * Flush out and invalidate all buffers associated with a vnode. 1824 * Called with the underlying object locked. 1825 */ 1826 int 1827 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1828 { 1829 1830 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1831 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1832 if (vp->v_object != NULL && vp->v_object->handle != vp) 1833 return (0); 1834 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1835 } 1836 1837 /* 1838 * Flush out buffers on the specified list. 1839 * 1840 */ 1841 static int 1842 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1843 int slptimeo) 1844 { 1845 struct buf *bp, *nbp; 1846 int retval, error; 1847 daddr_t lblkno; 1848 b_xflags_t xflags; 1849 1850 ASSERT_BO_WLOCKED(bo); 1851 1852 retval = 0; 1853 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1854 /* 1855 * If we are flushing both V_NORMAL and V_ALT buffers then 1856 * do not skip any buffers. If we are flushing only V_NORMAL 1857 * buffers then skip buffers marked as BX_ALTDATA. If we are 1858 * flushing only V_ALT buffers then skip buffers not marked 1859 * as BX_ALTDATA. 1860 */ 1861 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 1862 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 1863 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 1864 continue; 1865 } 1866 if (nbp != NULL) { 1867 lblkno = nbp->b_lblkno; 1868 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1869 } 1870 retval = EAGAIN; 1871 error = BUF_TIMELOCK(bp, 1872 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1873 "flushbuf", slpflag, slptimeo); 1874 if (error) { 1875 BO_LOCK(bo); 1876 return (error != ENOLCK ? error : EAGAIN); 1877 } 1878 KASSERT(bp->b_bufobj == bo, 1879 ("bp %p wrong b_bufobj %p should be %p", 1880 bp, bp->b_bufobj, bo)); 1881 /* 1882 * XXX Since there are no node locks for NFS, I 1883 * believe there is a slight chance that a delayed 1884 * write will occur while sleeping just above, so 1885 * check for it. 1886 */ 1887 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1888 (flags & V_SAVE)) { 1889 bremfree(bp); 1890 bp->b_flags |= B_ASYNC; 1891 bwrite(bp); 1892 BO_LOCK(bo); 1893 return (EAGAIN); /* XXX: why not loop ? 
*/ 1894 } 1895 bremfree(bp); 1896 bp->b_flags |= (B_INVAL | B_RELBUF); 1897 bp->b_flags &= ~B_ASYNC; 1898 brelse(bp); 1899 BO_LOCK(bo); 1900 if (nbp == NULL) 1901 break; 1902 nbp = gbincore(bo, lblkno); 1903 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1904 != xflags) 1905 break; /* nbp invalid */ 1906 } 1907 return (retval); 1908 } 1909 1910 int 1911 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 1912 { 1913 struct buf *bp; 1914 int error; 1915 daddr_t lblkno; 1916 1917 ASSERT_BO_LOCKED(bo); 1918 1919 for (lblkno = startn;;) { 1920 again: 1921 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 1922 if (bp == NULL || bp->b_lblkno >= endn || 1923 bp->b_lblkno < startn) 1924 break; 1925 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 1926 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 1927 if (error != 0) { 1928 BO_RLOCK(bo); 1929 if (error == ENOLCK) 1930 goto again; 1931 return (error); 1932 } 1933 KASSERT(bp->b_bufobj == bo, 1934 ("bp %p wrong b_bufobj %p should be %p", 1935 bp, bp->b_bufobj, bo)); 1936 lblkno = bp->b_lblkno + 1; 1937 if ((bp->b_flags & B_MANAGED) == 0) 1938 bremfree(bp); 1939 bp->b_flags |= B_RELBUF; 1940 /* 1941 * In the VMIO case, use the B_NOREUSE flag to hint that the 1942 * pages backing each buffer in the range are unlikely to be 1943 * reused. Dirty buffers will have the hint applied once 1944 * they've been written. 1945 */ 1946 if ((bp->b_flags & B_VMIO) != 0) 1947 bp->b_flags |= B_NOREUSE; 1948 brelse(bp); 1949 BO_RLOCK(bo); 1950 } 1951 return (0); 1952 } 1953 1954 /* 1955 * Truncate a file's buffer and pages to a specified length. This 1956 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1957 * sync activity. 1958 */ 1959 int 1960 vtruncbuf(struct vnode *vp, off_t length, int blksize) 1961 { 1962 struct buf *bp, *nbp; 1963 struct bufobj *bo; 1964 daddr_t startlbn; 1965 1966 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 1967 vp, blksize, (uintmax_t)length); 1968 1969 /* 1970 * Round up to the *next* lbn. 1971 */ 1972 startlbn = howmany(length, blksize); 1973 1974 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1975 1976 bo = &vp->v_bufobj; 1977 restart_unlocked: 1978 BO_LOCK(bo); 1979 1980 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 1981 ; 1982 1983 if (length > 0) { 1984 restartsync: 1985 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1986 if (bp->b_lblkno > 0) 1987 continue; 1988 /* 1989 * Since we hold the vnode lock this should only 1990 * fail if we're racing with the buf daemon. 1991 */ 1992 if (BUF_LOCK(bp, 1993 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1994 BO_LOCKPTR(bo)) == ENOLCK) 1995 goto restart_unlocked; 1996 1997 VNASSERT((bp->b_flags & B_DELWRI), vp, 1998 ("buf(%p) on dirty queue without DELWRI", bp)); 1999 2000 bremfree(bp); 2001 bawrite(bp); 2002 BO_LOCK(bo); 2003 goto restartsync; 2004 } 2005 } 2006 2007 bufobj_wwait(bo, 0, 0); 2008 BO_UNLOCK(bo); 2009 vnode_pager_setsize(vp, length); 2010 2011 return (0); 2012 } 2013 2014 /* 2015 * Invalidate the cached pages of a file's buffer within the range of block 2016 * numbers [startlbn, endlbn). 
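 *
 * Illustrative use only; the byte-to-lbn conversion here is an
 * assumption of this sketch rather than a quote of any caller.  To
 * drop the blocks backing the byte range [start, end) of a vnode
 * whose bufobj block size is blksize, a filesystem could call
 *
 *	v_inval_buf_range(vp, start / blksize,
 *	    howmany(end, blksize), blksize);
 *
 * taking advantage of the half-open [startlbn, endlbn) interval.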
2017 */ 2018 void 2019 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2020 int blksize) 2021 { 2022 struct bufobj *bo; 2023 off_t start, end; 2024 2025 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2026 2027 start = blksize * startlbn; 2028 end = blksize * endlbn; 2029 2030 bo = &vp->v_bufobj; 2031 BO_LOCK(bo); 2032 MPASS(blksize == bo->bo_bsize); 2033 2034 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2035 ; 2036 2037 BO_UNLOCK(bo); 2038 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2039 } 2040 2041 static int 2042 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2043 daddr_t startlbn, daddr_t endlbn) 2044 { 2045 struct buf *bp, *nbp; 2046 bool anyfreed; 2047 2048 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2049 ASSERT_BO_LOCKED(bo); 2050 2051 do { 2052 anyfreed = false; 2053 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2054 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2055 continue; 2056 if (BUF_LOCK(bp, 2057 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2058 BO_LOCKPTR(bo)) == ENOLCK) { 2059 BO_LOCK(bo); 2060 return (EAGAIN); 2061 } 2062 2063 bremfree(bp); 2064 bp->b_flags |= B_INVAL | B_RELBUF; 2065 bp->b_flags &= ~B_ASYNC; 2066 brelse(bp); 2067 anyfreed = true; 2068 2069 BO_LOCK(bo); 2070 if (nbp != NULL && 2071 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2072 nbp->b_vp != vp || 2073 (nbp->b_flags & B_DELWRI) != 0)) 2074 return (EAGAIN); 2075 } 2076 2077 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2078 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2079 continue; 2080 if (BUF_LOCK(bp, 2081 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2082 BO_LOCKPTR(bo)) == ENOLCK) { 2083 BO_LOCK(bo); 2084 return (EAGAIN); 2085 } 2086 bremfree(bp); 2087 bp->b_flags |= B_INVAL | B_RELBUF; 2088 bp->b_flags &= ~B_ASYNC; 2089 brelse(bp); 2090 anyfreed = true; 2091 2092 BO_LOCK(bo); 2093 if (nbp != NULL && 2094 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2095 (nbp->b_vp != vp) || 2096 (nbp->b_flags & B_DELWRI) == 0)) 2097 return (EAGAIN); 2098 } 2099 } while (anyfreed); 2100 return (0); 2101 } 2102 2103 static void 2104 buf_vlist_remove(struct buf *bp) 2105 { 2106 struct bufv *bv; 2107 2108 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2109 ASSERT_BO_WLOCKED(bp->b_bufobj); 2110 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2111 (BX_VNDIRTY|BX_VNCLEAN), 2112 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2113 if (bp->b_xflags & BX_VNDIRTY) 2114 bv = &bp->b_bufobj->bo_dirty; 2115 else 2116 bv = &bp->b_bufobj->bo_clean; 2117 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2118 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2119 bv->bv_cnt--; 2120 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2121 } 2122 2123 /* 2124 * Add the buffer to the sorted clean or dirty block list. 2125 * 2126 * NOTE: xflags is passed as a constant, optimizing this inline function! 2127 */ 2128 static void 2129 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2130 { 2131 struct bufv *bv; 2132 struct buf *n; 2133 int error; 2134 2135 ASSERT_BO_WLOCKED(bo); 2136 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2137 ("dead bo %p", bo)); 2138 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2139 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2140 bp->b_xflags |= xflags; 2141 if (xflags & BX_VNDIRTY) 2142 bv = &bo->bo_dirty; 2143 else 2144 bv = &bo->bo_clean; 2145 2146 /* 2147 * Keep the list ordered. Optimize empty list insertion. 
Assume 2148 * we tend to grow at the tail so lookup_le should usually be cheaper 2149 * than _ge. 2150 */ 2151 if (bv->bv_cnt == 0 || 2152 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2153 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2154 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2155 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2156 else 2157 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2158 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2159 if (error) 2160 panic("buf_vlist_add: Preallocated nodes insufficient."); 2161 bv->bv_cnt++; 2162 } 2163 2164 /* 2165 * Look up a buffer using the buffer tries. 2166 */ 2167 struct buf * 2168 gbincore(struct bufobj *bo, daddr_t lblkno) 2169 { 2170 struct buf *bp; 2171 2172 ASSERT_BO_LOCKED(bo); 2173 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2174 if (bp != NULL) 2175 return (bp); 2176 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2177 } 2178 2179 /* 2180 * Associate a buffer with a vnode. 2181 */ 2182 void 2183 bgetvp(struct vnode *vp, struct buf *bp) 2184 { 2185 struct bufobj *bo; 2186 2187 bo = &vp->v_bufobj; 2188 ASSERT_BO_WLOCKED(bo); 2189 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2190 2191 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2192 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2193 ("bgetvp: bp already attached! %p", bp)); 2194 2195 vhold(vp); 2196 bp->b_vp = vp; 2197 bp->b_bufobj = bo; 2198 /* 2199 * Insert onto list for new vnode. 2200 */ 2201 buf_vlist_add(bp, bo, BX_VNCLEAN); 2202 } 2203 2204 /* 2205 * Disassociate a buffer from a vnode. 2206 */ 2207 void 2208 brelvp(struct buf *bp) 2209 { 2210 struct bufobj *bo; 2211 struct vnode *vp; 2212 2213 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2214 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2215 2216 /* 2217 * Delete from old vnode list, if on one. 2218 */ 2219 vp = bp->b_vp; /* XXX */ 2220 bo = bp->b_bufobj; 2221 BO_LOCK(bo); 2222 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2223 buf_vlist_remove(bp); 2224 else 2225 panic("brelvp: Buffer %p not on queue.", bp); 2226 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2227 bo->bo_flag &= ~BO_ONWORKLST; 2228 mtx_lock(&sync_mtx); 2229 LIST_REMOVE(bo, bo_synclist); 2230 syncer_worklist_len--; 2231 mtx_unlock(&sync_mtx); 2232 } 2233 bp->b_vp = NULL; 2234 bp->b_bufobj = NULL; 2235 BO_UNLOCK(bo); 2236 vdrop(vp); 2237 } 2238 2239 /* 2240 * Add an item to the syncer work queue. 
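 *
 * The pending worklists form a wheel of syncer_maxdelay buckets; an
 * entry due in "delay" seconds is placed in slot
 * (syncer_delayno + delay) & syncer_mask.  As a worked example with
 * illustrative numbers: if syncer_delayno == 30, delay == 6 and
 * syncer_mask == 255, the bufobj lands in slot 36 and is visited
 * once the wheel advances to that bucket.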
2241 */ 2242 static void 2243 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2244 { 2245 int slot; 2246 2247 ASSERT_BO_WLOCKED(bo); 2248 2249 mtx_lock(&sync_mtx); 2250 if (bo->bo_flag & BO_ONWORKLST) 2251 LIST_REMOVE(bo, bo_synclist); 2252 else { 2253 bo->bo_flag |= BO_ONWORKLST; 2254 syncer_worklist_len++; 2255 } 2256 2257 if (delay > syncer_maxdelay - 2) 2258 delay = syncer_maxdelay - 2; 2259 slot = (syncer_delayno + delay) & syncer_mask; 2260 2261 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2262 mtx_unlock(&sync_mtx); 2263 } 2264 2265 static int 2266 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2267 { 2268 int error, len; 2269 2270 mtx_lock(&sync_mtx); 2271 len = syncer_worklist_len - sync_vnode_count; 2272 mtx_unlock(&sync_mtx); 2273 error = SYSCTL_OUT(req, &len, sizeof(len)); 2274 return (error); 2275 } 2276 2277 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2278 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2279 2280 static struct proc *updateproc; 2281 static void sched_sync(void); 2282 static struct kproc_desc up_kp = { 2283 "syncer", 2284 sched_sync, 2285 &updateproc 2286 }; 2287 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2288 2289 static int 2290 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2291 { 2292 struct vnode *vp; 2293 struct mount *mp; 2294 2295 *bo = LIST_FIRST(slp); 2296 if (*bo == NULL) 2297 return (0); 2298 vp = bo2vnode(*bo); 2299 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2300 return (1); 2301 /* 2302 * We use vhold in case the vnode does not 2303 * successfully sync. vhold prevents the vnode from 2304 * going away when we unlock the sync_mtx so that 2305 * we can acquire the vnode interlock. 2306 */ 2307 vholdl(vp); 2308 mtx_unlock(&sync_mtx); 2309 VI_UNLOCK(vp); 2310 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2311 vdrop(vp); 2312 mtx_lock(&sync_mtx); 2313 return (*bo == LIST_FIRST(slp)); 2314 } 2315 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2316 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2317 VOP_UNLOCK(vp, 0); 2318 vn_finished_write(mp); 2319 BO_LOCK(*bo); 2320 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2321 /* 2322 * Put us back on the worklist. The worklist 2323 * routine will remove us from our current 2324 * position and then add us back in at a later 2325 * position. 2326 */ 2327 vn_syncer_add_to_worklist(*bo, syncdelay); 2328 } 2329 BO_UNLOCK(*bo); 2330 vdrop(vp); 2331 mtx_lock(&sync_mtx); 2332 return (0); 2333 } 2334 2335 static int first_printf = 1; 2336 2337 /* 2338 * System filesystem synchronizer daemon. 
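 *
 * In rough outline (a restatement of the loop below, not additional
 * behavior): once a second the daemon advances syncer_delayno, lazily
 * fsyncs every vnode in the bucket it just passed, and then sleeps
 * until the next second, unless a positive rushjob tells it to drain
 * further buckets immediately.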
2339 */ 2340 static void 2341 sched_sync(void) 2342 { 2343 struct synclist *next, *slp; 2344 struct bufobj *bo; 2345 long starttime; 2346 struct thread *td = curthread; 2347 int last_work_seen; 2348 int net_worklist_len; 2349 int syncer_final_iter; 2350 int error; 2351 2352 last_work_seen = 0; 2353 syncer_final_iter = 0; 2354 syncer_state = SYNCER_RUNNING; 2355 starttime = time_uptime; 2356 td->td_pflags |= TDP_NORUNNINGBUF; 2357 2358 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2359 SHUTDOWN_PRI_LAST); 2360 2361 mtx_lock(&sync_mtx); 2362 for (;;) { 2363 if (syncer_state == SYNCER_FINAL_DELAY && 2364 syncer_final_iter == 0) { 2365 mtx_unlock(&sync_mtx); 2366 kproc_suspend_check(td->td_proc); 2367 mtx_lock(&sync_mtx); 2368 } 2369 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2370 if (syncer_state != SYNCER_RUNNING && 2371 starttime != time_uptime) { 2372 if (first_printf) { 2373 printf("\nSyncing disks, vnodes remaining... "); 2374 first_printf = 0; 2375 } 2376 printf("%d ", net_worklist_len); 2377 } 2378 starttime = time_uptime; 2379 2380 /* 2381 * Push files whose dirty time has expired. Be careful 2382 * of interrupt race on slp queue. 2383 * 2384 * Skip over empty worklist slots when shutting down. 2385 */ 2386 do { 2387 slp = &syncer_workitem_pending[syncer_delayno]; 2388 syncer_delayno += 1; 2389 if (syncer_delayno == syncer_maxdelay) 2390 syncer_delayno = 0; 2391 next = &syncer_workitem_pending[syncer_delayno]; 2392 /* 2393 * If the worklist has wrapped since it 2394 * was emptied of all but syncer vnodes, 2395 * switch to the FINAL_DELAY state and run 2396 * for one more second. 2397 */ 2398 if (syncer_state == SYNCER_SHUTTING_DOWN && 2399 net_worklist_len == 0 && 2400 last_work_seen == syncer_delayno) { 2401 syncer_state = SYNCER_FINAL_DELAY; 2402 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2403 } 2404 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2405 syncer_worklist_len > 0); 2406 2407 /* 2408 * Keep track of the last time there was anything 2409 * on the worklist other than syncer vnodes. 2410 * Return to the SHUTTING_DOWN state if any 2411 * new work appears. 2412 */ 2413 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2414 last_work_seen = syncer_delayno; 2415 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2416 syncer_state = SYNCER_SHUTTING_DOWN; 2417 while (!LIST_EMPTY(slp)) { 2418 error = sync_vnode(slp, &bo, td); 2419 if (error == 1) { 2420 LIST_REMOVE(bo, bo_synclist); 2421 LIST_INSERT_HEAD(next, bo, bo_synclist); 2422 continue; 2423 } 2424 2425 if (first_printf == 0) { 2426 /* 2427 * Drop the sync mutex, because some watchdog 2428 * drivers need to sleep while patting the watchdog. 2429 */ 2430 mtx_unlock(&sync_mtx); 2431 wdog_kern_pat(WD_LASTVAL); 2432 mtx_lock(&sync_mtx); 2433 } 2434 2435 } 2436 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2437 syncer_final_iter--; 2438 /* 2439 * The variable rushjob allows the kernel to speed up the 2440 * processing of the filesystem syncer process. A rushjob 2441 * value of N tells the filesystem syncer to process the next 2442 * N seconds worth of work on its queue ASAP. Currently rushjob 2443 * is used by the soft update code to speed up the filesystem 2444 * syncer process when the incore state is getting so far 2445 * ahead of the disk that the kernel memory pool is being 2446 * threatened with exhaustion.
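 *
 * For example (illustrative): once speedup_syncer() has pushed
 * rushjob up to 3, the test below makes the loop run three
 * back-to-back iterations, draining three seconds worth of buckets
 * without sleeping, before the normal once-a-second pacing resumes.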
2447 */ 2448 if (rushjob > 0) { 2449 rushjob -= 1; 2450 continue; 2451 } 2452 /* 2453 * Just sleep for a short period of time between 2454 * iterations when shutting down to allow some I/O 2455 * to happen. 2456 * 2457 * If it has taken us less than a second to process the 2458 * current work, then wait. Otherwise start right over 2459 * again. We can still lose time if any single round 2460 * takes more than two seconds, but it does not really 2461 * matter as we are just trying to generally pace the 2462 * filesystem activity. 2463 */ 2464 if (syncer_state != SYNCER_RUNNING || 2465 time_uptime == starttime) { 2466 thread_lock(td); 2467 sched_prio(td, PPAUSE); 2468 thread_unlock(td); 2469 } 2470 if (syncer_state != SYNCER_RUNNING) 2471 cv_timedwait(&sync_wakeup, &sync_mtx, 2472 hz / SYNCER_SHUTDOWN_SPEEDUP); 2473 else if (time_uptime == starttime) 2474 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2475 } 2476 } 2477 2478 /* 2479 * Request the syncer daemon to speed up its work. 2480 * We never push it to speed up more than half of its 2481 * normal turn time, otherwise it could take over the cpu. 2482 */ 2483 int 2484 speedup_syncer(void) 2485 { 2486 int ret = 0; 2487 2488 mtx_lock(&sync_mtx); 2489 if (rushjob < syncdelay / 2) { 2490 rushjob += 1; 2491 stat_rush_requests += 1; 2492 ret = 1; 2493 } 2494 mtx_unlock(&sync_mtx); 2495 cv_broadcast(&sync_wakeup); 2496 return (ret); 2497 } 2498 2499 /* 2500 * Tell the syncer to speed up its work and run through its work 2501 * list several times, then tell it to shut down. 2502 */ 2503 static void 2504 syncer_shutdown(void *arg, int howto) 2505 { 2506 2507 if (howto & RB_NOSYNC) 2508 return; 2509 mtx_lock(&sync_mtx); 2510 syncer_state = SYNCER_SHUTTING_DOWN; 2511 rushjob = 0; 2512 mtx_unlock(&sync_mtx); 2513 cv_broadcast(&sync_wakeup); 2514 kproc_shutdown(arg, howto); 2515 } 2516 2517 void 2518 syncer_suspend(void) 2519 { 2520 2521 syncer_shutdown(updateproc, 0); 2522 } 2523 2524 void 2525 syncer_resume(void) 2526 { 2527 2528 mtx_lock(&sync_mtx); 2529 first_printf = 1; 2530 syncer_state = SYNCER_RUNNING; 2531 mtx_unlock(&sync_mtx); 2532 cv_broadcast(&sync_wakeup); 2533 kproc_resume(updateproc); 2534 } 2535 2536 /* 2537 * Reassign a buffer from one vnode to another. 2538 * Used to assign file specific control information 2539 * (indirect blocks) to the vnode to which they belong. 2540 */ 2541 void 2542 reassignbuf(struct buf *bp) 2543 { 2544 struct vnode *vp; 2545 struct bufobj *bo; 2546 int delay; 2547 #ifdef INVARIANTS 2548 struct bufv *bv; 2549 #endif 2550 2551 vp = bp->b_vp; 2552 bo = bp->b_bufobj; 2553 ++reassignbufcalls; 2554 2555 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2556 bp, bp->b_vp, bp->b_flags); 2557 /* 2558 * B_PAGING flagged buffers cannot be reassigned because their vp 2559 * is not fully linked in. 2560 */ 2561 if (bp->b_flags & B_PAGING) 2562 panic("cannot reassign paging buffer"); 2563 2564 /* 2565 * Delete from old vnode list, if on one. 2566 */ 2567 BO_LOCK(bo); 2568 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2569 buf_vlist_remove(bp); 2570 else 2571 panic("reassignbuf: Buffer %p not on queue.", bp); 2572 /* 2573 * If dirty, put on list of dirty buffers; otherwise insert onto list 2574 * of clean buffers.
2575 */ 2576 if (bp->b_flags & B_DELWRI) { 2577 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2578 switch (vp->v_type) { 2579 case VDIR: 2580 delay = dirdelay; 2581 break; 2582 case VCHR: 2583 delay = metadelay; 2584 break; 2585 default: 2586 delay = filedelay; 2587 } 2588 vn_syncer_add_to_worklist(bo, delay); 2589 } 2590 buf_vlist_add(bp, bo, BX_VNDIRTY); 2591 } else { 2592 buf_vlist_add(bp, bo, BX_VNCLEAN); 2593 2594 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2595 mtx_lock(&sync_mtx); 2596 LIST_REMOVE(bo, bo_synclist); 2597 syncer_worklist_len--; 2598 mtx_unlock(&sync_mtx); 2599 bo->bo_flag &= ~BO_ONWORKLST; 2600 } 2601 } 2602 #ifdef INVARIANTS 2603 bv = &bo->bo_clean; 2604 bp = TAILQ_FIRST(&bv->bv_hd); 2605 KASSERT(bp == NULL || bp->b_bufobj == bo, 2606 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2607 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2608 KASSERT(bp == NULL || bp->b_bufobj == bo, 2609 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2610 bv = &bo->bo_dirty; 2611 bp = TAILQ_FIRST(&bv->bv_hd); 2612 KASSERT(bp == NULL || bp->b_bufobj == bo, 2613 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2614 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2615 KASSERT(bp == NULL || bp->b_bufobj == bo, 2616 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2617 #endif 2618 BO_UNLOCK(bo); 2619 } 2620 2621 static void 2622 v_init_counters(struct vnode *vp) 2623 { 2624 2625 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2626 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2627 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2628 2629 refcount_init(&vp->v_holdcnt, 1); 2630 refcount_init(&vp->v_usecount, 1); 2631 } 2632 2633 static void 2634 v_incr_usecount_locked(struct vnode *vp) 2635 { 2636 2637 ASSERT_VI_LOCKED(vp, __func__); 2638 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2639 VNASSERT(vp->v_usecount == 0, vp, 2640 ("vnode with usecount and VI_OWEINACT set")); 2641 vp->v_iflag &= ~VI_OWEINACT; 2642 } 2643 refcount_acquire(&vp->v_usecount); 2644 v_incr_devcount(vp); 2645 } 2646 2647 /* 2648 * Increment the use count on the vnode, taking care to reference 2649 * the driver's usecount if this is a chardev. 2650 */ 2651 static void 2652 v_incr_usecount(struct vnode *vp) 2653 { 2654 2655 ASSERT_VI_UNLOCKED(vp, __func__); 2656 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2657 2658 if (vp->v_type != VCHR && 2659 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2660 VNODE_REFCOUNT_FENCE_ACQ(); 2661 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2662 ("vnode with usecount and VI_OWEINACT set")); 2663 } else { 2664 VI_LOCK(vp); 2665 v_incr_usecount_locked(vp); 2666 VI_UNLOCK(vp); 2667 } 2668 } 2669 2670 /* 2671 * Increment si_usecount of the associated device, if any. 2672 */ 2673 static void 2674 v_incr_devcount(struct vnode *vp) 2675 { 2676 2677 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2678 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2679 dev_lock(); 2680 vp->v_rdev->si_usecount++; 2681 dev_unlock(); 2682 } 2683 } 2684 2685 /* 2686 * Decrement si_usecount of the associated device, if any. 2687 */ 2688 static void 2689 v_decr_devcount(struct vnode *vp) 2690 { 2691 2692 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2693 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2694 dev_lock(); 2695 vp->v_rdev->si_usecount--; 2696 dev_unlock(); 2697 } 2698 } 2699 2700 /* 2701 * Grab a particular vnode from the free list, increment its 2702 * reference count and lock it. VI_DOOMED is set if the vnode 2703 * is being destroyed. 
Only callers who specify LK_RETRY will 2704 * see doomed vnodes. If inactive processing was delayed in 2705 * vput try to do it here. 2706 * 2707 * Notes on lockless counter manipulation: 2708 * _vhold, vputx and other routines make various decisions based 2709 * on either holdcnt or usecount being 0. As long as either counter 2710 * is not transitioning 0->1 nor 1->0, the manipulation can be done 2711 * with atomic operations. Otherwise the interlock is taken covering 2712 * both the atomic and additional actions. 2713 */ 2714 int 2715 vget(struct vnode *vp, int flags, struct thread *td) 2716 { 2717 int error, oweinact; 2718 2719 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2720 ("vget: invalid lock operation")); 2721 2722 if ((flags & LK_INTERLOCK) != 0) 2723 ASSERT_VI_LOCKED(vp, __func__); 2724 else 2725 ASSERT_VI_UNLOCKED(vp, __func__); 2726 if ((flags & LK_VNHELD) != 0) 2727 VNASSERT((vp->v_holdcnt > 0), vp, 2728 ("vget: LK_VNHELD passed but vnode not held")); 2729 2730 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2731 2732 if ((flags & LK_VNHELD) == 0) 2733 _vhold(vp, (flags & LK_INTERLOCK) != 0); 2734 2735 if ((error = vn_lock(vp, flags)) != 0) { 2736 vdrop(vp); 2737 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2738 vp); 2739 return (error); 2740 } 2741 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2742 panic("vget: vn_lock failed to return ENOENT\n"); 2743 /* 2744 * We don't guarantee that any particular close will 2745 * trigger inactive processing so just make a best effort 2746 * here at preventing a reference to a removed file. If 2747 * we don't succeed no harm is done. 2748 * 2749 * Upgrade our holdcnt to a usecount. 2750 */ 2751 if (vp->v_type == VCHR || 2752 !refcount_acquire_if_not_zero(&vp->v_usecount)) { 2753 VI_LOCK(vp); 2754 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2755 oweinact = 0; 2756 } else { 2757 oweinact = 1; 2758 vp->v_iflag &= ~VI_OWEINACT; 2759 VNODE_REFCOUNT_FENCE_REL(); 2760 } 2761 refcount_acquire(&vp->v_usecount); 2762 v_incr_devcount(vp); 2763 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2764 (flags & LK_NOWAIT) == 0) 2765 vinactive(vp, td); 2766 VI_UNLOCK(vp); 2767 } 2768 return (0); 2769 } 2770 2771 /* 2772 * Increase the reference (use) and hold count of a vnode. 2773 * This will also remove the vnode from the free list if it is presently free. 2774 */ 2775 void 2776 vref(struct vnode *vp) 2777 { 2778 2779 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2780 _vhold(vp, false); 2781 v_incr_usecount(vp); 2782 } 2783 2784 void 2785 vrefl(struct vnode *vp) 2786 { 2787 2788 ASSERT_VI_LOCKED(vp, __func__); 2789 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2790 _vhold(vp, true); 2791 v_incr_usecount_locked(vp); 2792 } 2793 2794 void 2795 vrefact(struct vnode *vp) 2796 { 2797 2798 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2799 if (__predict_false(vp->v_type == VCHR)) { 2800 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2801 ("%s: wrong ref counts", __func__)); 2802 vref(vp); 2803 return; 2804 } 2805 #ifdef INVARIANTS 2806 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 2807 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2808 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2809 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2810 #else 2811 refcount_acquire(&vp->v_holdcnt); 2812 refcount_acquire(&vp->v_usecount); 2813 #endif 2814 } 2815 2816 /* 2817 * Return reference count of a vnode. 
2818 * 2819 * The results of this call are only guaranteed when some mechanism is used to 2820 * stop other processes from gaining references to the vnode. This may be the 2821 * case if the caller holds the only reference. This is also useful when stale 2822 * data is acceptable as race conditions may be accounted for by some other 2823 * means. 2824 */ 2825 int 2826 vrefcnt(struct vnode *vp) 2827 { 2828 2829 return (vp->v_usecount); 2830 } 2831 2832 #define VPUTX_VRELE 1 2833 #define VPUTX_VPUT 2 2834 #define VPUTX_VUNREF 3 2835 2836 /* 2837 * Decrement the use and hold counts for a vnode. 2838 * 2839 * See an explanation near vget() as to why atomic operation is safe. 2840 */ 2841 static void 2842 vputx(struct vnode *vp, int func) 2843 { 2844 int error; 2845 2846 KASSERT(vp != NULL, ("vputx: null vp")); 2847 if (func == VPUTX_VUNREF) 2848 ASSERT_VOP_LOCKED(vp, "vunref"); 2849 else if (func == VPUTX_VPUT) 2850 ASSERT_VOP_LOCKED(vp, "vput"); 2851 else 2852 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2853 ASSERT_VI_UNLOCKED(vp, __func__); 2854 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2855 2856 if (vp->v_type != VCHR && 2857 refcount_release_if_not_last(&vp->v_usecount)) { 2858 if (func == VPUTX_VPUT) 2859 VOP_UNLOCK(vp, 0); 2860 vdrop(vp); 2861 return; 2862 } 2863 2864 VI_LOCK(vp); 2865 2866 /* 2867 * We want to hold the vnode until the inactive finishes to 2868 * prevent vgone() races. We drop the use count here and the 2869 * hold count below when we're done. 2870 */ 2871 if (!refcount_release(&vp->v_usecount) || 2872 (vp->v_iflag & VI_DOINGINACT)) { 2873 if (func == VPUTX_VPUT) 2874 VOP_UNLOCK(vp, 0); 2875 v_decr_devcount(vp); 2876 vdropl(vp); 2877 return; 2878 } 2879 2880 v_decr_devcount(vp); 2881 2882 error = 0; 2883 2884 if (vp->v_usecount != 0) { 2885 vn_printf(vp, "vputx: usecount not zero for vnode "); 2886 panic("vputx: usecount not zero"); 2887 } 2888 2889 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2890 2891 /* 2892 * We must call VOP_INACTIVE with the node locked. Mark 2893 * as VI_DOINGINACT to avoid recursion. 2894 */ 2895 vp->v_iflag |= VI_OWEINACT; 2896 switch (func) { 2897 case VPUTX_VRELE: 2898 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2899 VI_LOCK(vp); 2900 break; 2901 case VPUTX_VPUT: 2902 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2903 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2904 LK_NOWAIT); 2905 VI_LOCK(vp); 2906 } 2907 break; 2908 case VPUTX_VUNREF: 2909 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2910 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2911 VI_LOCK(vp); 2912 } 2913 break; 2914 } 2915 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 2916 ("vnode with usecount and VI_OWEINACT set")); 2917 if (error == 0) { 2918 if (vp->v_iflag & VI_OWEINACT) 2919 vinactive(vp, curthread); 2920 if (func != VPUTX_VUNREF) 2921 VOP_UNLOCK(vp, 0); 2922 } 2923 vdropl(vp); 2924 } 2925 2926 /* 2927 * Vnode put/release. 2928 * If count drops to zero, call inactive routine and return to freelist. 2929 */ 2930 void 2931 vrele(struct vnode *vp) 2932 { 2933 2934 vputx(vp, VPUTX_VRELE); 2935 } 2936 2937 /* 2938 * Release an already locked vnode. This gives the same effect as 2939 * unlock+vrele(), but takes less time and avoids releasing and 2940 * re-acquiring the lock (as vrele() acquires the lock internally.) 2941 */ 2942 void 2943 vput(struct vnode *vp) 2944 { 2945 2946 vputx(vp, VPUTX_VPUT); 2947 } 2948 2949 /* 2950 * Release an exclusively locked vnode. Do not unlock the vnode lock.
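 *
 * To summarize the locking contracts of the three vputx() wrappers
 * (a restatement of the rules above, not new ones):
 *
 *	vrele(vp)  - vnode lock not held on entry or on exit
 *	vput(vp)   - vnode lock held on entry, released on exit
 *	vunref(vp) - vnode lock held on entry and still held on exit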
2951 */ 2952 void 2953 vunref(struct vnode *vp) 2954 { 2955 2956 vputx(vp, VPUTX_VUNREF); 2957 } 2958 2959 /* 2960 * Increase the hold count and activate if this is the first reference. 2961 */ 2962 void 2963 _vhold(struct vnode *vp, bool locked) 2964 { 2965 struct mount *mp; 2966 2967 if (locked) 2968 ASSERT_VI_LOCKED(vp, __func__); 2969 else 2970 ASSERT_VI_UNLOCKED(vp, __func__); 2971 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2972 if (!locked) { 2973 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 2974 VNODE_REFCOUNT_FENCE_ACQ(); 2975 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2976 ("_vhold: vnode with holdcnt is free")); 2977 return; 2978 } 2979 VI_LOCK(vp); 2980 } 2981 if ((vp->v_iflag & VI_FREE) == 0) { 2982 refcount_acquire(&vp->v_holdcnt); 2983 if (!locked) 2984 VI_UNLOCK(vp); 2985 return; 2986 } 2987 VNASSERT(vp->v_holdcnt == 0, vp, 2988 ("%s: wrong hold count", __func__)); 2989 VNASSERT(vp->v_op != NULL, vp, 2990 ("%s: vnode already reclaimed.", __func__)); 2991 /* 2992 * Remove a vnode from the free list, mark it as in use, 2993 * and put it on the active list. 2994 */ 2995 VNASSERT(vp->v_mount != NULL, vp, 2996 ("_vhold: vnode not on per mount vnode list")); 2997 mp = vp->v_mount; 2998 mtx_lock(&mp->mnt_listmtx); 2999 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 3000 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3001 mp->mnt_tmpfreevnodelistsize--; 3002 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 3003 } else { 3004 mtx_lock(&vnode_free_list_mtx); 3005 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 3006 freevnodes--; 3007 mtx_unlock(&vnode_free_list_mtx); 3008 } 3009 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 3010 ("Activating already active vnode")); 3011 vp->v_iflag &= ~VI_FREE; 3012 vp->v_iflag |= VI_ACTIVE; 3013 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 3014 mp->mnt_activevnodelistsize++; 3015 mtx_unlock(&mp->mnt_listmtx); 3016 refcount_acquire(&vp->v_holdcnt); 3017 if (!locked) 3018 VI_UNLOCK(vp); 3019 } 3020 3021 void 3022 vholdnz(struct vnode *vp) 3023 { 3024 3025 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3026 #ifdef INVARIANTS 3027 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3028 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 3029 #else 3030 atomic_add_int(&vp->v_holdcnt, 1); 3031 #endif 3032 } 3033 3034 /* 3035 * Drop the hold count of the vnode. If this is the last reference to 3036 * the vnode we place it on the free list unless it has been vgone'd 3037 * (marked VI_DOOMED) in which case we will free it. 3038 * 3039 * Because the vnode vm object keeps a hold reference on the vnode if 3040 * there is at least one resident non-cached page, the vnode cannot 3041 * leave the active list without the page cleanup done. 3042 */ 3043 void 3044 _vdrop(struct vnode *vp, bool locked) 3045 { 3046 struct bufobj *bo; 3047 struct mount *mp; 3048 int active; 3049 3050 if (locked) 3051 ASSERT_VI_LOCKED(vp, __func__); 3052 else 3053 ASSERT_VI_UNLOCKED(vp, __func__); 3054 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3055 if ((int)vp->v_holdcnt <= 0) 3056 panic("vdrop: holdcnt %d", vp->v_holdcnt); 3057 if (!locked) { 3058 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3059 return; 3060 VI_LOCK(vp); 3061 } 3062 if (refcount_release(&vp->v_holdcnt) == 0) { 3063 VI_UNLOCK(vp); 3064 return; 3065 } 3066 if ((vp->v_iflag & VI_DOOMED) == 0) { 3067 /* 3068 * Mark a vnode as free: remove it from its active list 3069 * and put it up for recycling on the freelist. 
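 *
 * The vnode is first parked on the per-mount mnt_tmpfreevnodelist;
 * once that list grows to mnt_free_list_batch entries it is returned
 * to the global free list in one batch by vnlru_return_batch_locked(),
 * as done in the code below.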
3070 */ 3071 VNASSERT(vp->v_op != NULL, vp, 3072 ("vdropl: vnode already reclaimed.")); 3073 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3074 ("vnode already free")); 3075 VNASSERT(vp->v_holdcnt == 0, vp, 3076 ("vdropl: freeing when we shouldn't")); 3077 active = vp->v_iflag & VI_ACTIVE; 3078 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3079 vp->v_iflag &= ~VI_ACTIVE; 3080 mp = vp->v_mount; 3081 if (mp != NULL) { 3082 mtx_lock(&mp->mnt_listmtx); 3083 if (active) { 3084 TAILQ_REMOVE(&mp->mnt_activevnodelist, 3085 vp, v_actfreelist); 3086 mp->mnt_activevnodelistsize--; 3087 } 3088 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, 3089 vp, v_actfreelist); 3090 mp->mnt_tmpfreevnodelistsize++; 3091 vp->v_iflag |= VI_FREE; 3092 vp->v_mflag |= VMP_TMPMNTFREELIST; 3093 VI_UNLOCK(vp); 3094 if (mp->mnt_tmpfreevnodelistsize >= 3095 mnt_free_list_batch) 3096 vnlru_return_batch_locked(mp); 3097 mtx_unlock(&mp->mnt_listmtx); 3098 } else { 3099 VNASSERT(active == 0, vp, 3100 ("vdropl: active vnode not on per mount " 3101 "vnode list")); 3102 mtx_lock(&vnode_free_list_mtx); 3103 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 3104 v_actfreelist); 3105 freevnodes++; 3106 vp->v_iflag |= VI_FREE; 3107 VI_UNLOCK(vp); 3108 mtx_unlock(&vnode_free_list_mtx); 3109 } 3110 } else { 3111 VI_UNLOCK(vp); 3112 counter_u64_add(free_owe_inact, 1); 3113 } 3114 return; 3115 } 3116 /* 3117 * The vnode has been marked for destruction, so free it. 3118 * 3119 * The vnode will be returned to the zone where it will 3120 * normally remain until it is needed for another vnode. We 3121 * need to cleanup (or verify that the cleanup has already 3122 * been done) any residual data left from its current use 3123 * so as not to contaminate the freshly allocated vnode. 3124 */ 3125 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 3126 atomic_subtract_long(&numvnodes, 1); 3127 bo = &vp->v_bufobj; 3128 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3129 ("cleaned vnode still on the free list.")); 3130 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 3131 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 3132 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 3133 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 3134 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 3135 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 3136 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 3137 ("clean blk trie not empty")); 3138 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 3139 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 3140 ("dirty blk trie not empty")); 3141 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 3142 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 3143 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 3144 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 3145 ("Dangling rangelock waiters")); 3146 VI_UNLOCK(vp); 3147 #ifdef MAC 3148 mac_vnode_destroy(vp); 3149 #endif 3150 if (vp->v_pollinfo != NULL) { 3151 destroy_vpollinfo(vp->v_pollinfo); 3152 vp->v_pollinfo = NULL; 3153 } 3154 #ifdef INVARIANTS 3155 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. 
*/ 3156 vp->v_op = NULL; 3157 #endif 3158 vp->v_mountedhere = NULL; 3159 vp->v_unpcb = NULL; 3160 vp->v_rdev = NULL; 3161 vp->v_fifoinfo = NULL; 3162 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 3163 vp->v_iflag = 0; 3164 vp->v_vflag = 0; 3165 bo->bo_flag = 0; 3166 uma_zfree(vnode_zone, vp); 3167 } 3168 3169 /* 3170 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3171 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3172 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3173 * failed lock upgrade. 3174 */ 3175 void 3176 vinactive(struct vnode *vp, struct thread *td) 3177 { 3178 struct vm_object *obj; 3179 3180 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3181 ASSERT_VI_LOCKED(vp, "vinactive"); 3182 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3183 ("vinactive: recursed on VI_DOINGINACT")); 3184 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3185 vp->v_iflag |= VI_DOINGINACT; 3186 vp->v_iflag &= ~VI_OWEINACT; 3187 VI_UNLOCK(vp); 3188 /* 3189 * Before moving off the active list, we must be sure that any 3190 * modified pages are converted into the vnode's dirty 3191 * buffers, since these will no longer be checked once the 3192 * vnode is on the inactive list. 3193 * 3194 * The write-out of the dirty pages is asynchronous. At the 3195 * point that VOP_INACTIVE() is called, there could still be 3196 * pending I/O and dirty pages in the object. 3197 */ 3198 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3199 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 3200 VM_OBJECT_WLOCK(obj); 3201 vm_object_page_clean(obj, 0, 0, 0); 3202 VM_OBJECT_WUNLOCK(obj); 3203 } 3204 VOP_INACTIVE(vp, td); 3205 VI_LOCK(vp); 3206 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3207 ("vinactive: lost VI_DOINGINACT")); 3208 vp->v_iflag &= ~VI_DOINGINACT; 3209 } 3210 3211 /* 3212 * Remove any vnodes in the vnode table belonging to mount point mp. 3213 * 3214 * If FORCECLOSE is not specified, there should not be any active ones, 3215 * return error if any are found (nb: this is a user error, not a 3216 * system error). If FORCECLOSE is specified, detach any active vnodes 3217 * that are found. 3218 * 3219 * If WRITECLOSE is set, only flush out regular file vnodes open for 3220 * writing. 3221 * 3222 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3223 * 3224 * `rootrefs' specifies the base reference count for the root vnode 3225 * of this filesystem. The root vnode is considered busy if its 3226 * v_usecount exceeds this value. On a successful return, vflush() 3227 * will call vrele() on the root vnode exactly rootrefs times. 3228 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3229 * be zero. 3230 */ 3231 #ifdef DIAGNOSTIC 3232 static int busyprt = 0; /* print out busy vnodes */ 3233 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3234 #endif 3235 3236 int 3237 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3238 { 3239 struct vnode *vp, *mvp, *rootvp = NULL; 3240 struct vattr vattr; 3241 int busy = 0, error; 3242 3243 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3244 rootrefs, flags); 3245 if (rootrefs > 0) { 3246 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3247 ("vflush: bad args")); 3248 /* 3249 * Get the filesystem root vnode. We can vput() it 3250 * immediately, since with rootrefs > 0, it won't go away.
3251 */ 3252 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3253 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3254 __func__, error); 3255 return (error); 3256 } 3257 vput(rootvp); 3258 } 3259 loop: 3260 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3261 vholdl(vp); 3262 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3263 if (error) { 3264 vdrop(vp); 3265 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3266 goto loop; 3267 } 3268 /* 3269 * Skip over vnodes marked VV_SYSTEM. 3270 */ 3271 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3272 VOP_UNLOCK(vp, 0); 3273 vdrop(vp); 3274 continue; 3275 } 3276 /* 3277 * If WRITECLOSE is set, flush out unlinked but still open 3278 * files (even if open only for reading) and regular file 3279 * vnodes open for writing. 3280 */ 3281 if (flags & WRITECLOSE) { 3282 if (vp->v_object != NULL) { 3283 VM_OBJECT_WLOCK(vp->v_object); 3284 vm_object_page_clean(vp->v_object, 0, 0, 0); 3285 VM_OBJECT_WUNLOCK(vp->v_object); 3286 } 3287 error = VOP_FSYNC(vp, MNT_WAIT, td); 3288 if (error != 0) { 3289 VOP_UNLOCK(vp, 0); 3290 vdrop(vp); 3291 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3292 return (error); 3293 } 3294 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3295 VI_LOCK(vp); 3296 3297 if ((vp->v_type == VNON || 3298 (error == 0 && vattr.va_nlink > 0)) && 3299 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3300 VOP_UNLOCK(vp, 0); 3301 vdropl(vp); 3302 continue; 3303 } 3304 } else 3305 VI_LOCK(vp); 3306 /* 3307 * With v_usecount == 0, all we need to do is clear out the 3308 * vnode data structures and we are done. 3309 * 3310 * If FORCECLOSE is set, forcibly close the vnode. 3311 */ 3312 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3313 vgonel(vp); 3314 } else { 3315 busy++; 3316 #ifdef DIAGNOSTIC 3317 if (busyprt) 3318 vn_printf(vp, "vflush: busy vnode "); 3319 #endif 3320 } 3321 VOP_UNLOCK(vp, 0); 3322 vdropl(vp); 3323 } 3324 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3325 /* 3326 * If just the root vnode is busy, and if its refcount 3327 * is equal to `rootrefs', then go ahead and kill it. 3328 */ 3329 VI_LOCK(rootvp); 3330 KASSERT(busy > 0, ("vflush: not busy")); 3331 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3332 ("vflush: usecount %d < rootrefs %d", 3333 rootvp->v_usecount, rootrefs)); 3334 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3335 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3336 vgone(rootvp); 3337 VOP_UNLOCK(rootvp, 0); 3338 busy = 0; 3339 } else 3340 VI_UNLOCK(rootvp); 3341 } 3342 if (busy) { 3343 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3344 busy); 3345 return (EBUSY); 3346 } 3347 for (; rootrefs > 0; rootrefs--) 3348 vrele(rootvp); 3349 return (0); 3350 } 3351 3352 /* 3353 * Recycle an unused vnode to the front of the free list. 3354 */ 3355 int 3356 vrecycle(struct vnode *vp) 3357 { 3358 int recycled; 3359 3360 VI_LOCK(vp); 3361 recycled = vrecyclel(vp); 3362 VI_UNLOCK(vp); 3363 return (recycled); 3364 } 3365 3366 /* 3367 * vrecycle, with the vp interlock held. 3368 */ 3369 int 3370 vrecyclel(struct vnode *vp) 3371 { 3372 int recycled; 3373 3374 ASSERT_VOP_ELOCKED(vp, __func__); 3375 ASSERT_VI_LOCKED(vp, __func__); 3376 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3377 recycled = 0; 3378 if (vp->v_usecount == 0) { 3379 recycled = 1; 3380 vgonel(vp); 3381 } 3382 return (recycled); 3383 } 3384 3385 /* 3386 * Eliminate all activity associated with a vnode 3387 * in preparation for reuse.
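 *
 * A typical calling pattern, modeled on insmntque_stddtr() above
 * (illustrative; individual filesystems differ):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vgone(vp);
 *	vput(vp);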
3388 */ 3389 void 3390 vgone(struct vnode *vp) 3391 { 3392 VI_LOCK(vp); 3393 vgonel(vp); 3394 VI_UNLOCK(vp); 3395 } 3396 3397 static void 3398 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3399 struct vnode *lowervp __unused) 3400 { 3401 } 3402 3403 /* 3404 * Notify upper mounts about reclaimed or unlinked vnode. 3405 */ 3406 void 3407 vfs_notify_upper(struct vnode *vp, int event) 3408 { 3409 static struct vfsops vgonel_vfsops = { 3410 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3411 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3412 }; 3413 struct mount *mp, *ump, *mmp; 3414 3415 mp = vp->v_mount; 3416 if (mp == NULL) 3417 return; 3418 3419 MNT_ILOCK(mp); 3420 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3421 goto unlock; 3422 MNT_IUNLOCK(mp); 3423 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3424 mmp->mnt_op = &vgonel_vfsops; 3425 mmp->mnt_kern_flag |= MNTK_MARKER; 3426 MNT_ILOCK(mp); 3427 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3428 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3429 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3430 ump = TAILQ_NEXT(ump, mnt_upper_link); 3431 continue; 3432 } 3433 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3434 MNT_IUNLOCK(mp); 3435 switch (event) { 3436 case VFS_NOTIFY_UPPER_RECLAIM: 3437 VFS_RECLAIM_LOWERVP(ump, vp); 3438 break; 3439 case VFS_NOTIFY_UPPER_UNLINK: 3440 VFS_UNLINK_LOWERVP(ump, vp); 3441 break; 3442 default: 3443 KASSERT(0, ("invalid event %d", event)); 3444 break; 3445 } 3446 MNT_ILOCK(mp); 3447 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3448 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3449 } 3450 free(mmp, M_TEMP); 3451 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3452 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3453 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3454 wakeup(&mp->mnt_uppers); 3455 } 3456 unlock: 3457 MNT_IUNLOCK(mp); 3458 } 3459 3460 /* 3461 * vgone, with the vp interlock held. 3462 */ 3463 static void 3464 vgonel(struct vnode *vp) 3465 { 3466 struct thread *td; 3467 int oweinact; 3468 int active; 3469 struct mount *mp; 3470 3471 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3472 ASSERT_VI_LOCKED(vp, "vgonel"); 3473 VNASSERT(vp->v_holdcnt, vp, 3474 ("vgonel: vp %p has no reference.", vp)); 3475 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3476 td = curthread; 3477 3478 /* 3479 * Don't vgonel if we're already doomed. 3480 */ 3481 if (vp->v_iflag & VI_DOOMED) 3482 return; 3483 vp->v_iflag |= VI_DOOMED; 3484 3485 /* 3486 * Check to see if the vnode is in use. If so, we have to call 3487 * VOP_CLOSE() and VOP_INACTIVE(). 3488 */ 3489 active = vp->v_usecount; 3490 oweinact = (vp->v_iflag & VI_OWEINACT); 3491 VI_UNLOCK(vp); 3492 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3493 3494 /* 3495 * If purging an active vnode, it must be closed and 3496 * deactivated before being reclaimed. 3497 */ 3498 if (active) 3499 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3500 if (oweinact || active) { 3501 VI_LOCK(vp); 3502 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3503 vinactive(vp, td); 3504 VI_UNLOCK(vp); 3505 } 3506 if (vp->v_type == VSOCK) 3507 vfs_unp_reclaim(vp); 3508 3509 /* 3510 * Clean out any buffers associated with the vnode. 3511 * If the flush fails, just toss the buffers. 
3512 */ 3513 mp = NULL; 3514 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3515 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3516 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3517 while (vinvalbuf(vp, 0, 0, 0) != 0) 3518 ; 3519 } 3520 3521 BO_LOCK(&vp->v_bufobj); 3522 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3523 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3524 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3525 vp->v_bufobj.bo_clean.bv_cnt == 0, 3526 ("vp %p bufobj not invalidated", vp)); 3527 3528 /* 3529 * For VMIO bufobj, BO_DEAD is set in vm_object_terminate() 3530 * after the object's page queue is flushed. 3531 */ 3532 if (vp->v_bufobj.bo_object == NULL) 3533 vp->v_bufobj.bo_flag |= BO_DEAD; 3534 BO_UNLOCK(&vp->v_bufobj); 3535 3536 /* 3537 * Reclaim the vnode. 3538 */ 3539 if (VOP_RECLAIM(vp, td)) 3540 panic("vgone: cannot reclaim"); 3541 if (mp != NULL) 3542 vn_finished_secondary_write(mp); 3543 VNASSERT(vp->v_object == NULL, vp, 3544 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3545 /* 3546 * Clear the advisory locks and wake up waiting threads. 3547 */ 3548 (void)VOP_ADVLOCKPURGE(vp); 3549 vp->v_lockf = NULL; 3550 /* 3551 * Delete from old mount point vnode list. 3552 */ 3553 delmntque(vp); 3554 cache_purge(vp); 3555 /* 3556 * Done with purge, reset to the standard lock and invalidate 3557 * the vnode. 3558 */ 3559 VI_LOCK(vp); 3560 vp->v_vnlock = &vp->v_lock; 3561 vp->v_op = &dead_vnodeops; 3562 vp->v_tag = "none"; 3563 vp->v_type = VBAD; 3564 } 3565 3566 /* 3567 * Calculate the total number of references to a special device. 3568 */ 3569 int 3570 vcount(struct vnode *vp) 3571 { 3572 int count; 3573 3574 dev_lock(); 3575 count = vp->v_rdev->si_usecount; 3576 dev_unlock(); 3577 return (count); 3578 } 3579 3580 /* 3581 * Same as above, but using the struct cdev * as the argument. 3582 */ 3583 int 3584 count_dev(struct cdev *dev) 3585 { 3586 int count; 3587 3588 dev_lock(); 3589 count = dev->si_usecount; 3590 dev_unlock(); 3591 return (count); 3592 } 3593 3594 /* 3595 * Print out a description of a vnode. 3596 */ 3597 static char *typename[] = 3598 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3599 "VMARKER"}; 3600 3601 void 3602 vn_printf(struct vnode *vp, const char *fmt, ...)
3603 { 3604 va_list ap; 3605 char buf[256], buf2[16]; 3606 u_long flags; 3607 3608 va_start(ap, fmt); 3609 vprintf(fmt, ap); 3610 va_end(ap); 3611 printf("%p: ", (void *)vp); 3612 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3613 printf(" usecount %d, writecount %d, refcount %d", 3614 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3615 switch (vp->v_type) { 3616 case VDIR: 3617 printf(" mountedhere %p\n", vp->v_mountedhere); 3618 break; 3619 case VCHR: 3620 printf(" rdev %p\n", vp->v_rdev); 3621 break; 3622 case VSOCK: 3623 printf(" socket %p\n", vp->v_unpcb); 3624 break; 3625 case VFIFO: 3626 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3627 break; 3628 default: 3629 printf("\n"); 3630 break; 3631 } 3632 buf[0] = '\0'; 3633 buf[1] = '\0'; 3634 if (vp->v_vflag & VV_ROOT) 3635 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3636 if (vp->v_vflag & VV_ISTTY) 3637 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3638 if (vp->v_vflag & VV_NOSYNC) 3639 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3640 if (vp->v_vflag & VV_ETERNALDEV) 3641 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3642 if (vp->v_vflag & VV_CACHEDLABEL) 3643 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3644 if (vp->v_vflag & VV_COPYONWRITE) 3645 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3646 if (vp->v_vflag & VV_SYSTEM) 3647 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3648 if (vp->v_vflag & VV_PROCDEP) 3649 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3650 if (vp->v_vflag & VV_NOKNOTE) 3651 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3652 if (vp->v_vflag & VV_DELETED) 3653 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3654 if (vp->v_vflag & VV_MD) 3655 strlcat(buf, "|VV_MD", sizeof(buf)); 3656 if (vp->v_vflag & VV_FORCEINSMQ) 3657 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3658 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3659 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3660 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3661 if (flags != 0) { 3662 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3663 strlcat(buf, buf2, sizeof(buf)); 3664 } 3665 if (vp->v_iflag & VI_MOUNT) 3666 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3667 if (vp->v_iflag & VI_DOOMED) 3668 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3669 if (vp->v_iflag & VI_FREE) 3670 strlcat(buf, "|VI_FREE", sizeof(buf)); 3671 if (vp->v_iflag & VI_ACTIVE) 3672 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3673 if (vp->v_iflag & VI_DOINGINACT) 3674 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3675 if (vp->v_iflag & VI_OWEINACT) 3676 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3677 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3678 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 3679 if (flags != 0) { 3680 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3681 strlcat(buf, buf2, sizeof(buf)); 3682 } 3683 printf(" flags (%s)\n", buf + 1); 3684 if (mtx_owned(VI_MTX(vp))) 3685 printf(" VI_LOCKed"); 3686 if (vp->v_object != NULL) 3687 printf(" v_object %p ref %d pages %d " 3688 "cleanbuf %d dirtybuf %d\n", 3689 vp->v_object, vp->v_object->ref_count, 3690 vp->v_object->resident_page_count, 3691 vp->v_bufobj.bo_clean.bv_cnt, 3692 vp->v_bufobj.bo_dirty.bv_cnt); 3693 printf(" "); 3694 lockmgr_printinfo(vp->v_vnlock); 3695 if (vp->v_data != NULL) 3696 VOP_PRINT(vp); 3697 } 3698 3699 #ifdef DDB 3700 /* 3701 * List all of the locked vnodes in the system. 3702 * Called when debugging the kernel. 
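 *
 * Invoked from the ddb prompt, e.g.:
 *
 *	db> show lockedvnods
 *
 * (the command name matches the spelling in the DB_SHOW_COMMAND
 * below).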
3703 */ 3704 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3705 { 3706 struct mount *mp; 3707 struct vnode *vp; 3708 3709 /* 3710 * Note: because this is DDB, we can't obey the locking semantics 3711 * for these structures, which means we could catch an inconsistent 3712 * state and dereference a nasty pointer. Not much to be done 3713 * about that. 3714 */ 3715 db_printf("Locked vnodes\n"); 3716 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3717 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3718 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3719 vn_printf(vp, "vnode "); 3720 } 3721 } 3722 } 3723 3724 /* 3725 * Show details about the given vnode. 3726 */ 3727 DB_SHOW_COMMAND(vnode, db_show_vnode) 3728 { 3729 struct vnode *vp; 3730 3731 if (!have_addr) 3732 return; 3733 vp = (struct vnode *)addr; 3734 vn_printf(vp, "vnode "); 3735 } 3736 3737 /* 3738 * Show details about the given mount point. 3739 */ 3740 DB_SHOW_COMMAND(mount, db_show_mount) 3741 { 3742 struct mount *mp; 3743 struct vfsopt *opt; 3744 struct statfs *sp; 3745 struct vnode *vp; 3746 char buf[512]; 3747 uint64_t mflags; 3748 u_int flags; 3749 3750 if (!have_addr) { 3751 /* No address given, print short info about all mount points. */ 3752 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3753 db_printf("%p %s on %s (%s)\n", mp, 3754 mp->mnt_stat.f_mntfromname, 3755 mp->mnt_stat.f_mntonname, 3756 mp->mnt_stat.f_fstypename); 3757 if (db_pager_quit) 3758 break; 3759 } 3760 db_printf("\nMore info: show mount <addr>\n"); 3761 return; 3762 } 3763 3764 mp = (struct mount *)addr; 3765 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3766 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3767 3768 buf[0] = '\0'; 3769 mflags = mp->mnt_flag; 3770 #define MNT_FLAG(flag) do { \ 3771 if (mflags & (flag)) { \ 3772 if (buf[0] != '\0') \ 3773 strlcat(buf, ", ", sizeof(buf)); \ 3774 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3775 mflags &= ~(flag); \ 3776 } \ 3777 } while (0) 3778 MNT_FLAG(MNT_RDONLY); 3779 MNT_FLAG(MNT_SYNCHRONOUS); 3780 MNT_FLAG(MNT_NOEXEC); 3781 MNT_FLAG(MNT_NOSUID); 3782 MNT_FLAG(MNT_NFS4ACLS); 3783 MNT_FLAG(MNT_UNION); 3784 MNT_FLAG(MNT_ASYNC); 3785 MNT_FLAG(MNT_SUIDDIR); 3786 MNT_FLAG(MNT_SOFTDEP); 3787 MNT_FLAG(MNT_NOSYMFOLLOW); 3788 MNT_FLAG(MNT_GJOURNAL); 3789 MNT_FLAG(MNT_MULTILABEL); 3790 MNT_FLAG(MNT_ACLS); 3791 MNT_FLAG(MNT_NOATIME); 3792 MNT_FLAG(MNT_NOCLUSTERR); 3793 MNT_FLAG(MNT_NOCLUSTERW); 3794 MNT_FLAG(MNT_SUJ); 3795 MNT_FLAG(MNT_EXRDONLY); 3796 MNT_FLAG(MNT_EXPORTED); 3797 MNT_FLAG(MNT_DEFEXPORTED); 3798 MNT_FLAG(MNT_EXPORTANON); 3799 MNT_FLAG(MNT_EXKERB); 3800 MNT_FLAG(MNT_EXPUBLIC); 3801 MNT_FLAG(MNT_LOCAL); 3802 MNT_FLAG(MNT_QUOTA); 3803 MNT_FLAG(MNT_ROOTFS); 3804 MNT_FLAG(MNT_USER); 3805 MNT_FLAG(MNT_IGNORE); 3806 MNT_FLAG(MNT_UPDATE); 3807 MNT_FLAG(MNT_DELEXPORT); 3808 MNT_FLAG(MNT_RELOAD); 3809 MNT_FLAG(MNT_FORCE); 3810 MNT_FLAG(MNT_SNAPSHOT); 3811 MNT_FLAG(MNT_BYFSID); 3812 #undef MNT_FLAG 3813 if (mflags != 0) { 3814 if (buf[0] != '\0') 3815 strlcat(buf, ", ", sizeof(buf)); 3816 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3817 "0x%016jx", mflags); 3818 } 3819 db_printf(" mnt_flag = %s\n", buf); 3820 3821 buf[0] = '\0'; 3822 flags = mp->mnt_kern_flag; 3823 #define MNT_KERN_FLAG(flag) do { \ 3824 if (flags & (flag)) { \ 3825 if (buf[0] != '\0') \ 3826 strlcat(buf, ", ", sizeof(buf)); \ 3827 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3828 flags &= ~(flag); \ 3829 } \ 3830 } while (0) 3831 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3832 MNT_KERN_FLAG(MNTK_ASYNC); 3833 
MNT_KERN_FLAG(MNTK_SOFTDEP); 3834 MNT_KERN_FLAG(MNTK_DRAINING); 3835 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3836 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3837 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3838 MNT_KERN_FLAG(MNTK_NO_IOPF); 3839 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3840 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3841 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3842 MNT_KERN_FLAG(MNTK_MARKER); 3843 MNT_KERN_FLAG(MNTK_USES_BCACHE); 3844 MNT_KERN_FLAG(MNTK_NOASYNC); 3845 MNT_KERN_FLAG(MNTK_UNMOUNT); 3846 MNT_KERN_FLAG(MNTK_MWAIT); 3847 MNT_KERN_FLAG(MNTK_SUSPEND); 3848 MNT_KERN_FLAG(MNTK_SUSPEND2); 3849 MNT_KERN_FLAG(MNTK_SUSPENDED); 3850 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3851 MNT_KERN_FLAG(MNTK_NOKNOTE); 3852 #undef MNT_KERN_FLAG 3853 if (flags != 0) { 3854 if (buf[0] != '\0') 3855 strlcat(buf, ", ", sizeof(buf)); 3856 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3857 "0x%08x", flags); 3858 } 3859 db_printf(" mnt_kern_flag = %s\n", buf); 3860 3861 db_printf(" mnt_opt = "); 3862 opt = TAILQ_FIRST(mp->mnt_opt); 3863 if (opt != NULL) { 3864 db_printf("%s", opt->name); 3865 opt = TAILQ_NEXT(opt, link); 3866 while (opt != NULL) { 3867 db_printf(", %s", opt->name); 3868 opt = TAILQ_NEXT(opt, link); 3869 } 3870 } 3871 db_printf("\n"); 3872 3873 sp = &mp->mnt_stat; 3874 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3875 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3876 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3877 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3878 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3879 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3880 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3881 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3882 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3883 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3884 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3885 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3886 3887 db_printf(" mnt_cred = { uid=%u ruid=%u", 3888 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3889 if (jailed(mp->mnt_cred)) 3890 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3891 db_printf(" }\n"); 3892 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3893 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3894 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3895 db_printf(" mnt_activevnodelistsize = %d\n", 3896 mp->mnt_activevnodelistsize); 3897 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3898 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3899 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3900 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3901 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref); 3902 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3903 db_printf(" mnt_secondary_accwrites = %d\n", 3904 mp->mnt_secondary_accwrites); 3905 db_printf(" mnt_gjprovider = %s\n", 3906 mp->mnt_gjprovider != NULL ? 
mp->mnt_gjprovider : "NULL");
3907
3908 db_printf("\n\nList of active vnodes\n");
3909 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
3910 if (vp->v_type != VMARKER) {
3911 vn_printf(vp, "vnode ");
3912 if (db_pager_quit)
3913 break;
3914 }
3915 }
3916 db_printf("\n\nList of inactive vnodes\n");
3917 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3918 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) {
3919 vn_printf(vp, "vnode ");
3920 if (db_pager_quit)
3921 break;
3922 }
3923 }
3924 }
3925 #endif /* DDB */
3926
3927 /*
3928 * Fill in a struct xvfsconf based on a struct vfsconf.
3929 */
3930 static int
3931 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
3932 {
3933 struct xvfsconf xvfsp;
3934
3935 bzero(&xvfsp, sizeof(xvfsp));
3936 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3937 xvfsp.vfc_typenum = vfsp->vfc_typenum;
3938 xvfsp.vfc_refcount = vfsp->vfc_refcount;
3939 xvfsp.vfc_flags = vfsp->vfc_flags;
3940 /*
3941 * These are unused in userland; we keep them
3942 * to avoid breaking binary compatibility.
3943 */
3944 xvfsp.vfc_vfsops = NULL;
3945 xvfsp.vfc_next = NULL;
3946 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3947 }
3948
3949 #ifdef COMPAT_FREEBSD32
3950 struct xvfsconf32 {
3951 uint32_t vfc_vfsops;
3952 char vfc_name[MFSNAMELEN];
3953 int32_t vfc_typenum;
3954 int32_t vfc_refcount;
3955 int32_t vfc_flags;
3956 uint32_t vfc_next;
3957 };
3958
3959 static int
3960 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
3961 {
3962 struct xvfsconf32 xvfsp;
3963
3964 bzero(&xvfsp, sizeof(xvfsp));
3965 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3966 xvfsp.vfc_typenum = vfsp->vfc_typenum;
3967 xvfsp.vfc_refcount = vfsp->vfc_refcount;
3968 xvfsp.vfc_flags = vfsp->vfc_flags;
3969 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3970 }
3971 #endif
3972
3973 /*
3974 * Top-level filesystem-related information gathering.
3975 */
3976 static int
3977 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
3978 {
3979 struct vfsconf *vfsp;
3980 int error;
3981
3982 error = 0;
3983 vfsconf_slock();
3984 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3985 #ifdef COMPAT_FREEBSD32
3986 if (req->flags & SCTL_MASK32)
3987 error = vfsconf2x32(req, vfsp);
3988 else
3989 #endif
3990 error = vfsconf2x(req, vfsp);
3991 if (error)
3992 break;
3993 }
3994 vfsconf_sunlock();
3995 return (error);
3996 }
3997
3998 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
3999 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
4000 "S,xvfsconf", "List of all configured filesystems");
4001
4002 #ifndef BURN_BRIDGES
4003 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
4004
4005 static int
4006 vfs_sysctl(SYSCTL_HANDLER_ARGS)
4007 {
4008 int *name = (int *)arg1 - 1; /* XXX */
4009 u_int namelen = arg2 + 1; /* XXX */
4010 struct vfsconf *vfsp;
4011
4012 log(LOG_WARNING, "userland calling deprecated sysctl, "
4013 "please rebuild world\n");
4014
4015 #if 1 || defined(COMPAT_PRELITE2)
4016 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC.
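 * With the name/namelen adjustment above, a one-component request
 * is the historic VFS_VFSCONF query and is redirected to
 * sysctl_ovfs_conf(); otherwise name[1] selects the operation and,
 * for VFS_CONF, name[2] the filesystem type number.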
*/ 4017 if (namelen == 1) 4018 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4019 #endif 4020 4021 switch (name[1]) { 4022 case VFS_MAXTYPENUM: 4023 if (namelen != 2) 4024 return (ENOTDIR); 4025 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4026 case VFS_CONF: 4027 if (namelen != 3) 4028 return (ENOTDIR); /* overloaded */ 4029 vfsconf_slock(); 4030 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4031 if (vfsp->vfc_typenum == name[2]) 4032 break; 4033 } 4034 vfsconf_sunlock(); 4035 if (vfsp == NULL) 4036 return (EOPNOTSUPP); 4037 #ifdef COMPAT_FREEBSD32 4038 if (req->flags & SCTL_MASK32) 4039 return (vfsconf2x32(req, vfsp)); 4040 else 4041 #endif 4042 return (vfsconf2x(req, vfsp)); 4043 } 4044 return (EOPNOTSUPP); 4045 } 4046 4047 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4048 CTLFLAG_MPSAFE, vfs_sysctl, 4049 "Generic filesystem"); 4050 4051 #if 1 || defined(COMPAT_PRELITE2) 4052 4053 static int 4054 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4055 { 4056 int error; 4057 struct vfsconf *vfsp; 4058 struct ovfsconf ovfs; 4059 4060 vfsconf_slock(); 4061 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4062 bzero(&ovfs, sizeof(ovfs)); 4063 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4064 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4065 ovfs.vfc_index = vfsp->vfc_typenum; 4066 ovfs.vfc_refcount = vfsp->vfc_refcount; 4067 ovfs.vfc_flags = vfsp->vfc_flags; 4068 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4069 if (error != 0) { 4070 vfsconf_sunlock(); 4071 return (error); 4072 } 4073 } 4074 vfsconf_sunlock(); 4075 return (0); 4076 } 4077 4078 #endif /* 1 || COMPAT_PRELITE2 */ 4079 #endif /* !BURN_BRIDGES */ 4080 4081 #define KINFO_VNODESLOP 10 4082 #ifdef notyet 4083 /* 4084 * Dump vnode list (via sysctl). 4085 */ 4086 /* ARGSUSED */ 4087 static int 4088 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4089 { 4090 struct xvnode *xvn; 4091 struct mount *mp; 4092 struct vnode *vp; 4093 int error, len, n; 4094 4095 /* 4096 * Stale numvnodes access is not fatal here. 4097 */ 4098 req->lock = 0; 4099 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4100 if (!req->oldptr) 4101 /* Make an estimate */ 4102 return (SYSCTL_OUT(req, 0, len)); 4103 4104 error = sysctl_wire_old_buffer(req, 0); 4105 if (error != 0) 4106 return (error); 4107 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4108 n = 0; 4109 mtx_lock(&mountlist_mtx); 4110 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4111 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4112 continue; 4113 MNT_ILOCK(mp); 4114 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4115 if (n == len) 4116 break; 4117 vref(vp); 4118 xvn[n].xv_size = sizeof *xvn; 4119 xvn[n].xv_vnode = vp; 4120 xvn[n].xv_id = 0; /* XXX compat */ 4121 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4122 XV_COPY(usecount); 4123 XV_COPY(writecount); 4124 XV_COPY(holdcnt); 4125 XV_COPY(mount); 4126 XV_COPY(numoutput); 4127 XV_COPY(type); 4128 #undef XV_COPY 4129 xvn[n].xv_flag = vp->v_vflag; 4130 4131 switch (vp->v_type) { 4132 case VREG: 4133 case VDIR: 4134 case VLNK: 4135 break; 4136 case VBLK: 4137 case VCHR: 4138 if (vp->v_rdev == NULL) { 4139 vrele(vp); 4140 continue; 4141 } 4142 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4143 break; 4144 case VSOCK: 4145 xvn[n].xv_socket = vp->v_socket; 4146 break; 4147 case VFIFO: 4148 xvn[n].xv_fifo = vp->v_fifoinfo; 4149 break; 4150 case VNON: 4151 case VBAD: 4152 default: 4153 /* shouldn't happen? 
*/ 4154 vrele(vp); 4155 continue; 4156 } 4157 vrele(vp); 4158 ++n; 4159 } 4160 MNT_IUNLOCK(mp); 4161 mtx_lock(&mountlist_mtx); 4162 vfs_unbusy(mp); 4163 if (n == len) 4164 break; 4165 } 4166 mtx_unlock(&mountlist_mtx); 4167 4168 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4169 free(xvn, M_TEMP); 4170 return (error); 4171 } 4172 4173 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4174 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4175 ""); 4176 #endif 4177 4178 static void 4179 unmount_or_warn(struct mount *mp) 4180 { 4181 int error; 4182 4183 error = dounmount(mp, MNT_FORCE, curthread); 4184 if (error != 0) { 4185 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4186 if (error == EBUSY) 4187 printf("BUSY)\n"); 4188 else 4189 printf("%d)\n", error); 4190 } 4191 } 4192 4193 /* 4194 * Unmount all filesystems. The list is traversed in reverse order 4195 * of mounting to avoid dependencies. 4196 */ 4197 void 4198 vfs_unmountall(void) 4199 { 4200 struct mount *mp, *tmp; 4201 4202 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4203 4204 /* 4205 * Since this only runs when rebooting, it is not interlocked. 4206 */ 4207 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4208 vfs_ref(mp); 4209 4210 /* 4211 * Forcibly unmounting "/dev" before "/" would prevent clean 4212 * unmount of the latter. 4213 */ 4214 if (mp == rootdevmp) 4215 continue; 4216 4217 unmount_or_warn(mp); 4218 } 4219 4220 if (rootdevmp != NULL) 4221 unmount_or_warn(rootdevmp); 4222 } 4223 4224 /* 4225 * perform msync on all vnodes under a mount point 4226 * the mount point must be locked. 4227 */ 4228 void 4229 vfs_msync(struct mount *mp, int flags) 4230 { 4231 struct vnode *vp, *mvp; 4232 struct vm_object *obj; 4233 4234 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4235 4236 vnlru_return_batch(mp); 4237 4238 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4239 obj = vp->v_object; 4240 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 4241 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4242 if (!vget(vp, 4243 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4244 curthread)) { 4245 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4246 vput(vp); 4247 continue; 4248 } 4249 4250 obj = vp->v_object; 4251 if (obj != NULL) { 4252 VM_OBJECT_WLOCK(obj); 4253 vm_object_page_clean(obj, 0, 0, 4254 flags == MNT_WAIT ? 4255 OBJPC_SYNC : OBJPC_NOSYNC); 4256 VM_OBJECT_WUNLOCK(obj); 4257 } 4258 vput(vp); 4259 } 4260 } else 4261 VI_UNLOCK(vp); 4262 } 4263 } 4264 4265 static void 4266 destroy_vpollinfo_free(struct vpollinfo *vi) 4267 { 4268 4269 knlist_destroy(&vi->vpi_selinfo.si_note); 4270 mtx_destroy(&vi->vpi_lock); 4271 uma_zfree(vnodepoll_zone, vi); 4272 } 4273 4274 static void 4275 destroy_vpollinfo(struct vpollinfo *vi) 4276 { 4277 4278 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4279 seldrain(&vi->vpi_selinfo); 4280 destroy_vpollinfo_free(vi); 4281 } 4282 4283 /* 4284 * Initialize per-vnode helper structure to hold poll-related state. 
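 *
 * The structure is allocated unlocked; v_addpollinfo() re-checks
 * vp->v_pollinfo under the vnode interlock, and a caller that lost
 * the race frees its freshly built copy via destroy_vpollinfo_free().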
4285 */
4286 void
4287 v_addpollinfo(struct vnode *vp)
4288 {
4289 struct vpollinfo *vi;
4290
4291 if (vp->v_pollinfo != NULL)
4292 return;
4293 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO);
4294 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
4295 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
4296 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
4297 VI_LOCK(vp);
4298 if (vp->v_pollinfo != NULL) {
4299 VI_UNLOCK(vp);
4300 destroy_vpollinfo_free(vi);
4301 return;
4302 }
4303 vp->v_pollinfo = vi;
4304 VI_UNLOCK(vp);
4305 }
4306
4307 /*
4308 * Record a process's interest in events which might happen to
4309 * a vnode. Because poll uses the historic select-style interface
4310 * internally, this routine serves as both the ``check for any
4311 * pending events'' and the ``record my interest in future events''
4312 * functions. (These are done together, while the lock is held,
4313 * to avoid race conditions.)
4314 */
4315 int
4316 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4317 {
4318
4319 v_addpollinfo(vp);
4320 mtx_lock(&vp->v_pollinfo->vpi_lock);
4321 if (vp->v_pollinfo->vpi_revents & events) {
4322 /*
4323 * This leaves events we are not interested
4324 * in available for the other process which
4325 * presumably had requested them
4326 * (otherwise they would never have been
4327 * recorded).
4328 */
4329 events &= vp->v_pollinfo->vpi_revents;
4330 vp->v_pollinfo->vpi_revents &= ~events;
4331
4332 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4333 return (events);
4334 }
4335 vp->v_pollinfo->vpi_events |= events;
4336 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
4337 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4338 return (0);
4339 }
4340
4341 /*
4342 * Routine to create and manage a filesystem syncer vnode.
4343 */
4344 #define sync_close ((int (*)(struct vop_close_args *))nullop)
4345 static int sync_fsync(struct vop_fsync_args *);
4346 static int sync_inactive(struct vop_inactive_args *);
4347 static int sync_reclaim(struct vop_reclaim_args *);
4348
4349 static struct vop_vector sync_vnodeops = {
4350 .vop_bypass = VOP_EOPNOTSUPP,
4351 .vop_close = sync_close, /* close */
4352 .vop_fsync = sync_fsync, /* fsync */
4353 .vop_inactive = sync_inactive, /* inactive */
4354 .vop_reclaim = sync_reclaim, /* reclaim */
4355 .vop_lock1 = vop_stdlock, /* lock */
4356 .vop_unlock = vop_stdunlock, /* unlock */
4357 .vop_islocked = vop_stdislocked, /* islocked */
4358 };
4359
4360 /*
4361 * Create a new filesystem syncer vnode for the specified mount point.
4362 */
4363 void
4364 vfs_allocate_syncvnode(struct mount *mp)
4365 {
4366 struct vnode *vp;
4367 struct bufobj *bo;
4368 static long start, incr, next;
4369 int error;
4370
4371 /* Allocate a new vnode */
4372 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
4373 if (error != 0)
4374 panic("vfs_allocate_syncvnode: getnewvnode() failed");
4375 vp->v_type = VNON;
4376 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4377 vp->v_vflag |= VV_FORCEINSMQ;
4378 error = insmntque(vp, mp);
4379 if (error != 0)
4380 panic("vfs_allocate_syncvnode: insmntque() failed");
4381 vp->v_vflag &= ~VV_FORCEINSMQ;
4382 VOP_UNLOCK(vp, 0);
4383 /*
4384 * Place the vnode onto the syncer worklist. We attempt to
4385 * scatter them about on the list so that they will go off
4386 * at evenly distributed times even if all the filesystems
4387 * are mounted at once.
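 *
 * For instance, assuming the default syncer_maxdelay of 32, the
 * static state above hands out next = 16, then 8, 24, 4, 12, 20,
 * 28, 2, 6, ..., halving the stride each time the sum runs off
 * the end of the wheel.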
4388 */
4389 next += incr;
4390 if (next == 0 || next > syncer_maxdelay) {
4391 start /= 2;
4392 incr /= 2;
4393 if (start == 0) {
4394 start = syncer_maxdelay / 2;
4395 incr = syncer_maxdelay;
4396 }
4397 next = start;
4398 }
4399 bo = &vp->v_bufobj;
4400 BO_LOCK(bo);
4401 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
4402 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
4403 mtx_lock(&sync_mtx);
4404 sync_vnode_count++;
4405 if (mp->mnt_syncer == NULL) {
4406 mp->mnt_syncer = vp;
4407 vp = NULL;
4408 }
4409 mtx_unlock(&sync_mtx);
4410 BO_UNLOCK(bo);
4411 if (vp != NULL) {
4412 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4413 vgone(vp);
4414 vput(vp);
4415 }
4416 }
4417
4418 void
4419 vfs_deallocate_syncvnode(struct mount *mp)
4420 {
4421 struct vnode *vp;
4422
4423 mtx_lock(&sync_mtx);
4424 vp = mp->mnt_syncer;
4425 if (vp != NULL)
4426 mp->mnt_syncer = NULL;
4427 mtx_unlock(&sync_mtx);
4428 if (vp != NULL)
4429 vrele(vp);
4430 }
4431
4432 /*
4433 * Do a lazy sync of the filesystem.
4434 */
4435 static int
4436 sync_fsync(struct vop_fsync_args *ap)
4437 {
4438 struct vnode *syncvp = ap->a_vp;
4439 struct mount *mp = syncvp->v_mount;
4440 int error, save;
4441 struct bufobj *bo;
4442
4443 /*
4444 * We only need to do something if this is a lazy evaluation.
4445 */
4446 if (ap->a_waitfor != MNT_LAZY)
4447 return (0);
4448
4449 /*
4450 * Move ourselves to the back of the sync list.
4451 */
4452 bo = &syncvp->v_bufobj;
4453 BO_LOCK(bo);
4454 vn_syncer_add_to_worklist(bo, syncdelay);
4455 BO_UNLOCK(bo);
4456
4457 /*
4458 * Walk the list of vnodes pushing all that are dirty and
4459 * not already on the sync list.
4460 */
4461 if (vfs_busy(mp, MBF_NOWAIT) != 0)
4462 return (0);
4463 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
4464 vfs_unbusy(mp);
4465 return (0);
4466 }
4467 save = curthread_pflags_set(TDP_SYNCIO);
4468 vfs_msync(mp, MNT_NOWAIT);
4469 error = VFS_SYNC(mp, MNT_LAZY);
4470 curthread_pflags_restore(save);
4471 vn_finished_write(mp);
4472 vfs_unbusy(mp);
4473 return (error);
4474 }
4475
4476 /*
4477 * The syncer vnode is no longer referenced.
4478 */
4479 static int
4480 sync_inactive(struct vop_inactive_args *ap)
4481 {
4482
4483 vgone(ap->a_vp);
4484 return (0);
4485 }
4486
4487 /*
4488 * The syncer vnode is no longer needed and is being decommissioned.
4489 *
4490 * Modifications to the worklist must be protected by sync_mtx.
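 * The bufobj lock is taken before sync_mtx below, matching the
 * order used by vfs_allocate_syncvnode() when the vnode was put
 * on the worklist.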
4491 */ 4492 static int 4493 sync_reclaim(struct vop_reclaim_args *ap) 4494 { 4495 struct vnode *vp = ap->a_vp; 4496 struct bufobj *bo; 4497 4498 bo = &vp->v_bufobj; 4499 BO_LOCK(bo); 4500 mtx_lock(&sync_mtx); 4501 if (vp->v_mount->mnt_syncer == vp) 4502 vp->v_mount->mnt_syncer = NULL; 4503 if (bo->bo_flag & BO_ONWORKLST) { 4504 LIST_REMOVE(bo, bo_synclist); 4505 syncer_worklist_len--; 4506 sync_vnode_count--; 4507 bo->bo_flag &= ~BO_ONWORKLST; 4508 } 4509 mtx_unlock(&sync_mtx); 4510 BO_UNLOCK(bo); 4511 4512 return (0); 4513 } 4514 4515 /* 4516 * Check if vnode represents a disk device 4517 */ 4518 int 4519 vn_isdisk(struct vnode *vp, int *errp) 4520 { 4521 int error; 4522 4523 if (vp->v_type != VCHR) { 4524 error = ENOTBLK; 4525 goto out; 4526 } 4527 error = 0; 4528 dev_lock(); 4529 if (vp->v_rdev == NULL) 4530 error = ENXIO; 4531 else if (vp->v_rdev->si_devsw == NULL) 4532 error = ENXIO; 4533 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4534 error = ENOTBLK; 4535 dev_unlock(); 4536 out: 4537 if (errp != NULL) 4538 *errp = error; 4539 return (error == 0); 4540 } 4541 4542 /* 4543 * Common filesystem object access control check routine. Accepts a 4544 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4545 * and optional call-by-reference privused argument allowing vaccess() 4546 * to indicate to the caller whether privilege was used to satisfy the 4547 * request (obsoleted). Returns 0 on success, or an errno on failure. 4548 */ 4549 int 4550 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4551 accmode_t accmode, struct ucred *cred, int *privused) 4552 { 4553 accmode_t dac_granted; 4554 accmode_t priv_granted; 4555 4556 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4557 ("invalid bit in accmode")); 4558 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4559 ("VAPPEND without VWRITE")); 4560 4561 /* 4562 * Look for a normal, non-privileged way to access the file/directory 4563 * as requested. If it exists, go with that. 4564 */ 4565 4566 if (privused != NULL) 4567 *privused = 0; 4568 4569 dac_granted = 0; 4570 4571 /* Check the owner. */ 4572 if (cred->cr_uid == file_uid) { 4573 dac_granted |= VADMIN; 4574 if (file_mode & S_IXUSR) 4575 dac_granted |= VEXEC; 4576 if (file_mode & S_IRUSR) 4577 dac_granted |= VREAD; 4578 if (file_mode & S_IWUSR) 4579 dac_granted |= (VWRITE | VAPPEND); 4580 4581 if ((accmode & dac_granted) == accmode) 4582 return (0); 4583 4584 goto privcheck; 4585 } 4586 4587 /* Otherwise, check the groups (first match) */ 4588 if (groupmember(file_gid, cred)) { 4589 if (file_mode & S_IXGRP) 4590 dac_granted |= VEXEC; 4591 if (file_mode & S_IRGRP) 4592 dac_granted |= VREAD; 4593 if (file_mode & S_IWGRP) 4594 dac_granted |= (VWRITE | VAPPEND); 4595 4596 if ((accmode & dac_granted) == accmode) 4597 return (0); 4598 4599 goto privcheck; 4600 } 4601 4602 /* Otherwise, check everyone else. */ 4603 if (file_mode & S_IXOTH) 4604 dac_granted |= VEXEC; 4605 if (file_mode & S_IROTH) 4606 dac_granted |= VREAD; 4607 if (file_mode & S_IWOTH) 4608 dac_granted |= (VWRITE | VAPPEND); 4609 if ((accmode & dac_granted) == accmode) 4610 return (0); 4611 4612 privcheck: 4613 /* 4614 * Build a privilege mask to determine if the set of privileges 4615 * satisfies the requirements when combined with the granted mask 4616 * from above. For each privilege, if the privilege is required, 4617 * bitwise or the request type onto the priv_granted mask. 
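 *
 * For example (an illustrative case, not taken from a caller): on
 * a 0640 file, a group member asking for VREAD | VWRITE arrives
 * here with dac_granted == VREAD, so the request succeeds only if
 * PRIV_VFS_WRITE supplies VWRITE below.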
4618 */ 4619 priv_granted = 0; 4620 4621 if (type == VDIR) { 4622 /* 4623 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4624 * requests, instead of PRIV_VFS_EXEC. 4625 */ 4626 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4627 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4628 priv_granted |= VEXEC; 4629 } else { 4630 /* 4631 * Ensure that at least one execute bit is on. Otherwise, 4632 * a privileged user will always succeed, and we don't want 4633 * this to happen unless the file really is executable. 4634 */ 4635 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4636 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4637 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4638 priv_granted |= VEXEC; 4639 } 4640 4641 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4642 !priv_check_cred(cred, PRIV_VFS_READ)) 4643 priv_granted |= VREAD; 4644 4645 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4646 !priv_check_cred(cred, PRIV_VFS_WRITE)) 4647 priv_granted |= (VWRITE | VAPPEND); 4648 4649 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4650 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 4651 priv_granted |= VADMIN; 4652 4653 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4654 /* XXX audit: privilege used */ 4655 if (privused != NULL) 4656 *privused = 1; 4657 return (0); 4658 } 4659 4660 return ((accmode & VADMIN) ? EPERM : EACCES); 4661 } 4662 4663 /* 4664 * Credential check based on process requesting service, and per-attribute 4665 * permissions. 4666 */ 4667 int 4668 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4669 struct thread *td, accmode_t accmode) 4670 { 4671 4672 /* 4673 * Kernel-invoked always succeeds. 4674 */ 4675 if (cred == NOCRED) 4676 return (0); 4677 4678 /* 4679 * Do not allow privileged processes in jail to directly manipulate 4680 * system attributes. 4681 */ 4682 switch (attrnamespace) { 4683 case EXTATTR_NAMESPACE_SYSTEM: 4684 /* Potentially should be: return (EPERM); */ 4685 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 4686 case EXTATTR_NAMESPACE_USER: 4687 return (VOP_ACCESS(vp, accmode, cred, td)); 4688 default: 4689 return (EPERM); 4690 } 4691 } 4692 4693 #ifdef DEBUG_VFS_LOCKS 4694 /* 4695 * This only exists to suppress warnings from unlocked specfs accesses. It is 4696 * no longer ok to have an unlocked VFS. 4697 */ 4698 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4699 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4700 4701 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4702 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4703 "Drop into debugger on lock violation"); 4704 4705 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4706 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4707 0, "Check for interlock across VOPs"); 4708 4709 int vfs_badlock_print = 1; /* Print lock violations. */ 4710 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4711 0, "Print lock violations"); 4712 4713 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4714 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4715 0, "Print vnode details on lock violations"); 4716 4717 #ifdef KDB 4718 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
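 * Like the vfs_badlock_* knobs above, this is a runtime sysctl;
 * e.g. "sysctl debug.vfs_badlock_backtrace=0" (an illustrative
 * invocation) turns it off without a rebuild.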
*/ 4719 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4720 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4721 #endif 4722 4723 static void 4724 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4725 { 4726 4727 #ifdef KDB 4728 if (vfs_badlock_backtrace) 4729 kdb_backtrace(); 4730 #endif 4731 if (vfs_badlock_vnode) 4732 vn_printf(vp, "vnode "); 4733 if (vfs_badlock_print) 4734 printf("%s: %p %s\n", str, (void *)vp, msg); 4735 if (vfs_badlock_ddb) 4736 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4737 } 4738 4739 void 4740 assert_vi_locked(struct vnode *vp, const char *str) 4741 { 4742 4743 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4744 vfs_badlock("interlock is not locked but should be", str, vp); 4745 } 4746 4747 void 4748 assert_vi_unlocked(struct vnode *vp, const char *str) 4749 { 4750 4751 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4752 vfs_badlock("interlock is locked but should not be", str, vp); 4753 } 4754 4755 void 4756 assert_vop_locked(struct vnode *vp, const char *str) 4757 { 4758 int locked; 4759 4760 if (!IGNORE_LOCK(vp)) { 4761 locked = VOP_ISLOCKED(vp); 4762 if (locked == 0 || locked == LK_EXCLOTHER) 4763 vfs_badlock("is not locked but should be", str, vp); 4764 } 4765 } 4766 4767 void 4768 assert_vop_unlocked(struct vnode *vp, const char *str) 4769 { 4770 4771 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4772 vfs_badlock("is locked but should not be", str, vp); 4773 } 4774 4775 void 4776 assert_vop_elocked(struct vnode *vp, const char *str) 4777 { 4778 4779 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4780 vfs_badlock("is not exclusive locked but should be", str, vp); 4781 } 4782 #endif /* DEBUG_VFS_LOCKS */ 4783 4784 void 4785 vop_rename_fail(struct vop_rename_args *ap) 4786 { 4787 4788 if (ap->a_tvp != NULL) 4789 vput(ap->a_tvp); 4790 if (ap->a_tdvp == ap->a_tvp) 4791 vrele(ap->a_tdvp); 4792 else 4793 vput(ap->a_tdvp); 4794 vrele(ap->a_fdvp); 4795 vrele(ap->a_fvp); 4796 } 4797 4798 void 4799 vop_rename_pre(void *ap) 4800 { 4801 struct vop_rename_args *a = ap; 4802 4803 #ifdef DEBUG_VFS_LOCKS 4804 if (a->a_tvp) 4805 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4806 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4807 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4808 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4809 4810 /* Check the source (from). */ 4811 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4812 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4813 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4814 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4815 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4816 4817 /* Check the target. */ 4818 if (a->a_tvp) 4819 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4820 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4821 #endif 4822 if (a->a_tdvp != a->a_fdvp) 4823 vhold(a->a_fdvp); 4824 if (a->a_tvp != a->a_fvp) 4825 vhold(a->a_fvp); 4826 vhold(a->a_tdvp); 4827 if (a->a_tvp) 4828 vhold(a->a_tvp); 4829 } 4830 4831 #ifdef DEBUG_VFS_LOCKS 4832 void 4833 vop_strategy_pre(void *ap) 4834 { 4835 struct vop_strategy_args *a; 4836 struct buf *bp; 4837 4838 a = ap; 4839 bp = a->a_bp; 4840 4841 /* 4842 * Cluster ops lock their component buffers but not the IO container. 
4843 */ 4844 if ((bp->b_flags & B_CLUSTER) != 0) 4845 return; 4846 4847 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4848 if (vfs_badlock_print) 4849 printf( 4850 "VOP_STRATEGY: bp is not locked but should be\n"); 4851 if (vfs_badlock_ddb) 4852 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4853 } 4854 } 4855 4856 void 4857 vop_lock_pre(void *ap) 4858 { 4859 struct vop_lock1_args *a = ap; 4860 4861 if ((a->a_flags & LK_INTERLOCK) == 0) 4862 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4863 else 4864 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4865 } 4866 4867 void 4868 vop_lock_post(void *ap, int rc) 4869 { 4870 struct vop_lock1_args *a = ap; 4871 4872 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4873 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4874 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4875 } 4876 4877 void 4878 vop_unlock_pre(void *ap) 4879 { 4880 struct vop_unlock_args *a = ap; 4881 4882 if (a->a_flags & LK_INTERLOCK) 4883 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4884 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4885 } 4886 4887 void 4888 vop_unlock_post(void *ap, int rc) 4889 { 4890 struct vop_unlock_args *a = ap; 4891 4892 if (a->a_flags & LK_INTERLOCK) 4893 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4894 } 4895 #endif 4896 4897 void 4898 vop_create_post(void *ap, int rc) 4899 { 4900 struct vop_create_args *a = ap; 4901 4902 if (!rc) 4903 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4904 } 4905 4906 void 4907 vop_deleteextattr_post(void *ap, int rc) 4908 { 4909 struct vop_deleteextattr_args *a = ap; 4910 4911 if (!rc) 4912 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4913 } 4914 4915 void 4916 vop_link_post(void *ap, int rc) 4917 { 4918 struct vop_link_args *a = ap; 4919 4920 if (!rc) { 4921 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4922 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4923 } 4924 } 4925 4926 void 4927 vop_mkdir_post(void *ap, int rc) 4928 { 4929 struct vop_mkdir_args *a = ap; 4930 4931 if (!rc) 4932 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4933 } 4934 4935 void 4936 vop_mknod_post(void *ap, int rc) 4937 { 4938 struct vop_mknod_args *a = ap; 4939 4940 if (!rc) 4941 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4942 } 4943 4944 void 4945 vop_reclaim_post(void *ap, int rc) 4946 { 4947 struct vop_reclaim_args *a = ap; 4948 4949 if (!rc) 4950 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 4951 } 4952 4953 void 4954 vop_remove_post(void *ap, int rc) 4955 { 4956 struct vop_remove_args *a = ap; 4957 4958 if (!rc) { 4959 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4960 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4961 } 4962 } 4963 4964 void 4965 vop_rename_post(void *ap, int rc) 4966 { 4967 struct vop_rename_args *a = ap; 4968 long hint; 4969 4970 if (!rc) { 4971 hint = NOTE_WRITE; 4972 if (a->a_fdvp == a->a_tdvp) { 4973 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 4974 hint |= NOTE_LINK; 4975 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4976 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4977 } else { 4978 hint |= NOTE_EXTEND; 4979 if (a->a_fvp->v_type == VDIR) 4980 hint |= NOTE_LINK; 4981 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 4982 4983 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 4984 a->a_tvp->v_type == VDIR) 4985 hint &= ~NOTE_LINK; 4986 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 4987 } 4988 4989 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4990 if (a->a_tvp) 4991 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4992 } 4993 if (a->a_tdvp != a->a_fdvp) 4994 vdrop(a->a_fdvp); 4995 if (a->a_tvp != a->a_fvp) 4996 vdrop(a->a_fvp); 4997 vdrop(a->a_tdvp); 4998 if (a->a_tvp) 4999 vdrop(a->a_tvp); 5000 } 5001 5002 void 5003 
vop_rmdir_post(void *ap, int rc) 5004 { 5005 struct vop_rmdir_args *a = ap; 5006 5007 if (!rc) { 5008 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5009 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5010 } 5011 } 5012 5013 void 5014 vop_setattr_post(void *ap, int rc) 5015 { 5016 struct vop_setattr_args *a = ap; 5017 5018 if (!rc) 5019 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5020 } 5021 5022 void 5023 vop_setextattr_post(void *ap, int rc) 5024 { 5025 struct vop_setextattr_args *a = ap; 5026 5027 if (!rc) 5028 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5029 } 5030 5031 void 5032 vop_symlink_post(void *ap, int rc) 5033 { 5034 struct vop_symlink_args *a = ap; 5035 5036 if (!rc) 5037 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5038 } 5039 5040 void 5041 vop_open_post(void *ap, int rc) 5042 { 5043 struct vop_open_args *a = ap; 5044 5045 if (!rc) 5046 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5047 } 5048 5049 void 5050 vop_close_post(void *ap, int rc) 5051 { 5052 struct vop_close_args *a = ap; 5053 5054 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5055 (a->a_vp->v_iflag & VI_DOOMED) == 0)) { 5056 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5057 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5058 } 5059 } 5060 5061 void 5062 vop_read_post(void *ap, int rc) 5063 { 5064 struct vop_read_args *a = ap; 5065 5066 if (!rc) 5067 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5068 } 5069 5070 void 5071 vop_readdir_post(void *ap, int rc) 5072 { 5073 struct vop_readdir_args *a = ap; 5074 5075 if (!rc) 5076 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5077 } 5078 5079 static struct knlist fs_knlist; 5080 5081 static void 5082 vfs_event_init(void *arg) 5083 { 5084 knlist_init_mtx(&fs_knlist, NULL); 5085 } 5086 /* XXX - correct order? */ 5087 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5088 5089 void 5090 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5091 { 5092 5093 KNOTE_UNLOCKED(&fs_knlist, event); 5094 } 5095 5096 static int filt_fsattach(struct knote *kn); 5097 static void filt_fsdetach(struct knote *kn); 5098 static int filt_fsevent(struct knote *kn, long hint); 5099 5100 struct filterops fs_filtops = { 5101 .f_isfd = 0, 5102 .f_attach = filt_fsattach, 5103 .f_detach = filt_fsdetach, 5104 .f_event = filt_fsevent 5105 }; 5106 5107 static int 5108 filt_fsattach(struct knote *kn) 5109 { 5110 5111 kn->kn_flags |= EV_CLEAR; 5112 knlist_add(&fs_knlist, kn, 0); 5113 return (0); 5114 } 5115 5116 static void 5117 filt_fsdetach(struct knote *kn) 5118 { 5119 5120 knlist_remove(&fs_knlist, kn, 0); 5121 } 5122 5123 static int 5124 filt_fsevent(struct knote *kn, long hint) 5125 { 5126 5127 kn->kn_fflags |= hint; 5128 return (kn->kn_fflags != 0); 5129 } 5130 5131 static int 5132 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5133 { 5134 struct vfsidctl vc; 5135 int error; 5136 struct mount *mp; 5137 5138 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5139 if (error) 5140 return (error); 5141 if (vc.vc_vers != VFS_CTL_VERS1) 5142 return (EINVAL); 5143 mp = vfs_getvfs(&vc.vc_fsid); 5144 if (mp == NULL) 5145 return (ENOENT); 5146 /* ensure that a specific sysctl goes to the right filesystem. 
*/ 5147 if (strcmp(vc.vc_fstypename, "*") != 0 && 5148 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5149 vfs_rel(mp); 5150 return (EINVAL); 5151 } 5152 VCTLTOREQ(&vc, req); 5153 error = VFS_SYSCTL(mp, vc.vc_op, req); 5154 vfs_rel(mp); 5155 return (error); 5156 } 5157 5158 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 5159 NULL, 0, sysctl_vfs_ctl, "", 5160 "Sysctl by fsid"); 5161 5162 /* 5163 * Function to initialize a va_filerev field sensibly. 5164 * XXX: Wouldn't a random number make a lot more sense ?? 5165 */ 5166 u_quad_t 5167 init_va_filerev(void) 5168 { 5169 struct bintime bt; 5170 5171 getbinuptime(&bt); 5172 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5173 } 5174 5175 static int filt_vfsread(struct knote *kn, long hint); 5176 static int filt_vfswrite(struct knote *kn, long hint); 5177 static int filt_vfsvnode(struct knote *kn, long hint); 5178 static void filt_vfsdetach(struct knote *kn); 5179 static struct filterops vfsread_filtops = { 5180 .f_isfd = 1, 5181 .f_detach = filt_vfsdetach, 5182 .f_event = filt_vfsread 5183 }; 5184 static struct filterops vfswrite_filtops = { 5185 .f_isfd = 1, 5186 .f_detach = filt_vfsdetach, 5187 .f_event = filt_vfswrite 5188 }; 5189 static struct filterops vfsvnode_filtops = { 5190 .f_isfd = 1, 5191 .f_detach = filt_vfsdetach, 5192 .f_event = filt_vfsvnode 5193 }; 5194 5195 static void 5196 vfs_knllock(void *arg) 5197 { 5198 struct vnode *vp = arg; 5199 5200 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5201 } 5202 5203 static void 5204 vfs_knlunlock(void *arg) 5205 { 5206 struct vnode *vp = arg; 5207 5208 VOP_UNLOCK(vp, 0); 5209 } 5210 5211 static void 5212 vfs_knl_assert_locked(void *arg) 5213 { 5214 #ifdef DEBUG_VFS_LOCKS 5215 struct vnode *vp = arg; 5216 5217 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5218 #endif 5219 } 5220 5221 static void 5222 vfs_knl_assert_unlocked(void *arg) 5223 { 5224 #ifdef DEBUG_VFS_LOCKS 5225 struct vnode *vp = arg; 5226 5227 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5228 #endif 5229 } 5230 5231 int 5232 vfs_kqfilter(struct vop_kqfilter_args *ap) 5233 { 5234 struct vnode *vp = ap->a_vp; 5235 struct knote *kn = ap->a_kn; 5236 struct knlist *knl; 5237 5238 switch (kn->kn_filter) { 5239 case EVFILT_READ: 5240 kn->kn_fop = &vfsread_filtops; 5241 break; 5242 case EVFILT_WRITE: 5243 kn->kn_fop = &vfswrite_filtops; 5244 break; 5245 case EVFILT_VNODE: 5246 kn->kn_fop = &vfsvnode_filtops; 5247 break; 5248 default: 5249 return (EINVAL); 5250 } 5251 5252 kn->kn_hook = (caddr_t)vp; 5253 5254 v_addpollinfo(vp); 5255 if (vp->v_pollinfo == NULL) 5256 return (ENOMEM); 5257 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5258 vhold(vp); 5259 knlist_add(knl, kn, 0); 5260 5261 return (0); 5262 } 5263 5264 /* 5265 * Detach knote from vnode 5266 */ 5267 static void 5268 filt_vfsdetach(struct knote *kn) 5269 { 5270 struct vnode *vp = (struct vnode *)kn->kn_hook; 5271 5272 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5273 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5274 vdrop(vp); 5275 } 5276 5277 /*ARGSUSED*/ 5278 static int 5279 filt_vfsread(struct knote *kn, long hint) 5280 { 5281 struct vnode *vp = (struct vnode *)kn->kn_hook; 5282 struct vattr va; 5283 int res; 5284 5285 /* 5286 * filesystem is gone, so set the EOF flag and schedule 5287 * the knote for deletion. 
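 *
 * (A zero hint means the filter was invoked from a kevent scan
 * rather than by a VFS_KNOTE event, so the vnode state must be
 * inspected directly; that is what the VBAD check below does.)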
5288 */
5289 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
5290 VI_LOCK(vp);
5291 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
5292 VI_UNLOCK(vp);
5293 return (1);
5294 }
5295
5296 if (VOP_GETATTR(vp, &va, curthread->td_ucred))
5297 return (0);
5298
5299 VI_LOCK(vp);
5300 kn->kn_data = va.va_size - kn->kn_fp->f_offset;
5301 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
5302 VI_UNLOCK(vp);
5303 return (res);
5304 }
5305
5306 /*ARGSUSED*/
5307 static int
5308 filt_vfswrite(struct knote *kn, long hint)
5309 {
5310 struct vnode *vp = (struct vnode *)kn->kn_hook;
5311
5312 VI_LOCK(vp);
5313
5314 /*
5315 * filesystem is gone, so set the EOF flag and schedule
5316 * the knote for deletion.
5317 */
5318 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
5319 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
5320
5321 kn->kn_data = 0;
5322 VI_UNLOCK(vp);
5323 return (1);
5324 }
5325
5326 static int
5327 filt_vfsvnode(struct knote *kn, long hint)
5328 {
5329 struct vnode *vp = (struct vnode *)kn->kn_hook;
5330 int res;
5331
5332 VI_LOCK(vp);
5333 if (kn->kn_sfflags & hint)
5334 kn->kn_fflags |= hint;
5335 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
5336 kn->kn_flags |= EV_EOF;
5337 VI_UNLOCK(vp);
5338 return (1);
5339 }
5340 res = (kn->kn_fflags != 0);
5341 VI_UNLOCK(vp);
5342 return (res);
5343 }
5344
5345 int
5346 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
5347 {
5348 int error;
5349
5350 if (dp->d_reclen > ap->a_uio->uio_resid)
5351 return (ENAMETOOLONG);
5352 error = uiomove(dp, dp->d_reclen, ap->a_uio);
5353 if (error) {
5354 if (ap->a_ncookies != NULL) {
5355 if (ap->a_cookies != NULL)
5356 free(ap->a_cookies, M_TEMP);
5357 ap->a_cookies = NULL;
5358 *ap->a_ncookies = 0;
5359 }
5360 return (error);
5361 }
5362 if (ap->a_ncookies == NULL)
5363 return (0);
5364
5365 KASSERT(ap->a_cookies,
5366 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
5367
5368 *ap->a_cookies = realloc(*ap->a_cookies,
5369 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
5370 (*ap->a_cookies)[*ap->a_ncookies] = off;
5371 *ap->a_ncookies += 1;
5372 return (0);
5373 }
5374
5375 /*
5376 * Mark for update the access time of the file if the filesystem
5377 * supports VOP_MARKATIME. This functionality is used by execve and
5378 * mmap, so we want to avoid the I/O implied by directly setting
5379 * va_atime for the sake of efficiency.
5380 */
5381 void
5382 vfs_mark_atime(struct vnode *vp, struct ucred *cred)
5383 {
5384 struct mount *mp;
5385
5386 mp = vp->v_mount;
5387 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
5388 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
5389 (void)VOP_MARKATIME(vp);
5390 }
5391
5392 /*
5393 * The purpose of this routine is to remove granularity from accmode_t,
5394 * reducing it to standard unix access bits - VEXEC, VREAD, VWRITE,
5395 * VADMIN and VAPPEND.
5396 *
5397 * If it returns 0, the caller is supposed to continue with the usual
5398 * access checks using 'accmode' as modified by this routine. If it
5399 * returns a nonzero value, the caller is supposed to return that value
5400 * as errno.
5401 *
5402 * Note that after this routine runs, accmode may be zero.
5403 */
5404 int
5405 vfs_unixify_accmode(accmode_t *accmode)
5406 {
5407 /*
5408 * There is no way to specify an explicit "deny" rule using
5409 * file mode or POSIX.1e ACLs.
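 *
 * As a worked example (assuming the VADMIN_PERMS and VSTAT_PERMS
 * definitions in sys/vnode.h): VWRITE_ACL | VREAD comes back as
 * VADMIN | VREAD, while VREAD_ATTRIBUTES alone reduces to zero.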
5410 */ 5411 if (*accmode & VEXPLICIT_DENY) { 5412 *accmode = 0; 5413 return (0); 5414 } 5415 5416 /* 5417 * None of these can be translated into usual access bits. 5418 * Also, the common case for NFSv4 ACLs is to not contain 5419 * either of these bits. Caller should check for VWRITE 5420 * on the containing directory instead. 5421 */ 5422 if (*accmode & (VDELETE_CHILD | VDELETE)) 5423 return (EPERM); 5424 5425 if (*accmode & VADMIN_PERMS) { 5426 *accmode &= ~VADMIN_PERMS; 5427 *accmode |= VADMIN; 5428 } 5429 5430 /* 5431 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5432 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5433 */ 5434 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5435 5436 return (0); 5437 } 5438 5439 /* 5440 * These are helper functions for filesystems to traverse all 5441 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5442 * 5443 * This interface replaces MNT_VNODE_FOREACH. 5444 */ 5445 5446 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5447 5448 struct vnode * 5449 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5450 { 5451 struct vnode *vp; 5452 5453 if (should_yield()) 5454 kern_yield(PRI_USER); 5455 MNT_ILOCK(mp); 5456 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5457 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5458 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5459 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5460 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5461 continue; 5462 VI_LOCK(vp); 5463 if ((vp->v_iflag & VI_DOOMED) != 0) { 5464 VI_UNLOCK(vp); 5465 continue; 5466 } 5467 break; 5468 } 5469 if (vp == NULL) { 5470 __mnt_vnode_markerfree_all(mvp, mp); 5471 /* MNT_IUNLOCK(mp); -- done in above function */ 5472 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5473 return (NULL); 5474 } 5475 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5476 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5477 MNT_IUNLOCK(mp); 5478 return (vp); 5479 } 5480 5481 struct vnode * 5482 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5483 { 5484 struct vnode *vp; 5485 5486 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5487 MNT_ILOCK(mp); 5488 MNT_REF(mp); 5489 (*mvp)->v_mount = mp; 5490 (*mvp)->v_type = VMARKER; 5491 5492 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5493 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5494 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5495 continue; 5496 VI_LOCK(vp); 5497 if ((vp->v_iflag & VI_DOOMED) != 0) { 5498 VI_UNLOCK(vp); 5499 continue; 5500 } 5501 break; 5502 } 5503 if (vp == NULL) { 5504 MNT_REL(mp); 5505 MNT_IUNLOCK(mp); 5506 free(*mvp, M_VNODE_MARKER); 5507 *mvp = NULL; 5508 return (NULL); 5509 } 5510 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5511 MNT_IUNLOCK(mp); 5512 return (vp); 5513 } 5514 5515 void 5516 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5517 { 5518 5519 if (*mvp == NULL) { 5520 MNT_IUNLOCK(mp); 5521 return; 5522 } 5523 5524 mtx_assert(MNT_MTX(mp), MA_OWNED); 5525 5526 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5527 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5528 MNT_REL(mp); 5529 MNT_IUNLOCK(mp); 5530 free(*mvp, M_VNODE_MARKER); 5531 *mvp = NULL; 5532 } 5533 5534 /* 5535 * These are helper functions for filesystems to traverse their 5536 * active vnodes. 
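 The iterator returns each
 * vnode with its vnode interlock held.  A minimal consumer (a
 * sketch mirroring vfs_msync() above; fs_wants() is a hypothetical
 * predicate) looks like:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
 *		if (!fs_wants(vp)) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		... vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY,
 *		curthread), do the work, vput(vp) ...
 *	}
 *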
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5537 */ 5538 static void 5539 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5540 { 5541 5542 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5543 5544 MNT_ILOCK(mp); 5545 MNT_REL(mp); 5546 MNT_IUNLOCK(mp); 5547 free(*mvp, M_VNODE_MARKER); 5548 *mvp = NULL; 5549 } 5550 5551 /* 5552 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5553 * conventional lock order during mnt_vnode_next_active iteration. 5554 * 5555 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5556 * The list lock is dropped and reacquired. On success, both locks are held. 5557 * On failure, the mount vnode list lock is held but the vnode interlock is 5558 * not, and the procedure may have yielded. 5559 */ 5560 static bool 5561 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5562 struct vnode *vp) 5563 { 5564 const struct vnode *tmp; 5565 bool held, ret; 5566 5567 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5568 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5569 ("%s: bad marker", __func__)); 5570 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5571 ("%s: inappropriate vnode", __func__)); 5572 ASSERT_VI_UNLOCKED(vp, __func__); 5573 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5574 5575 ret = false; 5576 5577 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5578 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5579 5580 /* 5581 * Use a hold to prevent vp from disappearing while the mount vnode 5582 * list lock is dropped and reacquired. Normally a hold would be 5583 * acquired with vhold(), but that might try to acquire the vnode 5584 * interlock, which would be a LOR with the mount vnode list lock. 5585 */ 5586 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5587 mtx_unlock(&mp->mnt_listmtx); 5588 if (!held) 5589 goto abort; 5590 VI_LOCK(vp); 5591 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5592 vdropl(vp); 5593 goto abort; 5594 } 5595 mtx_lock(&mp->mnt_listmtx); 5596 5597 /* 5598 * Determine whether the vnode is still the next one after the marker, 5599 * excepting any other markers. If the vnode has not been doomed by 5600 * vgone() then the hold should have ensured that it remained on the 5601 * active list. If it has been doomed but is still on the active list, 5602 * don't abort, but rather skip over it (avoid spinning on doomed 5603 * vnodes). 5604 */ 5605 tmp = mvp; 5606 do { 5607 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5608 } while (tmp != NULL && tmp->v_type == VMARKER); 5609 if (tmp != vp) { 5610 mtx_unlock(&mp->mnt_listmtx); 5611 VI_UNLOCK(vp); 5612 goto abort; 5613 } 5614 5615 ret = true; 5616 goto out; 5617 abort: 5618 maybe_yield(); 5619 mtx_lock(&mp->mnt_listmtx); 5620 out: 5621 if (ret) 5622 ASSERT_VI_LOCKED(vp, __func__); 5623 else 5624 ASSERT_VI_UNLOCKED(vp, __func__); 5625 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5626 return (ret); 5627 } 5628 5629 static struct vnode * 5630 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5631 { 5632 struct vnode *vp, *nvp; 5633 5634 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5635 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5636 restart: 5637 vp = TAILQ_NEXT(*mvp, v_actfreelist); 5638 while (vp != NULL) { 5639 if (vp->v_type == VMARKER) { 5640 vp = TAILQ_NEXT(vp, v_actfreelist); 5641 continue; 5642 } 5643 /* 5644 * Try-lock because this is the wrong lock order. 
If that does 5645 * not succeed, drop the mount vnode list lock and try to 5646 * reacquire it and the vnode interlock in the right order. 5647 */ 5648 if (!VI_TRYLOCK(vp) && 5649 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 5650 goto restart; 5651 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 5652 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 5653 ("alien vnode on the active list %p %p", vp, mp)); 5654 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 5655 break; 5656 nvp = TAILQ_NEXT(vp, v_actfreelist); 5657 VI_UNLOCK(vp); 5658 vp = nvp; 5659 } 5660 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5661 5662 /* Check if we are done */ 5663 if (vp == NULL) { 5664 mtx_unlock(&mp->mnt_listmtx); 5665 mnt_vnode_markerfree_active(mvp, mp); 5666 return (NULL); 5667 } 5668 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 5669 mtx_unlock(&mp->mnt_listmtx); 5670 ASSERT_VI_LOCKED(vp, "active iter"); 5671 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 5672 return (vp); 5673 } 5674 5675 struct vnode * 5676 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5677 { 5678 5679 if (should_yield()) 5680 kern_yield(PRI_USER); 5681 mtx_lock(&mp->mnt_listmtx); 5682 return (mnt_vnode_next_active(mvp, mp)); 5683 } 5684 5685 struct vnode * 5686 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 5687 { 5688 struct vnode *vp; 5689 5690 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5691 MNT_ILOCK(mp); 5692 MNT_REF(mp); 5693 MNT_IUNLOCK(mp); 5694 (*mvp)->v_type = VMARKER; 5695 (*mvp)->v_mount = mp; 5696 5697 mtx_lock(&mp->mnt_listmtx); 5698 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 5699 if (vp == NULL) { 5700 mtx_unlock(&mp->mnt_listmtx); 5701 mnt_vnode_markerfree_active(mvp, mp); 5702 return (NULL); 5703 } 5704 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 5705 return (mnt_vnode_next_active(mvp, mp)); 5706 } 5707 5708 void 5709 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5710 { 5711 5712 if (*mvp == NULL) 5713 return; 5714 5715 mtx_lock(&mp->mnt_listmtx); 5716 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 5717 mtx_unlock(&mp->mnt_listmtx); 5718 mnt_vnode_markerfree_active(mvp, mp); 5719 } 5720
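
/*
 * A filesystem-side sketch of the "all vnodes" iterator above,
 * modeled on a typical VFS_SYNC() loop (fs_sync_one() and the
 * restart label are hypothetical; see MNT_VNODE_FOREACH_ALL() and
 * MNT_VNODE_FOREACH_ALL_ABORT() in sys/mount.h):
 *
 *	struct vnode *vp, *mvp;
 *	int error;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type == VNON) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
 *		    curthread);
 *		if (error != 0) {
 *			if (error == ENOENT) {
 *				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 *				goto restart;
 *			}
 *			continue;
 *		}
 *		error = fs_sync_one(vp);
 *		vput(vp);
 *	}
 */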