/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS,
    &reassignbufcalls, 0, "Number of calls to reassignbuf");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	long freevnodes;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static u_long vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru and
	 * getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "UL", "Target for maximum number of vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "UL", "Target for minimum number of \"free\" vnodes");

SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
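 *
 * Note (added for clarity): the trie nodes are allocated from buf_trie_zone
 * with M_NOWAIT; the zone is preallocated in vntblinit() below so that
 * reassignbuf() callers never see an insertion failure.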
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return uma_zalloc(buf_trie_zone, M_NOWAIT);
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ	148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ	92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	int cpu, physvnodes, virtvnodes;
	u_int i;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	deferred_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Eventually, mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mp, ref, 1);
		vfs_mp_count_add_pcpu(mp, lockref, 1);
		vfs_op_thread_exit(mp);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount point it tried to busy is no
	 * longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mp, lockref, 1);
		vfs_mp_count_sub_pcpu(mp, ref, 1);
		vfs_op_thread_exit(mp);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In worst case we may
 * get pointer to unmounted or even different filesystem, so we have to
 * check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
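 *
 * Note (added for clarity): the precision is governed by the
 * vfs.timestamp_precision knob declared above; for example (illustrative
 * only), "sysctl vfs.timestamp_precision=3" selects full nanosecond
 * precision via nanotime(), while the default TSP_USEC truncates to
 * microseconds.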
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 * 			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		if (!VI_TRYLOCK(vp))
			goto next_iter;

		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    vp->v_type == VBAD || vp->v_type == VNON ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		if (should_yield())
			kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
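 *
 * Note (added for clarity): vnode_list_mtx must be held on entry (asserted
 * below); it is dropped and reacquired around each individual recycling
 * attempt.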
 */
static int
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	int ocount;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	ocount = count;
	mvp = vnode_list_free_marker;
restart:
	vp = mvp;
	while (count > 0) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach an unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if (vp->v_holdcnt > 0 || (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			continue;
		}
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);
		count--;
		mtx_unlock(&vnode_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		vdrop(vp);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	return (ocount - count);
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

/*
 * The main freevnodes counter is only updated when threads requeue their vnode
 * batches.  CPUs are conditionally walked to compute a more accurate total.
 *
 * Limit how much slop we are willing to tolerate.  Note: the actual value
 * at any given moment can still exceed slop, but it should not be by significant
 * margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 128

static u_long
vnlru_read_freevnodes(void)
{
	struct vdbatch *vd;
	long slop;
	int cpu;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (freevnodes > freevnodes_old)
		slop = freevnodes - freevnodes_old;
	else
		slop = freevnodes_old - freevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (freevnodes >= 0 ? freevnodes : 0);
	freevnodes_old = freevnodes;
	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		freevnodes_old += vd->freevnodes;
	}
	return (freevnodes_old >= 0 ?
	    freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static bool
vnlru_under_unlocked(u_long rnumvnodes, u_long limit)
{
	long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = atomic_load_long(&freevnodes);
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes, NULL);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.
		 * Normally it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp);
		VI_UNLOCK(vp);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if (!VN_IS_DOOMED(vp)) {
		counter_u64_add(recycles_free_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp);
	VI_UNLOCK(vp);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Allocate a new vnode.
 *
 * The operation never returns an error.  Returning an error was disabled
 * in r145385 (dated 2005) with the following comment:
 *
 * XXX Not all VFS_VGET/ffs_vget callers check returns.
 *
 * Given the age of this commit (almost 15 years at the time of writing this
 * comment) restoring the ability to fail requires a significant audit of
 * all codepaths.
 *
 * The routine can try to free a vnode or stall for up to 1 second waiting for
 * vnlru to clear things up, but ultimately always performs an M_WAITOK
 * allocation.
 */
static u_long vn_alloc_cyclecount;

static struct vnode * __noinline
vn_alloc_hard(struct mount *mp)
{
	u_long rnumvnodes, rfreevnodes;

	mtx_lock(&vnode_list_mtx);
	rnumvnodes = atomic_load_long(&numvnodes);
	if (rnumvnodes + 1 < desiredvnodes) {
		vn_alloc_cyclecount = 0;
		goto alloc;
	}
	rfreevnodes = vnlru_read_freevnodes();
	if (vn_alloc_cyclecount++ >= rfreevnodes) {
		vn_alloc_cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (vnlru_free_locked(1, NULL) > 0)
		goto alloc;
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		/*
		 * Wait for space for a new vnode.
		 */
		vnlru_kick();
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1, NULL);
	}
alloc:
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick();
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc(vnode_zone, M_WAITOK));
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	u_long rnumvnodes;

	if (__predict_false(vn_alloc_cyclecount != 0))
		return (vn_alloc_hard(mp));
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
		atomic_subtract_long(&numvnodes, 1);
		return (vn_alloc_hard(mp));
	}

	return (uma_zalloc(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
#ifdef WITNESS
	if (lo->lo_name != tag) {
#endif
		lo->lo_name = tag;
#ifdef WITNESS
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
#endif
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

void
getnewvnode_reserve(void)
{
	struct thread *td;

	td = curthread;
	MPASS(td->td_vp_reserved == NULL);
	td->td_vp_reserved = vn_alloc(NULL);
}

void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vn_free(td->td_vp_reserved);
		td->td_vp_reserved = NULL;
	}
}

static void
freevnode(struct vnode *vp)
{
	struct bufobj *bo;

	/*
	 * The vnode has been marked for destruction, so free it.
	 *
	 * The vnode will be returned to the zone where it will
	 * normally remain until it is needed for another vnode.  We
	 * need to cleanup (or verify that the cleanup has already
	 * been done) any residual data left from its current use
	 * so as not to contaminate the freshly allocated vnode.
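	 *
	 * Note (added for clarity): the caller passes the vnode with its
	 * interlock held; the interlock is released below (VI_UNLOCK) once
	 * the consistency assertions have run.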
	 */
	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
	bo = &vp->v_bufobj;
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
	    ("clean blk trie not empty"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
	    ("dirty blk trie not empty"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
	    ("Dangling rangelock waiters"));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_vnode_destroy(vp);
#endif
	if (vp->v_pollinfo != NULL) {
		destroy_vpollinfo(vp->v_pollinfo);
		vp->v_pollinfo = NULL;
	}
#ifdef INVARIANTS
	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	vp->v_mountedhere = NULL;
	vp->v_unpcb = NULL;
	vp->v_rdev = NULL;
	vp->v_fifoinfo = NULL;
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	vp->v_irflag = 0;
	vp->v_iflag = 0;
	vp->v_vflag = 0;
	bo->bo_flag = 0;
	vn_free(vp);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (vp->v_mflag & VMP_LAZYLIST) {
		mtx_lock(&mp->mnt_listmtx);
		if (vp->v_mflag & VMP_LAZYLIST) {
			vp->v_mflag &= ~VMP_LAZYLIST;
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
			mp->mnt_lazyvnodelistsize--;
		}
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
1862 * Hence the need to hold the vnode interlock throughout. 1863 */ 1864 MNT_ILOCK(mp); 1865 VI_LOCK(vp); 1866 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1867 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1868 mp->mnt_nvnodelistsize == 0)) && 1869 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1870 VI_UNLOCK(vp); 1871 MNT_IUNLOCK(mp); 1872 if (dtr != NULL) 1873 dtr(vp, dtr_arg); 1874 return (EBUSY); 1875 } 1876 vp->v_mount = mp; 1877 MNT_REF(mp); 1878 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1879 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1880 ("neg mount point vnode list size")); 1881 mp->mnt_nvnodelistsize++; 1882 VI_UNLOCK(vp); 1883 MNT_IUNLOCK(mp); 1884 return (0); 1885 } 1886 1887 int 1888 insmntque(struct vnode *vp, struct mount *mp) 1889 { 1890 1891 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1892 } 1893 1894 /* 1895 * Flush out and invalidate all buffers associated with a bufobj 1896 * Called with the underlying object locked. 1897 */ 1898 int 1899 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1900 { 1901 int error; 1902 1903 BO_LOCK(bo); 1904 if (flags & V_SAVE) { 1905 error = bufobj_wwait(bo, slpflag, slptimeo); 1906 if (error) { 1907 BO_UNLOCK(bo); 1908 return (error); 1909 } 1910 if (bo->bo_dirty.bv_cnt > 0) { 1911 BO_UNLOCK(bo); 1912 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1913 return (error); 1914 /* 1915 * XXX We could save a lock/unlock if this was only 1916 * enabled under INVARIANTS 1917 */ 1918 BO_LOCK(bo); 1919 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1920 panic("vinvalbuf: dirty bufs"); 1921 } 1922 } 1923 /* 1924 * If you alter this loop please notice that interlock is dropped and 1925 * reacquired in flushbuflist. Special care is needed to ensure that 1926 * no race conditions occur from this. 1927 */ 1928 do { 1929 error = flushbuflist(&bo->bo_clean, 1930 flags, bo, slpflag, slptimeo); 1931 if (error == 0 && !(flags & V_CLEANONLY)) 1932 error = flushbuflist(&bo->bo_dirty, 1933 flags, bo, slpflag, slptimeo); 1934 if (error != 0 && error != EAGAIN) { 1935 BO_UNLOCK(bo); 1936 return (error); 1937 } 1938 } while (error != 0); 1939 1940 /* 1941 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1942 * have write I/O in-progress but if there is a VM object then the 1943 * VM object can also have read-I/O in-progress. 1944 */ 1945 do { 1946 bufobj_wwait(bo, 0, 0); 1947 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1948 BO_UNLOCK(bo); 1949 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1950 BO_LOCK(bo); 1951 } 1952 } while (bo->bo_numoutput > 0); 1953 BO_UNLOCK(bo); 1954 1955 /* 1956 * Destroy the copy in the VM cache, too. 1957 */ 1958 if (bo->bo_object != NULL && 1959 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1960 VM_OBJECT_WLOCK(bo->bo_object); 1961 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1962 OBJPR_CLEANONLY : 0); 1963 VM_OBJECT_WUNLOCK(bo->bo_object); 1964 } 1965 1966 #ifdef INVARIANTS 1967 BO_LOCK(bo); 1968 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1969 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1970 bo->bo_clean.bv_cnt > 0)) 1971 panic("vinvalbuf: flush failed"); 1972 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1973 bo->bo_dirty.bv_cnt > 0) 1974 panic("vinvalbuf: flush dirty failed"); 1975 BO_UNLOCK(bo); 1976 #endif 1977 return (0); 1978 } 1979 1980 /* 1981 * Flush out and invalidate all buffers associated with a vnode. 1982 * Called with the underlying object locked. 
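 *
 * A minimal usage sketch (assuming the caller already holds the
 * vnode lock, as asserted below); V_SAVE writes dirty buffers
 * back before the invalidation:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *	VOP_UNLOCK(vp);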
1983 */ 1984 int 1985 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1986 { 1987 1988 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1989 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1990 if (vp->v_object != NULL && vp->v_object->handle != vp) 1991 return (0); 1992 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1993 } 1994 1995 /* 1996 * Flush out buffers on the specified list. 1997 * 1998 */ 1999 static int 2000 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2001 int slptimeo) 2002 { 2003 struct buf *bp, *nbp; 2004 int retval, error; 2005 daddr_t lblkno; 2006 b_xflags_t xflags; 2007 2008 ASSERT_BO_WLOCKED(bo); 2009 2010 retval = 0; 2011 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2012 /* 2013 * If we are flushing both V_NORMAL and V_ALT buffers then 2014 * do not skip any buffers. If we are flushing only V_NORMAL 2015 * buffers then skip buffers marked as BX_ALTDATA. If we are 2016 * flushing only V_ALT buffers then skip buffers not marked 2017 * as BX_ALTDATA. 2018 */ 2019 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2020 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2021 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2022 continue; 2023 } 2024 if (nbp != NULL) { 2025 lblkno = nbp->b_lblkno; 2026 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2027 } 2028 retval = EAGAIN; 2029 error = BUF_TIMELOCK(bp, 2030 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2031 "flushbuf", slpflag, slptimeo); 2032 if (error) { 2033 BO_LOCK(bo); 2034 return (error != ENOLCK ? error : EAGAIN); 2035 } 2036 KASSERT(bp->b_bufobj == bo, 2037 ("bp %p wrong b_bufobj %p should be %p", 2038 bp, bp->b_bufobj, bo)); 2039 /* 2040 * XXX Since there are no node locks for NFS, I 2041 * believe there is a slight chance that a delayed 2042 * write will occur while sleeping just above, so 2043 * check for it. 2044 */ 2045 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2046 (flags & V_SAVE)) { 2047 bremfree(bp); 2048 bp->b_flags |= B_ASYNC; 2049 bwrite(bp); 2050 BO_LOCK(bo); 2051 return (EAGAIN); /* XXX: why not loop ? */ 2052 } 2053 bremfree(bp); 2054 bp->b_flags |= (B_INVAL | B_RELBUF); 2055 bp->b_flags &= ~B_ASYNC; 2056 brelse(bp); 2057 BO_LOCK(bo); 2058 if (nbp == NULL) 2059 break; 2060 nbp = gbincore(bo, lblkno); 2061 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2062 != xflags) 2063 break; /* nbp invalid */ 2064 } 2065 return (retval); 2066 } 2067 2068 int 2069 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2070 { 2071 struct buf *bp; 2072 int error; 2073 daddr_t lblkno; 2074 2075 ASSERT_BO_LOCKED(bo); 2076 2077 for (lblkno = startn;;) { 2078 again: 2079 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2080 if (bp == NULL || bp->b_lblkno >= endn || 2081 bp->b_lblkno < startn) 2082 break; 2083 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2084 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2085 if (error != 0) { 2086 BO_RLOCK(bo); 2087 if (error == ENOLCK) 2088 goto again; 2089 return (error); 2090 } 2091 KASSERT(bp->b_bufobj == bo, 2092 ("bp %p wrong b_bufobj %p should be %p", 2093 bp, bp->b_bufobj, bo)); 2094 lblkno = bp->b_lblkno + 1; 2095 if ((bp->b_flags & B_MANAGED) == 0) 2096 bremfree(bp); 2097 bp->b_flags |= B_RELBUF; 2098 /* 2099 * In the VMIO case, use the B_NOREUSE flag to hint that the 2100 * pages backing each buffer in the range are unlikely to be 2101 * reused. 
Dirty buffers will have the hint applied once 2102 * they've been written. 2103 */ 2104 if ((bp->b_flags & B_VMIO) != 0) 2105 bp->b_flags |= B_NOREUSE; 2106 brelse(bp); 2107 BO_RLOCK(bo); 2108 } 2109 return (0); 2110 } 2111 2112 /* 2113 * Truncate a file's buffer and pages to a specified length. This 2114 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2115 * sync activity. 2116 */ 2117 int 2118 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2119 { 2120 struct buf *bp, *nbp; 2121 struct bufobj *bo; 2122 daddr_t startlbn; 2123 2124 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2125 vp, blksize, (uintmax_t)length); 2126 2127 /* 2128 * Round up to the *next* lbn. 2129 */ 2130 startlbn = howmany(length, blksize); 2131 2132 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2133 2134 bo = &vp->v_bufobj; 2135 restart_unlocked: 2136 BO_LOCK(bo); 2137 2138 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2139 ; 2140 2141 if (length > 0) { 2142 restartsync: 2143 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2144 if (bp->b_lblkno > 0) 2145 continue; 2146 /* 2147 * Since we hold the vnode lock this should only 2148 * fail if we're racing with the buf daemon. 2149 */ 2150 if (BUF_LOCK(bp, 2151 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2152 BO_LOCKPTR(bo)) == ENOLCK) 2153 goto restart_unlocked; 2154 2155 VNASSERT((bp->b_flags & B_DELWRI), vp, 2156 ("buf(%p) on dirty queue without DELWRI", bp)); 2157 2158 bremfree(bp); 2159 bawrite(bp); 2160 BO_LOCK(bo); 2161 goto restartsync; 2162 } 2163 } 2164 2165 bufobj_wwait(bo, 0, 0); 2166 BO_UNLOCK(bo); 2167 vnode_pager_setsize(vp, length); 2168 2169 return (0); 2170 } 2171 2172 /* 2173 * Invalidate the cached pages of a file's buffer within the range of block 2174 * numbers [startlbn, endlbn). 
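 *
 * For example, a filesystem punching a hole over blocks [8, 16)
 * of a file whose bufobj uses 32KB blocks might do (a sketch;
 * blksize must match bo_bsize, see the MPASS below):
 *
 *	v_inval_buf_range(vp, 8, 16, 32768);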
2175 */ 2176 void 2177 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2178 int blksize) 2179 { 2180 struct bufobj *bo; 2181 off_t start, end; 2182 2183 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2184 2185 start = blksize * startlbn; 2186 end = blksize * endlbn; 2187 2188 bo = &vp->v_bufobj; 2189 BO_LOCK(bo); 2190 MPASS(blksize == bo->bo_bsize); 2191 2192 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2193 ; 2194 2195 BO_UNLOCK(bo); 2196 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2197 } 2198 2199 static int 2200 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2201 daddr_t startlbn, daddr_t endlbn) 2202 { 2203 struct buf *bp, *nbp; 2204 bool anyfreed; 2205 2206 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2207 ASSERT_BO_LOCKED(bo); 2208 2209 do { 2210 anyfreed = false; 2211 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2212 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2213 continue; 2214 if (BUF_LOCK(bp, 2215 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2216 BO_LOCKPTR(bo)) == ENOLCK) { 2217 BO_LOCK(bo); 2218 return (EAGAIN); 2219 } 2220 2221 bremfree(bp); 2222 bp->b_flags |= B_INVAL | B_RELBUF; 2223 bp->b_flags &= ~B_ASYNC; 2224 brelse(bp); 2225 anyfreed = true; 2226 2227 BO_LOCK(bo); 2228 if (nbp != NULL && 2229 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2230 nbp->b_vp != vp || 2231 (nbp->b_flags & B_DELWRI) != 0)) 2232 return (EAGAIN); 2233 } 2234 2235 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2236 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2237 continue; 2238 if (BUF_LOCK(bp, 2239 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2240 BO_LOCKPTR(bo)) == ENOLCK) { 2241 BO_LOCK(bo); 2242 return (EAGAIN); 2243 } 2244 bremfree(bp); 2245 bp->b_flags |= B_INVAL | B_RELBUF; 2246 bp->b_flags &= ~B_ASYNC; 2247 brelse(bp); 2248 anyfreed = true; 2249 2250 BO_LOCK(bo); 2251 if (nbp != NULL && 2252 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2253 (nbp->b_vp != vp) || 2254 (nbp->b_flags & B_DELWRI) == 0)) 2255 return (EAGAIN); 2256 } 2257 } while (anyfreed); 2258 return (0); 2259 } 2260 2261 static void 2262 buf_vlist_remove(struct buf *bp) 2263 { 2264 struct bufv *bv; 2265 2266 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2267 ASSERT_BO_WLOCKED(bp->b_bufobj); 2268 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2269 (BX_VNDIRTY|BX_VNCLEAN), 2270 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2271 if (bp->b_xflags & BX_VNDIRTY) 2272 bv = &bp->b_bufobj->bo_dirty; 2273 else 2274 bv = &bp->b_bufobj->bo_clean; 2275 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2276 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2277 bv->bv_cnt--; 2278 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2279 } 2280 2281 /* 2282 * Add the buffer to the sorted clean or dirty block list. 2283 * 2284 * NOTE: xflags is passed as a constant, optimizing this inline function! 2285 */ 2286 static void 2287 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2288 { 2289 struct bufv *bv; 2290 struct buf *n; 2291 int error; 2292 2293 ASSERT_BO_WLOCKED(bo); 2294 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2295 ("dead bo %p", bo)); 2296 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2297 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2298 bp->b_xflags |= xflags; 2299 if (xflags & BX_VNDIRTY) 2300 bv = &bo->bo_dirty; 2301 else 2302 bv = &bo->bo_clean; 2303 2304 /* 2305 * Keep the list ordered. Optimize empty list insertion. 
Assume 2306 * we tend to grow at the tail so lookup_le should usually be cheaper 2307 * than _ge. 2308 */ 2309 if (bv->bv_cnt == 0 || 2310 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2311 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2312 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2313 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2314 else 2315 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2316 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2317 if (error) 2318 panic("buf_vlist_add: Preallocated nodes insufficient."); 2319 bv->bv_cnt++; 2320 } 2321 2322 /* 2323 * Look up a buffer using the buffer tries. 2324 */ 2325 struct buf * 2326 gbincore(struct bufobj *bo, daddr_t lblkno) 2327 { 2328 struct buf *bp; 2329 2330 ASSERT_BO_LOCKED(bo); 2331 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2332 if (bp != NULL) 2333 return (bp); 2334 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2335 } 2336 2337 /* 2338 * Associate a buffer with a vnode. 2339 */ 2340 void 2341 bgetvp(struct vnode *vp, struct buf *bp) 2342 { 2343 struct bufobj *bo; 2344 2345 bo = &vp->v_bufobj; 2346 ASSERT_BO_WLOCKED(bo); 2347 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2348 2349 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2350 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2351 ("bgetvp: bp already attached! %p", bp)); 2352 2353 vhold(vp); 2354 bp->b_vp = vp; 2355 bp->b_bufobj = bo; 2356 /* 2357 * Insert onto list for new vnode. 2358 */ 2359 buf_vlist_add(bp, bo, BX_VNCLEAN); 2360 } 2361 2362 /* 2363 * Disassociate a buffer from a vnode. 2364 */ 2365 void 2366 brelvp(struct buf *bp) 2367 { 2368 struct bufobj *bo; 2369 struct vnode *vp; 2370 2371 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2372 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2373 2374 /* 2375 * Delete from old vnode list, if on one. 2376 */ 2377 vp = bp->b_vp; /* XXX */ 2378 bo = bp->b_bufobj; 2379 BO_LOCK(bo); 2380 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2381 buf_vlist_remove(bp); 2382 else 2383 panic("brelvp: Buffer %p not on queue.", bp); 2384 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2385 bo->bo_flag &= ~BO_ONWORKLST; 2386 mtx_lock(&sync_mtx); 2387 LIST_REMOVE(bo, bo_synclist); 2388 syncer_worklist_len--; 2389 mtx_unlock(&sync_mtx); 2390 } 2391 bp->b_vp = NULL; 2392 bp->b_bufobj = NULL; 2393 BO_UNLOCK(bo); 2394 vdrop(vp); 2395 } 2396 2397 /* 2398 * Add an item to the syncer work queue. 
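 *
 * The delay selects a bucket in the syncer wheel; e.g.
 * reassignbuf() below queues dirty bufobjs with filedelay,
 * dirdelay or metadelay depending on the vnode type:
 *
 *	vn_syncer_add_to_worklist(bo, filedelay);	(a sketch)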
2399 */ 2400 static void 2401 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2402 { 2403 int slot; 2404 2405 ASSERT_BO_WLOCKED(bo); 2406 2407 mtx_lock(&sync_mtx); 2408 if (bo->bo_flag & BO_ONWORKLST) 2409 LIST_REMOVE(bo, bo_synclist); 2410 else { 2411 bo->bo_flag |= BO_ONWORKLST; 2412 syncer_worklist_len++; 2413 } 2414 2415 if (delay > syncer_maxdelay - 2) 2416 delay = syncer_maxdelay - 2; 2417 slot = (syncer_delayno + delay) & syncer_mask; 2418 2419 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2420 mtx_unlock(&sync_mtx); 2421 } 2422 2423 static int 2424 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2425 { 2426 int error, len; 2427 2428 mtx_lock(&sync_mtx); 2429 len = syncer_worklist_len - sync_vnode_count; 2430 mtx_unlock(&sync_mtx); 2431 error = SYSCTL_OUT(req, &len, sizeof(len)); 2432 return (error); 2433 } 2434 2435 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2436 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2437 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2438 2439 static struct proc *updateproc; 2440 static void sched_sync(void); 2441 static struct kproc_desc up_kp = { 2442 "syncer", 2443 sched_sync, 2444 &updateproc 2445 }; 2446 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2447 2448 static int 2449 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2450 { 2451 struct vnode *vp; 2452 struct mount *mp; 2453 2454 *bo = LIST_FIRST(slp); 2455 if (*bo == NULL) 2456 return (0); 2457 vp = bo2vnode(*bo); 2458 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2459 return (1); 2460 /* 2461 * We use vhold in case the vnode does not 2462 * successfully sync. vhold prevents the vnode from 2463 * going away when we unlock the sync_mtx so that 2464 * we can acquire the vnode interlock. 2465 */ 2466 vholdl(vp); 2467 mtx_unlock(&sync_mtx); 2468 VI_UNLOCK(vp); 2469 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2470 vdrop(vp); 2471 mtx_lock(&sync_mtx); 2472 return (*bo == LIST_FIRST(slp)); 2473 } 2474 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2475 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2476 VOP_UNLOCK(vp); 2477 vn_finished_write(mp); 2478 BO_LOCK(*bo); 2479 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2480 /* 2481 * Put us back on the worklist. The worklist 2482 * routine will remove us from our current 2483 * position and then add us back in at a later 2484 * position. 2485 */ 2486 vn_syncer_add_to_worklist(*bo, syncdelay); 2487 } 2488 BO_UNLOCK(*bo); 2489 vdrop(vp); 2490 mtx_lock(&sync_mtx); 2491 return (0); 2492 } 2493 2494 static int first_printf = 1; 2495 2496 /* 2497 * System filesystem synchronizer daemon. 
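 *
 * Conceptually the worklist is a wheel: an item queued with delay N
 * lands in bucket (syncer_delayno + N) & syncer_mask and is
 * serviced roughly N seconds later, as the loop below advances
 * syncer_delayno about once per second.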
2498 */ 2499 static void 2500 sched_sync(void) 2501 { 2502 struct synclist *next, *slp; 2503 struct bufobj *bo; 2504 long starttime; 2505 struct thread *td = curthread; 2506 int last_work_seen; 2507 int net_worklist_len; 2508 int syncer_final_iter; 2509 int error; 2510 2511 last_work_seen = 0; 2512 syncer_final_iter = 0; 2513 syncer_state = SYNCER_RUNNING; 2514 starttime = time_uptime; 2515 td->td_pflags |= TDP_NORUNNINGBUF; 2516 2517 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2518 SHUTDOWN_PRI_LAST); 2519 2520 mtx_lock(&sync_mtx); 2521 for (;;) { 2522 if (syncer_state == SYNCER_FINAL_DELAY && 2523 syncer_final_iter == 0) { 2524 mtx_unlock(&sync_mtx); 2525 kproc_suspend_check(td->td_proc); 2526 mtx_lock(&sync_mtx); 2527 } 2528 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2529 if (syncer_state != SYNCER_RUNNING && 2530 starttime != time_uptime) { 2531 if (first_printf) { 2532 printf("\nSyncing disks, vnodes remaining... "); 2533 first_printf = 0; 2534 } 2535 printf("%d ", net_worklist_len); 2536 } 2537 starttime = time_uptime; 2538 2539 /* 2540 * Push files whose dirty time has expired. Be careful 2541 * of interrupt race on slp queue. 2542 * 2543 * Skip over empty worklist slots when shutting down. 2544 */ 2545 do { 2546 slp = &syncer_workitem_pending[syncer_delayno]; 2547 syncer_delayno += 1; 2548 if (syncer_delayno == syncer_maxdelay) 2549 syncer_delayno = 0; 2550 next = &syncer_workitem_pending[syncer_delayno]; 2551 /* 2552 * If the worklist has wrapped since 2553 * it was emptied of all but syncer vnodes, 2554 * switch to the FINAL_DELAY state and run 2555 * for one more second. 2556 */ 2557 if (syncer_state == SYNCER_SHUTTING_DOWN && 2558 net_worklist_len == 0 && 2559 last_work_seen == syncer_delayno) { 2560 syncer_state = SYNCER_FINAL_DELAY; 2561 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2562 } 2563 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2564 syncer_worklist_len > 0); 2565 2566 /* 2567 * Keep track of the last time there was anything 2568 * on the worklist other than syncer vnodes. 2569 * Return to the SHUTTING_DOWN state if any 2570 * new work appears. 2571 */ 2572 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2573 last_work_seen = syncer_delayno; 2574 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2575 syncer_state = SYNCER_SHUTTING_DOWN; 2576 while (!LIST_EMPTY(slp)) { 2577 error = sync_vnode(slp, &bo, td); 2578 if (error == 1) { 2579 LIST_REMOVE(bo, bo_synclist); 2580 LIST_INSERT_HEAD(next, bo, bo_synclist); 2581 continue; 2582 } 2583 2584 if (first_printf == 0) { 2585 /* 2586 * Drop the sync mutex, because some watchdog 2587 * drivers need to sleep while patting the watchdog. 2588 */ 2589 mtx_unlock(&sync_mtx); 2590 wdog_kern_pat(WD_LASTVAL); 2591 mtx_lock(&sync_mtx); 2592 } 2593 2594 } 2595 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2596 syncer_final_iter--; 2597 /* 2598 * The variable rushjob allows the kernel to speed up the 2599 * processing of the filesystem syncer process. A rushjob 2600 * value of N tells the filesystem syncer to process the next 2601 * N seconds worth of work on its queue ASAP. Currently rushjob 2602 * is used by the soft update code to speed up the filesystem 2603 * syncer process when the incore state is getting so far 2604 * ahead of the disk that the kernel memory pool is being 2605 * threatened with exhaustion.
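 *
 * For instance, each rushjob credit makes the loop below run one
 * extra iteration without sleeping; a caller simply does
 * (a sketch of the effect):
 *
 *	speedup_syncer();	(rushjob++: cover 1s of work early)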
2606 */ 2607 if (rushjob > 0) { 2608 rushjob -= 1; 2609 continue; 2610 } 2611 /* 2612 * Just sleep for a short period of time between 2613 * iterations when shutting down to allow some I/O 2614 * to happen. 2615 * 2616 * If it has taken us less than a second to process the 2617 * current work, then wait. Otherwise start right over 2618 * again. We can still lose time if any single round 2619 * takes more than two seconds, but it does not really 2620 * matter as we are just trying to generally pace the 2621 * filesystem activity. 2622 */ 2623 if (syncer_state != SYNCER_RUNNING || 2624 time_uptime == starttime) { 2625 thread_lock(td); 2626 sched_prio(td, PPAUSE); 2627 thread_unlock(td); 2628 } 2629 if (syncer_state != SYNCER_RUNNING) 2630 cv_timedwait(&sync_wakeup, &sync_mtx, 2631 hz / SYNCER_SHUTDOWN_SPEEDUP); 2632 else if (time_uptime == starttime) 2633 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2634 } 2635 } 2636 2637 /* 2638 * Request the syncer daemon to speed up its work. 2639 * We never push it to speed up more than half of its 2640 * normal turn time, otherwise it could take over the cpu. 2641 */ 2642 int 2643 speedup_syncer(void) 2644 { 2645 int ret = 0; 2646 2647 mtx_lock(&sync_mtx); 2648 if (rushjob < syncdelay / 2) { 2649 rushjob += 1; 2650 stat_rush_requests += 1; 2651 ret = 1; 2652 } 2653 mtx_unlock(&sync_mtx); 2654 cv_broadcast(&sync_wakeup); 2655 return (ret); 2656 } 2657 2658 /* 2659 * Tell the syncer to speed up its work and run through its work 2660 * list several times, then tell it to shut down. 2661 */ 2662 static void 2663 syncer_shutdown(void *arg, int howto) 2664 { 2665 2666 if (howto & RB_NOSYNC) 2667 return; 2668 mtx_lock(&sync_mtx); 2669 syncer_state = SYNCER_SHUTTING_DOWN; 2670 rushjob = 0; 2671 mtx_unlock(&sync_mtx); 2672 cv_broadcast(&sync_wakeup); 2673 kproc_shutdown(arg, howto); 2674 } 2675 2676 void 2677 syncer_suspend(void) 2678 { 2679 2680 syncer_shutdown(updateproc, 0); 2681 } 2682 2683 void 2684 syncer_resume(void) 2685 { 2686 2687 mtx_lock(&sync_mtx); 2688 first_printf = 1; 2689 syncer_state = SYNCER_RUNNING; 2690 mtx_unlock(&sync_mtx); 2691 cv_broadcast(&sync_wakeup); 2692 kproc_resume(updateproc); 2693 } 2694 2695 /* 2696 * Reassign a buffer from one vnode to another. 2697 * Used to assign file-specific control information 2698 * (indirect blocks) to the vnode to which they belong. 2699 */ 2700 void 2701 reassignbuf(struct buf *bp) 2702 { 2703 struct vnode *vp; 2704 struct bufobj *bo; 2705 int delay; 2706 #ifdef INVARIANTS 2707 struct bufv *bv; 2708 #endif 2709 2710 vp = bp->b_vp; 2711 bo = bp->b_bufobj; 2712 ++reassignbufcalls; 2713 2714 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2715 bp, bp->b_vp, bp->b_flags); 2716 /* 2717 * B_PAGING flagged buffers cannot be reassigned because their vp 2718 * is not fully linked in. 2719 */ 2720 if (bp->b_flags & B_PAGING) 2721 panic("cannot reassign paging buffer"); 2722 2723 /* 2724 * Delete from old vnode list, if on one. 2725 */ 2726 BO_LOCK(bo); 2727 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2728 buf_vlist_remove(bp); 2729 else 2730 panic("reassignbuf: Buffer %p not on queue.", bp); 2731 /* 2732 * If dirty, put on list of dirty buffers; otherwise insert onto list 2733 * of clean buffers.
2734 */ 2735 if (bp->b_flags & B_DELWRI) { 2736 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2737 switch (vp->v_type) { 2738 case VDIR: 2739 delay = dirdelay; 2740 break; 2741 case VCHR: 2742 delay = metadelay; 2743 break; 2744 default: 2745 delay = filedelay; 2746 } 2747 vn_syncer_add_to_worklist(bo, delay); 2748 } 2749 buf_vlist_add(bp, bo, BX_VNDIRTY); 2750 } else { 2751 buf_vlist_add(bp, bo, BX_VNCLEAN); 2752 2753 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2754 mtx_lock(&sync_mtx); 2755 LIST_REMOVE(bo, bo_synclist); 2756 syncer_worklist_len--; 2757 mtx_unlock(&sync_mtx); 2758 bo->bo_flag &= ~BO_ONWORKLST; 2759 } 2760 } 2761 #ifdef INVARIANTS 2762 bv = &bo->bo_clean; 2763 bp = TAILQ_FIRST(&bv->bv_hd); 2764 KASSERT(bp == NULL || bp->b_bufobj == bo, 2765 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2766 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2767 KASSERT(bp == NULL || bp->b_bufobj == bo, 2768 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2769 bv = &bo->bo_dirty; 2770 bp = TAILQ_FIRST(&bv->bv_hd); 2771 KASSERT(bp == NULL || bp->b_bufobj == bo, 2772 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2773 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2774 KASSERT(bp == NULL || bp->b_bufobj == bo, 2775 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2776 #endif 2777 BO_UNLOCK(bo); 2778 } 2779 2780 static void 2781 v_init_counters(struct vnode *vp) 2782 { 2783 2784 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2785 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2786 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2787 2788 refcount_init(&vp->v_holdcnt, 1); 2789 refcount_init(&vp->v_usecount, 1); 2790 } 2791 2792 /* 2793 * Increment si_usecount of the associated device, if any. 2794 */ 2795 static void 2796 v_incr_devcount(struct vnode *vp) 2797 { 2798 2799 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2800 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2801 dev_lock(); 2802 vp->v_rdev->si_usecount++; 2803 dev_unlock(); 2804 } 2805 } 2806 2807 /* 2808 * Decrement si_usecount of the associated device, if any. 2809 */ 2810 static void 2811 v_decr_devcount(struct vnode *vp) 2812 { 2813 2814 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2815 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2816 dev_lock(); 2817 vp->v_rdev->si_usecount--; 2818 dev_unlock(); 2819 } 2820 } 2821 2822 /* 2823 * Grab a particular vnode from the free list, increment its 2824 * reference count and lock it. VIRF_DOOMED is set if the vnode 2825 * is being destroyed. Only callers who specify LK_RETRY will 2826 * see doomed vnodes. If inactive processing was delayed in 2827 * vput try to do it here. 2828 * 2829 * usecount is manipulated using atomics without holding any locks. 2830 * 2831 * holdcnt can be manipulated using atomics without holding any locks, 2832 * except when transitioning 1<->0, in which case the interlock is held. 
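 *
 * A minimal sketch of the two-step form used when a vnode was
 * found by a lockless lookup:
 *
 *	vs = vget_prep(vp);			(usecount or holdcnt ref)
 *	error = vget_finish(vp, LK_SHARED, vs);
 *	if (error == 0)
 *		... use vp, then vput() or vrele() ...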
2833 */ 2834 enum vgetstate 2835 vget_prep(struct vnode *vp) 2836 { 2837 enum vgetstate vs; 2838 2839 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2840 vs = VGET_USECOUNT; 2841 } else { 2842 vhold(vp); 2843 vs = VGET_HOLDCNT; 2844 } 2845 return (vs); 2846 } 2847 2848 int 2849 vget(struct vnode *vp, int flags, struct thread *td) 2850 { 2851 enum vgetstate vs; 2852 2853 MPASS(td == curthread); 2854 2855 vs = vget_prep(vp); 2856 return (vget_finish(vp, flags, vs)); 2857 } 2858 2859 static int __noinline 2860 vget_finish_vchr(struct vnode *vp) 2861 { 2862 2863 VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR")); 2864 2865 /* 2866 * See the comment in vget_finish before usecount bump. 2867 */ 2868 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2869 #ifdef INVARIANTS 2870 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2871 VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); 2872 #else 2873 refcount_release(&vp->v_holdcnt); 2874 #endif 2875 return (0); 2876 } 2877 2878 VI_LOCK(vp); 2879 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2880 #ifdef INVARIANTS 2881 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2882 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2883 #else 2884 refcount_release(&vp->v_holdcnt); 2885 #endif 2886 VI_UNLOCK(vp); 2887 return (0); 2888 } 2889 v_incr_devcount(vp); 2890 refcount_acquire(&vp->v_usecount); 2891 VI_UNLOCK(vp); 2892 return (0); 2893 } 2894 2895 int 2896 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2897 { 2898 int error, old; 2899 2900 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2901 ("%s: invalid lock operation", __func__)); 2902 2903 if ((flags & LK_INTERLOCK) != 0) 2904 ASSERT_VI_LOCKED(vp, __func__); 2905 else 2906 ASSERT_VI_UNLOCKED(vp, __func__); 2907 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2908 if (vs == VGET_USECOUNT) { 2909 VNASSERT(vp->v_usecount > 0, vp, 2910 ("%s: vnode without usecount when VGET_USECOUNT was passed", 2911 __func__)); 2912 } 2913 2914 error = vn_lock(vp, flags); 2915 if (__predict_false(error != 0)) { 2916 if (vs == VGET_USECOUNT) 2917 vrele(vp); 2918 else 2919 vdrop(vp); 2920 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2921 vp); 2922 return (error); 2923 } 2924 2925 if (vs == VGET_USECOUNT) { 2926 return (0); 2927 } 2928 2929 if (__predict_false(vp->v_type == VCHR)) 2930 return (vget_finish_vchr(vp)); 2931 2932 /* 2933 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2934 * the vnode around. Otherwise someone else lent their hold count and 2935 * we have to drop ours. 2936 */ 2937 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2938 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 2939 if (old != 0) { 2940 #ifdef INVARIANTS 2941 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2942 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2943 #else 2944 refcount_release(&vp->v_holdcnt); 2945 #endif 2946 } 2947 return (0); 2948 } 2949 2950 /* 2951 * Increase the reference (use) and hold count of a vnode. 2952 * This will also remove the vnode from the free list if it is presently free. 2953 */ 2954 static void __noinline 2955 vref_vchr(struct vnode *vp, bool interlock) 2956 { 2957 2958 /* 2959 * See the comment in vget_finish before usecount bump.
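 * (VCHR is special-cased because a 0->1 usecount transition must
 * also bump the device's si_usecount via v_incr_devcount(), which
 * is only safe while the vnode interlock is held.)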
2960 */ 2961 if (!interlock) { 2962 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2963 VNODE_REFCOUNT_FENCE_ACQ(); 2964 VNASSERT(vp->v_holdcnt > 0, vp, 2965 ("%s: active vnode not held", __func__)); 2966 return; 2967 } 2968 VI_LOCK(vp); 2969 /* 2970 * By the time we get here the vnode might have been doomed, at 2971 * which point the 0->1 use count transition is no longer 2972 * protected by the interlock. Since it can't bounce back to 2973 * VCHR and requires vref semantics, punt it back to vref(). 2974 */ 2975 if (__predict_false(vp->v_type == VBAD)) { 2976 VI_UNLOCK(vp); 2977 vref(vp); 2978 return; 2979 } 2980 } 2981 VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR")); 2982 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2983 VNODE_REFCOUNT_FENCE_ACQ(); 2984 VNASSERT(vp->v_holdcnt > 0, vp, 2985 ("%s: active vnode not held", __func__)); 2986 if (!interlock) 2987 VI_UNLOCK(vp); 2988 return; 2989 } 2990 vhold(vp); 2991 v_incr_devcount(vp); 2992 refcount_acquire(&vp->v_usecount); 2993 if (!interlock) 2994 VI_UNLOCK(vp); 2995 return; 2996 } 2997 2998 void 2999 vref(struct vnode *vp) 3000 { 3001 int old; 3002 3003 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3004 if (__predict_false(vp->v_type == VCHR)) { 3005 vref_vchr(vp, false); 3006 return; 3007 } 3008 3009 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3010 VNODE_REFCOUNT_FENCE_ACQ(); 3011 VNASSERT(vp->v_holdcnt > 0, vp, 3012 ("%s: active vnode not held", __func__)); 3013 return; 3014 } 3015 vhold(vp); 3016 /* 3017 * See the comment in vget_finish. 3018 */ 3019 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3020 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3021 if (old != 0) { 3022 #ifdef INVARIANTS 3023 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3024 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3025 #else 3026 refcount_release(&vp->v_holdcnt); 3027 #endif 3028 } 3029 } 3030 3031 void 3032 vrefl(struct vnode *vp) 3033 { 3034 3035 ASSERT_VI_LOCKED(vp, __func__); 3036 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3037 if (__predict_false(vp->v_type == VCHR)) { 3038 vref_vchr(vp, true); 3039 return; 3040 } 3041 vref(vp); 3042 } 3043 3044 void 3045 vrefact(struct vnode *vp) 3046 { 3047 3048 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3049 #ifdef INVARIANTS 3050 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3051 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3052 #else 3053 refcount_acquire(&vp->v_usecount); 3054 #endif 3055 } 3056 3057 /* 3058 * Return reference count of a vnode. 3059 * 3060 * The results of this call are only guaranteed when some mechanism is used to 3061 * stop other processes from gaining references to the vnode. This may be the 3062 * case if the caller holds the only reference. This is also useful when stale 3063 * data is acceptable as race conditions may be accounted for by some other 3064 * means.
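 *
 * E.g. a caller that knows it holds the sole reference may assert
 * (a sketch):
 *
 *	MPASS(vrefcnt(vp) == 1);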
3065 */ 3066 int 3067 vrefcnt(struct vnode *vp) 3068 { 3069 3070 return (vp->v_usecount); 3071 } 3072 3073 void 3074 vlazy(struct vnode *vp) 3075 { 3076 struct mount *mp; 3077 3078 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3079 3080 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3081 return; 3082 mp = vp->v_mount; 3083 mtx_lock(&mp->mnt_listmtx); 3084 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3085 vp->v_mflag |= VMP_LAZYLIST; 3086 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3087 mp->mnt_lazyvnodelistsize++; 3088 } 3089 mtx_unlock(&mp->mnt_listmtx); 3090 } 3091 3092 static void 3093 vdefer_inactive(struct vnode *vp) 3094 { 3095 3096 ASSERT_VI_LOCKED(vp, __func__); 3097 VNASSERT(vp->v_holdcnt > 0, vp, 3098 ("%s: vnode without hold count", __func__)); 3099 if (VN_IS_DOOMED(vp)) { 3100 vdropl(vp); 3101 return; 3102 } 3103 if (vp->v_iflag & VI_DEFINACT) { 3104 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3105 vdropl(vp); 3106 return; 3107 } 3108 if (vp->v_usecount > 0) { 3109 vp->v_iflag &= ~VI_OWEINACT; 3110 vdropl(vp); 3111 return; 3112 } 3113 vlazy(vp); 3114 vp->v_iflag |= VI_DEFINACT; 3115 VI_UNLOCK(vp); 3116 counter_u64_add(deferred_inact, 1); 3117 } 3118 3119 static void 3120 vdefer_inactive_unlocked(struct vnode *vp) 3121 { 3122 3123 VI_LOCK(vp); 3124 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3125 vdropl(vp); 3126 return; 3127 } 3128 vdefer_inactive(vp); 3129 } 3130 3131 enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; 3132 3133 /* 3134 * Decrement the use and hold counts for a vnode. 3135 * 3136 * See an explanation near vget() as to why atomic operation is safe. 3137 * 3138 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3139 * on the lock being held all the way until VOP_INACTIVE. This in particular 3140 * happens with UFS which adds half-constructed vnodes to the hash, where they 3141 * can be found by other code. 3142 */ 3143 static void 3144 vputx(struct vnode *vp, enum vputx_op func) 3145 { 3146 int error; 3147 3148 KASSERT(vp != NULL, ("vputx: null vp")); 3149 if (func == VPUTX_VUNREF) 3150 ASSERT_VOP_LOCKED(vp, "vunref"); 3151 else if (func == VPUTX_VPUT) 3152 ASSERT_VOP_LOCKED(vp, "vput"); 3153 ASSERT_VI_UNLOCKED(vp, __func__); 3154 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 3155 ("%s: wrong ref counts", __func__)); 3156 3157 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3158 3159 /* 3160 * We want to hold the vnode until the inactive finishes to 3161 * prevent vgone() races. We drop the use count here and the 3162 * hold count below when we're done. 3163 * 3164 * If we release the last usecount we take ownership of the hold 3165 * count which provides liveness of the vnode, in which case we 3166 * have to vdrop. 3167 */ 3168 if (!refcount_release(&vp->v_usecount)) { 3169 if (func == VPUTX_VPUT) 3170 VOP_UNLOCK(vp); 3171 return; 3172 } 3173 VI_LOCK(vp); 3174 v_decr_devcount(vp); 3175 /* 3176 * By the time we got here someone else might have transitioned 3177 * the count back to > 0. 3178 */ 3179 if (vp->v_usecount > 0 || vp->v_iflag & VI_DOINGINACT) 3180 goto out; 3181 3182 /* 3183 * Check if the fs wants to perform inactive processing. Note we 3184 * may be only holding the interlock, in which case it is possible 3185 * someone else called vgone on the vnode and ->v_data is now NULL. 3186 * Since vgone performs inactive on its own there is nothing to do 3187 * here but to drop our hold count. 
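 *
 * A doomed vnode here is therefore not an error: a concurrent
 * forced unmount, for example, may have vgone()'d the vnode
 * between our usecount release and taking the interlock above.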
3188 */ 3189 if (__predict_false(VN_IS_DOOMED(vp)) || 3190 VOP_NEED_INACTIVE(vp) == 0) 3191 goto out; 3192 3193 /* 3194 * We must call VOP_INACTIVE with the node locked. Mark 3195 * as VI_DOINGINACT to avoid recursion. 3196 */ 3197 vp->v_iflag |= VI_OWEINACT; 3198 switch (func) { 3199 case VPUTX_VRELE: 3200 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3201 VI_LOCK(vp); 3202 break; 3203 case VPUTX_VPUT: 3204 error = 0; 3205 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3206 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3207 LK_NOWAIT); 3208 VI_LOCK(vp); 3209 } 3210 break; 3211 case VPUTX_VUNREF: 3212 error = 0; 3213 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3214 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3215 VI_LOCK(vp); 3216 } 3217 break; 3218 } 3219 if (error == 0) { 3220 vinactive(vp); 3221 if (func != VPUTX_VUNREF) 3222 VOP_UNLOCK(vp); 3223 vdropl(vp); 3224 } else { 3225 vdefer_inactive(vp); 3226 } 3227 return; 3228 out: 3229 if (func == VPUTX_VPUT) 3230 VOP_UNLOCK(vp); 3231 vdropl(vp); 3232 } 3233 3234 /* 3235 * Vnode put/release. 3236 * If count drops to zero, call inactive routine and return to freelist. 3237 */ 3238 void 3239 vrele(struct vnode *vp) 3240 { 3241 3242 vputx(vp, VPUTX_VRELE); 3243 } 3244 3245 /* 3246 * Release an already locked vnode. This gives the same effect as 3247 * unlock+vrele(), but takes less time and avoids releasing and 3248 * re-acquiring the lock (as vrele() acquires the lock internally.) 3249 */ 3250 void 3251 vput(struct vnode *vp) 3252 { 3253 3254 vputx(vp, VPUTX_VPUT); 3255 } 3256 3257 /* 3258 * Release an exclusively locked vnode. Do not unlock the vnode lock. 3259 */ 3260 void 3261 vunref(struct vnode *vp) 3262 { 3263 3264 vputx(vp, VPUTX_VUNREF); 3265 } 3266 3267 void 3268 vhold(struct vnode *vp) 3269 { 3270 struct vdbatch *vd; 3271 int old; 3272 3273 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3274 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3275 VNASSERT(old >= 0, vp, ("%s: wrong hold count %d", __func__, old)); 3276 if (old != 0) 3277 return; 3278 critical_enter(); 3279 vd = DPCPU_PTR(vd); 3280 vd->freevnodes--; 3281 critical_exit(); 3282 } 3283 3284 void 3285 vholdl(struct vnode *vp) 3286 { 3287 3288 ASSERT_VI_LOCKED(vp, __func__); 3289 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3290 vhold(vp); 3291 } 3292 3293 void 3294 vholdnz(struct vnode *vp) 3295 { 3296 3297 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3298 #ifdef INVARIANTS 3299 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3300 VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); 3301 #else 3302 atomic_add_int(&vp->v_holdcnt, 1); 3303 #endif 3304 } 3305 3306 static void __noinline 3307 vdbatch_process(struct vdbatch *vd) 3308 { 3309 struct vnode *vp; 3310 int i; 3311 3312 mtx_assert(&vd->lock, MA_OWNED); 3313 MPASS(curthread->td_pinned > 0); 3314 MPASS(vd->index == VDBATCH_SIZE); 3315 3316 mtx_lock(&vnode_list_mtx); 3317 critical_enter(); 3318 freevnodes += vd->freevnodes; 3319 for (i = 0; i < VDBATCH_SIZE; i++) { 3320 vp = vd->tab[i]; 3321 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3322 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3323 MPASS(vp->v_dbatchcpu != NOCPU); 3324 vp->v_dbatchcpu = NOCPU; 3325 } 3326 mtx_unlock(&vnode_list_mtx); 3327 vd->freevnodes = 0; 3328 bzero(vd->tab, sizeof(vd->tab)); 3329 vd->index = 0; 3330 critical_exit(); 3331 } 3332 3333 static void 3334 vdbatch_enqueue(struct vnode *vp) 3335 { 3336 struct vdbatch *vd; 3337 3338 ASSERT_VI_LOCKED(vp, __func__); 3339 VNASSERT(!VN_IS_DOOMED(vp), vp, 3340 ("%s: deferring requeue of a doomed
vnode", __func__)); 3341 3342 critical_enter(); 3343 vd = DPCPU_PTR(vd); 3344 vd->freevnodes++; 3345 if (vp->v_dbatchcpu != NOCPU) { 3346 VI_UNLOCK(vp); 3347 critical_exit(); 3348 return; 3349 } 3350 3351 sched_pin(); 3352 critical_exit(); 3353 mtx_lock(&vd->lock); 3354 MPASS(vd->index < VDBATCH_SIZE); 3355 MPASS(vd->tab[vd->index] == NULL); 3356 /* 3357 * A hack: we depend on being pinned so that we know what to put in 3358 * ->v_dbatchcpu. 3359 */ 3360 vp->v_dbatchcpu = curcpu; 3361 vd->tab[vd->index] = vp; 3362 vd->index++; 3363 VI_UNLOCK(vp); 3364 if (vd->index == VDBATCH_SIZE) 3365 vdbatch_process(vd); 3366 mtx_unlock(&vd->lock); 3367 sched_unpin(); 3368 } 3369 3370 /* 3371 * This routine must only be called for vnodes which are about to be 3372 * deallocated. Supporting dequeue for arbitrary vndoes would require 3373 * validating that the locked batch matches. 3374 */ 3375 static void 3376 vdbatch_dequeue(struct vnode *vp) 3377 { 3378 struct vdbatch *vd; 3379 int i; 3380 short cpu; 3381 3382 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3383 ("%s: called for a used vnode\n", __func__)); 3384 3385 cpu = vp->v_dbatchcpu; 3386 if (cpu == NOCPU) 3387 return; 3388 3389 vd = DPCPU_ID_PTR(cpu, vd); 3390 mtx_lock(&vd->lock); 3391 for (i = 0; i < vd->index; i++) { 3392 if (vd->tab[i] != vp) 3393 continue; 3394 vp->v_dbatchcpu = NOCPU; 3395 vd->index--; 3396 vd->tab[i] = vd->tab[vd->index]; 3397 vd->tab[vd->index] = NULL; 3398 break; 3399 } 3400 mtx_unlock(&vd->lock); 3401 /* 3402 * Either we dequeued the vnode above or the target CPU beat us to it. 3403 */ 3404 MPASS(vp->v_dbatchcpu == NOCPU); 3405 } 3406 3407 /* 3408 * Drop the hold count of the vnode. If this is the last reference to 3409 * the vnode we place it on the free list unless it has been vgone'd 3410 * (marked VIRF_DOOMED) in which case we will free it. 3411 * 3412 * Because the vnode vm object keeps a hold reference on the vnode if 3413 * there is at least one resident non-cached page, the vnode cannot 3414 * leave the active list without the page cleanup done. 3415 */ 3416 static void 3417 vdrop_deactivate(struct vnode *vp) 3418 { 3419 struct mount *mp; 3420 3421 ASSERT_VI_LOCKED(vp, __func__); 3422 /* 3423 * Mark a vnode as free: remove it from its active list 3424 * and put it up for recycling on the freelist. 3425 */ 3426 VNASSERT(!VN_IS_DOOMED(vp), vp, 3427 ("vdrop: returning doomed vnode")); 3428 VNASSERT(vp->v_op != NULL, vp, 3429 ("vdrop: vnode already reclaimed.")); 3430 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3431 ("vnode with VI_OWEINACT set")); 3432 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3433 ("vnode with VI_DEFINACT set")); 3434 if (vp->v_mflag & VMP_LAZYLIST) { 3435 mp = vp->v_mount; 3436 mtx_lock(&mp->mnt_listmtx); 3437 VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); 3438 /* 3439 * Don't remove the vnode from the lazy list if another thread 3440 * has increased the hold count. It may have re-enqueued the 3441 * vnode to the lazy list and is now responsible for its 3442 * removal. 
3443 */ 3444 if (vp->v_holdcnt == 0) { 3445 vp->v_mflag &= ~VMP_LAZYLIST; 3446 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3447 mp->mnt_lazyvnodelistsize--; 3448 } 3449 mtx_unlock(&mp->mnt_listmtx); 3450 } 3451 vdbatch_enqueue(vp); 3452 } 3453 3454 void 3455 vdrop(struct vnode *vp) 3456 { 3457 3458 ASSERT_VI_UNLOCKED(vp, __func__); 3459 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3460 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3461 return; 3462 VI_LOCK(vp); 3463 vdropl(vp); 3464 } 3465 3466 void 3467 vdropl(struct vnode *vp) 3468 { 3469 3470 ASSERT_VI_LOCKED(vp, __func__); 3471 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3472 if (!refcount_release(&vp->v_holdcnt)) { 3473 VI_UNLOCK(vp); 3474 return; 3475 } 3476 if (VN_IS_DOOMED(vp)) { 3477 freevnode(vp); 3478 return; 3479 } 3480 vdrop_deactivate(vp); 3481 } 3482 3483 /* 3484 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3485 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3486 */ 3487 static void 3488 vinactivef(struct vnode *vp) 3489 { 3490 struct vm_object *obj; 3491 3492 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3493 ASSERT_VI_LOCKED(vp, "vinactive"); 3494 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3495 ("vinactive: recursed on VI_DOINGINACT")); 3496 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3497 vp->v_iflag |= VI_DOINGINACT; 3498 vp->v_iflag &= ~VI_OWEINACT; 3499 VI_UNLOCK(vp); 3500 /* 3501 * Before moving off the active list, we must be sure that any 3502 * modified pages are converted into the vnode's dirty 3503 * buffers, since these will no longer be checked once the 3504 * vnode is on the inactive list. 3505 * 3506 * The write-out of the dirty pages is asynchronous. At the 3507 * point that VOP_INACTIVE() is called, there could still be 3508 * pending I/O and dirty pages in the object. 3509 */ 3510 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3511 vm_object_mightbedirty(obj)) { 3512 VM_OBJECT_WLOCK(obj); 3513 vm_object_page_clean(obj, 0, 0, 0); 3514 VM_OBJECT_WUNLOCK(obj); 3515 } 3516 VOP_INACTIVE(vp, curthread); 3517 VI_LOCK(vp); 3518 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3519 ("vinactive: lost VI_DOINGINACT")); 3520 vp->v_iflag &= ~VI_DOINGINACT; 3521 } 3522 3523 void 3524 vinactive(struct vnode *vp) 3525 { 3526 3527 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3528 ASSERT_VI_LOCKED(vp, "vinactive"); 3529 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3530 3531 if ((vp->v_iflag & VI_OWEINACT) == 0) 3532 return; 3533 if (vp->v_iflag & VI_DOINGINACT) 3534 return; 3535 if (vp->v_usecount > 0) { 3536 vp->v_iflag &= ~VI_OWEINACT; 3537 return; 3538 } 3539 vinactivef(vp); 3540 } 3541 3542 /* 3543 * Remove any vnodes in the vnode table belonging to mount point mp. 3544 * 3545 * If FORCECLOSE is not specified, there should not be any active ones, 3546 * return error if any are found (nb: this is a user error, not a 3547 * system error). If FORCECLOSE is specified, detach any active vnodes 3548 * that are found. 3549 * 3550 * If WRITECLOSE is set, only flush out regular file vnodes open for 3551 * writing. 3552 * 3553 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3554 * 3555 * `rootrefs' specifies the base reference count for the root vnode 3556 * of this filesystem. The root vnode is considered busy if its 3557 * v_usecount exceeds this value. On a successful return, vflush() 3558 * will call vrele() on the root vnode exactly rootrefs times. 3559 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3560 * be zero.
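 *
 * A typical unmount-path call might look like (a sketch; mntflags
 * stands for the caller's unmount flags):
 *
 *	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);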
3561 */ 3562 #ifdef DIAGNOSTIC 3563 static int busyprt = 0; /* print out busy vnodes */ 3564 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3565 #endif 3566 3567 int 3568 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3569 { 3570 struct vnode *vp, *mvp, *rootvp = NULL; 3571 struct vattr vattr; 3572 int busy = 0, error; 3573 3574 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3575 rootrefs, flags); 3576 if (rootrefs > 0) { 3577 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3578 ("vflush: bad args")); 3579 /* 3580 * Get the filesystem root vnode. We can vput() it 3581 * immediately, since with rootrefs > 0, it won't go away. 3582 */ 3583 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3584 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3585 __func__, error); 3586 return (error); 3587 } 3588 vput(rootvp); 3589 } 3590 loop: 3591 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3592 vholdl(vp); 3593 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3594 if (error) { 3595 vdrop(vp); 3596 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3597 goto loop; 3598 } 3599 /* 3600 * Skip over vnodes marked VV_SYSTEM. 3601 */ 3602 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3603 VOP_UNLOCK(vp); 3604 vdrop(vp); 3605 continue; 3606 } 3607 /* 3608 * If WRITECLOSE is set, flush out unlinked but still open 3609 * files (even if open only for reading) and regular file 3610 * vnodes open for writing. 3611 */ 3612 if (flags & WRITECLOSE) { 3613 if (vp->v_object != NULL) { 3614 VM_OBJECT_WLOCK(vp->v_object); 3615 vm_object_page_clean(vp->v_object, 0, 0, 0); 3616 VM_OBJECT_WUNLOCK(vp->v_object); 3617 } 3618 error = VOP_FSYNC(vp, MNT_WAIT, td); 3619 if (error != 0) { 3620 VOP_UNLOCK(vp); 3621 vdrop(vp); 3622 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3623 return (error); 3624 } 3625 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3626 VI_LOCK(vp); 3627 3628 if ((vp->v_type == VNON || 3629 (error == 0 && vattr.va_nlink > 0)) && 3630 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3631 VOP_UNLOCK(vp); 3632 vdropl(vp); 3633 continue; 3634 } 3635 } else 3636 VI_LOCK(vp); 3637 /* 3638 * With v_usecount == 0, all we need to do is clear out the 3639 * vnode data structures and we are done. 3640 * 3641 * If FORCECLOSE is set, forcibly close the vnode. 3642 */ 3643 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3644 vgonel(vp); 3645 } else { 3646 busy++; 3647 #ifdef DIAGNOSTIC 3648 if (busyprt) 3649 vn_printf(vp, "vflush: busy vnode "); 3650 #endif 3651 } 3652 VOP_UNLOCK(vp); 3653 vdropl(vp); 3654 } 3655 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3656 /* 3657 * If just the root vnode is busy, and if its refcount 3658 * is equal to `rootrefs', then go ahead and kill it. 3659 */ 3660 VI_LOCK(rootvp); 3661 KASSERT(busy > 0, ("vflush: not busy")); 3662 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3663 ("vflush: usecount %d < rootrefs %d", 3664 rootvp->v_usecount, rootrefs)); 3665 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3666 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3667 vgone(rootvp); 3668 VOP_UNLOCK(rootvp); 3669 busy = 0; 3670 } else 3671 VI_UNLOCK(rootvp); 3672 } 3673 if (busy) { 3674 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3675 busy); 3676 return (EBUSY); 3677 } 3678 for (; rootrefs > 0; rootrefs--) 3679 vrele(rootvp); 3680 return (0); 3681 } 3682 3683 /* 3684 * Recycle an unused vnode to the front of the free list.
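 *
 * Filesystems typically call this from VOP_INACTIVE() once the
 * backing object is known to be dead, e.g. (a sketch with a
 * hypothetical per-fs link count field):
 *
 *	if (ip->i_nlink <= 0)
 *		vrecycle(vp);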
3685 */ 3686 int 3687 vrecycle(struct vnode *vp) 3688 { 3689 int recycled; 3690 3691 VI_LOCK(vp); 3692 recycled = vrecyclel(vp); 3693 VI_UNLOCK(vp); 3694 return (recycled); 3695 } 3696 3697 /* 3698 * vrecycle, with the vp interlock held. 3699 */ 3700 int 3701 vrecyclel(struct vnode *vp) 3702 { 3703 int recycled; 3704 3705 ASSERT_VOP_ELOCKED(vp, __func__); 3706 ASSERT_VI_LOCKED(vp, __func__); 3707 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3708 recycled = 0; 3709 if (vp->v_usecount == 0) { 3710 recycled = 1; 3711 vgonel(vp); 3712 } 3713 return (recycled); 3714 } 3715 3716 /* 3717 * Eliminate all activity associated with a vnode 3718 * in preparation for reuse. 3719 */ 3720 void 3721 vgone(struct vnode *vp) 3722 { 3723 VI_LOCK(vp); 3724 vgonel(vp); 3725 VI_UNLOCK(vp); 3726 } 3727 3728 static void 3729 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3730 struct vnode *lowervp __unused) 3731 { 3732 } 3733 3734 /* 3735 * Notify upper mounts about reclaimed or unlinked vnode. 3736 */ 3737 void 3738 vfs_notify_upper(struct vnode *vp, int event) 3739 { 3740 static struct vfsops vgonel_vfsops = { 3741 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3742 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3743 }; 3744 struct mount *mp, *ump, *mmp; 3745 3746 mp = vp->v_mount; 3747 if (mp == NULL) 3748 return; 3749 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3750 return; 3751 3752 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3753 mmp->mnt_op = &vgonel_vfsops; 3754 mmp->mnt_kern_flag |= MNTK_MARKER; 3755 MNT_ILOCK(mp); 3756 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3757 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3758 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3759 ump = TAILQ_NEXT(ump, mnt_upper_link); 3760 continue; 3761 } 3762 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3763 MNT_IUNLOCK(mp); 3764 switch (event) { 3765 case VFS_NOTIFY_UPPER_RECLAIM: 3766 VFS_RECLAIM_LOWERVP(ump, vp); 3767 break; 3768 case VFS_NOTIFY_UPPER_UNLINK: 3769 VFS_UNLINK_LOWERVP(ump, vp); 3770 break; 3771 default: 3772 KASSERT(0, ("invalid event %d", event)); 3773 break; 3774 } 3775 MNT_ILOCK(mp); 3776 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3777 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3778 } 3779 free(mmp, M_TEMP); 3780 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3781 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3782 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3783 wakeup(&mp->mnt_uppers); 3784 } 3785 MNT_IUNLOCK(mp); 3786 } 3787 3788 /* 3789 * vgone, with the vp interlock held. 3790 */ 3791 static void 3792 vgonel(struct vnode *vp) 3793 { 3794 struct thread *td; 3795 struct mount *mp; 3796 vm_object_t object; 3797 bool active, oweinact; 3798 3799 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3800 ASSERT_VI_LOCKED(vp, "vgonel"); 3801 VNASSERT(vp->v_holdcnt, vp, 3802 ("vgonel: vp %p has no reference.", vp)); 3803 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3804 td = curthread; 3805 3806 /* 3807 * Don't vgonel if we're already doomed. 3808 */ 3809 if (vp->v_irflag & VIRF_DOOMED) 3810 return; 3811 vp->v_irflag |= VIRF_DOOMED; 3812 3813 /* 3814 * Check to see if the vnode is in use. If so, we have to call 3815 * VOP_CLOSE() and VOP_INACTIVE(). 3816 */ 3817 active = vp->v_usecount > 0; 3818 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3819 /* 3820 * If we need to do inactive VI_OWEINACT will be set. 
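 * (If inactive processing was deferred, VI_DEFINACT is also set
 * and carries an extra hold reference, consumed just below.)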
3821 */ 3822 if (vp->v_iflag & VI_DEFINACT) { 3823 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3824 vp->v_iflag &= ~VI_DEFINACT; 3825 vdropl(vp); 3826 } else { 3827 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3828 VI_UNLOCK(vp); 3829 } 3830 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3831 3832 /* 3833 * If purging an active vnode, it must be closed and 3834 * deactivated before being reclaimed. 3835 */ 3836 if (active) 3837 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3838 if (oweinact || active) { 3839 VI_LOCK(vp); 3840 vinactivef(vp); 3841 VI_UNLOCK(vp); 3842 } 3843 if (vp->v_type == VSOCK) 3844 vfs_unp_reclaim(vp); 3845 3846 /* 3847 * Clean out any buffers associated with the vnode. 3848 * If the flush fails, just toss the buffers. 3849 */ 3850 mp = NULL; 3851 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3852 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3853 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3854 while (vinvalbuf(vp, 0, 0, 0) != 0) 3855 ; 3856 } 3857 3858 BO_LOCK(&vp->v_bufobj); 3859 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3860 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3861 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3862 vp->v_bufobj.bo_clean.bv_cnt == 0, 3863 ("vp %p bufobj not invalidated", vp)); 3864 3865 /* 3866 * For VMIO bufobj, BO_DEAD is set later, or in 3867 * vm_object_terminate() after the object's page queue is 3868 * flushed. 3869 */ 3870 object = vp->v_bufobj.bo_object; 3871 if (object == NULL) 3872 vp->v_bufobj.bo_flag |= BO_DEAD; 3873 BO_UNLOCK(&vp->v_bufobj); 3874 3875 /* 3876 * Handle the VM part. Tmpfs handles v_object on its own (the 3877 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3878 * should not touch the object borrowed from the lower vnode 3879 * (the handle check). 3880 */ 3881 if (object != NULL && object->type == OBJT_VNODE && 3882 object->handle == vp) 3883 vnode_destroy_vobject(vp); 3884 3885 /* 3886 * Reclaim the vnode. 3887 */ 3888 if (VOP_RECLAIM(vp, td)) 3889 panic("vgone: cannot reclaim"); 3890 if (mp != NULL) 3891 vn_finished_secondary_write(mp); 3892 VNASSERT(vp->v_object == NULL, vp, 3893 ("vop_reclaim left v_object vp=%p", vp)); 3894 /* 3895 * Clear the advisory locks and wake up waiting threads. 3896 */ 3897 (void)VOP_ADVLOCKPURGE(vp); 3898 vp->v_lockf = NULL; 3899 /* 3900 * Delete from old mount point vnode list. 3901 */ 3902 delmntque(vp); 3903 cache_purge(vp); 3904 /* 3905 * Done with purge, reset to the standard lock and invalidate 3906 * the vnode. 3907 */ 3908 VI_LOCK(vp); 3909 vp->v_vnlock = &vp->v_lock; 3910 vp->v_op = &dead_vnodeops; 3911 vp->v_type = VBAD; 3912 } 3913 3914 /* 3915 * Calculate the total number of references to a special device. 3916 */ 3917 int 3918 vcount(struct vnode *vp) 3919 { 3920 int count; 3921 3922 dev_lock(); 3923 count = vp->v_rdev->si_usecount; 3924 dev_unlock(); 3925 return (count); 3926 } 3927 3928 /* 3929 * Print out a description of a vnode. 3930 */ 3931 static char *typename[] = 3932 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3933 "VMARKER"}; 3934 3935 void 3936 vn_printf(struct vnode *vp, const char *fmt, ...) 
3937 { 3938 va_list ap; 3939 char buf[256], buf2[16]; 3940 u_long flags; 3941 3942 va_start(ap, fmt); 3943 vprintf(fmt, ap); 3944 va_end(ap); 3945 printf("%p: ", (void *)vp); 3946 printf("type %s\n", typename[vp->v_type]); 3947 printf(" usecount %d, writecount %d, refcount %d", 3948 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3949 switch (vp->v_type) { 3950 case VDIR: 3951 printf(" mountedhere %p\n", vp->v_mountedhere); 3952 break; 3953 case VCHR: 3954 printf(" rdev %p\n", vp->v_rdev); 3955 break; 3956 case VSOCK: 3957 printf(" socket %p\n", vp->v_unpcb); 3958 break; 3959 case VFIFO: 3960 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3961 break; 3962 default: 3963 printf("\n"); 3964 break; 3965 } 3966 buf[0] = '\0'; 3967 buf[1] = '\0'; 3968 if (vp->v_irflag & VIRF_DOOMED) 3969 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 3970 flags = vp->v_irflag & ~(VIRF_DOOMED); 3971 if (flags != 0) { 3972 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 3973 strlcat(buf, buf2, sizeof(buf)); 3974 } 3975 if (vp->v_vflag & VV_ROOT) 3976 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3977 if (vp->v_vflag & VV_ISTTY) 3978 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3979 if (vp->v_vflag & VV_NOSYNC) 3980 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3981 if (vp->v_vflag & VV_ETERNALDEV) 3982 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3983 if (vp->v_vflag & VV_CACHEDLABEL) 3984 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3985 if (vp->v_vflag & VV_VMSIZEVNLOCK) 3986 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 3987 if (vp->v_vflag & VV_COPYONWRITE) 3988 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3989 if (vp->v_vflag & VV_SYSTEM) 3990 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3991 if (vp->v_vflag & VV_PROCDEP) 3992 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3993 if (vp->v_vflag & VV_NOKNOTE) 3994 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3995 if (vp->v_vflag & VV_DELETED) 3996 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3997 if (vp->v_vflag & VV_MD) 3998 strlcat(buf, "|VV_MD", sizeof(buf)); 3999 if (vp->v_vflag & VV_FORCEINSMQ) 4000 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4001 if (vp->v_vflag & VV_READLINK) 4002 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4003 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4004 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4005 VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4006 if (flags != 0) { 4007 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4008 strlcat(buf, buf2, sizeof(buf)); 4009 } 4010 if (vp->v_iflag & VI_TEXT_REF) 4011 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4012 if (vp->v_iflag & VI_MOUNT) 4013 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4014 if (vp->v_iflag & VI_DOINGINACT) 4015 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4016 if (vp->v_iflag & VI_OWEINACT) 4017 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4018 if (vp->v_iflag & VI_DEFINACT) 4019 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4020 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4021 VI_OWEINACT | VI_DEFINACT); 4022 if (flags != 0) { 4023 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4024 strlcat(buf, buf2, sizeof(buf)); 4025 } 4026 if (vp->v_mflag & VMP_LAZYLIST) 4027 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4028 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4029 if (flags != 0) { 4030 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4031 strlcat(buf, buf2, sizeof(buf)); 4032 } 4033 printf(" flags (%s)\n", buf + 1); 4034 if (mtx_owned(VI_MTX(vp))) 4035 printf(" VI_LOCKed"); 4036 if (vp->v_object != NULL) 4037 printf(" v_object %p ref %d 
pages %d " 4038 "cleanbuf %d dirtybuf %d\n", 4039 vp->v_object, vp->v_object->ref_count, 4040 vp->v_object->resident_page_count, 4041 vp->v_bufobj.bo_clean.bv_cnt, 4042 vp->v_bufobj.bo_dirty.bv_cnt); 4043 printf(" "); 4044 lockmgr_printinfo(vp->v_vnlock); 4045 if (vp->v_data != NULL) 4046 VOP_PRINT(vp); 4047 } 4048 4049 #ifdef DDB 4050 /* 4051 * List all of the locked vnodes in the system. 4052 * Called when debugging the kernel. 4053 */ 4054 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4055 { 4056 struct mount *mp; 4057 struct vnode *vp; 4058 4059 /* 4060 * Note: because this is DDB, we can't obey the locking semantics 4061 * for these structures, which means we could catch an inconsistent 4062 * state and dereference a nasty pointer. Not much to be done 4063 * about that. 4064 */ 4065 db_printf("Locked vnodes\n"); 4066 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4067 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4068 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4069 vn_printf(vp, "vnode "); 4070 } 4071 } 4072 } 4073 4074 /* 4075 * Show details about the given vnode. 4076 */ 4077 DB_SHOW_COMMAND(vnode, db_show_vnode) 4078 { 4079 struct vnode *vp; 4080 4081 if (!have_addr) 4082 return; 4083 vp = (struct vnode *)addr; 4084 vn_printf(vp, "vnode "); 4085 } 4086 4087 /* 4088 * Show details about the given mount point. 4089 */ 4090 DB_SHOW_COMMAND(mount, db_show_mount) 4091 { 4092 struct mount *mp; 4093 struct vfsopt *opt; 4094 struct statfs *sp; 4095 struct vnode *vp; 4096 char buf[512]; 4097 uint64_t mflags; 4098 u_int flags; 4099 4100 if (!have_addr) { 4101 /* No address given, print short info about all mount points. */ 4102 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4103 db_printf("%p %s on %s (%s)\n", mp, 4104 mp->mnt_stat.f_mntfromname, 4105 mp->mnt_stat.f_mntonname, 4106 mp->mnt_stat.f_fstypename); 4107 if (db_pager_quit) 4108 break; 4109 } 4110 db_printf("\nMore info: show mount <addr>\n"); 4111 return; 4112 } 4113 4114 mp = (struct mount *)addr; 4115 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4116 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4117 4118 buf[0] = '\0'; 4119 mflags = mp->mnt_flag; 4120 #define MNT_FLAG(flag) do { \ 4121 if (mflags & (flag)) { \ 4122 if (buf[0] != '\0') \ 4123 strlcat(buf, ", ", sizeof(buf)); \ 4124 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4125 mflags &= ~(flag); \ 4126 } \ 4127 } while (0) 4128 MNT_FLAG(MNT_RDONLY); 4129 MNT_FLAG(MNT_SYNCHRONOUS); 4130 MNT_FLAG(MNT_NOEXEC); 4131 MNT_FLAG(MNT_NOSUID); 4132 MNT_FLAG(MNT_NFS4ACLS); 4133 MNT_FLAG(MNT_UNION); 4134 MNT_FLAG(MNT_ASYNC); 4135 MNT_FLAG(MNT_SUIDDIR); 4136 MNT_FLAG(MNT_SOFTDEP); 4137 MNT_FLAG(MNT_NOSYMFOLLOW); 4138 MNT_FLAG(MNT_GJOURNAL); 4139 MNT_FLAG(MNT_MULTILABEL); 4140 MNT_FLAG(MNT_ACLS); 4141 MNT_FLAG(MNT_NOATIME); 4142 MNT_FLAG(MNT_NOCLUSTERR); 4143 MNT_FLAG(MNT_NOCLUSTERW); 4144 MNT_FLAG(MNT_SUJ); 4145 MNT_FLAG(MNT_EXRDONLY); 4146 MNT_FLAG(MNT_EXPORTED); 4147 MNT_FLAG(MNT_DEFEXPORTED); 4148 MNT_FLAG(MNT_EXPORTANON); 4149 MNT_FLAG(MNT_EXKERB); 4150 MNT_FLAG(MNT_EXPUBLIC); 4151 MNT_FLAG(MNT_LOCAL); 4152 MNT_FLAG(MNT_QUOTA); 4153 MNT_FLAG(MNT_ROOTFS); 4154 MNT_FLAG(MNT_USER); 4155 MNT_FLAG(MNT_IGNORE); 4156 MNT_FLAG(MNT_UPDATE); 4157 MNT_FLAG(MNT_DELEXPORT); 4158 MNT_FLAG(MNT_RELOAD); 4159 MNT_FLAG(MNT_FORCE); 4160 MNT_FLAG(MNT_SNAPSHOT); 4161 MNT_FLAG(MNT_BYFSID); 4162 #undef MNT_FLAG 4163 if (mflags != 0) { 4164 if (buf[0] != '\0') 4165 strlcat(buf, ", ", sizeof(buf)); 4166 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4167 "0x%016jx", mflags); 
4168 } 4169 db_printf(" mnt_flag = %s\n", buf); 4170 4171 buf[0] = '\0'; 4172 flags = mp->mnt_kern_flag; 4173 #define MNT_KERN_FLAG(flag) do { \ 4174 if (flags & (flag)) { \ 4175 if (buf[0] != '\0') \ 4176 strlcat(buf, ", ", sizeof(buf)); \ 4177 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4178 flags &= ~(flag); \ 4179 } \ 4180 } while (0) 4181 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4182 MNT_KERN_FLAG(MNTK_ASYNC); 4183 MNT_KERN_FLAG(MNTK_SOFTDEP); 4184 MNT_KERN_FLAG(MNTK_DRAINING); 4185 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4186 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4187 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4188 MNT_KERN_FLAG(MNTK_NO_IOPF); 4189 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4190 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4191 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4192 MNT_KERN_FLAG(MNTK_MARKER); 4193 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4194 MNT_KERN_FLAG(MNTK_NOASYNC); 4195 MNT_KERN_FLAG(MNTK_UNMOUNT); 4196 MNT_KERN_FLAG(MNTK_MWAIT); 4197 MNT_KERN_FLAG(MNTK_SUSPEND); 4198 MNT_KERN_FLAG(MNTK_SUSPEND2); 4199 MNT_KERN_FLAG(MNTK_SUSPENDED); 4200 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4201 MNT_KERN_FLAG(MNTK_NOKNOTE); 4202 #undef MNT_KERN_FLAG 4203 if (flags != 0) { 4204 if (buf[0] != '\0') 4205 strlcat(buf, ", ", sizeof(buf)); 4206 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4207 "0x%08x", flags); 4208 } 4209 db_printf(" mnt_kern_flag = %s\n", buf); 4210 4211 db_printf(" mnt_opt = "); 4212 opt = TAILQ_FIRST(mp->mnt_opt); 4213 if (opt != NULL) { 4214 db_printf("%s", opt->name); 4215 opt = TAILQ_NEXT(opt, link); 4216 while (opt != NULL) { 4217 db_printf(", %s", opt->name); 4218 opt = TAILQ_NEXT(opt, link); 4219 } 4220 } 4221 db_printf("\n"); 4222 4223 sp = &mp->mnt_stat; 4224 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4225 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4226 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4227 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4228 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4229 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4230 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4231 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4232 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4233 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4234 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4235 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4236 4237 db_printf(" mnt_cred = { uid=%u ruid=%u", 4238 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4239 if (jailed(mp->mnt_cred)) 4240 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4241 db_printf(" }\n"); 4242 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4243 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4244 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4245 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4246 db_printf(" mnt_lazyvnodelistsize = %d\n", 4247 mp->mnt_lazyvnodelistsize); 4248 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4249 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4250 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4251 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4252 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4253 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4254 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4255 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4256 db_printf(" mnt_secondary_accwrites = 
%d\n", 4257 mp->mnt_secondary_accwrites); 4258 db_printf(" mnt_gjprovider = %s\n", 4259 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4260 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4261 4262 db_printf("\n\nList of active vnodes\n"); 4263 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4264 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4265 vn_printf(vp, "vnode "); 4266 if (db_pager_quit) 4267 break; 4268 } 4269 } 4270 db_printf("\n\nList of inactive vnodes\n"); 4271 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4272 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4273 vn_printf(vp, "vnode "); 4274 if (db_pager_quit) 4275 break; 4276 } 4277 } 4278 } 4279 #endif /* DDB */ 4280 4281 /* 4282 * Fill in a struct xvfsconf based on a struct vfsconf. 4283 */ 4284 static int 4285 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4286 { 4287 struct xvfsconf xvfsp; 4288 4289 bzero(&xvfsp, sizeof(xvfsp)); 4290 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4291 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4292 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4293 xvfsp.vfc_flags = vfsp->vfc_flags; 4294 /* 4295 * These are unused in userland, we keep them 4296 * to not break binary compatibility. 4297 */ 4298 xvfsp.vfc_vfsops = NULL; 4299 xvfsp.vfc_next = NULL; 4300 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4301 } 4302 4303 #ifdef COMPAT_FREEBSD32 4304 struct xvfsconf32 { 4305 uint32_t vfc_vfsops; 4306 char vfc_name[MFSNAMELEN]; 4307 int32_t vfc_typenum; 4308 int32_t vfc_refcount; 4309 int32_t vfc_flags; 4310 uint32_t vfc_next; 4311 }; 4312 4313 static int 4314 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4315 { 4316 struct xvfsconf32 xvfsp; 4317 4318 bzero(&xvfsp, sizeof(xvfsp)); 4319 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4320 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4321 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4322 xvfsp.vfc_flags = vfsp->vfc_flags; 4323 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4324 } 4325 #endif 4326 4327 /* 4328 * Top level filesystem related information gathering. 4329 */ 4330 static int 4331 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4332 { 4333 struct vfsconf *vfsp; 4334 int error; 4335 4336 error = 0; 4337 vfsconf_slock(); 4338 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4339 #ifdef COMPAT_FREEBSD32 4340 if (req->flags & SCTL_MASK32) 4341 error = vfsconf2x32(req, vfsp); 4342 else 4343 #endif 4344 error = vfsconf2x(req, vfsp); 4345 if (error) 4346 break; 4347 } 4348 vfsconf_sunlock(); 4349 return (error); 4350 } 4351 4352 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4353 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4354 "S,xvfsconf", "List of all configured filesystems"); 4355 4356 #ifndef BURN_BRIDGES 4357 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4358 4359 static int 4360 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4361 { 4362 int *name = (int *)arg1 - 1; /* XXX */ 4363 u_int namelen = arg2 + 1; /* XXX */ 4364 struct vfsconf *vfsp; 4365 4366 log(LOG_WARNING, "userland calling deprecated sysctl, " 4367 "please rebuild world\n"); 4368 4369 #if 1 || defined(COMPAT_PRELITE2) 4370 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4371 if (namelen == 1) 4372 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4373 #endif 4374 4375 switch (name[1]) { 4376 case VFS_MAXTYPENUM: 4377 if (namelen != 2) 4378 return (ENOTDIR); 4379 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4380 case VFS_CONF: 4381 if (namelen != 3) 4382 return (ENOTDIR); /* overloaded */ 4383 vfsconf_slock(); 4384 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4385 if (vfsp->vfc_typenum == name[2]) 4386 break; 4387 } 4388 vfsconf_sunlock(); 4389 if (vfsp == NULL) 4390 return (EOPNOTSUPP); 4391 #ifdef COMPAT_FREEBSD32 4392 if (req->flags & SCTL_MASK32) 4393 return (vfsconf2x32(req, vfsp)); 4394 else 4395 #endif 4396 return (vfsconf2x(req, vfsp)); 4397 } 4398 return (EOPNOTSUPP); 4399 } 4400 4401 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4402 CTLFLAG_MPSAFE, vfs_sysctl, 4403 "Generic filesystem"); 4404 4405 #if 1 || defined(COMPAT_PRELITE2) 4406 4407 static int 4408 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4409 { 4410 int error; 4411 struct vfsconf *vfsp; 4412 struct ovfsconf ovfs; 4413 4414 vfsconf_slock(); 4415 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4416 bzero(&ovfs, sizeof(ovfs)); 4417 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4418 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4419 ovfs.vfc_index = vfsp->vfc_typenum; 4420 ovfs.vfc_refcount = vfsp->vfc_refcount; 4421 ovfs.vfc_flags = vfsp->vfc_flags; 4422 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4423 if (error != 0) { 4424 vfsconf_sunlock(); 4425 return (error); 4426 } 4427 } 4428 vfsconf_sunlock(); 4429 return (0); 4430 } 4431 4432 #endif /* 1 || COMPAT_PRELITE2 */ 4433 #endif /* !BURN_BRIDGES */ 4434 4435 #define KINFO_VNODESLOP 10 4436 #ifdef notyet 4437 /* 4438 * Dump vnode list (via sysctl). 4439 */ 4440 /* ARGSUSED */ 4441 static int 4442 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4443 { 4444 struct xvnode *xvn; 4445 struct mount *mp; 4446 struct vnode *vp; 4447 int error, len, n; 4448 4449 /* 4450 * Stale numvnodes access is not fatal here. 4451 */ 4452 req->lock = 0; 4453 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4454 if (!req->oldptr) 4455 /* Make an estimate */ 4456 return (SYSCTL_OUT(req, 0, len)); 4457 4458 error = sysctl_wire_old_buffer(req, 0); 4459 if (error != 0) 4460 return (error); 4461 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4462 n = 0; 4463 mtx_lock(&mountlist_mtx); 4464 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4465 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4466 continue; 4467 MNT_ILOCK(mp); 4468 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4469 if (n == len) 4470 break; 4471 vref(vp); 4472 xvn[n].xv_size = sizeof *xvn; 4473 xvn[n].xv_vnode = vp; 4474 xvn[n].xv_id = 0; /* XXX compat */ 4475 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4476 XV_COPY(usecount); 4477 XV_COPY(writecount); 4478 XV_COPY(holdcnt); 4479 XV_COPY(mount); 4480 XV_COPY(numoutput); 4481 XV_COPY(type); 4482 #undef XV_COPY 4483 xvn[n].xv_flag = vp->v_vflag; 4484 4485 switch (vp->v_type) { 4486 case VREG: 4487 case VDIR: 4488 case VLNK: 4489 break; 4490 case VBLK: 4491 case VCHR: 4492 if (vp->v_rdev == NULL) { 4493 vrele(vp); 4494 continue; 4495 } 4496 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4497 break; 4498 case VSOCK: 4499 xvn[n].xv_socket = vp->v_socket; 4500 break; 4501 case VFIFO: 4502 xvn[n].xv_fifo = vp->v_fifoinfo; 4503 break; 4504 case VNON: 4505 case VBAD: 4506 default: 4507 /* shouldn't happen? 
*/ 4508 vrele(vp); 4509 continue; 4510 } 4511 vrele(vp); 4512 ++n; 4513 } 4514 MNT_IUNLOCK(mp); 4515 mtx_lock(&mountlist_mtx); 4516 vfs_unbusy(mp); 4517 if (n == len) 4518 break; 4519 } 4520 mtx_unlock(&mountlist_mtx); 4521 4522 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4523 free(xvn, M_TEMP); 4524 return (error); 4525 } 4526 4527 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4528 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4529 ""); 4530 #endif 4531 4532 static void 4533 unmount_or_warn(struct mount *mp) 4534 { 4535 int error; 4536 4537 error = dounmount(mp, MNT_FORCE, curthread); 4538 if (error != 0) { 4539 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4540 if (error == EBUSY) 4541 printf("BUSY)\n"); 4542 else 4543 printf("%d)\n", error); 4544 } 4545 } 4546 4547 /* 4548 * Unmount all filesystems. The list is traversed in reverse order 4549 * of mounting to avoid dependencies. 4550 */ 4551 void 4552 vfs_unmountall(void) 4553 { 4554 struct mount *mp, *tmp; 4555 4556 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4557 4558 /* 4559 * Since this only runs when rebooting, it is not interlocked. 4560 */ 4561 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4562 vfs_ref(mp); 4563 4564 /* 4565 * Forcibly unmounting "/dev" before "/" would prevent clean 4566 * unmount of the latter. 4567 */ 4568 if (mp == rootdevmp) 4569 continue; 4570 4571 unmount_or_warn(mp); 4572 } 4573 4574 if (rootdevmp != NULL) 4575 unmount_or_warn(rootdevmp); 4576 } 4577 4578 static void 4579 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4580 { 4581 4582 ASSERT_VI_LOCKED(vp, __func__); 4583 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4584 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4585 vdropl(vp); 4586 return; 4587 } 4588 if (vn_lock(vp, lkflags) == 0) { 4589 VI_LOCK(vp); 4590 vinactive(vp); 4591 VOP_UNLOCK(vp); 4592 vdropl(vp); 4593 return; 4594 } 4595 vdefer_inactive_unlocked(vp); 4596 } 4597 4598 static int 4599 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4600 { 4601 4602 return (vp->v_iflag & VI_DEFINACT); 4603 } 4604 4605 static void __noinline 4606 vfs_periodic_inactive(struct mount *mp, int flags) 4607 { 4608 struct vnode *vp, *mvp; 4609 int lkflags; 4610 4611 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4612 if (flags != MNT_WAIT) 4613 lkflags |= LK_NOWAIT; 4614 4615 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4616 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4617 VI_UNLOCK(vp); 4618 continue; 4619 } 4620 vp->v_iflag &= ~VI_DEFINACT; 4621 vfs_deferred_inactive(vp, lkflags); 4622 } 4623 } 4624 4625 static inline bool 4626 vfs_want_msync(struct vnode *vp) 4627 { 4628 struct vm_object *obj; 4629 4630 /* 4631 * This test may be performed without any locks held. 4632 * We rely on vm_object's type stability. 
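 * A stale v_object pointer or dirty-page snapshot can only produce a
 * false positive or a false negative here; callers revalidate under
 * the proper locks before acting on the answer, so the race costs or
 * saves a little work but never correctness.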
4633 */ 4634 if (vp->v_vflag & VV_NOSYNC) 4635 return (false); 4636 obj = vp->v_object; 4637 return (obj != NULL && vm_object_mightbedirty(obj)); 4638 } 4639 4640 static int 4641 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4642 { 4643 4644 if (vp->v_vflag & VV_NOSYNC) 4645 return (false); 4646 if (vp->v_iflag & VI_DEFINACT) 4647 return (true); 4648 return (vfs_want_msync(vp)); 4649 } 4650 4651 static void __noinline 4652 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4653 { 4654 struct vnode *vp, *mvp; 4655 struct vm_object *obj; 4656 struct thread *td; 4657 int lkflags, objflags; 4658 bool seen_defer; 4659 4660 td = curthread; 4661 4662 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4663 if (flags != MNT_WAIT) { 4664 lkflags |= LK_NOWAIT; 4665 objflags = OBJPC_NOSYNC; 4666 } else { 4667 objflags = OBJPC_SYNC; 4668 } 4669 4670 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4671 seen_defer = false; 4672 if (vp->v_iflag & VI_DEFINACT) { 4673 vp->v_iflag &= ~VI_DEFINACT; 4674 seen_defer = true; 4675 } 4676 if (!vfs_want_msync(vp)) { 4677 if (seen_defer) 4678 vfs_deferred_inactive(vp, lkflags); 4679 else 4680 VI_UNLOCK(vp); 4681 continue; 4682 } 4683 if (vget(vp, lkflags, td) == 0) { 4684 obj = vp->v_object; 4685 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4686 VM_OBJECT_WLOCK(obj); 4687 vm_object_page_clean(obj, 0, 0, objflags); 4688 VM_OBJECT_WUNLOCK(obj); 4689 } 4690 vput(vp); 4691 if (seen_defer) 4692 vdrop(vp); 4693 } else { 4694 if (seen_defer) 4695 vdefer_inactive_unlocked(vp); 4696 } 4697 } 4698 } 4699 4700 void 4701 vfs_periodic(struct mount *mp, int flags) 4702 { 4703 4704 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4705 4706 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4707 vfs_periodic_inactive(mp, flags); 4708 else 4709 vfs_periodic_msync_inactive(mp, flags); 4710 } 4711 4712 static void 4713 destroy_vpollinfo_free(struct vpollinfo *vi) 4714 { 4715 4716 knlist_destroy(&vi->vpi_selinfo.si_note); 4717 mtx_destroy(&vi->vpi_lock); 4718 uma_zfree(vnodepoll_zone, vi); 4719 } 4720 4721 static void 4722 destroy_vpollinfo(struct vpollinfo *vi) 4723 { 4724 4725 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4726 seldrain(&vi->vpi_selinfo); 4727 destroy_vpollinfo_free(vi); 4728 } 4729 4730 /* 4731 * Initialize per-vnode helper structure to hold poll-related state. 4732 */ 4733 void 4734 v_addpollinfo(struct vnode *vp) 4735 { 4736 struct vpollinfo *vi; 4737 4738 if (vp->v_pollinfo != NULL) 4739 return; 4740 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4741 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4742 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4743 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4744 VI_LOCK(vp); 4745 if (vp->v_pollinfo != NULL) { 4746 VI_UNLOCK(vp); 4747 destroy_vpollinfo_free(vi); 4748 return; 4749 } 4750 vp->v_pollinfo = vi; 4751 VI_UNLOCK(vp); 4752 } 4753 4754 /* 4755 * Record a process's interest in events which might happen to 4756 * a vnode. Because poll uses the historic select-style interface 4757 * internally, this routine serves as both the ``check for any 4758 * pending events'' and the ``record my interest in future events'' 4759 * functions. (These are done together, while the lock is held, 4760 * to avoid race conditions.) 
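 *
 * A filesystem's VOP_POLL implementation would typically just forward
 * to this routine; an illustrative sketch (the fs name is made up):
 *
 *	static int
 *	foo_poll(struct vop_poll_args *ap)
 *	{
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}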
4761 */ 4762 int 4763 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4764 { 4765 4766 v_addpollinfo(vp); 4767 mtx_lock(&vp->v_pollinfo->vpi_lock); 4768 if (vp->v_pollinfo->vpi_revents & events) { 4769 /* 4770 * This leaves events we are not interested 4771 * in available for the other process which 4772 * presumably had requested them 4773 * (otherwise they would never have been 4774 * recorded). 4775 */ 4776 events &= vp->v_pollinfo->vpi_revents; 4777 vp->v_pollinfo->vpi_revents &= ~events; 4778 4779 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4780 return (events); 4781 } 4782 vp->v_pollinfo->vpi_events |= events; 4783 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4784 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4785 return (0); 4786 } 4787 4788 /* 4789 * Routine to create and manage a filesystem syncer vnode. 4790 */ 4791 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4792 static int sync_fsync(struct vop_fsync_args *); 4793 static int sync_inactive(struct vop_inactive_args *); 4794 static int sync_reclaim(struct vop_reclaim_args *); 4795 4796 static struct vop_vector sync_vnodeops = { 4797 .vop_bypass = VOP_EOPNOTSUPP, 4798 .vop_close = sync_close, /* close */ 4799 .vop_fsync = sync_fsync, /* fsync */ 4800 .vop_inactive = sync_inactive, /* inactive */ 4801 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4802 .vop_reclaim = sync_reclaim, /* reclaim */ 4803 .vop_lock1 = vop_stdlock, /* lock */ 4804 .vop_unlock = vop_stdunlock, /* unlock */ 4805 .vop_islocked = vop_stdislocked, /* islocked */ 4806 }; 4807 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4808 4809 /* 4810 * Create a new filesystem syncer vnode for the specified mount point. 4811 */ 4812 void 4813 vfs_allocate_syncvnode(struct mount *mp) 4814 { 4815 struct vnode *vp; 4816 struct bufobj *bo; 4817 static long start, incr, next; 4818 int error; 4819 4820 /* Allocate a new vnode */ 4821 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4822 if (error != 0) 4823 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4824 vp->v_type = VNON; 4825 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4826 vp->v_vflag |= VV_FORCEINSMQ; 4827 error = insmntque(vp, mp); 4828 if (error != 0) 4829 panic("vfs_allocate_syncvnode: insmntque() failed"); 4830 vp->v_vflag &= ~VV_FORCEINSMQ; 4831 VOP_UNLOCK(vp); 4832 /* 4833 * Place the vnode onto the syncer worklist. We attempt to 4834 * scatter them about on the list so that they will go off 4835 * at evenly distributed times even if all the filesystems 4836 * are mounted at once. 4837 */ 4838 next += incr; 4839 if (next == 0 || next > syncer_maxdelay) { 4840 start /= 2; 4841 incr /= 2; 4842 if (start == 0) { 4843 start = syncer_maxdelay / 2; 4844 incr = syncer_maxdelay; 4845 } 4846 next = start; 4847 } 4848 bo = &vp->v_bufobj; 4849 BO_LOCK(bo); 4850 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4851 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
*/ 4852 mtx_lock(&sync_mtx); 4853 sync_vnode_count++; 4854 if (mp->mnt_syncer == NULL) { 4855 mp->mnt_syncer = vp; 4856 vp = NULL; 4857 } 4858 mtx_unlock(&sync_mtx); 4859 BO_UNLOCK(bo); 4860 if (vp != NULL) { 4861 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4862 vgone(vp); 4863 vput(vp); 4864 } 4865 } 4866 4867 void 4868 vfs_deallocate_syncvnode(struct mount *mp) 4869 { 4870 struct vnode *vp; 4871 4872 mtx_lock(&sync_mtx); 4873 vp = mp->mnt_syncer; 4874 if (vp != NULL) 4875 mp->mnt_syncer = NULL; 4876 mtx_unlock(&sync_mtx); 4877 if (vp != NULL) 4878 vrele(vp); 4879 } 4880 4881 /* 4882 * Do a lazy sync of the filesystem. 4883 */ 4884 static int 4885 sync_fsync(struct vop_fsync_args *ap) 4886 { 4887 struct vnode *syncvp = ap->a_vp; 4888 struct mount *mp = syncvp->v_mount; 4889 int error, save; 4890 struct bufobj *bo; 4891 4892 /* 4893 * We only need to do something if this is a lazy evaluation. 4894 */ 4895 if (ap->a_waitfor != MNT_LAZY) 4896 return (0); 4897 4898 /* 4899 * Move ourselves to the back of the sync list. 4900 */ 4901 bo = &syncvp->v_bufobj; 4902 BO_LOCK(bo); 4903 vn_syncer_add_to_worklist(bo, syncdelay); 4904 BO_UNLOCK(bo); 4905 4906 /* 4907 * Walk the list of vnodes pushing all that are dirty and 4908 * not already on the sync list. 4909 */ 4910 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4911 return (0); 4912 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4913 vfs_unbusy(mp); 4914 return (0); 4915 } 4916 save = curthread_pflags_set(TDP_SYNCIO); 4917 /* 4918 * The filesystem at hand may be idle with free vnodes stored in the 4919 * batch. Return them instead of letting them stay there indefinitely. 4920 */ 4921 vfs_periodic(mp, MNT_NOWAIT); 4922 error = VFS_SYNC(mp, MNT_LAZY); 4923 curthread_pflags_restore(save); 4924 vn_finished_write(mp); 4925 vfs_unbusy(mp); 4926 return (error); 4927 } 4928 4929 /* 4930 * The syncer vnode is no longer referenced. 4931 */ 4932 static int 4933 sync_inactive(struct vop_inactive_args *ap) 4934 { 4935 4936 vgone(ap->a_vp); 4937 return (0); 4938 } 4939 4940 /* 4941 * The syncer vnode is no longer needed and is being decommissioned. 4942 * 4943 * Modifications to the worklist must be protected by sync_mtx.
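 * Accordingly, the code below takes sync_mtx before clearing
 * mnt_syncer and before unlinking the bufobj from the worklist.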
4944 */ 4945 static int 4946 sync_reclaim(struct vop_reclaim_args *ap) 4947 { 4948 struct vnode *vp = ap->a_vp; 4949 struct bufobj *bo; 4950 4951 bo = &vp->v_bufobj; 4952 BO_LOCK(bo); 4953 mtx_lock(&sync_mtx); 4954 if (vp->v_mount->mnt_syncer == vp) 4955 vp->v_mount->mnt_syncer = NULL; 4956 if (bo->bo_flag & BO_ONWORKLST) { 4957 LIST_REMOVE(bo, bo_synclist); 4958 syncer_worklist_len--; 4959 sync_vnode_count--; 4960 bo->bo_flag &= ~BO_ONWORKLST; 4961 } 4962 mtx_unlock(&sync_mtx); 4963 BO_UNLOCK(bo); 4964 4965 return (0); 4966 } 4967 4968 int 4969 vn_need_pageq_flush(struct vnode *vp) 4970 { 4971 struct vm_object *obj; 4972 int need; 4973 4974 MPASS(mtx_owned(VI_MTX(vp))); 4975 need = 0; 4976 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4977 vm_object_mightbedirty(obj)) 4978 need = 1; 4979 return (need); 4980 } 4981 4982 /* 4983 * Check if vnode represents a disk device 4984 */ 4985 int 4986 vn_isdisk(struct vnode *vp, int *errp) 4987 { 4988 int error; 4989 4990 if (vp->v_type != VCHR) { 4991 error = ENOTBLK; 4992 goto out; 4993 } 4994 error = 0; 4995 dev_lock(); 4996 if (vp->v_rdev == NULL) 4997 error = ENXIO; 4998 else if (vp->v_rdev->si_devsw == NULL) 4999 error = ENXIO; 5000 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5001 error = ENOTBLK; 5002 dev_unlock(); 5003 out: 5004 if (errp != NULL) 5005 *errp = error; 5006 return (error == 0); 5007 } 5008 5009 /* 5010 * Common filesystem object access control check routine. Accepts a 5011 * vnode's type, "mode", uid and gid, requested access mode, credentials, 5012 * and optional call-by-reference privused argument allowing vaccess() 5013 * to indicate to the caller whether privilege was used to satisfy the 5014 * request (obsoleted). Returns 0 on success, or an errno on failure. 5015 */ 5016 int 5017 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5018 accmode_t accmode, struct ucred *cred, int *privused) 5019 { 5020 accmode_t dac_granted; 5021 accmode_t priv_granted; 5022 5023 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5024 ("invalid bit in accmode")); 5025 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5026 ("VAPPEND without VWRITE")); 5027 5028 /* 5029 * Look for a normal, non-privileged way to access the file/directory 5030 * as requested. If it exists, go with that. 5031 */ 5032 5033 if (privused != NULL) 5034 *privused = 0; 5035 5036 dac_granted = 0; 5037 5038 /* Check the owner. */ 5039 if (cred->cr_uid == file_uid) { 5040 dac_granted |= VADMIN; 5041 if (file_mode & S_IXUSR) 5042 dac_granted |= VEXEC; 5043 if (file_mode & S_IRUSR) 5044 dac_granted |= VREAD; 5045 if (file_mode & S_IWUSR) 5046 dac_granted |= (VWRITE | VAPPEND); 5047 5048 if ((accmode & dac_granted) == accmode) 5049 return (0); 5050 5051 goto privcheck; 5052 } 5053 5054 /* Otherwise, check the groups (first match) */ 5055 if (groupmember(file_gid, cred)) { 5056 if (file_mode & S_IXGRP) 5057 dac_granted |= VEXEC; 5058 if (file_mode & S_IRGRP) 5059 dac_granted |= VREAD; 5060 if (file_mode & S_IWGRP) 5061 dac_granted |= (VWRITE | VAPPEND); 5062 5063 if ((accmode & dac_granted) == accmode) 5064 return (0); 5065 5066 goto privcheck; 5067 } 5068 5069 /* Otherwise, check everyone else. 
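 * As a worked example: for a 0644 file, an unrelated user arrives
 * here with dac_granted empty, picks up VREAD from S_IROTH below,
 * and any VWRITE or VADMIN request must then fall through to the
 * privilege check at privcheck.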
*/ 5070 if (file_mode & S_IXOTH) 5071 dac_granted |= VEXEC; 5072 if (file_mode & S_IROTH) 5073 dac_granted |= VREAD; 5074 if (file_mode & S_IWOTH) 5075 dac_granted |= (VWRITE | VAPPEND); 5076 if ((accmode & dac_granted) == accmode) 5077 return (0); 5078 5079 privcheck: 5080 /* 5081 * Build a privilege mask to determine if the set of privileges 5082 * satisfies the requirements when combined with the granted mask 5083 * from above. For each privilege, if the privilege is required, 5084 * bitwise or the request type onto the priv_granted mask. 5085 */ 5086 priv_granted = 0; 5087 5088 if (type == VDIR) { 5089 /* 5090 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5091 * requests, instead of PRIV_VFS_EXEC. 5092 */ 5093 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5094 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5095 priv_granted |= VEXEC; 5096 } else { 5097 /* 5098 * Ensure that at least one execute bit is on. Otherwise, 5099 * a privileged user will always succeed, and we don't want 5100 * this to happen unless the file really is executable. 5101 */ 5102 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5103 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5104 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5105 priv_granted |= VEXEC; 5106 } 5107 5108 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5109 !priv_check_cred(cred, PRIV_VFS_READ)) 5110 priv_granted |= VREAD; 5111 5112 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5113 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5114 priv_granted |= (VWRITE | VAPPEND); 5115 5116 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5117 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5118 priv_granted |= VADMIN; 5119 5120 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5121 /* XXX audit: privilege used */ 5122 if (privused != NULL) 5123 *privused = 1; 5124 return (0); 5125 } 5126 5127 return ((accmode & VADMIN) ? EPERM : EACCES); 5128 } 5129 5130 /* 5131 * Credential check based on process requesting service, and per-attribute 5132 * permissions. 5133 */ 5134 int 5135 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5136 struct thread *td, accmode_t accmode) 5137 { 5138 5139 /* 5140 * Kernel-invoked always succeeds. 5141 */ 5142 if (cred == NOCRED) 5143 return (0); 5144 5145 /* 5146 * Do not allow privileged processes in jail to directly manipulate 5147 * system attributes. 5148 */ 5149 switch (attrnamespace) { 5150 case EXTATTR_NAMESPACE_SYSTEM: 5151 /* Potentially should be: return (EPERM); */ 5152 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5153 case EXTATTR_NAMESPACE_USER: 5154 return (VOP_ACCESS(vp, accmode, cred, td)); 5155 default: 5156 return (EPERM); 5157 } 5158 } 5159 5160 #ifdef DEBUG_VFS_LOCKS 5161 /* 5162 * This only exists to suppress warnings from unlocked specfs accesses. It is 5163 * no longer ok to have an unlocked VFS. 5164 */ 5165 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5166 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5167 5168 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5169 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5170 "Drop into debugger on lock violation"); 5171 5172 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5173 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5174 0, "Check for interlock across VOPs"); 5175 5176 int vfs_badlock_print = 1; /* Print lock violations. 
*/ 5177 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5178 0, "Print lock violations"); 5179 5180 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5181 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5182 0, "Print vnode details on lock violations"); 5183 5184 #ifdef KDB 5185 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5186 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5187 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5188 #endif 5189 5190 static void 5191 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5192 { 5193 5194 #ifdef KDB 5195 if (vfs_badlock_backtrace) 5196 kdb_backtrace(); 5197 #endif 5198 if (vfs_badlock_vnode) 5199 vn_printf(vp, "vnode "); 5200 if (vfs_badlock_print) 5201 printf("%s: %p %s\n", str, (void *)vp, msg); 5202 if (vfs_badlock_ddb) 5203 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5204 } 5205 5206 void 5207 assert_vi_locked(struct vnode *vp, const char *str) 5208 { 5209 5210 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5211 vfs_badlock("interlock is not locked but should be", str, vp); 5212 } 5213 5214 void 5215 assert_vi_unlocked(struct vnode *vp, const char *str) 5216 { 5217 5218 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5219 vfs_badlock("interlock is locked but should not be", str, vp); 5220 } 5221 5222 void 5223 assert_vop_locked(struct vnode *vp, const char *str) 5224 { 5225 int locked; 5226 5227 if (!IGNORE_LOCK(vp)) { 5228 locked = VOP_ISLOCKED(vp); 5229 if (locked == 0 || locked == LK_EXCLOTHER) 5230 vfs_badlock("is not locked but should be", str, vp); 5231 } 5232 } 5233 5234 void 5235 assert_vop_unlocked(struct vnode *vp, const char *str) 5236 { 5237 5238 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5239 vfs_badlock("is locked but should not be", str, vp); 5240 } 5241 5242 void 5243 assert_vop_elocked(struct vnode *vp, const char *str) 5244 { 5245 5246 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5247 vfs_badlock("is not exclusive locked but should be", str, vp); 5248 } 5249 #endif /* DEBUG_VFS_LOCKS */ 5250 5251 void 5252 vop_rename_fail(struct vop_rename_args *ap) 5253 { 5254 5255 if (ap->a_tvp != NULL) 5256 vput(ap->a_tvp); 5257 if (ap->a_tdvp == ap->a_tvp) 5258 vrele(ap->a_tdvp); 5259 else 5260 vput(ap->a_tdvp); 5261 vrele(ap->a_fdvp); 5262 vrele(ap->a_fvp); 5263 } 5264 5265 void 5266 vop_rename_pre(void *ap) 5267 { 5268 struct vop_rename_args *a = ap; 5269 5270 #ifdef DEBUG_VFS_LOCKS 5271 if (a->a_tvp) 5272 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5273 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5274 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5275 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5276 5277 /* Check the source (from). */ 5278 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5279 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5280 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5281 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5282 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5283 5284 /* Check the target. 
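 * Both tdvp and tvp (when present) must be locked by the caller,
 * since a successful rename modifies them; the assertions below
 * verify this.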
*/ 5285 if (a->a_tvp) 5286 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5287 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5288 #endif 5289 if (a->a_tdvp != a->a_fdvp) 5290 vhold(a->a_fdvp); 5291 if (a->a_tvp != a->a_fvp) 5292 vhold(a->a_fvp); 5293 vhold(a->a_tdvp); 5294 if (a->a_tvp) 5295 vhold(a->a_tvp); 5296 } 5297 5298 #ifdef DEBUG_VFS_LOCKS 5299 void 5300 vop_strategy_pre(void *ap) 5301 { 5302 struct vop_strategy_args *a; 5303 struct buf *bp; 5304 5305 a = ap; 5306 bp = a->a_bp; 5307 5308 /* 5309 * Cluster ops lock their component buffers but not the IO container. 5310 */ 5311 if ((bp->b_flags & B_CLUSTER) != 0) 5312 return; 5313 5314 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5315 if (vfs_badlock_print) 5316 printf( 5317 "VOP_STRATEGY: bp is not locked but should be\n"); 5318 if (vfs_badlock_ddb) 5319 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5320 } 5321 } 5322 5323 void 5324 vop_lock_pre(void *ap) 5325 { 5326 struct vop_lock1_args *a = ap; 5327 5328 if ((a->a_flags & LK_INTERLOCK) == 0) 5329 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5330 else 5331 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5332 } 5333 5334 void 5335 vop_lock_post(void *ap, int rc) 5336 { 5337 struct vop_lock1_args *a = ap; 5338 5339 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5340 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5341 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5342 } 5343 5344 void 5345 vop_unlock_pre(void *ap) 5346 { 5347 struct vop_unlock_args *a = ap; 5348 5349 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5350 } 5351 5352 void 5353 vop_unlock_post(void *ap, int rc) 5354 { 5355 return; 5356 } 5357 5358 void 5359 vop_need_inactive_pre(void *ap) 5360 { 5361 struct vop_need_inactive_args *a = ap; 5362 5363 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5364 } 5365 5366 void 5367 vop_need_inactive_post(void *ap, int rc) 5368 { 5369 struct vop_need_inactive_args *a = ap; 5370 5371 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5372 } 5373 #endif 5374 5375 void 5376 vop_create_post(void *ap, int rc) 5377 { 5378 struct vop_create_args *a = ap; 5379 5380 if (!rc) 5381 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5382 } 5383 5384 void 5385 vop_deleteextattr_post(void *ap, int rc) 5386 { 5387 struct vop_deleteextattr_args *a = ap; 5388 5389 if (!rc) 5390 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5391 } 5392 5393 void 5394 vop_link_post(void *ap, int rc) 5395 { 5396 struct vop_link_args *a = ap; 5397 5398 if (!rc) { 5399 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5400 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5401 } 5402 } 5403 5404 void 5405 vop_mkdir_post(void *ap, int rc) 5406 { 5407 struct vop_mkdir_args *a = ap; 5408 5409 if (!rc) 5410 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5411 } 5412 5413 void 5414 vop_mknod_post(void *ap, int rc) 5415 { 5416 struct vop_mknod_args *a = ap; 5417 5418 if (!rc) 5419 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5420 } 5421 5422 void 5423 vop_reclaim_post(void *ap, int rc) 5424 { 5425 struct vop_reclaim_args *a = ap; 5426 5427 if (!rc) 5428 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5429 } 5430 5431 void 5432 vop_remove_post(void *ap, int rc) 5433 { 5434 struct vop_remove_args *a = ap; 5435 5436 if (!rc) { 5437 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5438 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5439 } 5440 } 5441 5442 void 5443 vop_rename_post(void *ap, int rc) 5444 { 5445 struct vop_rename_args *a = ap; 5446 long hint; 5447 5448 if (!rc) { 5449 hint = NOTE_WRITE; 5450 if (a->a_fdvp == a->a_tdvp) { 5451 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5452 
hint |= NOTE_LINK; 5453 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5454 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5455 } else { 5456 hint |= NOTE_EXTEND; 5457 if (a->a_fvp->v_type == VDIR) 5458 hint |= NOTE_LINK; 5459 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5460 5461 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5462 a->a_tvp->v_type == VDIR) 5463 hint &= ~NOTE_LINK; 5464 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5465 } 5466 5467 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5468 if (a->a_tvp) 5469 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5470 } 5471 if (a->a_tdvp != a->a_fdvp) 5472 vdrop(a->a_fdvp); 5473 if (a->a_tvp != a->a_fvp) 5474 vdrop(a->a_fvp); 5475 vdrop(a->a_tdvp); 5476 if (a->a_tvp) 5477 vdrop(a->a_tvp); 5478 } 5479 5480 void 5481 vop_rmdir_post(void *ap, int rc) 5482 { 5483 struct vop_rmdir_args *a = ap; 5484 5485 if (!rc) { 5486 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5487 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5488 } 5489 } 5490 5491 void 5492 vop_setattr_post(void *ap, int rc) 5493 { 5494 struct vop_setattr_args *a = ap; 5495 5496 if (!rc) 5497 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5498 } 5499 5500 void 5501 vop_setextattr_post(void *ap, int rc) 5502 { 5503 struct vop_setextattr_args *a = ap; 5504 5505 if (!rc) 5506 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5507 } 5508 5509 void 5510 vop_symlink_post(void *ap, int rc) 5511 { 5512 struct vop_symlink_args *a = ap; 5513 5514 if (!rc) 5515 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5516 } 5517 5518 void 5519 vop_open_post(void *ap, int rc) 5520 { 5521 struct vop_open_args *a = ap; 5522 5523 if (!rc) 5524 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5525 } 5526 5527 void 5528 vop_close_post(void *ap, int rc) 5529 { 5530 struct vop_close_args *a = ap; 5531 5532 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5533 !VN_IS_DOOMED(a->a_vp))) { 5534 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5535 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5536 } 5537 } 5538 5539 void 5540 vop_read_post(void *ap, int rc) 5541 { 5542 struct vop_read_args *a = ap; 5543 5544 if (!rc) 5545 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5546 } 5547 5548 void 5549 vop_readdir_post(void *ap, int rc) 5550 { 5551 struct vop_readdir_args *a = ap; 5552 5553 if (!rc) 5554 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5555 } 5556 5557 static struct knlist fs_knlist; 5558 5559 static void 5560 vfs_event_init(void *arg) 5561 { 5562 knlist_init_mtx(&fs_knlist, NULL); 5563 } 5564 /* XXX - correct order? 
*/ 5565 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5566 5567 void 5568 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5569 { 5570 5571 KNOTE_UNLOCKED(&fs_knlist, event); 5572 } 5573 5574 static int filt_fsattach(struct knote *kn); 5575 static void filt_fsdetach(struct knote *kn); 5576 static int filt_fsevent(struct knote *kn, long hint); 5577 5578 struct filterops fs_filtops = { 5579 .f_isfd = 0, 5580 .f_attach = filt_fsattach, 5581 .f_detach = filt_fsdetach, 5582 .f_event = filt_fsevent 5583 }; 5584 5585 static int 5586 filt_fsattach(struct knote *kn) 5587 { 5588 5589 kn->kn_flags |= EV_CLEAR; 5590 knlist_add(&fs_knlist, kn, 0); 5591 return (0); 5592 } 5593 5594 static void 5595 filt_fsdetach(struct knote *kn) 5596 { 5597 5598 knlist_remove(&fs_knlist, kn, 0); 5599 } 5600 5601 static int 5602 filt_fsevent(struct knote *kn, long hint) 5603 { 5604 5605 kn->kn_fflags |= hint; 5606 return (kn->kn_fflags != 0); 5607 } 5608 5609 static int 5610 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5611 { 5612 struct vfsidctl vc; 5613 int error; 5614 struct mount *mp; 5615 5616 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5617 if (error) 5618 return (error); 5619 if (vc.vc_vers != VFS_CTL_VERS1) 5620 return (EINVAL); 5621 mp = vfs_getvfs(&vc.vc_fsid); 5622 if (mp == NULL) 5623 return (ENOENT); 5624 /* ensure that a specific sysctl goes to the right filesystem. */ 5625 if (strcmp(vc.vc_fstypename, "*") != 0 && 5626 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5627 vfs_rel(mp); 5628 return (EINVAL); 5629 } 5630 VCTLTOREQ(&vc, req); 5631 error = VFS_SYSCTL(mp, vc.vc_op, req); 5632 vfs_rel(mp); 5633 return (error); 5634 } 5635 5636 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 5637 NULL, 0, sysctl_vfs_ctl, "", 5638 "Sysctl by fsid"); 5639 5640 /* 5641 * Function to initialize a va_filerev field sensibly. 5642 * XXX: Wouldn't a random number make a lot more sense ?? 
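 *
 * The scheme below packs the boot-relative uptime into 64 bits, with
 * the seconds in the high word and the top half of the binary
 * fraction in the low word, roughly:
 *
 *	filerev = ((u_quad_t)bt.sec << 32) | (bt.frac >> 32);
 *
 * This is monotonic within a single boot but can repeat across
 * reboots, which is what the question above is getting at.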
5643 */ 5644 u_quad_t 5645 init_va_filerev(void) 5646 { 5647 struct bintime bt; 5648 5649 getbinuptime(&bt); 5650 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5651 } 5652 5653 static int filt_vfsread(struct knote *kn, long hint); 5654 static int filt_vfswrite(struct knote *kn, long hint); 5655 static int filt_vfsvnode(struct knote *kn, long hint); 5656 static void filt_vfsdetach(struct knote *kn); 5657 static struct filterops vfsread_filtops = { 5658 .f_isfd = 1, 5659 .f_detach = filt_vfsdetach, 5660 .f_event = filt_vfsread 5661 }; 5662 static struct filterops vfswrite_filtops = { 5663 .f_isfd = 1, 5664 .f_detach = filt_vfsdetach, 5665 .f_event = filt_vfswrite 5666 }; 5667 static struct filterops vfsvnode_filtops = { 5668 .f_isfd = 1, 5669 .f_detach = filt_vfsdetach, 5670 .f_event = filt_vfsvnode 5671 }; 5672 5673 static void 5674 vfs_knllock(void *arg) 5675 { 5676 struct vnode *vp = arg; 5677 5678 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5679 } 5680 5681 static void 5682 vfs_knlunlock(void *arg) 5683 { 5684 struct vnode *vp = arg; 5685 5686 VOP_UNLOCK(vp); 5687 } 5688 5689 static void 5690 vfs_knl_assert_locked(void *arg) 5691 { 5692 #ifdef DEBUG_VFS_LOCKS 5693 struct vnode *vp = arg; 5694 5695 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5696 #endif 5697 } 5698 5699 static void 5700 vfs_knl_assert_unlocked(void *arg) 5701 { 5702 #ifdef DEBUG_VFS_LOCKS 5703 struct vnode *vp = arg; 5704 5705 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5706 #endif 5707 } 5708 5709 int 5710 vfs_kqfilter(struct vop_kqfilter_args *ap) 5711 { 5712 struct vnode *vp = ap->a_vp; 5713 struct knote *kn = ap->a_kn; 5714 struct knlist *knl; 5715 5716 switch (kn->kn_filter) { 5717 case EVFILT_READ: 5718 kn->kn_fop = &vfsread_filtops; 5719 break; 5720 case EVFILT_WRITE: 5721 kn->kn_fop = &vfswrite_filtops; 5722 break; 5723 case EVFILT_VNODE: 5724 kn->kn_fop = &vfsvnode_filtops; 5725 break; 5726 default: 5727 return (EINVAL); 5728 } 5729 5730 kn->kn_hook = (caddr_t)vp; 5731 5732 v_addpollinfo(vp); 5733 if (vp->v_pollinfo == NULL) 5734 return (ENOMEM); 5735 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5736 vhold(vp); 5737 knlist_add(knl, kn, 0); 5738 5739 return (0); 5740 } 5741 5742 /* 5743 * Detach knote from vnode 5744 */ 5745 static void 5746 filt_vfsdetach(struct knote *kn) 5747 { 5748 struct vnode *vp = (struct vnode *)kn->kn_hook; 5749 5750 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5751 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5752 vdrop(vp); 5753 } 5754 5755 /*ARGSUSED*/ 5756 static int 5757 filt_vfsread(struct knote *kn, long hint) 5758 { 5759 struct vnode *vp = (struct vnode *)kn->kn_hook; 5760 struct vattr va; 5761 int res; 5762 5763 /* 5764 * filesystem is gone, so set the EOF flag and schedule 5765 * the knote for deletion. 5766 */ 5767 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5768 VI_LOCK(vp); 5769 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5770 VI_UNLOCK(vp); 5771 return (1); 5772 } 5773 5774 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5775 return (0); 5776 5777 VI_LOCK(vp); 5778 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5779 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5780 VI_UNLOCK(vp); 5781 return (res); 5782 } 5783 5784 /*ARGSUSED*/ 5785 static int 5786 filt_vfswrite(struct knote *kn, long hint) 5787 { 5788 struct vnode *vp = (struct vnode *)kn->kn_hook; 5789 5790 VI_LOCK(vp); 5791 5792 /* 5793 * filesystem is gone, so set the EOF flag and schedule 5794 * the knote for deletion. 
5795 */ 5796 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5797 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5798 5799 kn->kn_data = 0; 5800 VI_UNLOCK(vp); 5801 return (1); 5802 } 5803 5804 static int 5805 filt_vfsvnode(struct knote *kn, long hint) 5806 { 5807 struct vnode *vp = (struct vnode *)kn->kn_hook; 5808 int res; 5809 5810 VI_LOCK(vp); 5811 if (kn->kn_sfflags & hint) 5812 kn->kn_fflags |= hint; 5813 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5814 kn->kn_flags |= EV_EOF; 5815 VI_UNLOCK(vp); 5816 return (1); 5817 } 5818 res = (kn->kn_fflags != 0); 5819 VI_UNLOCK(vp); 5820 return (res); 5821 } 5822 5823 /* 5824 * Returns whether the directory is empty or not. 5825 * If it is empty, the return value is 0; otherwise 5826 * the return value is an error value (which may 5827 * be ENOTEMPTY). 5828 */ 5829 int 5830 vfs_emptydir(struct vnode *vp) 5831 { 5832 struct uio uio; 5833 struct iovec iov; 5834 struct dirent *dirent, *dp, *endp; 5835 int error, eof; 5836 5837 error = 0; 5838 eof = 0; 5839 5840 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 5841 5842 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 5843 iov.iov_base = dirent; 5844 iov.iov_len = sizeof(struct dirent); 5845 5846 uio.uio_iov = &iov; 5847 uio.uio_iovcnt = 1; 5848 uio.uio_offset = 0; 5849 uio.uio_resid = sizeof(struct dirent); 5850 uio.uio_segflg = UIO_SYSSPACE; 5851 uio.uio_rw = UIO_READ; 5852 uio.uio_td = curthread; 5853 5854 while (eof == 0 && error == 0) { 5855 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 5856 NULL, NULL); 5857 if (error != 0) 5858 break; 5859 endp = (void *)((uint8_t *)dirent + 5860 sizeof(struct dirent) - uio.uio_resid); 5861 for (dp = dirent; dp < endp; 5862 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 5863 if (dp->d_type == DT_WHT) 5864 continue; 5865 if (dp->d_namlen == 0) 5866 continue; 5867 if (dp->d_type != DT_DIR && 5868 dp->d_type != DT_UNKNOWN) { 5869 error = ENOTEMPTY; 5870 break; 5871 } 5872 if (dp->d_namlen > 2) { 5873 error = ENOTEMPTY; 5874 break; 5875 } 5876 if (dp->d_namlen == 1 && 5877 dp->d_name[0] != '.') { 5878 error = ENOTEMPTY; 5879 break; 5880 } 5881 if (dp->d_namlen == 2 && 5882 dp->d_name[1] != '.') { 5883 error = ENOTEMPTY; 5884 break; 5885 } 5886 uio.uio_resid = sizeof(struct dirent); 5887 } 5888 } 5889 free(dirent, M_TEMP); 5890 return (error); 5891 } 5892 5893 int 5894 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5895 { 5896 int error; 5897 5898 if (dp->d_reclen > ap->a_uio->uio_resid) 5899 return (ENAMETOOLONG); 5900 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5901 if (error) { 5902 if (ap->a_ncookies != NULL) { 5903 if (ap->a_cookies != NULL) 5904 free(ap->a_cookies, M_TEMP); 5905 ap->a_cookies = NULL; 5906 *ap->a_ncookies = 0; 5907 } 5908 return (error); 5909 } 5910 if (ap->a_ncookies == NULL) 5911 return (0); 5912 5913 KASSERT(ap->a_cookies, 5914 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5915 5916 *ap->a_cookies = realloc(*ap->a_cookies, 5917 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5918 (*ap->a_cookies)[*ap->a_ncookies] = off; 5919 *ap->a_ncookies += 1; 5920 return (0); 5921 } 5922 5923 /* 5924 * Mark for update the access time of the file if the filesystem 5925 * supports VOP_MARKATIME. This functionality is used by execve and 5926 * mmap, so we want to avoid the I/O implied by directly setting 5927 * va_atime for the sake of efficiency. 
5928 */ 5929 void 5930 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5931 { 5932 struct mount *mp; 5933 5934 mp = vp->v_mount; 5935 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5936 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5937 (void)VOP_MARKATIME(vp); 5938 } 5939 5940 /* 5941 * The purpose of this routine is to remove granularity from accmode_t, 5942 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5943 * VADMIN and VAPPEND. 5944 * 5945 * If it returns 0, the caller is supposed to continue with the usual 5946 * access checks using 'accmode' as modified by this routine. If it 5947 * returns nonzero value, the caller is supposed to return that value 5948 * as errno. 5949 * 5950 * Note that after this routine runs, accmode may be zero. 5951 */ 5952 int 5953 vfs_unixify_accmode(accmode_t *accmode) 5954 { 5955 /* 5956 * There is no way to specify explicit "deny" rule using 5957 * file mode or POSIX.1e ACLs. 5958 */ 5959 if (*accmode & VEXPLICIT_DENY) { 5960 *accmode = 0; 5961 return (0); 5962 } 5963 5964 /* 5965 * None of these can be translated into usual access bits. 5966 * Also, the common case for NFSv4 ACLs is to not contain 5967 * either of these bits. Caller should check for VWRITE 5968 * on the containing directory instead. 5969 */ 5970 if (*accmode & (VDELETE_CHILD | VDELETE)) 5971 return (EPERM); 5972 5973 if (*accmode & VADMIN_PERMS) { 5974 *accmode &= ~VADMIN_PERMS; 5975 *accmode |= VADMIN; 5976 } 5977 5978 /* 5979 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5980 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5981 */ 5982 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5983 5984 return (0); 5985 } 5986 5987 /* 5988 * Clear out a doomed vnode (if any) and replace it with a new one as long 5989 * as the fs is not being unmounted. Return the root vnode to the caller. 5990 */ 5991 static int __noinline 5992 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 5993 { 5994 struct vnode *vp; 5995 int error; 5996 5997 restart: 5998 if (mp->mnt_rootvnode != NULL) { 5999 MNT_ILOCK(mp); 6000 vp = mp->mnt_rootvnode; 6001 if (vp != NULL) { 6002 if (!VN_IS_DOOMED(vp)) { 6003 vrefact(vp); 6004 MNT_IUNLOCK(mp); 6005 error = vn_lock(vp, flags); 6006 if (error == 0) { 6007 *vpp = vp; 6008 return (0); 6009 } 6010 vrele(vp); 6011 goto restart; 6012 } 6013 /* 6014 * Clear the old one. 6015 */ 6016 mp->mnt_rootvnode = NULL; 6017 } 6018 MNT_IUNLOCK(mp); 6019 if (vp != NULL) { 6020 /* 6021 * Paired with a fence in vfs_op_thread_exit(). 
6022 */ 6023 atomic_thread_fence_acq(); 6024 vfs_op_barrier_wait(mp); 6025 vrele(vp); 6026 } 6027 } 6028 error = VFS_CACHEDROOT(mp, flags, vpp); 6029 if (error != 0) 6030 return (error); 6031 if (mp->mnt_vfs_ops == 0) { 6032 MNT_ILOCK(mp); 6033 if (mp->mnt_vfs_ops != 0) { 6034 MNT_IUNLOCK(mp); 6035 return (0); 6036 } 6037 if (mp->mnt_rootvnode == NULL) { 6038 vrefact(*vpp); 6039 mp->mnt_rootvnode = *vpp; 6040 } else { 6041 if (mp->mnt_rootvnode != *vpp) { 6042 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6043 panic("%s: mismatch between vnode returned " 6044 " by VFS_CACHEDROOT and the one cached " 6045 " (%p != %p)", 6046 __func__, *vpp, mp->mnt_rootvnode); 6047 } 6048 } 6049 } 6050 MNT_IUNLOCK(mp); 6051 } 6052 return (0); 6053 } 6054 6055 int 6056 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6057 { 6058 struct vnode *vp; 6059 int error; 6060 6061 if (!vfs_op_thread_enter(mp)) 6062 return (vfs_cache_root_fallback(mp, flags, vpp)); 6063 vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); 6064 if (vp == NULL || VN_IS_DOOMED(vp)) { 6065 vfs_op_thread_exit(mp); 6066 return (vfs_cache_root_fallback(mp, flags, vpp)); 6067 } 6068 vrefact(vp); 6069 vfs_op_thread_exit(mp); 6070 error = vn_lock(vp, flags); 6071 if (error != 0) { 6072 vrele(vp); 6073 return (vfs_cache_root_fallback(mp, flags, vpp)); 6074 } 6075 *vpp = vp; 6076 return (0); 6077 } 6078 6079 struct vnode * 6080 vfs_cache_root_clear(struct mount *mp) 6081 { 6082 struct vnode *vp; 6083 6084 /* 6085 * ops > 0 guarantees there is nobody who can see this vnode 6086 */ 6087 MPASS(mp->mnt_vfs_ops > 0); 6088 vp = mp->mnt_rootvnode; 6089 mp->mnt_rootvnode = NULL; 6090 return (vp); 6091 } 6092 6093 void 6094 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6095 { 6096 6097 MPASS(mp->mnt_vfs_ops > 0); 6098 vrefact(vp); 6099 mp->mnt_rootvnode = vp; 6100 } 6101 6102 /* 6103 * These are helper functions for filesystems to traverse all 6104 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6105 * 6106 * This interface replaces MNT_VNODE_FOREACH. 6107 */ 6108 6109 6110 struct vnode * 6111 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6112 { 6113 struct vnode *vp; 6114 6115 if (should_yield()) 6116 kern_yield(PRI_USER); 6117 MNT_ILOCK(mp); 6118 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6119 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6120 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6121 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6122 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6123 continue; 6124 VI_LOCK(vp); 6125 if (VN_IS_DOOMED(vp)) { 6126 VI_UNLOCK(vp); 6127 continue; 6128 } 6129 break; 6130 } 6131 if (vp == NULL) { 6132 __mnt_vnode_markerfree_all(mvp, mp); 6133 /* MNT_IUNLOCK(mp); -- done in above function */ 6134 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6135 return (NULL); 6136 } 6137 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6138 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6139 MNT_IUNLOCK(mp); 6140 return (vp); 6141 } 6142 6143 struct vnode * 6144 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6145 { 6146 struct vnode *vp; 6147 6148 *mvp = vn_alloc_marker(mp); 6149 MNT_ILOCK(mp); 6150 MNT_REF(mp); 6151 6152 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6153 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
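
/*
 * A minimal sketch of the MNT_VNODE_FOREACH_ALL() idiom that the helpers
 * above implement.  Each vnode is returned with its interlock held; the
 * VREG filter is hypothetical and shown only for illustration:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		(process vp, then release the interlock)
 *		VI_UNLOCK(vp);
 *	}
 *
 * A scan that stops early must call MNT_VNODE_FOREACH_ALL_ABORT() to
 * remove and free the marker.
 */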
/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h.
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{
	const struct vnode *tmp;
	bool held, ret;

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	ret = false;

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Use a hold to prevent vp from disappearing while the mount vnode
	 * list lock is dropped and reacquired.  Normally a hold would be
	 * acquired with vhold(), but that might try to acquire the vnode
	 * interlock, which would be a LOR with the mount vnode list lock.
	 */
	held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
	mtx_unlock(&mp->mnt_listmtx);
	if (!held)
		goto abort;
	VI_LOCK(vp);
	if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
		vdropl(vp);
		goto abort;
	}
	mtx_lock(&mp->mnt_listmtx);

	/*
	 * Determine whether the vnode is still the next one after the marker,
	 * excepting any other markers.  If the vnode has not been doomed by
	 * vgone() then the hold should have ensured that it remained on the
	 * lazy list.  If it has been doomed but is still on the lazy list,
	 * don't abort, but rather skip over it (avoid spinning on doomed
	 * vnodes).
	 */
	tmp = mvp;
	do {
		tmp = TAILQ_NEXT(tmp, v_lazylist);
	} while (tmp != NULL && tmp->v_type == VMARKER);
	if (tmp != vp) {
		mtx_unlock(&mp->mnt_listmtx);
		VI_UNLOCK(vp);
		goto abort;
	}

	ret = true;
	goto out;
abort:
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
out:
	if (ret)
		ASSERT_VI_LOCKED(vp, __func__);
	else
		ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	return (ret);
}
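
/*
 * In outline, the relock dance above is an instance of a common recipe for
 * resolving a lock order reversal (illustrative pseudocode only):
 *
 *	pin the object with a reference that needs no locks;
 *	drop the lock held out of order;
 *	acquire both locks in the conventional order;
 *	revalidate that the object is still where the scan left it,
 *	    aborting and restarting the scan if it moved.
 */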
6327 */ 6328 if (!VI_TRYLOCK(vp) && 6329 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6330 goto restart; 6331 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6332 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6333 ("alien vnode on the lazy list %p %p", vp, mp)); 6334 if (vp->v_mount == mp && !VN_IS_DOOMED(vp)) 6335 break; 6336 nvp = TAILQ_NEXT(vp, v_lazylist); 6337 VI_UNLOCK(vp); 6338 vp = nvp; 6339 } 6340 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6341 6342 /* Check if we are done */ 6343 if (vp == NULL) { 6344 mtx_unlock(&mp->mnt_listmtx); 6345 mnt_vnode_markerfree_lazy(mvp, mp); 6346 return (NULL); 6347 } 6348 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6349 mtx_unlock(&mp->mnt_listmtx); 6350 ASSERT_VI_LOCKED(vp, "lazy iter"); 6351 return (vp); 6352 } 6353 6354 struct vnode * 6355 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6356 void *cbarg) 6357 { 6358 6359 if (should_yield()) 6360 kern_yield(PRI_USER); 6361 mtx_lock(&mp->mnt_listmtx); 6362 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6363 } 6364 6365 struct vnode * 6366 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6367 void *cbarg) 6368 { 6369 struct vnode *vp; 6370 6371 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6372 return (NULL); 6373 6374 *mvp = vn_alloc_marker(mp); 6375 MNT_ILOCK(mp); 6376 MNT_REF(mp); 6377 MNT_IUNLOCK(mp); 6378 6379 mtx_lock(&mp->mnt_listmtx); 6380 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6381 if (vp == NULL) { 6382 mtx_unlock(&mp->mnt_listmtx); 6383 mnt_vnode_markerfree_lazy(mvp, mp); 6384 return (NULL); 6385 } 6386 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6387 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6388 } 6389 6390 void 6391 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6392 { 6393 6394 if (*mvp == NULL) 6395 return; 6396 6397 mtx_lock(&mp->mnt_listmtx); 6398 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6399 mtx_unlock(&mp->mnt_listmtx); 6400 mnt_vnode_markerfree_lazy(mvp, mp); 6401 } 6402