1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/capsicum.h> 54 #include <sys/condvar.h> 55 #include <sys/conf.h> 56 #include <sys/counter.h> 57 #include <sys/dirent.h> 58 #include <sys/event.h> 59 #include <sys/eventhandler.h> 60 #include <sys/extattr.h> 61 #include <sys/file.h> 62 #include <sys/fcntl.h> 63 #include <sys/jail.h> 64 #include <sys/kdb.h> 65 #include <sys/kernel.h> 66 #include <sys/kthread.h> 67 #include <sys/ktr.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smr.h> 80 #include <sys/smp.h> 81 #include <sys/stat.h> 82 #include <sys/sysctl.h> 83 #include <sys/syslog.h> 84 #include <sys/vmmeter.h> 85 #include <sys/vnode.h> 86 #include <sys/watchdog.h> 87 88 #include <machine/stdarg.h> 89 90 #include <security/mac/mac_framework.h> 91 92 #include <vm/vm.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_extern.h> 95 #include <vm/pmap.h> 96 #include <vm/vm_map.h> 97 #include <vm/vm_page.h> 98 #include <vm/vm_kern.h> 99 #include <vm/uma.h> 100 101 #ifdef DDB 102 #include <ddb/ddb.h> 103 #endif 104 105 static void delmntque(struct vnode *vp); 106 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 107 int slpflag, int slptimeo); 108 static void syncer_shutdown(void *arg, int howto); 109 static int vtryrecycle(struct vnode *vp); 110 static void v_init_counters(struct vnode *); 111 static void vgonel(struct vnode *); 112 static void vfs_knllock(void *arg); 113 static void vfs_knlunlock(void *arg); 114 static void vfs_knl_assert_locked(void *arg); 115 static void vfs_knl_assert_unlocked(void *arg); 116 static void destroy_vpollinfo(struct vpollinfo *vi); 117 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 118 daddr_t startlbn, daddr_t endlbn); 119 static void vnlru_recalc(void); 120 121 /* 122 * These fences are intended for cases where some synchronization is 123 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt 124 * and v_usecount) updates. Access to v_iflags is generally synchronized 125 * by the interlock, but we have some internal assertions that check vnode 126 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only 127 * for now. 128 */ 129 #ifdef INVARIANTS 130 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq() 131 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() 132 #else 133 #define VNODE_REFCOUNT_FENCE_ACQ() 134 #define VNODE_REFCOUNT_FENCE_REL() 135 #endif 136 137 /* 138 * Number of vnodes in existence. Increased whenever getnewvnode() 139 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
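 *
 * As a rough illustration of how these watermarks are derived (the actual
 * computation is in vnlru_recalc() below), with the default wantfreevnodes
 * of desiredvnodes / 4:
 *
 *	gapvnodes = desiredvnodes - wantfreevnodes;
 *	vhiwat = gapvnodes / 11;	(about 9%)
 *	vlowat = vhiwat / 2;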
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
249 * Items that need to be processed soon are placed in this queue: 250 * 251 * syncer_workitem_pending[syncer_delayno] 252 * 253 * A delay of fifteen seconds is done by placing the request fifteen 254 * entries later in the queue: 255 * 256 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 257 * 258 */ 259 static int syncer_delayno; 260 static long syncer_mask; 261 LIST_HEAD(synclist, bufobj); 262 static struct synclist *syncer_workitem_pending; 263 /* 264 * The sync_mtx protects: 265 * bo->bo_synclist 266 * sync_vnode_count 267 * syncer_delayno 268 * syncer_state 269 * syncer_workitem_pending 270 * syncer_worklist_len 271 * rushjob 272 */ 273 static struct mtx sync_mtx; 274 static struct cv sync_wakeup; 275 276 #define SYNCER_MAXDELAY 32 277 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 278 static int syncdelay = 30; /* max time to delay syncing data */ 279 static int filedelay = 30; /* time to delay syncing files */ 280 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 281 "Time to delay syncing files (in seconds)"); 282 static int dirdelay = 29; /* time to delay syncing directories */ 283 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 284 "Time to delay syncing directories (in seconds)"); 285 static int metadelay = 28; /* time to delay syncing metadata */ 286 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 287 "Time to delay syncing metadata (in seconds)"); 288 static int rushjob; /* number of slots to run ASAP */ 289 static int stat_rush_requests; /* number of times I/O speeded up */ 290 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 291 "Number of times I/O speeded up (rush requests)"); 292 293 #define VDBATCH_SIZE 8 294 struct vdbatch { 295 u_int index; 296 long freevnodes; 297 struct mtx lock; 298 struct vnode *tab[VDBATCH_SIZE]; 299 }; 300 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 301 302 static void vdbatch_dequeue(struct vnode *vp); 303 304 /* 305 * When shutting down the syncer, run it at four times normal speed. 306 */ 307 #define SYNCER_SHUTDOWN_SPEEDUP 4 308 static int sync_vnode_count; 309 static int syncer_worklist_len; 310 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 311 syncer_state; 312 313 /* Target for maximum number of vnodes. */ 314 u_long desiredvnodes; 315 static u_long gapvnodes; /* gap between wanted and desired */ 316 static u_long vhiwat; /* enough extras after expansion */ 317 static u_long vlowat; /* minimal extras before expansion */ 318 static u_long vstir; /* nonzero to stir non-free vnodes */ 319 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 320 321 static u_long vnlru_read_freevnodes(void); 322 323 /* 324 * Note that no attempt is made to sanitize these parameters. 325 */ 326 static int 327 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 328 { 329 u_long val; 330 int error; 331 332 val = desiredvnodes; 333 error = sysctl_handle_long(oidp, &val, 0, req); 334 if (error != 0 || req->newptr == NULL) 335 return (error); 336 337 if (val == desiredvnodes) 338 return (0); 339 mtx_lock(&vnode_list_mtx); 340 desiredvnodes = val; 341 wantfreevnodes = desiredvnodes / 4; 342 vnlru_recalc(); 343 mtx_unlock(&vnode_list_mtx); 344 /* 345 * XXX There is no protection against multiple threads changing 346 * desiredvnodes at the same time. Locking above only helps vnlru and 347 * getnewvnode. 
348 */ 349 vfs_hash_changesize(desiredvnodes); 350 cache_changesize(desiredvnodes); 351 return (0); 352 } 353 354 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 355 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 356 "LU", "Target for maximum number of vnodes"); 357 358 static int 359 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 360 { 361 u_long val; 362 int error; 363 364 val = wantfreevnodes; 365 error = sysctl_handle_long(oidp, &val, 0, req); 366 if (error != 0 || req->newptr == NULL) 367 return (error); 368 369 if (val == wantfreevnodes) 370 return (0); 371 mtx_lock(&vnode_list_mtx); 372 wantfreevnodes = val; 373 vnlru_recalc(); 374 mtx_unlock(&vnode_list_mtx); 375 return (0); 376 } 377 378 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 379 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 380 "LU", "Target for minimum number of \"free\" vnodes"); 381 382 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 383 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 384 static int vnlru_nowhere; 385 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 386 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 387 388 static int 389 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 390 { 391 struct vnode *vp; 392 struct nameidata nd; 393 char *buf; 394 unsigned long ndflags; 395 int error; 396 397 if (req->newptr == NULL) 398 return (EINVAL); 399 if (req->newlen >= PATH_MAX) 400 return (E2BIG); 401 402 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 403 error = SYSCTL_IN(req, buf, req->newlen); 404 if (error != 0) 405 goto out; 406 407 buf[req->newlen] = '\0'; 408 409 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME; 410 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 411 if ((error = namei(&nd)) != 0) 412 goto out; 413 vp = nd.ni_vp; 414 415 if (VN_IS_DOOMED(vp)) { 416 /* 417 * This vnode is being recycled. Return != 0 to let the caller 418 * know that the sysctl had no effect. Return EAGAIN because a 419 * subsequent call will likely succeed (since namei will create 420 * a new vnode if necessary) 421 */ 422 error = EAGAIN; 423 goto putvnode; 424 } 425 426 counter_u64_add(recycles_count, 1); 427 vgone(vp); 428 putvnode: 429 NDFREE(&nd, 0); 430 out: 431 free(buf, M_TEMP); 432 return (error); 433 } 434 435 static int 436 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 437 { 438 struct thread *td = curthread; 439 struct vnode *vp; 440 struct file *fp; 441 int error; 442 int fd; 443 444 if (req->newptr == NULL) 445 return (EBADF); 446 447 error = sysctl_handle_int(oidp, &fd, 0, req); 448 if (error != 0) 449 return (error); 450 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 451 if (error != 0) 452 return (error); 453 vp = fp->f_vnode; 454 455 error = vn_lock(vp, LK_EXCLUSIVE); 456 if (error != 0) 457 goto drop; 458 459 counter_u64_add(recycles_count, 1); 460 vgone(vp); 461 VOP_UNLOCK(vp); 462 drop: 463 fdrop(fp, td); 464 return (error); 465 } 466 467 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 468 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 469 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 470 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 471 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 472 sysctl_ftry_reclaim_vnode, "I", 473 "Try to reclaim a vnode by its file descriptor"); 474 475 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 476 static int vnsz2log; 477 478 /* 479 * Support for the bufobj clean & dirty pctrie. 
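 *
 * For reference, buffers are keyed by their logical block number (b_lblkno),
 * and the trie nodes are allocated from buf_trie_zone under SMR (see
 * buf_trie_alloc() and buf_trie_free() below).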
480 */ 481 static void * 482 buf_trie_alloc(struct pctrie *ptree) 483 { 484 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 485 } 486 487 static void 488 buf_trie_free(struct pctrie *ptree, void *node) 489 { 490 uma_zfree_smr(buf_trie_zone, node); 491 } 492 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 493 buf_trie_smr); 494 495 /* 496 * Initialize the vnode management data structures. 497 * 498 * Reevaluate the following cap on the number of vnodes after the physical 499 * memory size exceeds 512GB. In the limit, as the physical memory size 500 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 501 */ 502 #ifndef MAXVNODES_MAX 503 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 504 #endif 505 506 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 507 508 static struct vnode * 509 vn_alloc_marker(struct mount *mp) 510 { 511 struct vnode *vp; 512 513 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 514 vp->v_type = VMARKER; 515 vp->v_mount = mp; 516 517 return (vp); 518 } 519 520 static void 521 vn_free_marker(struct vnode *vp) 522 { 523 524 MPASS(vp->v_type == VMARKER); 525 free(vp, M_VNODE_MARKER); 526 } 527 528 /* 529 * Initialize a vnode as it first enters the zone. 530 */ 531 static int 532 vnode_init(void *mem, int size, int flags) 533 { 534 struct vnode *vp; 535 536 vp = mem; 537 bzero(vp, size); 538 /* 539 * Setup locks. 540 */ 541 vp->v_vnlock = &vp->v_lock; 542 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 543 /* 544 * By default, don't allow shared locks unless filesystems opt-in. 545 */ 546 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 547 LK_NOSHARE | LK_IS_VNODE); 548 /* 549 * Initialize bufobj. 550 */ 551 bufobj_init(&vp->v_bufobj, vp); 552 /* 553 * Initialize namecache. 554 */ 555 cache_vnode_init(vp); 556 /* 557 * Initialize rangelocks. 558 */ 559 rangelock_init(&vp->v_rl); 560 561 vp->v_dbatchcpu = NOCPU; 562 563 mtx_lock(&vnode_list_mtx); 564 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 565 mtx_unlock(&vnode_list_mtx); 566 return (0); 567 } 568 569 /* 570 * Free a vnode when it is cleared from the zone. 571 */ 572 static void 573 vnode_fini(void *mem, int size) 574 { 575 struct vnode *vp; 576 struct bufobj *bo; 577 578 vp = mem; 579 vdbatch_dequeue(vp); 580 mtx_lock(&vnode_list_mtx); 581 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 582 mtx_unlock(&vnode_list_mtx); 583 rangelock_destroy(&vp->v_rl); 584 lockdestroy(vp->v_vnlock); 585 mtx_destroy(&vp->v_interlock); 586 bo = &vp->v_bufobj; 587 rw_destroy(BO_LOCKPTR(bo)); 588 } 589 590 /* 591 * Provide the size of NFS nclnode and NFS fh for calculation of the 592 * vnode memory consumption. The size is specified directly to 593 * eliminate dependency on NFS-private header. 594 * 595 * Other filesystems may use bigger or smaller (like UFS and ZFS) 596 * private inode data, but the NFS-based estimation is ample enough. 597 * Still, we care about differences in the size between 64- and 32-bit 598 * platforms. 599 * 600 * Namecache structure size is heuristically 601 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 
602 */ 603 #ifdef _LP64 604 #define NFS_NCLNODE_SZ (528 + 64) 605 #define NC_SZ 148 606 #else 607 #define NFS_NCLNODE_SZ (360 + 32) 608 #define NC_SZ 92 609 #endif 610 611 static void 612 vntblinit(void *dummy __unused) 613 { 614 struct vdbatch *vd; 615 int cpu, physvnodes, virtvnodes; 616 u_int i; 617 618 /* 619 * Desiredvnodes is a function of the physical memory size and the 620 * kernel's heap size. Generally speaking, it scales with the 621 * physical memory size. The ratio of desiredvnodes to the physical 622 * memory size is 1:16 until desiredvnodes exceeds 98,304. 623 * Thereafter, the 624 * marginal ratio of desiredvnodes to the physical memory size is 625 * 1:64. However, desiredvnodes is limited by the kernel's heap 626 * size. The memory required by desiredvnodes vnodes and vm objects 627 * must not exceed 1/10th of the kernel's heap size. 628 */ 629 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 630 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 631 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 632 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 633 desiredvnodes = min(physvnodes, virtvnodes); 634 if (desiredvnodes > MAXVNODES_MAX) { 635 if (bootverbose) 636 printf("Reducing kern.maxvnodes %lu -> %lu\n", 637 desiredvnodes, MAXVNODES_MAX); 638 desiredvnodes = MAXVNODES_MAX; 639 } 640 wantfreevnodes = desiredvnodes / 4; 641 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 642 TAILQ_INIT(&vnode_list); 643 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 644 /* 645 * The lock is taken to appease WITNESS. 646 */ 647 mtx_lock(&vnode_list_mtx); 648 vnlru_recalc(); 649 mtx_unlock(&vnode_list_mtx); 650 vnode_list_free_marker = vn_alloc_marker(NULL); 651 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 652 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 653 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 654 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 655 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 656 uma_zone_set_smr(vnode_zone, vfs_smr); 657 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 658 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 659 /* 660 * Preallocate enough nodes to support one-per buf so that 661 * we can not fail an insert. reassignbuf() callers can not 662 * tolerate the insertion failure. 663 */ 664 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 665 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 666 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 667 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 668 uma_prealloc(buf_trie_zone, nbuf); 669 670 vnodes_created = counter_u64_alloc(M_WAITOK); 671 recycles_count = counter_u64_alloc(M_WAITOK); 672 recycles_free_count = counter_u64_alloc(M_WAITOK); 673 deferred_inact = counter_u64_alloc(M_WAITOK); 674 675 /* 676 * Initialize the filesystem syncer. 677 */ 678 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 679 &syncer_mask); 680 syncer_maxdelay = syncer_mask + 1; 681 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 682 cv_init(&sync_wakeup, "syncer"); 683 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 684 vnsz2log++; 685 vnsz2log--; 686 687 CPU_FOREACH(cpu) { 688 vd = DPCPU_ID_PTR((cpu), vd); 689 bzero(vd, sizeof(*vd)); 690 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 691 } 692 } 693 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 694 695 /* 696 * Mark a mount point as busy. Used to synchronize access and to delay 697 * unmounting. 
Eventually, mountlist_mtx is not released on failure. 698 * 699 * vfs_busy() is a custom lock, it can block the caller. 700 * vfs_busy() only sleeps if the unmount is active on the mount point. 701 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 702 * vnode belonging to mp. 703 * 704 * Lookup uses vfs_busy() to traverse mount points. 705 * root fs var fs 706 * / vnode lock A / vnode lock (/var) D 707 * /var vnode lock B /log vnode lock(/var/log) E 708 * vfs_busy lock C vfs_busy lock F 709 * 710 * Within each file system, the lock order is C->A->B and F->D->E. 711 * 712 * When traversing across mounts, the system follows that lock order: 713 * 714 * C->A->B 715 * | 716 * +->F->D->E 717 * 718 * The lookup() process for namei("/var") illustrates the process: 719 * VOP_LOOKUP() obtains B while A is held 720 * vfs_busy() obtains a shared lock on F while A and B are held 721 * vput() releases lock on B 722 * vput() releases lock on A 723 * VFS_ROOT() obtains lock on D while shared lock on F is held 724 * vfs_unbusy() releases shared lock on F 725 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 726 * Attempt to lock A (instead of vp_crossmp) while D is held would 727 * violate the global order, causing deadlocks. 728 * 729 * dounmount() locks B while F is drained. 730 */ 731 int 732 vfs_busy(struct mount *mp, int flags) 733 { 734 735 MPASS((flags & ~MBF_MASK) == 0); 736 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 737 738 if (vfs_op_thread_enter(mp)) { 739 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 740 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 741 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 742 vfs_mp_count_add_pcpu(mp, ref, 1); 743 vfs_mp_count_add_pcpu(mp, lockref, 1); 744 vfs_op_thread_exit(mp); 745 if (flags & MBF_MNTLSTLOCK) 746 mtx_unlock(&mountlist_mtx); 747 return (0); 748 } 749 750 MNT_ILOCK(mp); 751 vfs_assert_mount_counters(mp); 752 MNT_REF(mp); 753 /* 754 * If mount point is currently being unmounted, sleep until the 755 * mount point fate is decided. If thread doing the unmounting fails, 756 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 757 * that this mount point has survived the unmount attempt and vfs_busy 758 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 759 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 760 * about to be really destroyed. vfs_busy needs to release its 761 * reference on the mount point in this case and return with ENOENT, 762 * telling the caller that mount mount it tried to busy is no longer 763 * valid. 764 */ 765 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 766 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 767 MNT_REL(mp); 768 MNT_IUNLOCK(mp); 769 CTR1(KTR_VFS, "%s: failed busying before sleeping", 770 __func__); 771 return (ENOENT); 772 } 773 if (flags & MBF_MNTLSTLOCK) 774 mtx_unlock(&mountlist_mtx); 775 mp->mnt_kern_flag |= MNTK_MWAIT; 776 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 777 if (flags & MBF_MNTLSTLOCK) 778 mtx_lock(&mountlist_mtx); 779 MNT_ILOCK(mp); 780 } 781 if (flags & MBF_MNTLSTLOCK) 782 mtx_unlock(&mountlist_mtx); 783 mp->mnt_lockref++; 784 MNT_IUNLOCK(mp); 785 return (0); 786 } 787 788 /* 789 * Free a busy filesystem. 
790 */ 791 void 792 vfs_unbusy(struct mount *mp) 793 { 794 int c; 795 796 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 797 798 if (vfs_op_thread_enter(mp)) { 799 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 800 vfs_mp_count_sub_pcpu(mp, lockref, 1); 801 vfs_mp_count_sub_pcpu(mp, ref, 1); 802 vfs_op_thread_exit(mp); 803 return; 804 } 805 806 MNT_ILOCK(mp); 807 vfs_assert_mount_counters(mp); 808 MNT_REL(mp); 809 c = --mp->mnt_lockref; 810 if (mp->mnt_vfs_ops == 0) { 811 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 812 MNT_IUNLOCK(mp); 813 return; 814 } 815 if (c < 0) 816 vfs_dump_mount_counters(mp); 817 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 818 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 819 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 820 mp->mnt_kern_flag &= ~MNTK_DRAINING; 821 wakeup(&mp->mnt_lockref); 822 } 823 MNT_IUNLOCK(mp); 824 } 825 826 /* 827 * Lookup a mount point by filesystem identifier. 828 */ 829 struct mount * 830 vfs_getvfs(fsid_t *fsid) 831 { 832 struct mount *mp; 833 834 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 835 mtx_lock(&mountlist_mtx); 836 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 837 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 838 vfs_ref(mp); 839 mtx_unlock(&mountlist_mtx); 840 return (mp); 841 } 842 } 843 mtx_unlock(&mountlist_mtx); 844 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 845 return ((struct mount *) 0); 846 } 847 848 /* 849 * Lookup a mount point by filesystem identifier, busying it before 850 * returning. 851 * 852 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 853 * cache for popular filesystem identifiers. The cache is lockess, using 854 * the fact that struct mount's are never freed. In worst case we may 855 * get pointer to unmounted or even different filesystem, so we have to 856 * check what we got, and go slow way if so. 857 */ 858 struct mount * 859 vfs_busyfs(fsid_t *fsid) 860 { 861 #define FSID_CACHE_SIZE 256 862 typedef struct mount * volatile vmp_t; 863 static vmp_t cache[FSID_CACHE_SIZE]; 864 struct mount *mp; 865 int error; 866 uint32_t hash; 867 868 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 869 hash = fsid->val[0] ^ fsid->val[1]; 870 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 871 mp = cache[hash]; 872 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 873 goto slow; 874 if (vfs_busy(mp, 0) != 0) { 875 cache[hash] = NULL; 876 goto slow; 877 } 878 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 879 return (mp); 880 else 881 vfs_unbusy(mp); 882 883 slow: 884 mtx_lock(&mountlist_mtx); 885 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 886 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 887 error = vfs_busy(mp, MBF_MNTLSTLOCK); 888 if (error) { 889 cache[hash] = NULL; 890 mtx_unlock(&mountlist_mtx); 891 return (NULL); 892 } 893 cache[hash] = mp; 894 return (mp); 895 } 896 } 897 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 898 mtx_unlock(&mountlist_mtx); 899 return ((struct mount *) 0); 900 } 901 902 /* 903 * Check if a user can access privileged mount options. 904 */ 905 int 906 vfs_suser(struct mount *mp, struct thread *td) 907 { 908 int error; 909 910 if (jailed(td->td_ucred)) { 911 /* 912 * If the jail of the calling thread lacks permission for 913 * this type of file system, deny immediately. 914 */ 915 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 916 return (EPERM); 917 918 /* 919 * If the file system was mounted outside the jail of the 920 * calling thread, deny immediately. 
921 */ 922 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 923 return (EPERM); 924 } 925 926 /* 927 * If file system supports delegated administration, we don't check 928 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 929 * by the file system itself. 930 * If this is not the user that did original mount, we check for 931 * the PRIV_VFS_MOUNT_OWNER privilege. 932 */ 933 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 934 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 935 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 936 return (error); 937 } 938 return (0); 939 } 940 941 /* 942 * Get a new unique fsid. Try to make its val[0] unique, since this value 943 * will be used to create fake device numbers for stat(). Also try (but 944 * not so hard) make its val[0] unique mod 2^16, since some emulators only 945 * support 16-bit device numbers. We end up with unique val[0]'s for the 946 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 947 * 948 * Keep in mind that several mounts may be running in parallel. Starting 949 * the search one past where the previous search terminated is both a 950 * micro-optimization and a defense against returning the same fsid to 951 * different mounts. 952 */ 953 void 954 vfs_getnewfsid(struct mount *mp) 955 { 956 static uint16_t mntid_base; 957 struct mount *nmp; 958 fsid_t tfsid; 959 int mtype; 960 961 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 962 mtx_lock(&mntid_mtx); 963 mtype = mp->mnt_vfc->vfc_typenum; 964 tfsid.val[1] = mtype; 965 mtype = (mtype & 0xFF) << 24; 966 for (;;) { 967 tfsid.val[0] = makedev(255, 968 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 969 mntid_base++; 970 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 971 break; 972 vfs_rel(nmp); 973 } 974 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 975 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 976 mtx_unlock(&mntid_mtx); 977 } 978 979 /* 980 * Knob to control the precision of file timestamps: 981 * 982 * 0 = seconds only; nanoseconds zeroed. 983 * 1 = seconds and nanoseconds, accurate within 1/HZ. 984 * 2 = seconds and nanoseconds, truncated to microseconds. 985 * >=3 = seconds and nanoseconds, maximum precision. 986 */ 987 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 988 989 static int timestamp_precision = TSP_USEC; 990 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 991 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 992 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 993 "3+: sec + ns (max. precision))"); 994 995 /* 996 * Get a current timestamp. 
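 *
 * The precision of the result follows the vfs.timestamp_precision knob
 * declared above; e.g., TSP_HZ uses getnanotime(9) (accurate to 1/HZ)
 * while TSP_NSEC uses nanotime(9).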
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
1085 */ 1086 static int 1087 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1088 { 1089 struct vnode *vp, *mvp; 1090 struct mount *mp; 1091 struct vm_object *object; 1092 u_long done; 1093 bool retried; 1094 1095 mtx_assert(&vnode_list_mtx, MA_OWNED); 1096 1097 retried = false; 1098 done = 0; 1099 1100 mvp = vnode_list_reclaim_marker; 1101 restart: 1102 vp = mvp; 1103 while (done < target) { 1104 vp = TAILQ_NEXT(vp, v_vnodelist); 1105 if (__predict_false(vp == NULL)) 1106 break; 1107 1108 if (__predict_false(vp->v_type == VMARKER)) 1109 continue; 1110 1111 /* 1112 * If it's been deconstructed already, it's still 1113 * referenced, or it exceeds the trigger, skip it. 1114 * Also skip free vnodes. We are trying to make space 1115 * to expand the free list, not reduce it. 1116 */ 1117 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1118 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1119 goto next_iter; 1120 1121 if (vp->v_type == VBAD || vp->v_type == VNON) 1122 goto next_iter; 1123 1124 object = atomic_load_ptr(&vp->v_object); 1125 if (object == NULL || object->resident_page_count > trigger) { 1126 goto next_iter; 1127 } 1128 1129 vhold(vp); 1130 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1131 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1132 mtx_unlock(&vnode_list_mtx); 1133 1134 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1135 vdrop(vp); 1136 goto next_iter_unlocked; 1137 } 1138 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1139 vdrop(vp); 1140 vn_finished_write(mp); 1141 goto next_iter_unlocked; 1142 } 1143 1144 VI_LOCK(vp); 1145 if (vp->v_usecount > 0 || 1146 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1147 (vp->v_object != NULL && 1148 vp->v_object->resident_page_count > trigger)) { 1149 VOP_UNLOCK(vp); 1150 vdropl(vp); 1151 vn_finished_write(mp); 1152 goto next_iter_unlocked; 1153 } 1154 counter_u64_add(recycles_count, 1); 1155 vgonel(vp); 1156 VOP_UNLOCK(vp); 1157 vdropl(vp); 1158 vn_finished_write(mp); 1159 done++; 1160 next_iter_unlocked: 1161 if (should_yield()) 1162 kern_yield(PRI_USER); 1163 mtx_lock(&vnode_list_mtx); 1164 goto restart; 1165 next_iter: 1166 MPASS(vp->v_type != VMARKER); 1167 if (!should_yield()) 1168 continue; 1169 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1170 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1171 mtx_unlock(&vnode_list_mtx); 1172 kern_yield(PRI_USER); 1173 mtx_lock(&vnode_list_mtx); 1174 goto restart; 1175 } 1176 if (done == 0 && !retried) { 1177 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1178 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1179 retried = true; 1180 goto restart; 1181 } 1182 return (done); 1183 } 1184 1185 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1186 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1187 0, 1188 "limit on vnode free requests per call to the vnlru_free routine"); 1189 1190 /* 1191 * Attempt to reduce the free list by the requested amount. 
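 *
 * The caller must hold vnode_list_mtx; the request is clamped to
 * max_vnlru_free and the number of vnodes passed to vtryrecycle() is
 * returned.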
1192 */ 1193 static int 1194 vnlru_free_locked(int count, struct vfsops *mnt_op) 1195 { 1196 struct vnode *vp, *mvp; 1197 struct mount *mp; 1198 int ocount; 1199 1200 mtx_assert(&vnode_list_mtx, MA_OWNED); 1201 if (count > max_vnlru_free) 1202 count = max_vnlru_free; 1203 ocount = count; 1204 mvp = vnode_list_free_marker; 1205 restart: 1206 vp = mvp; 1207 while (count > 0) { 1208 vp = TAILQ_NEXT(vp, v_vnodelist); 1209 if (__predict_false(vp == NULL)) { 1210 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1211 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1212 break; 1213 } 1214 if (__predict_false(vp->v_type == VMARKER)) 1215 continue; 1216 1217 /* 1218 * Don't recycle if our vnode is from different type 1219 * of mount point. Note that mp is type-safe, the 1220 * check does not reach unmapped address even if 1221 * vnode is reclaimed. 1222 * Don't recycle if we can't get the interlock without 1223 * blocking. 1224 */ 1225 if (vp->v_holdcnt > 0 || (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1226 mp->mnt_op != mnt_op)) { 1227 continue; 1228 } 1229 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1230 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1231 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1232 continue; 1233 } 1234 vhold(vp); 1235 count--; 1236 mtx_unlock(&vnode_list_mtx); 1237 vtryrecycle(vp); 1238 mtx_lock(&vnode_list_mtx); 1239 goto restart; 1240 } 1241 return (ocount - count); 1242 } 1243 1244 void 1245 vnlru_free(int count, struct vfsops *mnt_op) 1246 { 1247 1248 mtx_lock(&vnode_list_mtx); 1249 vnlru_free_locked(count, mnt_op); 1250 mtx_unlock(&vnode_list_mtx); 1251 } 1252 1253 static void 1254 vnlru_recalc(void) 1255 { 1256 1257 mtx_assert(&vnode_list_mtx, MA_OWNED); 1258 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1259 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1260 vlowat = vhiwat / 2; 1261 } 1262 1263 /* 1264 * Attempt to recycle vnodes in a context that is always safe to block. 1265 * Calling vlrurecycle() from the bowels of filesystem code has some 1266 * interesting deadlock problems. 1267 */ 1268 static struct proc *vnlruproc; 1269 static int vnlruproc_sig; 1270 1271 /* 1272 * The main freevnodes counter is only updated when threads requeue their vnode 1273 * batches. CPUs are conditionally walked to compute a more accurate total. 1274 * 1275 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1276 * at any given moment can still exceed slop, but it should not be by significant 1277 * margin in practice. 1278 */ 1279 #define VNLRU_FREEVNODES_SLOP 128 1280 1281 static __inline void 1282 vn_freevnodes_inc(void) 1283 { 1284 struct vdbatch *vd; 1285 1286 critical_enter(); 1287 vd = DPCPU_PTR(vd); 1288 vd->freevnodes++; 1289 critical_exit(); 1290 } 1291 1292 static __inline void 1293 vn_freevnodes_dec(void) 1294 { 1295 struct vdbatch *vd; 1296 1297 critical_enter(); 1298 vd = DPCPU_PTR(vd); 1299 vd->freevnodes--; 1300 critical_exit(); 1301 } 1302 1303 static u_long 1304 vnlru_read_freevnodes(void) 1305 { 1306 struct vdbatch *vd; 1307 long slop; 1308 int cpu; 1309 1310 mtx_assert(&vnode_list_mtx, MA_OWNED); 1311 if (freevnodes > freevnodes_old) 1312 slop = freevnodes - freevnodes_old; 1313 else 1314 slop = freevnodes_old - freevnodes; 1315 if (slop < VNLRU_FREEVNODES_SLOP) 1316 return (freevnodes >= 0 ? 
freevnodes : 0); 1317 freevnodes_old = freevnodes; 1318 CPU_FOREACH(cpu) { 1319 vd = DPCPU_ID_PTR((cpu), vd); 1320 freevnodes_old += vd->freevnodes; 1321 } 1322 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1323 } 1324 1325 static bool 1326 vnlru_under(u_long rnumvnodes, u_long limit) 1327 { 1328 u_long rfreevnodes, space; 1329 1330 if (__predict_false(rnumvnodes > desiredvnodes)) 1331 return (true); 1332 1333 space = desiredvnodes - rnumvnodes; 1334 if (space < limit) { 1335 rfreevnodes = vnlru_read_freevnodes(); 1336 if (rfreevnodes > wantfreevnodes) 1337 space += rfreevnodes - wantfreevnodes; 1338 } 1339 return (space < limit); 1340 } 1341 1342 static bool 1343 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1344 { 1345 long rfreevnodes, space; 1346 1347 if (__predict_false(rnumvnodes > desiredvnodes)) 1348 return (true); 1349 1350 space = desiredvnodes - rnumvnodes; 1351 if (space < limit) { 1352 rfreevnodes = atomic_load_long(&freevnodes); 1353 if (rfreevnodes > wantfreevnodes) 1354 space += rfreevnodes - wantfreevnodes; 1355 } 1356 return (space < limit); 1357 } 1358 1359 static void 1360 vnlru_kick(void) 1361 { 1362 1363 mtx_assert(&vnode_list_mtx, MA_OWNED); 1364 if (vnlruproc_sig == 0) { 1365 vnlruproc_sig = 1; 1366 wakeup(vnlruproc); 1367 } 1368 } 1369 1370 static void 1371 vnlru_proc(void) 1372 { 1373 u_long rnumvnodes, rfreevnodes, target; 1374 unsigned long onumvnodes; 1375 int done, force, trigger, usevnodes; 1376 bool reclaim_nc_src, want_reread; 1377 1378 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1379 SHUTDOWN_PRI_FIRST); 1380 1381 force = 0; 1382 want_reread = false; 1383 for (;;) { 1384 kproc_suspend_check(vnlruproc); 1385 mtx_lock(&vnode_list_mtx); 1386 rnumvnodes = atomic_load_long(&numvnodes); 1387 1388 if (want_reread) { 1389 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1390 want_reread = false; 1391 } 1392 1393 /* 1394 * If numvnodes is too large (due to desiredvnodes being 1395 * adjusted using its sysctl, or emergency growth), first 1396 * try to reduce it by discarding from the free list. 1397 */ 1398 if (rnumvnodes > desiredvnodes) { 1399 vnlru_free_locked(rnumvnodes - desiredvnodes, NULL); 1400 rnumvnodes = atomic_load_long(&numvnodes); 1401 } 1402 /* 1403 * Sleep if the vnode cache is in a good state. This is 1404 * when it is not over-full and has space for about a 4% 1405 * or 9% expansion (by growing its size or inexcessively 1406 * reducing its free list). Otherwise, try to reclaim 1407 * space for a 10% expansion. 1408 */ 1409 if (vstir && force == 0) { 1410 force = 1; 1411 vstir = 0; 1412 } 1413 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1414 vnlruproc_sig = 0; 1415 wakeup(&vnlruproc_sig); 1416 msleep(vnlruproc, &vnode_list_mtx, 1417 PVFS|PDROP, "vlruwt", hz); 1418 continue; 1419 } 1420 rfreevnodes = vnlru_read_freevnodes(); 1421 1422 onumvnodes = rnumvnodes; 1423 /* 1424 * Calculate parameters for recycling. These are the same 1425 * throughout the loop to give some semblance of fairness. 1426 * The trigger point is to avoid recycling vnodes with lots 1427 * of resident pages. We aren't trying to free memory; we 1428 * are trying to recycle or at least free vnodes. 1429 */ 1430 if (rnumvnodes <= desiredvnodes) 1431 usevnodes = rnumvnodes - rfreevnodes; 1432 else 1433 usevnodes = rnumvnodes; 1434 if (usevnodes <= 0) 1435 usevnodes = 1; 1436 /* 1437 * The trigger value is is chosen to give a conservatively 1438 * large value to ensure that it alone doesn't prevent 1439 * making progress. 
The value can easily be so large that 1440 * it is effectively infinite in some congested and 1441 * misconfigured cases, and this is necessary. Normally 1442 * it is about 8 to 100 (pages), which is quite large. 1443 */ 1444 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1445 if (force < 2) 1446 trigger = vsmalltrigger; 1447 reclaim_nc_src = force >= 3; 1448 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1449 target = target / 10 + 1; 1450 done = vlrureclaim(reclaim_nc_src, trigger, target); 1451 mtx_unlock(&vnode_list_mtx); 1452 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1453 uma_reclaim(UMA_RECLAIM_DRAIN); 1454 if (done == 0) { 1455 if (force == 0 || force == 1) { 1456 force = 2; 1457 continue; 1458 } 1459 if (force == 2) { 1460 force = 3; 1461 continue; 1462 } 1463 want_reread = true; 1464 force = 0; 1465 vnlru_nowhere++; 1466 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1467 } else { 1468 want_reread = true; 1469 kern_yield(PRI_USER); 1470 } 1471 } 1472 } 1473 1474 static struct kproc_desc vnlru_kp = { 1475 "vnlru", 1476 vnlru_proc, 1477 &vnlruproc 1478 }; 1479 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1480 &vnlru_kp); 1481 1482 /* 1483 * Routines having to do with the management of the vnode table. 1484 */ 1485 1486 /* 1487 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1488 * before we actually vgone(). This function must be called with the vnode 1489 * held to prevent the vnode from being returned to the free list midway 1490 * through vgone(). 1491 */ 1492 static int 1493 vtryrecycle(struct vnode *vp) 1494 { 1495 struct mount *vnmp; 1496 1497 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1498 VNASSERT(vp->v_holdcnt, vp, 1499 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1500 /* 1501 * This vnode may found and locked via some other list, if so we 1502 * can't recycle it yet. 1503 */ 1504 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1505 CTR2(KTR_VFS, 1506 "%s: impossible to recycle, vp %p lock is already held", 1507 __func__, vp); 1508 vdrop(vp); 1509 return (EWOULDBLOCK); 1510 } 1511 /* 1512 * Don't recycle if its filesystem is being suspended. 1513 */ 1514 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1515 VOP_UNLOCK(vp); 1516 CTR2(KTR_VFS, 1517 "%s: impossible to recycle, cannot start the write for %p", 1518 __func__, vp); 1519 vdrop(vp); 1520 return (EBUSY); 1521 } 1522 /* 1523 * If we got this far, we need to acquire the interlock and see if 1524 * anyone picked up this vnode from another list. If not, we will 1525 * mark it with DOOMED via vgonel() so that anyone who does find it 1526 * will skip over it. 1527 */ 1528 VI_LOCK(vp); 1529 if (vp->v_usecount) { 1530 VOP_UNLOCK(vp); 1531 vdropl(vp); 1532 vn_finished_write(vnmp); 1533 CTR2(KTR_VFS, 1534 "%s: impossible to recycle, %p is already referenced", 1535 __func__, vp); 1536 return (EBUSY); 1537 } 1538 if (!VN_IS_DOOMED(vp)) { 1539 counter_u64_add(recycles_free_count, 1); 1540 vgonel(vp); 1541 } 1542 VOP_UNLOCK(vp); 1543 vdropl(vp); 1544 vn_finished_write(vnmp); 1545 return (0); 1546 } 1547 1548 /* 1549 * Allocate a new vnode. 1550 * 1551 * The operation never returns an error. Returning an error was disabled 1552 * in r145385 (dated 2005) with the following comment: 1553 * 1554 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1555 * 1556 * Given the age of this commit (almost 15 years at the time of writing this 1557 * comment) restoring the ability to fail requires a significant audit of 1558 * all codepaths. 
1559 * 1560 * The routine can try to free a vnode or stall for up to 1 second waiting for 1561 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1562 */ 1563 static u_long vn_alloc_cyclecount; 1564 1565 static struct vnode * __noinline 1566 vn_alloc_hard(struct mount *mp) 1567 { 1568 u_long rnumvnodes, rfreevnodes; 1569 1570 mtx_lock(&vnode_list_mtx); 1571 rnumvnodes = atomic_load_long(&numvnodes); 1572 if (rnumvnodes + 1 < desiredvnodes) { 1573 vn_alloc_cyclecount = 0; 1574 goto alloc; 1575 } 1576 rfreevnodes = vnlru_read_freevnodes(); 1577 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1578 vn_alloc_cyclecount = 0; 1579 vstir = 1; 1580 } 1581 /* 1582 * Grow the vnode cache if it will not be above its target max 1583 * after growing. Otherwise, if the free list is nonempty, try 1584 * to reclaim 1 item from it before growing the cache (possibly 1585 * above its target max if the reclamation failed or is delayed). 1586 * Otherwise, wait for some space. In all cases, schedule 1587 * vnlru_proc() if we are getting short of space. The watermarks 1588 * should be chosen so that we never wait or even reclaim from 1589 * the free list to below its target minimum. 1590 */ 1591 if (vnlru_free_locked(1, NULL) > 0) 1592 goto alloc; 1593 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1594 /* 1595 * Wait for space for a new vnode. 1596 */ 1597 vnlru_kick(); 1598 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1599 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1600 vnlru_read_freevnodes() > 1) 1601 vnlru_free_locked(1, NULL); 1602 } 1603 alloc: 1604 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1605 if (vnlru_under(rnumvnodes, vlowat)) 1606 vnlru_kick(); 1607 mtx_unlock(&vnode_list_mtx); 1608 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1609 } 1610 1611 static struct vnode * 1612 vn_alloc(struct mount *mp) 1613 { 1614 u_long rnumvnodes; 1615 1616 if (__predict_false(vn_alloc_cyclecount != 0)) 1617 return (vn_alloc_hard(mp)); 1618 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1619 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1620 atomic_subtract_long(&numvnodes, 1); 1621 return (vn_alloc_hard(mp)); 1622 } 1623 1624 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1625 } 1626 1627 static void 1628 vn_free(struct vnode *vp) 1629 { 1630 1631 atomic_subtract_long(&numvnodes, 1); 1632 uma_zfree_smr(vnode_zone, vp); 1633 } 1634 1635 /* 1636 * Return the next vnode from the free list. 1637 */ 1638 int 1639 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1640 struct vnode **vpp) 1641 { 1642 struct vnode *vp; 1643 struct thread *td; 1644 struct lock_object *lo; 1645 1646 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1647 1648 KASSERT(vops->registered, 1649 ("%s: not registered vector op %p\n", __func__, vops)); 1650 1651 td = curthread; 1652 if (td->td_vp_reserved != NULL) { 1653 vp = td->td_vp_reserved; 1654 td->td_vp_reserved = NULL; 1655 } else { 1656 vp = vn_alloc(mp); 1657 } 1658 counter_u64_add(vnodes_created, 1); 1659 /* 1660 * Locks are given the generic name "vnode" when created. 1661 * Follow the historic practice of using the filesystem 1662 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1663 * 1664 * Locks live in a witness group keyed on their name. Thus, 1665 * when a lock is renamed, it must also move from the witness 1666 * group of its old name to the witness group of its new name. 
1667 * 1668 * The change only needs to be made when the vnode moves 1669 * from one filesystem type to another. We ensure that each 1670 * filesystem use a single static name pointer for its tag so 1671 * that we can compare pointers rather than doing a strcmp(). 1672 */ 1673 lo = &vp->v_vnlock->lock_object; 1674 #ifdef WITNESS 1675 if (lo->lo_name != tag) { 1676 #endif 1677 lo->lo_name = tag; 1678 #ifdef WITNESS 1679 WITNESS_DESTROY(lo); 1680 WITNESS_INIT(lo, tag); 1681 } 1682 #endif 1683 /* 1684 * By default, don't allow shared locks unless filesystems opt-in. 1685 */ 1686 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1687 /* 1688 * Finalize various vnode identity bits. 1689 */ 1690 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1691 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1692 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1693 vp->v_type = VNON; 1694 vp->v_op = vops; 1695 v_init_counters(vp); 1696 vp->v_bufobj.bo_ops = &buf_ops_bio; 1697 #ifdef DIAGNOSTIC 1698 if (mp == NULL && vops != &dead_vnodeops) 1699 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1700 #endif 1701 #ifdef MAC 1702 mac_vnode_init(vp); 1703 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1704 mac_vnode_associate_singlelabel(mp, vp); 1705 #endif 1706 if (mp != NULL) { 1707 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1708 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1709 vp->v_vflag |= VV_NOKNOTE; 1710 } 1711 1712 /* 1713 * For the filesystems which do not use vfs_hash_insert(), 1714 * still initialize v_hash to have vfs_hash_index() useful. 1715 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1716 * its own hashing. 1717 */ 1718 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1719 1720 *vpp = vp; 1721 return (0); 1722 } 1723 1724 void 1725 getnewvnode_reserve(void) 1726 { 1727 struct thread *td; 1728 1729 td = curthread; 1730 MPASS(td->td_vp_reserved == NULL); 1731 td->td_vp_reserved = vn_alloc(NULL); 1732 } 1733 1734 void 1735 getnewvnode_drop_reserve(void) 1736 { 1737 struct thread *td; 1738 1739 td = curthread; 1740 if (td->td_vp_reserved != NULL) { 1741 vn_free(td->td_vp_reserved); 1742 td->td_vp_reserved = NULL; 1743 } 1744 } 1745 1746 static void __noinline 1747 freevnode(struct vnode *vp) 1748 { 1749 struct bufobj *bo; 1750 1751 /* 1752 * The vnode has been marked for destruction, so free it. 1753 * 1754 * The vnode will be returned to the zone where it will 1755 * normally remain until it is needed for another vnode. We 1756 * need to cleanup (or verify that the cleanup has already 1757 * been done) any residual data left from its current use 1758 * so as not to contaminate the freshly allocated vnode. 1759 */ 1760 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1761 /* 1762 * Paired with vgone. 
1763 */ 1764 vn_seqc_write_end_locked(vp); 1765 VNPASS(vp->v_seqc_users == 0, vp); 1766 1767 bo = &vp->v_bufobj; 1768 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1769 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1770 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1771 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1772 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1773 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1774 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1775 ("clean blk trie not empty")); 1776 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1777 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1778 ("dirty blk trie not empty")); 1779 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1780 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1781 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1782 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1783 ("Dangling rangelock waiters")); 1784 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1785 ("Leaked inactivation")); 1786 VI_UNLOCK(vp); 1787 #ifdef MAC 1788 mac_vnode_destroy(vp); 1789 #endif 1790 if (vp->v_pollinfo != NULL) { 1791 destroy_vpollinfo(vp->v_pollinfo); 1792 vp->v_pollinfo = NULL; 1793 } 1794 #ifdef INVARIANTS 1795 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 1796 vp->v_op = NULL; 1797 #endif 1798 vp->v_mountedhere = NULL; 1799 vp->v_unpcb = NULL; 1800 vp->v_rdev = NULL; 1801 vp->v_fifoinfo = NULL; 1802 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 1803 vp->v_irflag = 0; 1804 vp->v_iflag = 0; 1805 vp->v_vflag = 0; 1806 bo->bo_flag = 0; 1807 vn_free(vp); 1808 } 1809 1810 /* 1811 * Delete from old mount point vnode list, if on one. 1812 */ 1813 static void 1814 delmntque(struct vnode *vp) 1815 { 1816 struct mount *mp; 1817 1818 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1819 1820 mp = vp->v_mount; 1821 if (mp == NULL) 1822 return; 1823 MNT_ILOCK(mp); 1824 VI_LOCK(vp); 1825 vp->v_mount = NULL; 1826 VI_UNLOCK(vp); 1827 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1828 ("bad mount point vnode list size")); 1829 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1830 mp->mnt_nvnodelistsize--; 1831 MNT_REL(mp); 1832 MNT_IUNLOCK(mp); 1833 } 1834 1835 static void 1836 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1837 { 1838 1839 vp->v_data = NULL; 1840 vp->v_op = &dead_vnodeops; 1841 vgone(vp); 1842 vput(vp); 1843 } 1844 1845 /* 1846 * Insert into list of vnodes for the new mount point, if available. 1847 */ 1848 int 1849 insmntque1(struct vnode *vp, struct mount *mp, 1850 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1851 { 1852 1853 KASSERT(vp->v_mount == NULL, 1854 ("insmntque: vnode already on per mount vnode list")); 1855 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1856 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1857 1858 /* 1859 * We acquire the vnode interlock early to ensure that the 1860 * vnode cannot be recycled by another process releasing a 1861 * holdcnt on it before we get it on both the vnode list 1862 * and the active vnode list. The mount mutex protects only 1863 * manipulation of the vnode list and the vnode freelist 1864 * mutex protects only manipulation of the active vnode list. 1865 * Hence the need to hold the vnode interlock throughout. 
1866 */ 1867 MNT_ILOCK(mp); 1868 VI_LOCK(vp); 1869 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1870 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1871 mp->mnt_nvnodelistsize == 0)) && 1872 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1873 VI_UNLOCK(vp); 1874 MNT_IUNLOCK(mp); 1875 if (dtr != NULL) 1876 dtr(vp, dtr_arg); 1877 return (EBUSY); 1878 } 1879 vp->v_mount = mp; 1880 MNT_REF(mp); 1881 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1882 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1883 ("neg mount point vnode list size")); 1884 mp->mnt_nvnodelistsize++; 1885 VI_UNLOCK(vp); 1886 MNT_IUNLOCK(mp); 1887 return (0); 1888 } 1889 1890 int 1891 insmntque(struct vnode *vp, struct mount *mp) 1892 { 1893 1894 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1895 } 1896 1897 /* 1898 * Flush out and invalidate all buffers associated with a bufobj 1899 * Called with the underlying object locked. 1900 */ 1901 int 1902 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1903 { 1904 int error; 1905 1906 BO_LOCK(bo); 1907 if (flags & V_SAVE) { 1908 error = bufobj_wwait(bo, slpflag, slptimeo); 1909 if (error) { 1910 BO_UNLOCK(bo); 1911 return (error); 1912 } 1913 if (bo->bo_dirty.bv_cnt > 0) { 1914 BO_UNLOCK(bo); 1915 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1916 return (error); 1917 /* 1918 * XXX We could save a lock/unlock if this was only 1919 * enabled under INVARIANTS 1920 */ 1921 BO_LOCK(bo); 1922 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1923 panic("vinvalbuf: dirty bufs"); 1924 } 1925 } 1926 /* 1927 * If you alter this loop please notice that interlock is dropped and 1928 * reacquired in flushbuflist. Special care is needed to ensure that 1929 * no race conditions occur from this. 1930 */ 1931 do { 1932 error = flushbuflist(&bo->bo_clean, 1933 flags, bo, slpflag, slptimeo); 1934 if (error == 0 && !(flags & V_CLEANONLY)) 1935 error = flushbuflist(&bo->bo_dirty, 1936 flags, bo, slpflag, slptimeo); 1937 if (error != 0 && error != EAGAIN) { 1938 BO_UNLOCK(bo); 1939 return (error); 1940 } 1941 } while (error != 0); 1942 1943 /* 1944 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1945 * have write I/O in-progress but if there is a VM object then the 1946 * VM object can also have read-I/O in-progress. 1947 */ 1948 do { 1949 bufobj_wwait(bo, 0, 0); 1950 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1951 BO_UNLOCK(bo); 1952 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1953 BO_LOCK(bo); 1954 } 1955 } while (bo->bo_numoutput > 0); 1956 BO_UNLOCK(bo); 1957 1958 /* 1959 * Destroy the copy in the VM cache, too. 1960 */ 1961 if (bo->bo_object != NULL && 1962 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1963 VM_OBJECT_WLOCK(bo->bo_object); 1964 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1965 OBJPR_CLEANONLY : 0); 1966 VM_OBJECT_WUNLOCK(bo->bo_object); 1967 } 1968 1969 #ifdef INVARIANTS 1970 BO_LOCK(bo); 1971 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1972 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1973 bo->bo_clean.bv_cnt > 0)) 1974 panic("vinvalbuf: flush failed"); 1975 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1976 bo->bo_dirty.bv_cnt > 0) 1977 panic("vinvalbuf: flush dirty failed"); 1978 BO_UNLOCK(bo); 1979 #endif 1980 return (0); 1981 } 1982 1983 /* 1984 * Flush out and invalidate all buffers associated with a vnode. 1985 * Called with the underlying object locked. 
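 *
 * As an illustration of the flags (derived from the code above, not a new
 * rule): callers that must not lose data pass V_SAVE so that dirty buffers
 * are written out by bufobj_invalbuf() before being invalidated, while
 * vgonel() below falls back to flags == 0 and simply discards the buffers
 * when the V_SAVE pass fails.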
1986 */ 1987 int 1988 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1989 { 1990 1991 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1992 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1993 if (vp->v_object != NULL && vp->v_object->handle != vp) 1994 return (0); 1995 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1996 } 1997 1998 /* 1999 * Flush out buffers on the specified list. 2000 * 2001 */ 2002 static int 2003 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2004 int slptimeo) 2005 { 2006 struct buf *bp, *nbp; 2007 int retval, error; 2008 daddr_t lblkno; 2009 b_xflags_t xflags; 2010 2011 ASSERT_BO_WLOCKED(bo); 2012 2013 retval = 0; 2014 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2015 /* 2016 * If we are flushing both V_NORMAL and V_ALT buffers then 2017 * do not skip any buffers. If we are flushing only V_NORMAL 2018 * buffers then skip buffers marked as BX_ALTDATA. If we are 2019 * flushing only V_ALT buffers then skip buffers not marked 2020 * as BX_ALTDATA. 2021 */ 2022 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2023 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2024 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2025 continue; 2026 } 2027 if (nbp != NULL) { 2028 lblkno = nbp->b_lblkno; 2029 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2030 } 2031 retval = EAGAIN; 2032 error = BUF_TIMELOCK(bp, 2033 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2034 "flushbuf", slpflag, slptimeo); 2035 if (error) { 2036 BO_LOCK(bo); 2037 return (error != ENOLCK ? error : EAGAIN); 2038 } 2039 KASSERT(bp->b_bufobj == bo, 2040 ("bp %p wrong b_bufobj %p should be %p", 2041 bp, bp->b_bufobj, bo)); 2042 /* 2043 * XXX Since there are no node locks for NFS, I 2044 * believe there is a slight chance that a delayed 2045 * write will occur while sleeping just above, so 2046 * check for it. 2047 */ 2048 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2049 (flags & V_SAVE)) { 2050 bremfree(bp); 2051 bp->b_flags |= B_ASYNC; 2052 bwrite(bp); 2053 BO_LOCK(bo); 2054 return (EAGAIN); /* XXX: why not loop ? */ 2055 } 2056 bremfree(bp); 2057 bp->b_flags |= (B_INVAL | B_RELBUF); 2058 bp->b_flags &= ~B_ASYNC; 2059 brelse(bp); 2060 BO_LOCK(bo); 2061 if (nbp == NULL) 2062 break; 2063 nbp = gbincore(bo, lblkno); 2064 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2065 != xflags) 2066 break; /* nbp invalid */ 2067 } 2068 return (retval); 2069 } 2070 2071 int 2072 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2073 { 2074 struct buf *bp; 2075 int error; 2076 daddr_t lblkno; 2077 2078 ASSERT_BO_LOCKED(bo); 2079 2080 for (lblkno = startn;;) { 2081 again: 2082 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2083 if (bp == NULL || bp->b_lblkno >= endn || 2084 bp->b_lblkno < startn) 2085 break; 2086 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2087 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2088 if (error != 0) { 2089 BO_RLOCK(bo); 2090 if (error == ENOLCK) 2091 goto again; 2092 return (error); 2093 } 2094 KASSERT(bp->b_bufobj == bo, 2095 ("bp %p wrong b_bufobj %p should be %p", 2096 bp, bp->b_bufobj, bo)); 2097 lblkno = bp->b_lblkno + 1; 2098 if ((bp->b_flags & B_MANAGED) == 0) 2099 bremfree(bp); 2100 bp->b_flags |= B_RELBUF; 2101 /* 2102 * In the VMIO case, use the B_NOREUSE flag to hint that the 2103 * pages backing each buffer in the range are unlikely to be 2104 * reused. 
Dirty buffers will have the hint applied once 2105 * they've been written. 2106 */ 2107 if ((bp->b_flags & B_VMIO) != 0) 2108 bp->b_flags |= B_NOREUSE; 2109 brelse(bp); 2110 BO_RLOCK(bo); 2111 } 2112 return (0); 2113 } 2114 2115 /* 2116 * Truncate a file's buffer and pages to a specified length. This 2117 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2118 * sync activity. 2119 */ 2120 int 2121 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2122 { 2123 struct buf *bp, *nbp; 2124 struct bufobj *bo; 2125 daddr_t startlbn; 2126 2127 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2128 vp, blksize, (uintmax_t)length); 2129 2130 /* 2131 * Round up to the *next* lbn. 2132 */ 2133 startlbn = howmany(length, blksize); 2134 2135 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2136 2137 bo = &vp->v_bufobj; 2138 restart_unlocked: 2139 BO_LOCK(bo); 2140 2141 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2142 ; 2143 2144 if (length > 0) { 2145 restartsync: 2146 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2147 if (bp->b_lblkno > 0) 2148 continue; 2149 /* 2150 * Since we hold the vnode lock this should only 2151 * fail if we're racing with the buf daemon. 2152 */ 2153 if (BUF_LOCK(bp, 2154 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2155 BO_LOCKPTR(bo)) == ENOLCK) 2156 goto restart_unlocked; 2157 2158 VNASSERT((bp->b_flags & B_DELWRI), vp, 2159 ("buf(%p) on dirty queue without DELWRI", bp)); 2160 2161 bremfree(bp); 2162 bawrite(bp); 2163 BO_LOCK(bo); 2164 goto restartsync; 2165 } 2166 } 2167 2168 bufobj_wwait(bo, 0, 0); 2169 BO_UNLOCK(bo); 2170 vnode_pager_setsize(vp, length); 2171 2172 return (0); 2173 } 2174 2175 /* 2176 * Invalidate the cached pages of a file's buffer within the range of block 2177 * numbers [startlbn, endlbn). 
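 *
 * The blksize argument must match bo_bsize (asserted below); it is only
 * used to turn the lbn range into a byte range for vn_pages_remove().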
2178 */ 2179 void 2180 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2181 int blksize) 2182 { 2183 struct bufobj *bo; 2184 off_t start, end; 2185 2186 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2187 2188 start = blksize * startlbn; 2189 end = blksize * endlbn; 2190 2191 bo = &vp->v_bufobj; 2192 BO_LOCK(bo); 2193 MPASS(blksize == bo->bo_bsize); 2194 2195 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2196 ; 2197 2198 BO_UNLOCK(bo); 2199 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2200 } 2201 2202 static int 2203 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2204 daddr_t startlbn, daddr_t endlbn) 2205 { 2206 struct buf *bp, *nbp; 2207 bool anyfreed; 2208 2209 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2210 ASSERT_BO_LOCKED(bo); 2211 2212 do { 2213 anyfreed = false; 2214 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2215 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2216 continue; 2217 if (BUF_LOCK(bp, 2218 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2219 BO_LOCKPTR(bo)) == ENOLCK) { 2220 BO_LOCK(bo); 2221 return (EAGAIN); 2222 } 2223 2224 bremfree(bp); 2225 bp->b_flags |= B_INVAL | B_RELBUF; 2226 bp->b_flags &= ~B_ASYNC; 2227 brelse(bp); 2228 anyfreed = true; 2229 2230 BO_LOCK(bo); 2231 if (nbp != NULL && 2232 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2233 nbp->b_vp != vp || 2234 (nbp->b_flags & B_DELWRI) != 0)) 2235 return (EAGAIN); 2236 } 2237 2238 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2239 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2240 continue; 2241 if (BUF_LOCK(bp, 2242 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2243 BO_LOCKPTR(bo)) == ENOLCK) { 2244 BO_LOCK(bo); 2245 return (EAGAIN); 2246 } 2247 bremfree(bp); 2248 bp->b_flags |= B_INVAL | B_RELBUF; 2249 bp->b_flags &= ~B_ASYNC; 2250 brelse(bp); 2251 anyfreed = true; 2252 2253 BO_LOCK(bo); 2254 if (nbp != NULL && 2255 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2256 (nbp->b_vp != vp) || 2257 (nbp->b_flags & B_DELWRI) == 0)) 2258 return (EAGAIN); 2259 } 2260 } while (anyfreed); 2261 return (0); 2262 } 2263 2264 static void 2265 buf_vlist_remove(struct buf *bp) 2266 { 2267 struct bufv *bv; 2268 b_xflags_t flags; 2269 2270 flags = bp->b_xflags; 2271 2272 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2273 ASSERT_BO_WLOCKED(bp->b_bufobj); 2274 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2275 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2276 ("%s: buffer %p has invalid queue state", __func__, bp)); 2277 2278 if ((flags & BX_VNDIRTY) != 0) 2279 bv = &bp->b_bufobj->bo_dirty; 2280 else 2281 bv = &bp->b_bufobj->bo_clean; 2282 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2283 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2284 bv->bv_cnt--; 2285 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2286 } 2287 2288 /* 2289 * Add the buffer to the sorted clean or dirty block list. 2290 * 2291 * NOTE: xflags is passed as a constant, optimizing this inline function! 
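 *
 * The callers in this file are bgetvp() and reassignbuf() below, each
 * passing the constant BX_VNCLEAN or BX_VNDIRTY; the buffer must not
 * already be on a clean or dirty queue (asserted below).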
2292 */ 2293 static void 2294 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2295 { 2296 struct bufv *bv; 2297 struct buf *n; 2298 int error; 2299 2300 ASSERT_BO_WLOCKED(bo); 2301 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2302 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2303 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2304 ("dead bo %p", bo)); 2305 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2306 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2307 bp->b_xflags |= xflags; 2308 if (xflags & BX_VNDIRTY) 2309 bv = &bo->bo_dirty; 2310 else 2311 bv = &bo->bo_clean; 2312 2313 /* 2314 * Keep the list ordered. Optimize empty list insertion. Assume 2315 * we tend to grow at the tail so lookup_le should usually be cheaper 2316 * than _ge. 2317 */ 2318 if (bv->bv_cnt == 0 || 2319 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2320 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2321 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2322 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2323 else 2324 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2325 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2326 if (error) 2327 panic("buf_vlist_add: Preallocated nodes insufficient."); 2328 bv->bv_cnt++; 2329 } 2330 2331 /* 2332 * Look up a buffer using the buffer tries. 2333 */ 2334 struct buf * 2335 gbincore(struct bufobj *bo, daddr_t lblkno) 2336 { 2337 struct buf *bp; 2338 2339 ASSERT_BO_LOCKED(bo); 2340 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2341 if (bp != NULL) 2342 return (bp); 2343 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2344 } 2345 2346 /* 2347 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2348 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2349 * stability of the result. Like other lockless lookups, the found buf may 2350 * already be invalid by the time this function returns. 2351 */ 2352 struct buf * 2353 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2354 { 2355 struct buf *bp; 2356 2357 ASSERT_BO_UNLOCKED(bo); 2358 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2359 if (bp != NULL) 2360 return (bp); 2361 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2362 } 2363 2364 /* 2365 * Associate a buffer with a vnode. 2366 */ 2367 void 2368 bgetvp(struct vnode *vp, struct buf *bp) 2369 { 2370 struct bufobj *bo; 2371 2372 bo = &vp->v_bufobj; 2373 ASSERT_BO_WLOCKED(bo); 2374 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2375 2376 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2377 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2378 ("bgetvp: bp already attached! %p", bp)); 2379 2380 vhold(vp); 2381 bp->b_vp = vp; 2382 bp->b_bufobj = bo; 2383 /* 2384 * Insert onto list for new vnode. 2385 */ 2386 buf_vlist_add(bp, bo, BX_VNCLEAN); 2387 } 2388 2389 /* 2390 * Disassociate a buffer from a vnode. 2391 */ 2392 void 2393 brelvp(struct buf *bp) 2394 { 2395 struct bufobj *bo; 2396 struct vnode *vp; 2397 2398 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2399 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2400 2401 /* 2402 * Delete from old vnode list, if on one. 
2403 */ 2404 vp = bp->b_vp; /* XXX */ 2405 bo = bp->b_bufobj; 2406 BO_LOCK(bo); 2407 buf_vlist_remove(bp); 2408 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2409 bo->bo_flag &= ~BO_ONWORKLST; 2410 mtx_lock(&sync_mtx); 2411 LIST_REMOVE(bo, bo_synclist); 2412 syncer_worklist_len--; 2413 mtx_unlock(&sync_mtx); 2414 } 2415 bp->b_vp = NULL; 2416 bp->b_bufobj = NULL; 2417 BO_UNLOCK(bo); 2418 vdrop(vp); 2419 } 2420 2421 /* 2422 * Add an item to the syncer work queue. 2423 */ 2424 static void 2425 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2426 { 2427 int slot; 2428 2429 ASSERT_BO_WLOCKED(bo); 2430 2431 mtx_lock(&sync_mtx); 2432 if (bo->bo_flag & BO_ONWORKLST) 2433 LIST_REMOVE(bo, bo_synclist); 2434 else { 2435 bo->bo_flag |= BO_ONWORKLST; 2436 syncer_worklist_len++; 2437 } 2438 2439 if (delay > syncer_maxdelay - 2) 2440 delay = syncer_maxdelay - 2; 2441 slot = (syncer_delayno + delay) & syncer_mask; 2442 2443 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2444 mtx_unlock(&sync_mtx); 2445 } 2446 2447 static int 2448 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2449 { 2450 int error, len; 2451 2452 mtx_lock(&sync_mtx); 2453 len = syncer_worklist_len - sync_vnode_count; 2454 mtx_unlock(&sync_mtx); 2455 error = SYSCTL_OUT(req, &len, sizeof(len)); 2456 return (error); 2457 } 2458 2459 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2460 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2461 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2462 2463 static struct proc *updateproc; 2464 static void sched_sync(void); 2465 static struct kproc_desc up_kp = { 2466 "syncer", 2467 sched_sync, 2468 &updateproc 2469 }; 2470 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2471 2472 static int 2473 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2474 { 2475 struct vnode *vp; 2476 struct mount *mp; 2477 2478 *bo = LIST_FIRST(slp); 2479 if (*bo == NULL) 2480 return (0); 2481 vp = bo2vnode(*bo); 2482 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2483 return (1); 2484 /* 2485 * We use vhold in case the vnode does not 2486 * successfully sync. vhold prevents the vnode from 2487 * going away when we unlock the sync_mtx so that 2488 * we can acquire the vnode interlock. 2489 */ 2490 vholdl(vp); 2491 mtx_unlock(&sync_mtx); 2492 VI_UNLOCK(vp); 2493 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2494 vdrop(vp); 2495 mtx_lock(&sync_mtx); 2496 return (*bo == LIST_FIRST(slp)); 2497 } 2498 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2499 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2500 VOP_UNLOCK(vp); 2501 vn_finished_write(mp); 2502 BO_LOCK(*bo); 2503 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2504 /* 2505 * Put us back on the worklist. The worklist 2506 * routine will remove us from our current 2507 * position and then add us back in at a later 2508 * position. 2509 */ 2510 vn_syncer_add_to_worklist(*bo, syncdelay); 2511 } 2512 BO_UNLOCK(*bo); 2513 vdrop(vp); 2514 mtx_lock(&sync_mtx); 2515 return (0); 2516 } 2517 2518 static int first_printf = 1; 2519 2520 /* 2521 * System filesystem synchronizer daemon. 
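 *
 * The syncer is driven by a wheel of syncer_maxdelay work lists:
 * vn_syncer_add_to_worklist() above hashes a dirty bufobj into the slot
 * "delay" seconds ahead of syncer_delayno, and the loop below advances one
 * slot per second, fsyncing (MNT_LAZY) whatever it finds there.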
2522 */ 2523 static void 2524 sched_sync(void) 2525 { 2526 struct synclist *next, *slp; 2527 struct bufobj *bo; 2528 long starttime; 2529 struct thread *td = curthread; 2530 int last_work_seen; 2531 int net_worklist_len; 2532 int syncer_final_iter; 2533 int error; 2534 2535 last_work_seen = 0; 2536 syncer_final_iter = 0; 2537 syncer_state = SYNCER_RUNNING; 2538 starttime = time_uptime; 2539 td->td_pflags |= TDP_NORUNNINGBUF; 2540 2541 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2542 SHUTDOWN_PRI_LAST); 2543 2544 mtx_lock(&sync_mtx); 2545 for (;;) { 2546 if (syncer_state == SYNCER_FINAL_DELAY && 2547 syncer_final_iter == 0) { 2548 mtx_unlock(&sync_mtx); 2549 kproc_suspend_check(td->td_proc); 2550 mtx_lock(&sync_mtx); 2551 } 2552 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2553 if (syncer_state != SYNCER_RUNNING && 2554 starttime != time_uptime) { 2555 if (first_printf) { 2556 printf("\nSyncing disks, vnodes remaining... "); 2557 first_printf = 0; 2558 } 2559 printf("%d ", net_worklist_len); 2560 } 2561 starttime = time_uptime; 2562 2563 /* 2564 * Push files whose dirty time has expired. Be careful 2565 * of interrupt race on slp queue. 2566 * 2567 * Skip over empty worklist slots when shutting down. 2568 */ 2569 do { 2570 slp = &syncer_workitem_pending[syncer_delayno]; 2571 syncer_delayno += 1; 2572 if (syncer_delayno == syncer_maxdelay) 2573 syncer_delayno = 0; 2574 next = &syncer_workitem_pending[syncer_delayno]; 2575 /* 2576 * If the worklist has wrapped since 2577 * it was emptied of all but syncer vnodes, 2578 * switch to the FINAL_DELAY state and run 2579 * for one more second. 2580 */ 2581 if (syncer_state == SYNCER_SHUTTING_DOWN && 2582 net_worklist_len == 0 && 2583 last_work_seen == syncer_delayno) { 2584 syncer_state = SYNCER_FINAL_DELAY; 2585 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2586 } 2587 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2588 syncer_worklist_len > 0); 2589 2590 /* 2591 * Keep track of the last time there was anything 2592 * on the worklist other than syncer vnodes. 2593 * Return to the SHUTTING_DOWN state if any 2594 * new work appears. 2595 */ 2596 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2597 last_work_seen = syncer_delayno; 2598 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2599 syncer_state = SYNCER_SHUTTING_DOWN; 2600 while (!LIST_EMPTY(slp)) { 2601 error = sync_vnode(slp, &bo, td); 2602 if (error == 1) { 2603 LIST_REMOVE(bo, bo_synclist); 2604 LIST_INSERT_HEAD(next, bo, bo_synclist); 2605 continue; 2606 } 2607 2608 if (first_printf == 0) { 2609 /* 2610 * Drop the sync mutex, because some watchdog 2611 * drivers need to sleep while patting the watchdog. 2612 */ 2613 mtx_unlock(&sync_mtx); 2614 wdog_kern_pat(WD_LASTVAL); 2615 mtx_lock(&sync_mtx); 2616 } 2617 } 2618 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2619 syncer_final_iter--; 2620 /* 2621 * The variable rushjob allows the kernel to speed up the 2622 * processing of the filesystem syncer process. A rushjob 2623 * value of N tells the filesystem syncer to process the next 2624 * N seconds worth of work on its queue ASAP. Currently rushjob 2625 * is used by the soft update code to speed up the filesystem 2626 * syncer process when the incore state is getting so far 2627 * ahead of the disk that the kernel memory pool is being 2628 * threatened with exhaustion.
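 *
 * speedup_syncer() below is the interface for requesting this; each call
 * bumps rushjob by one, bounded by syncdelay / 2.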
2629 */ 2630 if (rushjob > 0) { 2631 rushjob -= 1; 2632 continue; 2633 } 2634 /* 2635 * Just sleep for a short period of time between 2636 * iterations when shutting down to allow some I/O 2637 * to happen. 2638 * 2639 * If it has taken us less than a second to process the 2640 * current work, then wait. Otherwise start right over 2641 * again. We can still lose time if any single round 2642 * takes more than two seconds, but it does not really 2643 * matter as we are just trying to generally pace the 2644 * filesystem activity. 2645 */ 2646 if (syncer_state != SYNCER_RUNNING || 2647 time_uptime == starttime) { 2648 thread_lock(td); 2649 sched_prio(td, PPAUSE); 2650 thread_unlock(td); 2651 } 2652 if (syncer_state != SYNCER_RUNNING) 2653 cv_timedwait(&sync_wakeup, &sync_mtx, 2654 hz / SYNCER_SHUTDOWN_SPEEDUP); 2655 else if (time_uptime == starttime) 2656 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2657 } 2658 } 2659 2660 /* 2661 * Request the syncer daemon to speed up its work. 2662 * We never push it to speed up more than half of its 2663 * normal turn time, otherwise it could take over the cpu. 2664 */ 2665 int 2666 speedup_syncer(void) 2667 { 2668 int ret = 0; 2669 2670 mtx_lock(&sync_mtx); 2671 if (rushjob < syncdelay / 2) { 2672 rushjob += 1; 2673 stat_rush_requests += 1; 2674 ret = 1; 2675 } 2676 mtx_unlock(&sync_mtx); 2677 cv_broadcast(&sync_wakeup); 2678 return (ret); 2679 } 2680 2681 /* 2682 * Tell the syncer to speed up its work and run though its work 2683 * list several times, then tell it to shut down. 2684 */ 2685 static void 2686 syncer_shutdown(void *arg, int howto) 2687 { 2688 2689 if (howto & RB_NOSYNC) 2690 return; 2691 mtx_lock(&sync_mtx); 2692 syncer_state = SYNCER_SHUTTING_DOWN; 2693 rushjob = 0; 2694 mtx_unlock(&sync_mtx); 2695 cv_broadcast(&sync_wakeup); 2696 kproc_shutdown(arg, howto); 2697 } 2698 2699 void 2700 syncer_suspend(void) 2701 { 2702 2703 syncer_shutdown(updateproc, 0); 2704 } 2705 2706 void 2707 syncer_resume(void) 2708 { 2709 2710 mtx_lock(&sync_mtx); 2711 first_printf = 1; 2712 syncer_state = SYNCER_RUNNING; 2713 mtx_unlock(&sync_mtx); 2714 cv_broadcast(&sync_wakeup); 2715 kproc_resume(updateproc); 2716 } 2717 2718 /* 2719 * Move the buffer between the clean and dirty lists of its vnode. 2720 */ 2721 void 2722 reassignbuf(struct buf *bp) 2723 { 2724 struct vnode *vp; 2725 struct bufobj *bo; 2726 int delay; 2727 #ifdef INVARIANTS 2728 struct bufv *bv; 2729 #endif 2730 2731 vp = bp->b_vp; 2732 bo = bp->b_bufobj; 2733 2734 KASSERT((bp->b_flags & B_PAGING) == 0, 2735 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2736 2737 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2738 bp, bp->b_vp, bp->b_flags); 2739 2740 BO_LOCK(bo); 2741 buf_vlist_remove(bp); 2742 2743 /* 2744 * If dirty, put on list of dirty buffers; otherwise insert onto list 2745 * of clean buffers. 
2746 */ 2747 if (bp->b_flags & B_DELWRI) { 2748 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2749 switch (vp->v_type) { 2750 case VDIR: 2751 delay = dirdelay; 2752 break; 2753 case VCHR: 2754 delay = metadelay; 2755 break; 2756 default: 2757 delay = filedelay; 2758 } 2759 vn_syncer_add_to_worklist(bo, delay); 2760 } 2761 buf_vlist_add(bp, bo, BX_VNDIRTY); 2762 } else { 2763 buf_vlist_add(bp, bo, BX_VNCLEAN); 2764 2765 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2766 mtx_lock(&sync_mtx); 2767 LIST_REMOVE(bo, bo_synclist); 2768 syncer_worklist_len--; 2769 mtx_unlock(&sync_mtx); 2770 bo->bo_flag &= ~BO_ONWORKLST; 2771 } 2772 } 2773 #ifdef INVARIANTS 2774 bv = &bo->bo_clean; 2775 bp = TAILQ_FIRST(&bv->bv_hd); 2776 KASSERT(bp == NULL || bp->b_bufobj == bo, 2777 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2778 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2779 KASSERT(bp == NULL || bp->b_bufobj == bo, 2780 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2781 bv = &bo->bo_dirty; 2782 bp = TAILQ_FIRST(&bv->bv_hd); 2783 KASSERT(bp == NULL || bp->b_bufobj == bo, 2784 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2785 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2786 KASSERT(bp == NULL || bp->b_bufobj == bo, 2787 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2788 #endif 2789 BO_UNLOCK(bo); 2790 } 2791 2792 static void 2793 v_init_counters(struct vnode *vp) 2794 { 2795 2796 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2797 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2798 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2799 2800 refcount_init(&vp->v_holdcnt, 1); 2801 refcount_init(&vp->v_usecount, 1); 2802 } 2803 2804 /* 2805 * Grab a particular vnode from the free list, increment its 2806 * reference count and lock it. VIRF_DOOMED is set if the vnode 2807 * is being destroyed. Only callers who specify LK_RETRY will 2808 * see doomed vnodes. If inactive processing was delayed in 2809 * vput try to do it here. 2810 * 2811 * usecount is manipulated using atomics without holding any locks. 2812 * 2813 * holdcnt can be manipulated using atomics without holding any locks, 2814 * except when transitioning 1<->0, in which case the interlock is held. 2815 * 2816 * Consumers which don't guarantee liveness of the vnode can use SMR to 2817 * try to get a reference. Note this operation can fail since the vnode 2818 * may be awaiting getting freed by the time they get to it. 
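 *
 * An illustrative lockless-lookup pattern (a sketch, not code copied from a
 * consumer; the lookup step is left abstract):
 *
 *	vfs_smr_enter();
 *	vp = <look up a candidate vnode without locks>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<retry with locks>;
 *	error = vget_finish(vp, LK_SHARED, vs);
 *
 * vget_prep_smr() must be called inside the SMR section; everything after
 * it may run outside. The name cache lockless lookup, for example, follows
 * this pattern.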
2819 */ 2820 enum vgetstate 2821 vget_prep_smr(struct vnode *vp) 2822 { 2823 enum vgetstate vs; 2824 2825 VFS_SMR_ASSERT_ENTERED(); 2826 2827 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2828 vs = VGET_USECOUNT; 2829 } else { 2830 if (vhold_smr(vp)) 2831 vs = VGET_HOLDCNT; 2832 else 2833 vs = VGET_NONE; 2834 } 2835 return (vs); 2836 } 2837 2838 enum vgetstate 2839 vget_prep(struct vnode *vp) 2840 { 2841 enum vgetstate vs; 2842 2843 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2844 vs = VGET_USECOUNT; 2845 } else { 2846 vhold(vp); 2847 vs = VGET_HOLDCNT; 2848 } 2849 return (vs); 2850 } 2851 2852 void 2853 vget_abort(struct vnode *vp, enum vgetstate vs) 2854 { 2855 2856 switch (vs) { 2857 case VGET_USECOUNT: 2858 vrele(vp); 2859 break; 2860 case VGET_HOLDCNT: 2861 vdrop(vp); 2862 break; 2863 default: 2864 __assert_unreachable(); 2865 } 2866 } 2867 2868 int 2869 vget(struct vnode *vp, int flags) 2870 { 2871 enum vgetstate vs; 2872 2873 vs = vget_prep(vp); 2874 return (vget_finish(vp, flags, vs)); 2875 } 2876 2877 int 2878 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2879 { 2880 int error; 2881 2882 if ((flags & LK_INTERLOCK) != 0) 2883 ASSERT_VI_LOCKED(vp, __func__); 2884 else 2885 ASSERT_VI_UNLOCKED(vp, __func__); 2886 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2887 VNPASS(vp->v_holdcnt > 0, vp); 2888 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2889 2890 error = vn_lock(vp, flags); 2891 if (__predict_false(error != 0)) { 2892 vget_abort(vp, vs); 2893 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2894 vp); 2895 return (error); 2896 } 2897 2898 vget_finish_ref(vp, vs); 2899 return (0); 2900 } 2901 2902 void 2903 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 2904 { 2905 int old; 2906 2907 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2908 VNPASS(vp->v_holdcnt > 0, vp); 2909 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2910 2911 if (vs == VGET_USECOUNT) 2912 return; 2913 2914 /* 2915 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2916 * the vnode around. Otherwise someone else lended their hold count and 2917 * we have to drop ours. 2918 */ 2919 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2920 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 2921 if (old != 0) { 2922 #ifdef INVARIANTS 2923 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2924 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2925 #else 2926 refcount_release(&vp->v_holdcnt); 2927 #endif 2928 } 2929 } 2930 2931 void 2932 vref(struct vnode *vp) 2933 { 2934 enum vgetstate vs; 2935 2936 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2937 vs = vget_prep(vp); 2938 vget_finish_ref(vp, vs); 2939 } 2940 2941 void 2942 vrefact(struct vnode *vp) 2943 { 2944 2945 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2946 #ifdef INVARIANTS 2947 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2948 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 2949 #else 2950 refcount_acquire(&vp->v_usecount); 2951 #endif 2952 } 2953 2954 void 2955 vlazy(struct vnode *vp) 2956 { 2957 struct mount *mp; 2958 2959 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2960 2961 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 2962 return; 2963 /* 2964 * We may get here for inactive routines after the vnode got doomed. 
2965 */ 2966 if (VN_IS_DOOMED(vp)) 2967 return; 2968 mp = vp->v_mount; 2969 mtx_lock(&mp->mnt_listmtx); 2970 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 2971 vp->v_mflag |= VMP_LAZYLIST; 2972 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 2973 mp->mnt_lazyvnodelistsize++; 2974 } 2975 mtx_unlock(&mp->mnt_listmtx); 2976 } 2977 2978 /* 2979 * This routine is only meant to be called from vgonel prior to dooming 2980 * the vnode. 2981 */ 2982 static void 2983 vunlazy_gone(struct vnode *vp) 2984 { 2985 struct mount *mp; 2986 2987 ASSERT_VOP_ELOCKED(vp, __func__); 2988 ASSERT_VI_LOCKED(vp, __func__); 2989 VNPASS(!VN_IS_DOOMED(vp), vp); 2990 2991 if (vp->v_mflag & VMP_LAZYLIST) { 2992 mp = vp->v_mount; 2993 mtx_lock(&mp->mnt_listmtx); 2994 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 2995 vp->v_mflag &= ~VMP_LAZYLIST; 2996 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 2997 mp->mnt_lazyvnodelistsize--; 2998 mtx_unlock(&mp->mnt_listmtx); 2999 } 3000 } 3001 3002 static void 3003 vdefer_inactive(struct vnode *vp) 3004 { 3005 3006 ASSERT_VI_LOCKED(vp, __func__); 3007 VNASSERT(vp->v_holdcnt > 0, vp, 3008 ("%s: vnode without hold count", __func__)); 3009 if (VN_IS_DOOMED(vp)) { 3010 vdropl(vp); 3011 return; 3012 } 3013 if (vp->v_iflag & VI_DEFINACT) { 3014 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3015 vdropl(vp); 3016 return; 3017 } 3018 if (vp->v_usecount > 0) { 3019 vp->v_iflag &= ~VI_OWEINACT; 3020 vdropl(vp); 3021 return; 3022 } 3023 vlazy(vp); 3024 vp->v_iflag |= VI_DEFINACT; 3025 VI_UNLOCK(vp); 3026 counter_u64_add(deferred_inact, 1); 3027 } 3028 3029 static void 3030 vdefer_inactive_unlocked(struct vnode *vp) 3031 { 3032 3033 VI_LOCK(vp); 3034 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3035 vdropl(vp); 3036 return; 3037 } 3038 vdefer_inactive(vp); 3039 } 3040 3041 enum vput_op { VRELE, VPUT, VUNREF }; 3042 3043 /* 3044 * Handle ->v_usecount transitioning to 0. 3045 * 3046 * By releasing the last usecount we take ownership of the hold count which 3047 * provides liveness of the vnode, meaning we have to vdrop. 3048 * 3049 * For all vnodes we may need to perform inactive processing. It requires an 3050 * exclusive lock on the vnode, while it is legal to call here with only a 3051 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3052 * inactive processing gets deferred to the syncer. 3053 * 3054 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3055 * on the lock being held all the way until VOP_INACTIVE. This in particular 3056 * happens with UFS which adds half-constructed vnodes to the hash, where they 3057 * can be found by other code. 3058 */ 3059 static void 3060 vput_final(struct vnode *vp, enum vput_op func) 3061 { 3062 int error; 3063 bool want_unlock; 3064 3065 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3066 VNPASS(vp->v_holdcnt > 0, vp); 3067 3068 VI_LOCK(vp); 3069 3070 /* 3071 * By the time we got here someone else might have transitioned 3072 * the count back to > 0. 3073 */ 3074 if (vp->v_usecount > 0) 3075 goto out; 3076 3077 /* 3078 * If the vnode is doomed vgone already performed inactive processing 3079 * (if needed). 3080 */ 3081 if (VN_IS_DOOMED(vp)) 3082 goto out; 3083 3084 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3085 goto out; 3086 3087 if (vp->v_iflag & VI_DOINGINACT) 3088 goto out; 3089 3090 /* 3091 * Locking operations here will drop the interlock and possibly the 3092 * vnode lock, opening a window where the vnode can get doomed all the 3093 * while ->v_usecount is 0. 
Set VI_OWEINACT to let vgone know to 3094 * perform inactive. 3095 */ 3096 vp->v_iflag |= VI_OWEINACT; 3097 want_unlock = false; 3098 error = 0; 3099 switch (func) { 3100 case VRELE: 3101 switch (VOP_ISLOCKED(vp)) { 3102 case LK_EXCLUSIVE: 3103 break; 3104 case LK_EXCLOTHER: 3105 case 0: 3106 want_unlock = true; 3107 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3108 VI_LOCK(vp); 3109 break; 3110 default: 3111 /* 3112 * The lock has at least one sharer, but we have no way 3113 * to conclude whether this is us. Play it safe and 3114 * defer processing. 3115 */ 3116 error = EAGAIN; 3117 break; 3118 } 3119 break; 3120 case VPUT: 3121 want_unlock = true; 3122 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3123 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3124 LK_NOWAIT); 3125 VI_LOCK(vp); 3126 } 3127 break; 3128 case VUNREF: 3129 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3130 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3131 VI_LOCK(vp); 3132 } 3133 break; 3134 } 3135 if (error == 0) { 3136 vinactive(vp); 3137 if (want_unlock) 3138 VOP_UNLOCK(vp); 3139 vdropl(vp); 3140 } else { 3141 vdefer_inactive(vp); 3142 } 3143 return; 3144 out: 3145 if (func == VPUT) 3146 VOP_UNLOCK(vp); 3147 vdropl(vp); 3148 } 3149 3150 /* 3151 * Decrement ->v_usecount for a vnode. 3152 * 3153 * Releasing the last use count requires additional processing, see vput_final 3154 * above for details. 3155 * 3156 * Comment above each variant denotes lock state on entry and exit. 3157 */ 3158 3159 /* 3160 * in: any 3161 * out: same as passed in 3162 */ 3163 void 3164 vrele(struct vnode *vp) 3165 { 3166 3167 ASSERT_VI_UNLOCKED(vp, __func__); 3168 if (!refcount_release(&vp->v_usecount)) 3169 return; 3170 vput_final(vp, VRELE); 3171 } 3172 3173 /* 3174 * in: locked 3175 * out: unlocked 3176 */ 3177 void 3178 vput(struct vnode *vp) 3179 { 3180 3181 ASSERT_VOP_LOCKED(vp, __func__); 3182 ASSERT_VI_UNLOCKED(vp, __func__); 3183 if (!refcount_release(&vp->v_usecount)) { 3184 VOP_UNLOCK(vp); 3185 return; 3186 } 3187 vput_final(vp, VPUT); 3188 } 3189 3190 /* 3191 * in: locked 3192 * out: locked 3193 */ 3194 void 3195 vunref(struct vnode *vp) 3196 { 3197 3198 ASSERT_VOP_LOCKED(vp, __func__); 3199 ASSERT_VI_UNLOCKED(vp, __func__); 3200 if (!refcount_release(&vp->v_usecount)) 3201 return; 3202 vput_final(vp, VUNREF); 3203 } 3204 3205 void 3206 vhold(struct vnode *vp) 3207 { 3208 int old; 3209 3210 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3211 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3212 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3213 ("%s: wrong hold count %d", __func__, old)); 3214 if (old == 0) 3215 vn_freevnodes_dec(); 3216 } 3217 3218 void 3219 vholdnz(struct vnode *vp) 3220 { 3221 3222 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3223 #ifdef INVARIANTS 3224 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3225 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3226 ("%s: wrong hold count %d", __func__, old)); 3227 #else 3228 atomic_add_int(&vp->v_holdcnt, 1); 3229 #endif 3230 } 3231 3232 /* 3233 * Grab a hold count unless the vnode is freed. 3234 * 3235 * Only use this routine if vfs smr is the only protection you have against 3236 * freeing the vnode. 3237 * 3238 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3239 * is not set. After the flag is set the vnode becomes immutable to anyone but 3240 * the thread which managed to set the flag. 
3241 * 3242 * It may be tempting to replace the loop with: 3243 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3244 * if (count & VHOLD_NO_SMR) { 3245 * backpedal and error out; 3246 * } 3247 * 3248 * However, while this is more performant, it hinders debugging by eliminating 3249 * the previously mentioned invariant. 3250 */ 3251 bool 3252 vhold_smr(struct vnode *vp) 3253 { 3254 int count; 3255 3256 VFS_SMR_ASSERT_ENTERED(); 3257 3258 count = atomic_load_int(&vp->v_holdcnt); 3259 for (;;) { 3260 if (count & VHOLD_NO_SMR) { 3261 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3262 ("non-zero hold count with flags %d\n", count)); 3263 return (false); 3264 } 3265 3266 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3267 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3268 if (count == 0) 3269 vn_freevnodes_dec(); 3270 return (true); 3271 } 3272 } 3273 } 3274 3275 static void __noinline 3276 vdbatch_process(struct vdbatch *vd) 3277 { 3278 struct vnode *vp; 3279 int i; 3280 3281 mtx_assert(&vd->lock, MA_OWNED); 3282 MPASS(curthread->td_pinned > 0); 3283 MPASS(vd->index == VDBATCH_SIZE); 3284 3285 mtx_lock(&vnode_list_mtx); 3286 critical_enter(); 3287 freevnodes += vd->freevnodes; 3288 for (i = 0; i < VDBATCH_SIZE; i++) { 3289 vp = vd->tab[i]; 3290 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3291 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3292 MPASS(vp->v_dbatchcpu != NOCPU); 3293 vp->v_dbatchcpu = NOCPU; 3294 } 3295 mtx_unlock(&vnode_list_mtx); 3296 vd->freevnodes = 0; 3297 bzero(vd->tab, sizeof(vd->tab)); 3298 vd->index = 0; 3299 critical_exit(); 3300 } 3301 3302 static void 3303 vdbatch_enqueue(struct vnode *vp) 3304 { 3305 struct vdbatch *vd; 3306 3307 ASSERT_VI_LOCKED(vp, __func__); 3308 VNASSERT(!VN_IS_DOOMED(vp), vp, 3309 ("%s: deferring requeue of a doomed vnode", __func__)); 3310 3311 if (vp->v_dbatchcpu != NOCPU) { 3312 VI_UNLOCK(vp); 3313 return; 3314 } 3315 3316 sched_pin(); 3317 vd = DPCPU_PTR(vd); 3318 mtx_lock(&vd->lock); 3319 MPASS(vd->index < VDBATCH_SIZE); 3320 MPASS(vd->tab[vd->index] == NULL); 3321 /* 3322 * A hack: we depend on being pinned so that we know what to put in 3323 * ->v_dbatchcpu. 3324 */ 3325 vp->v_dbatchcpu = curcpu; 3326 vd->tab[vd->index] = vp; 3327 vd->index++; 3328 VI_UNLOCK(vp); 3329 if (vd->index == VDBATCH_SIZE) 3330 vdbatch_process(vd); 3331 mtx_unlock(&vd->lock); 3332 sched_unpin(); 3333 } 3334 3335 /* 3336 * This routine must only be called for vnodes which are about to be 3337 * deallocated. Supporting dequeue for arbitrary vndoes would require 3338 * validating that the locked batch matches. 3339 */ 3340 static void 3341 vdbatch_dequeue(struct vnode *vp) 3342 { 3343 struct vdbatch *vd; 3344 int i; 3345 short cpu; 3346 3347 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3348 ("%s: called for a used vnode\n", __func__)); 3349 3350 cpu = vp->v_dbatchcpu; 3351 if (cpu == NOCPU) 3352 return; 3353 3354 vd = DPCPU_ID_PTR(cpu, vd); 3355 mtx_lock(&vd->lock); 3356 for (i = 0; i < vd->index; i++) { 3357 if (vd->tab[i] != vp) 3358 continue; 3359 vp->v_dbatchcpu = NOCPU; 3360 vd->index--; 3361 vd->tab[i] = vd->tab[vd->index]; 3362 vd->tab[vd->index] = NULL; 3363 break; 3364 } 3365 mtx_unlock(&vd->lock); 3366 /* 3367 * Either we dequeued the vnode above or the target CPU beat us to it. 3368 */ 3369 MPASS(vp->v_dbatchcpu == NOCPU); 3370 } 3371 3372 /* 3373 * Drop the hold count of the vnode. 
If this is the last reference to 3374 * the vnode we place it on the free list unless it has been vgone'd 3375 * (marked VIRF_DOOMED) in which case we will free it. 3376 * 3377 * Because the vnode vm object keeps a hold reference on the vnode if 3378 * there is at least one resident non-cached page, the vnode cannot 3379 * leave the active list without the page cleanup done. 3380 */ 3381 static void 3382 vdrop_deactivate(struct vnode *vp) 3383 { 3384 struct mount *mp; 3385 3386 ASSERT_VI_LOCKED(vp, __func__); 3387 /* 3388 * Mark a vnode as free: remove it from its active list 3389 * and put it up for recycling on the freelist. 3390 */ 3391 VNASSERT(!VN_IS_DOOMED(vp), vp, 3392 ("vdrop: returning doomed vnode")); 3393 VNASSERT(vp->v_op != NULL, vp, 3394 ("vdrop: vnode already reclaimed.")); 3395 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3396 ("vnode with VI_OWEINACT set")); 3397 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3398 ("vnode with VI_DEFINACT set")); 3399 if (vp->v_mflag & VMP_LAZYLIST) { 3400 mp = vp->v_mount; 3401 mtx_lock(&mp->mnt_listmtx); 3402 VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); 3403 /* 3404 * Don't remove the vnode from the lazy list if another thread 3405 * has increased the hold count. It may have re-enqueued the 3406 * vnode to the lazy list and is now responsible for its 3407 * removal. 3408 */ 3409 if (vp->v_holdcnt == 0) { 3410 vp->v_mflag &= ~VMP_LAZYLIST; 3411 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3412 mp->mnt_lazyvnodelistsize--; 3413 } 3414 mtx_unlock(&mp->mnt_listmtx); 3415 } 3416 vdbatch_enqueue(vp); 3417 } 3418 3419 static void __noinline 3420 vdropl_final(struct vnode *vp) 3421 { 3422 3423 ASSERT_VI_LOCKED(vp, __func__); 3424 VNPASS(VN_IS_DOOMED(vp), vp); 3425 /* 3426 * Set the VHOLD_NO_SMR flag. 3427 * 3428 * We may be racing against vhold_smr. If they win we can just pretend 3429 * we never got this far, they will vdrop later. 3430 */ 3431 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3432 vn_freevnodes_inc(); 3433 VI_UNLOCK(vp); 3434 /* 3435 * We lost the aforementioned race. Any subsequent access is 3436 * invalid as they might have managed to vdropl on their own. 3437 */ 3438 return; 3439 } 3440 /* 3441 * Don't bump freevnodes as this one is going away. 3442 */ 3443 freevnode(vp); 3444 } 3445 3446 void 3447 vdrop(struct vnode *vp) 3448 { 3449 3450 ASSERT_VI_UNLOCKED(vp, __func__); 3451 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3452 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3453 return; 3454 VI_LOCK(vp); 3455 vdropl(vp); 3456 } 3457 3458 void 3459 vdropl(struct vnode *vp) 3460 { 3461 3462 ASSERT_VI_LOCKED(vp, __func__); 3463 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3464 if (!refcount_release(&vp->v_holdcnt)) { 3465 VI_UNLOCK(vp); 3466 return; 3467 } 3468 if (!VN_IS_DOOMED(vp)) { 3469 vn_freevnodes_inc(); 3470 vdrop_deactivate(vp); 3471 /* 3472 * Also unlocks the interlock. We can't assert on it as we 3473 * released our hold and by now the vnode might have been 3474 * freed. 3475 */ 3476 return; 3477 } 3478 vdropl_final(vp); 3479 } 3480 3481 /* 3482 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3483 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 
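 *
 * VI_OWEINACT is set in vput_final() above when the last use reference is
 * dropped and inactivation may be needed; it is cleared here once
 * VOP_INACTIVE() actually runs.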
3484 */ 3485 static void 3486 vinactivef(struct vnode *vp) 3487 { 3488 struct vm_object *obj; 3489 3490 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3491 ASSERT_VI_LOCKED(vp, "vinactive"); 3492 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3493 ("vinactive: recursed on VI_DOINGINACT")); 3494 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3495 vp->v_iflag |= VI_DOINGINACT; 3496 vp->v_iflag &= ~VI_OWEINACT; 3497 VI_UNLOCK(vp); 3498 /* 3499 * Before moving off the active list, we must be sure that any 3500 * modified pages are converted into the vnode's dirty 3501 * buffers, since these will no longer be checked once the 3502 * vnode is on the inactive list. 3503 * 3504 * The write-out of the dirty pages is asynchronous. At the 3505 * point that VOP_INACTIVE() is called, there could still be 3506 * pending I/O and dirty pages in the object. 3507 */ 3508 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3509 vm_object_mightbedirty(obj)) { 3510 VM_OBJECT_WLOCK(obj); 3511 vm_object_page_clean(obj, 0, 0, 0); 3512 VM_OBJECT_WUNLOCK(obj); 3513 } 3514 VOP_INACTIVE(vp); 3515 VI_LOCK(vp); 3516 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3517 ("vinactive: lost VI_DOINGINACT")); 3518 vp->v_iflag &= ~VI_DOINGINACT; 3519 } 3520 3521 void 3522 vinactive(struct vnode *vp) 3523 { 3524 3525 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3526 ASSERT_VI_LOCKED(vp, "vinactive"); 3527 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3528 3529 if ((vp->v_iflag & VI_OWEINACT) == 0) 3530 return; 3531 if (vp->v_iflag & VI_DOINGINACT) 3532 return; 3533 if (vp->v_usecount > 0) { 3534 vp->v_iflag &= ~VI_OWEINACT; 3535 return; 3536 } 3537 vinactivef(vp); 3538 } 3539 3540 /* 3541 * Remove any vnodes in the vnode table belonging to mount point mp. 3542 * 3543 * If FORCECLOSE is not specified, there should not be any active ones, 3544 * return error if any are found (nb: this is a user error, not a 3545 * system error). If FORCECLOSE is specified, detach any active vnodes 3546 * that are found. 3547 * 3548 * If WRITECLOSE is set, only flush out regular file vnodes open for 3549 * writing. 3550 * 3551 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3552 * 3553 * `rootrefs' specifies the base reference count for the root vnode 3554 * of this filesystem. The root vnode is considered busy if its 3555 * v_usecount exceeds this value. On a successful return, vflush(, td) 3556 * will call vrele() on the root vnode exactly rootrefs times. 3557 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3558 * be zero. 3559 */ 3560 #ifdef DIAGNOSTIC 3561 static int busyprt = 0; /* print out busy vnodes */ 3562 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3563 #endif 3564 3565 int 3566 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3567 { 3568 struct vnode *vp, *mvp, *rootvp = NULL; 3569 struct vattr vattr; 3570 int busy = 0, error; 3571 3572 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3573 rootrefs, flags); 3574 if (rootrefs > 0) { 3575 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3576 ("vflush: bad args")); 3577 /* 3578 * Get the filesystem root vnode. We can vput() it 3579 * immediately, since with rootrefs > 0, it won't go away. 
3580 */ 3581 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3582 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3583 __func__, error); 3584 return (error); 3585 } 3586 vput(rootvp); 3587 } 3588 loop: 3589 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3590 vholdl(vp); 3591 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3592 if (error) { 3593 vdrop(vp); 3594 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3595 goto loop; 3596 } 3597 /* 3598 * Skip over a vnodes marked VV_SYSTEM. 3599 */ 3600 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3601 VOP_UNLOCK(vp); 3602 vdrop(vp); 3603 continue; 3604 } 3605 /* 3606 * If WRITECLOSE is set, flush out unlinked but still open 3607 * files (even if open only for reading) and regular file 3608 * vnodes open for writing. 3609 */ 3610 if (flags & WRITECLOSE) { 3611 if (vp->v_object != NULL) { 3612 VM_OBJECT_WLOCK(vp->v_object); 3613 vm_object_page_clean(vp->v_object, 0, 0, 0); 3614 VM_OBJECT_WUNLOCK(vp->v_object); 3615 } 3616 error = VOP_FSYNC(vp, MNT_WAIT, td); 3617 if (error != 0) { 3618 VOP_UNLOCK(vp); 3619 vdrop(vp); 3620 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3621 return (error); 3622 } 3623 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3624 VI_LOCK(vp); 3625 3626 if ((vp->v_type == VNON || 3627 (error == 0 && vattr.va_nlink > 0)) && 3628 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3629 VOP_UNLOCK(vp); 3630 vdropl(vp); 3631 continue; 3632 } 3633 } else 3634 VI_LOCK(vp); 3635 /* 3636 * With v_usecount == 0, all we need to do is clear out the 3637 * vnode data structures and we are done. 3638 * 3639 * If FORCECLOSE is set, forcibly close the vnode. 3640 */ 3641 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3642 vgonel(vp); 3643 } else { 3644 busy++; 3645 #ifdef DIAGNOSTIC 3646 if (busyprt) 3647 vn_printf(vp, "vflush: busy vnode "); 3648 #endif 3649 } 3650 VOP_UNLOCK(vp); 3651 vdropl(vp); 3652 } 3653 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3654 /* 3655 * If just the root vnode is busy, and if its refcount 3656 * is equal to `rootrefs', then go ahead and kill it. 3657 */ 3658 VI_LOCK(rootvp); 3659 KASSERT(busy > 0, ("vflush: not busy")); 3660 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3661 ("vflush: usecount %d < rootrefs %d", 3662 rootvp->v_usecount, rootrefs)); 3663 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3664 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3665 vgone(rootvp); 3666 VOP_UNLOCK(rootvp); 3667 busy = 0; 3668 } else 3669 VI_UNLOCK(rootvp); 3670 } 3671 if (busy) { 3672 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3673 busy); 3674 return (EBUSY); 3675 } 3676 for (; rootrefs > 0; rootrefs--) 3677 vrele(rootvp); 3678 return (0); 3679 } 3680 3681 /* 3682 * Recycle an unused vnode to the front of the free list. 3683 */ 3684 int 3685 vrecycle(struct vnode *vp) 3686 { 3687 int recycled; 3688 3689 VI_LOCK(vp); 3690 recycled = vrecyclel(vp); 3691 VI_UNLOCK(vp); 3692 return (recycled); 3693 } 3694 3695 /* 3696 * vrecycle, with the vp interlock held. 3697 */ 3698 int 3699 vrecyclel(struct vnode *vp) 3700 { 3701 int recycled; 3702 3703 ASSERT_VOP_ELOCKED(vp, __func__); 3704 ASSERT_VI_LOCKED(vp, __func__); 3705 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3706 recycled = 0; 3707 if (vp->v_usecount == 0) { 3708 recycled = 1; 3709 vgonel(vp); 3710 } 3711 return (recycled); 3712 } 3713 3714 /* 3715 * Eliminate all activity associated with a vnode 3716 * in preparation for reuse. 
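 *
 * vgone() below is the convenience wrapper that takes the interlock; the
 * caller must already hold the vnode lock exclusively, as vgonel() asserts.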
3717 */ 3718 void 3719 vgone(struct vnode *vp) 3720 { 3721 VI_LOCK(vp); 3722 vgonel(vp); 3723 VI_UNLOCK(vp); 3724 } 3725 3726 static void 3727 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3728 struct vnode *lowervp __unused) 3729 { 3730 } 3731 3732 /* 3733 * Notify upper mounts about reclaimed or unlinked vnode. 3734 */ 3735 void 3736 vfs_notify_upper(struct vnode *vp, int event) 3737 { 3738 static struct vfsops vgonel_vfsops = { 3739 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3740 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3741 }; 3742 struct mount *mp, *ump, *mmp; 3743 3744 mp = vp->v_mount; 3745 if (mp == NULL) 3746 return; 3747 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3748 return; 3749 3750 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3751 mmp->mnt_op = &vgonel_vfsops; 3752 mmp->mnt_kern_flag |= MNTK_MARKER; 3753 MNT_ILOCK(mp); 3754 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3755 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3756 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3757 ump = TAILQ_NEXT(ump, mnt_upper_link); 3758 continue; 3759 } 3760 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3761 MNT_IUNLOCK(mp); 3762 switch (event) { 3763 case VFS_NOTIFY_UPPER_RECLAIM: 3764 VFS_RECLAIM_LOWERVP(ump, vp); 3765 break; 3766 case VFS_NOTIFY_UPPER_UNLINK: 3767 VFS_UNLINK_LOWERVP(ump, vp); 3768 break; 3769 default: 3770 KASSERT(0, ("invalid event %d", event)); 3771 break; 3772 } 3773 MNT_ILOCK(mp); 3774 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3775 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3776 } 3777 free(mmp, M_TEMP); 3778 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3779 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3780 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3781 wakeup(&mp->mnt_uppers); 3782 } 3783 MNT_IUNLOCK(mp); 3784 } 3785 3786 /* 3787 * vgone, with the vp interlock held. 3788 */ 3789 static void 3790 vgonel(struct vnode *vp) 3791 { 3792 struct thread *td; 3793 struct mount *mp; 3794 vm_object_t object; 3795 bool active, doinginact, oweinact; 3796 3797 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3798 ASSERT_VI_LOCKED(vp, "vgonel"); 3799 VNASSERT(vp->v_holdcnt, vp, 3800 ("vgonel: vp %p has no reference.", vp)); 3801 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3802 td = curthread; 3803 3804 /* 3805 * Don't vgonel if we're already doomed. 3806 */ 3807 if (vp->v_irflag & VIRF_DOOMED) 3808 return; 3809 /* 3810 * Paired with freevnode. 3811 */ 3812 vn_seqc_write_begin_locked(vp); 3813 vunlazy_gone(vp); 3814 vp->v_irflag |= VIRF_DOOMED; 3815 3816 /* 3817 * Check to see if the vnode is in use. If so, we have to 3818 * call VOP_CLOSE() and VOP_INACTIVE(). 3819 * 3820 * It could be that VOP_INACTIVE() requested reclamation, in 3821 * which case we should avoid recursion, so check 3822 * VI_DOINGINACT. This is not precise but good enough. 3823 */ 3824 active = vp->v_usecount > 0; 3825 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3826 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 3827 3828 /* 3829 * If we need to do inactive VI_OWEINACT will be set. 3830 */ 3831 if (vp->v_iflag & VI_DEFINACT) { 3832 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3833 vp->v_iflag &= ~VI_DEFINACT; 3834 vdropl(vp); 3835 } else { 3836 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3837 VI_UNLOCK(vp); 3838 } 3839 cache_purge_vgone(vp); 3840 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3841 3842 /* 3843 * If purging an active vnode, it must be closed and 3844 * deactivated before being reclaimed. 
3845 */ 3846 if (active) 3847 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3848 if ((oweinact || active) && !doinginact) { 3849 VI_LOCK(vp); 3850 vinactivef(vp); 3851 VI_UNLOCK(vp); 3852 } 3853 if (vp->v_type == VSOCK) 3854 vfs_unp_reclaim(vp); 3855 3856 /* 3857 * Clean out any buffers associated with the vnode. 3858 * If the flush fails, just toss the buffers. 3859 */ 3860 mp = NULL; 3861 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3862 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3863 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3864 while (vinvalbuf(vp, 0, 0, 0) != 0) 3865 ; 3866 } 3867 3868 BO_LOCK(&vp->v_bufobj); 3869 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3870 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3871 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3872 vp->v_bufobj.bo_clean.bv_cnt == 0, 3873 ("vp %p bufobj not invalidated", vp)); 3874 3875 /* 3876 * For VMIO bufobj, BO_DEAD is set later, or in 3877 * vm_object_terminate() after the object's page queue is 3878 * flushed. 3879 */ 3880 object = vp->v_bufobj.bo_object; 3881 if (object == NULL) 3882 vp->v_bufobj.bo_flag |= BO_DEAD; 3883 BO_UNLOCK(&vp->v_bufobj); 3884 3885 /* 3886 * Handle the VM part. Tmpfs handles v_object on its own (the 3887 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3888 * should not touch the object borrowed from the lower vnode 3889 * (the handle check). 3890 */ 3891 if (object != NULL && object->type == OBJT_VNODE && 3892 object->handle == vp) 3893 vnode_destroy_vobject(vp); 3894 3895 /* 3896 * Reclaim the vnode. 3897 */ 3898 if (VOP_RECLAIM(vp)) 3899 panic("vgone: cannot reclaim"); 3900 if (mp != NULL) 3901 vn_finished_secondary_write(mp); 3902 VNASSERT(vp->v_object == NULL, vp, 3903 ("vop_reclaim left v_object vp=%p", vp)); 3904 /* 3905 * Clear the advisory locks and wake up waiting threads. 3906 */ 3907 (void)VOP_ADVLOCKPURGE(vp); 3908 vp->v_lockf = NULL; 3909 /* 3910 * Delete from old mount point vnode list. 3911 */ 3912 delmntque(vp); 3913 /* 3914 * Done with purge, reset to the standard lock and invalidate 3915 * the vnode. 3916 */ 3917 VI_LOCK(vp); 3918 vp->v_vnlock = &vp->v_lock; 3919 vp->v_op = &dead_vnodeops; 3920 vp->v_type = VBAD; 3921 } 3922 3923 /* 3924 * Print out a description of a vnode. 3925 */ 3926 static const char * const typename[] = 3927 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3928 "VMARKER"}; 3929 3930 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 3931 "new hold count flag not added to vn_printf"); 3932 3933 void 3934 vn_printf(struct vnode *vp, const char *fmt, ...) 
3935 { 3936 va_list ap; 3937 char buf[256], buf2[16]; 3938 u_long flags; 3939 u_int holdcnt; 3940 3941 va_start(ap, fmt); 3942 vprintf(fmt, ap); 3943 va_end(ap); 3944 printf("%p: ", (void *)vp); 3945 printf("type %s\n", typename[vp->v_type]); 3946 holdcnt = atomic_load_int(&vp->v_holdcnt); 3947 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 3948 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 3949 vp->v_seqc_users); 3950 switch (vp->v_type) { 3951 case VDIR: 3952 printf(" mountedhere %p\n", vp->v_mountedhere); 3953 break; 3954 case VCHR: 3955 printf(" rdev %p\n", vp->v_rdev); 3956 break; 3957 case VSOCK: 3958 printf(" socket %p\n", vp->v_unpcb); 3959 break; 3960 case VFIFO: 3961 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3962 break; 3963 default: 3964 printf("\n"); 3965 break; 3966 } 3967 buf[0] = '\0'; 3968 buf[1] = '\0'; 3969 if (holdcnt & VHOLD_NO_SMR) 3970 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 3971 printf(" hold count flags (%s)\n", buf + 1); 3972 3973 buf[0] = '\0'; 3974 buf[1] = '\0'; 3975 if (vp->v_irflag & VIRF_DOOMED) 3976 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 3977 if (vp->v_irflag & VIRF_PGREAD) 3978 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 3979 flags = vp->v_irflag & ~(VIRF_DOOMED | VIRF_PGREAD); 3980 if (flags != 0) { 3981 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 3982 strlcat(buf, buf2, sizeof(buf)); 3983 } 3984 if (vp->v_vflag & VV_ROOT) 3985 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3986 if (vp->v_vflag & VV_ISTTY) 3987 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3988 if (vp->v_vflag & VV_NOSYNC) 3989 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3990 if (vp->v_vflag & VV_ETERNALDEV) 3991 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3992 if (vp->v_vflag & VV_CACHEDLABEL) 3993 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3994 if (vp->v_vflag & VV_VMSIZEVNLOCK) 3995 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 3996 if (vp->v_vflag & VV_COPYONWRITE) 3997 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3998 if (vp->v_vflag & VV_SYSTEM) 3999 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4000 if (vp->v_vflag & VV_PROCDEP) 4001 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4002 if (vp->v_vflag & VV_NOKNOTE) 4003 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4004 if (vp->v_vflag & VV_DELETED) 4005 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4006 if (vp->v_vflag & VV_MD) 4007 strlcat(buf, "|VV_MD", sizeof(buf)); 4008 if (vp->v_vflag & VV_FORCEINSMQ) 4009 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4010 if (vp->v_vflag & VV_READLINK) 4011 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4012 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4013 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 4014 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 4015 if (flags != 0) { 4016 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4017 strlcat(buf, buf2, sizeof(buf)); 4018 } 4019 if (vp->v_iflag & VI_TEXT_REF) 4020 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4021 if (vp->v_iflag & VI_MOUNT) 4022 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4023 if (vp->v_iflag & VI_DOINGINACT) 4024 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4025 if (vp->v_iflag & VI_OWEINACT) 4026 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4027 if (vp->v_iflag & VI_DEFINACT) 4028 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4029 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4030 VI_OWEINACT | VI_DEFINACT); 4031 if (flags != 0) { 4032 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4033 strlcat(buf, buf2, sizeof(buf)); 4034 } 4035 if (vp->v_mflag & 
VMP_LAZYLIST) 4036 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4037 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4038 if (flags != 0) { 4039 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4040 strlcat(buf, buf2, sizeof(buf)); 4041 } 4042 printf(" flags (%s)\n", buf + 1); 4043 if (mtx_owned(VI_MTX(vp))) 4044 printf(" VI_LOCKed"); 4045 if (vp->v_object != NULL) 4046 printf(" v_object %p ref %d pages %d " 4047 "cleanbuf %d dirtybuf %d\n", 4048 vp->v_object, vp->v_object->ref_count, 4049 vp->v_object->resident_page_count, 4050 vp->v_bufobj.bo_clean.bv_cnt, 4051 vp->v_bufobj.bo_dirty.bv_cnt); 4052 printf(" "); 4053 lockmgr_printinfo(vp->v_vnlock); 4054 if (vp->v_data != NULL) 4055 VOP_PRINT(vp); 4056 } 4057 4058 #ifdef DDB 4059 /* 4060 * List all of the locked vnodes in the system. 4061 * Called when debugging the kernel. 4062 */ 4063 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4064 { 4065 struct mount *mp; 4066 struct vnode *vp; 4067 4068 /* 4069 * Note: because this is DDB, we can't obey the locking semantics 4070 * for these structures, which means we could catch an inconsistent 4071 * state and dereference a nasty pointer. Not much to be done 4072 * about that. 4073 */ 4074 db_printf("Locked vnodes\n"); 4075 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4076 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4077 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4078 vn_printf(vp, "vnode "); 4079 } 4080 } 4081 } 4082 4083 /* 4084 * Show details about the given vnode. 4085 */ 4086 DB_SHOW_COMMAND(vnode, db_show_vnode) 4087 { 4088 struct vnode *vp; 4089 4090 if (!have_addr) 4091 return; 4092 vp = (struct vnode *)addr; 4093 vn_printf(vp, "vnode "); 4094 } 4095 4096 /* 4097 * Show details about the given mount point. 4098 */ 4099 DB_SHOW_COMMAND(mount, db_show_mount) 4100 { 4101 struct mount *mp; 4102 struct vfsopt *opt; 4103 struct statfs *sp; 4104 struct vnode *vp; 4105 char buf[512]; 4106 uint64_t mflags; 4107 u_int flags; 4108 4109 if (!have_addr) { 4110 /* No address given, print short info about all mount points. 
*/ 4111 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4112 db_printf("%p %s on %s (%s)\n", mp, 4113 mp->mnt_stat.f_mntfromname, 4114 mp->mnt_stat.f_mntonname, 4115 mp->mnt_stat.f_fstypename); 4116 if (db_pager_quit) 4117 break; 4118 } 4119 db_printf("\nMore info: show mount <addr>\n"); 4120 return; 4121 } 4122 4123 mp = (struct mount *)addr; 4124 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4125 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4126 4127 buf[0] = '\0'; 4128 mflags = mp->mnt_flag; 4129 #define MNT_FLAG(flag) do { \ 4130 if (mflags & (flag)) { \ 4131 if (buf[0] != '\0') \ 4132 strlcat(buf, ", ", sizeof(buf)); \ 4133 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4134 mflags &= ~(flag); \ 4135 } \ 4136 } while (0) 4137 MNT_FLAG(MNT_RDONLY); 4138 MNT_FLAG(MNT_SYNCHRONOUS); 4139 MNT_FLAG(MNT_NOEXEC); 4140 MNT_FLAG(MNT_NOSUID); 4141 MNT_FLAG(MNT_NFS4ACLS); 4142 MNT_FLAG(MNT_UNION); 4143 MNT_FLAG(MNT_ASYNC); 4144 MNT_FLAG(MNT_SUIDDIR); 4145 MNT_FLAG(MNT_SOFTDEP); 4146 MNT_FLAG(MNT_NOSYMFOLLOW); 4147 MNT_FLAG(MNT_GJOURNAL); 4148 MNT_FLAG(MNT_MULTILABEL); 4149 MNT_FLAG(MNT_ACLS); 4150 MNT_FLAG(MNT_NOATIME); 4151 MNT_FLAG(MNT_NOCLUSTERR); 4152 MNT_FLAG(MNT_NOCLUSTERW); 4153 MNT_FLAG(MNT_SUJ); 4154 MNT_FLAG(MNT_EXRDONLY); 4155 MNT_FLAG(MNT_EXPORTED); 4156 MNT_FLAG(MNT_DEFEXPORTED); 4157 MNT_FLAG(MNT_EXPORTANON); 4158 MNT_FLAG(MNT_EXKERB); 4159 MNT_FLAG(MNT_EXPUBLIC); 4160 MNT_FLAG(MNT_LOCAL); 4161 MNT_FLAG(MNT_QUOTA); 4162 MNT_FLAG(MNT_ROOTFS); 4163 MNT_FLAG(MNT_USER); 4164 MNT_FLAG(MNT_IGNORE); 4165 MNT_FLAG(MNT_UPDATE); 4166 MNT_FLAG(MNT_DELEXPORT); 4167 MNT_FLAG(MNT_RELOAD); 4168 MNT_FLAG(MNT_FORCE); 4169 MNT_FLAG(MNT_SNAPSHOT); 4170 MNT_FLAG(MNT_BYFSID); 4171 #undef MNT_FLAG 4172 if (mflags != 0) { 4173 if (buf[0] != '\0') 4174 strlcat(buf, ", ", sizeof(buf)); 4175 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4176 "0x%016jx", mflags); 4177 } 4178 db_printf(" mnt_flag = %s\n", buf); 4179 4180 buf[0] = '\0'; 4181 flags = mp->mnt_kern_flag; 4182 #define MNT_KERN_FLAG(flag) do { \ 4183 if (flags & (flag)) { \ 4184 if (buf[0] != '\0') \ 4185 strlcat(buf, ", ", sizeof(buf)); \ 4186 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4187 flags &= ~(flag); \ 4188 } \ 4189 } while (0) 4190 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4191 MNT_KERN_FLAG(MNTK_ASYNC); 4192 MNT_KERN_FLAG(MNTK_SOFTDEP); 4193 MNT_KERN_FLAG(MNTK_DRAINING); 4194 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4195 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4196 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4197 MNT_KERN_FLAG(MNTK_NO_IOPF); 4198 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4199 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4200 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4201 MNT_KERN_FLAG(MNTK_MARKER); 4202 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4203 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4204 MNT_KERN_FLAG(MNTK_NOASYNC); 4205 MNT_KERN_FLAG(MNTK_UNMOUNT); 4206 MNT_KERN_FLAG(MNTK_MWAIT); 4207 MNT_KERN_FLAG(MNTK_SUSPEND); 4208 MNT_KERN_FLAG(MNTK_SUSPEND2); 4209 MNT_KERN_FLAG(MNTK_SUSPENDED); 4210 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4211 MNT_KERN_FLAG(MNTK_NOKNOTE); 4212 #undef MNT_KERN_FLAG 4213 if (flags != 0) { 4214 if (buf[0] != '\0') 4215 strlcat(buf, ", ", sizeof(buf)); 4216 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4217 "0x%08x", flags); 4218 } 4219 db_printf(" mnt_kern_flag = %s\n", buf); 4220 4221 db_printf(" mnt_opt = "); 4222 opt = TAILQ_FIRST(mp->mnt_opt); 4223 if (opt != NULL) { 4224 db_printf("%s", opt->name); 4225 opt = TAILQ_NEXT(opt, link); 4226 while (opt != NULL) { 4227 db_printf(", %s", opt->name); 4228 opt = TAILQ_NEXT(opt, link); 4229 } 4230 
} 4231 db_printf("\n"); 4232 4233 sp = &mp->mnt_stat; 4234 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4235 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4236 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4237 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4238 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4239 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4240 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4241 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4242 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4243 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4244 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4245 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4246 4247 db_printf(" mnt_cred = { uid=%u ruid=%u", 4248 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4249 if (jailed(mp->mnt_cred)) 4250 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4251 db_printf(" }\n"); 4252 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4253 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4254 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4255 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4256 db_printf(" mnt_lazyvnodelistsize = %d\n", 4257 mp->mnt_lazyvnodelistsize); 4258 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4259 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4260 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4261 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4262 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4263 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4264 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4265 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4266 db_printf(" mnt_secondary_accwrites = %d\n", 4267 mp->mnt_secondary_accwrites); 4268 db_printf(" mnt_gjprovider = %s\n", 4269 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4270 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4271 4272 db_printf("\n\nList of active vnodes\n"); 4273 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4274 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4275 vn_printf(vp, "vnode "); 4276 if (db_pager_quit) 4277 break; 4278 } 4279 } 4280 db_printf("\n\nList of inactive vnodes\n"); 4281 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4282 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4283 vn_printf(vp, "vnode "); 4284 if (db_pager_quit) 4285 break; 4286 } 4287 } 4288 } 4289 #endif /* DDB */ 4290 4291 /* 4292 * Fill in a struct xvfsconf based on a struct vfsconf. 4293 */ 4294 static int 4295 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4296 { 4297 struct xvfsconf xvfsp; 4298 4299 bzero(&xvfsp, sizeof(xvfsp)); 4300 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4301 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4302 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4303 xvfsp.vfc_flags = vfsp->vfc_flags; 4304 /* 4305 * These are unused in userland, we keep them 4306 * to not break binary compatibility. 
4307 */ 4308 xvfsp.vfc_vfsops = NULL; 4309 xvfsp.vfc_next = NULL; 4310 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4311 } 4312 4313 #ifdef COMPAT_FREEBSD32 4314 struct xvfsconf32 { 4315 uint32_t vfc_vfsops; 4316 char vfc_name[MFSNAMELEN]; 4317 int32_t vfc_typenum; 4318 int32_t vfc_refcount; 4319 int32_t vfc_flags; 4320 uint32_t vfc_next; 4321 }; 4322 4323 static int 4324 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4325 { 4326 struct xvfsconf32 xvfsp; 4327 4328 bzero(&xvfsp, sizeof(xvfsp)); 4329 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4330 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4331 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4332 xvfsp.vfc_flags = vfsp->vfc_flags; 4333 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4334 } 4335 #endif 4336 4337 /* 4338 * Top level filesystem related information gathering. 4339 */ 4340 static int 4341 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4342 { 4343 struct vfsconf *vfsp; 4344 int error; 4345 4346 error = 0; 4347 vfsconf_slock(); 4348 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4349 #ifdef COMPAT_FREEBSD32 4350 if (req->flags & SCTL_MASK32) 4351 error = vfsconf2x32(req, vfsp); 4352 else 4353 #endif 4354 error = vfsconf2x(req, vfsp); 4355 if (error) 4356 break; 4357 } 4358 vfsconf_sunlock(); 4359 return (error); 4360 } 4361 4362 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4363 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4364 "S,xvfsconf", "List of all configured filesystems"); 4365 4366 #ifndef BURN_BRIDGES 4367 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4368 4369 static int 4370 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4371 { 4372 int *name = (int *)arg1 - 1; /* XXX */ 4373 u_int namelen = arg2 + 1; /* XXX */ 4374 struct vfsconf *vfsp; 4375 4376 log(LOG_WARNING, "userland calling deprecated sysctl, " 4377 "please rebuild world\n"); 4378 4379 #if 1 || defined(COMPAT_PRELITE2) 4380 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
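 * A single remaining name component apparently means the caller used the
 * historic VFS_VFSCONF top-level OID rather than VFS_GENERIC, so the whole
 * table is dumped through sysctl_ovfs_conf() below.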
*/ 4381 if (namelen == 1) 4382 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4383 #endif 4384 4385 switch (name[1]) { 4386 case VFS_MAXTYPENUM: 4387 if (namelen != 2) 4388 return (ENOTDIR); 4389 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4390 case VFS_CONF: 4391 if (namelen != 3) 4392 return (ENOTDIR); /* overloaded */ 4393 vfsconf_slock(); 4394 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4395 if (vfsp->vfc_typenum == name[2]) 4396 break; 4397 } 4398 vfsconf_sunlock(); 4399 if (vfsp == NULL) 4400 return (EOPNOTSUPP); 4401 #ifdef COMPAT_FREEBSD32 4402 if (req->flags & SCTL_MASK32) 4403 return (vfsconf2x32(req, vfsp)); 4404 else 4405 #endif 4406 return (vfsconf2x(req, vfsp)); 4407 } 4408 return (EOPNOTSUPP); 4409 } 4410 4411 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4412 CTLFLAG_MPSAFE, vfs_sysctl, 4413 "Generic filesystem"); 4414 4415 #if 1 || defined(COMPAT_PRELITE2) 4416 4417 static int 4418 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4419 { 4420 int error; 4421 struct vfsconf *vfsp; 4422 struct ovfsconf ovfs; 4423 4424 vfsconf_slock(); 4425 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4426 bzero(&ovfs, sizeof(ovfs)); 4427 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4428 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4429 ovfs.vfc_index = vfsp->vfc_typenum; 4430 ovfs.vfc_refcount = vfsp->vfc_refcount; 4431 ovfs.vfc_flags = vfsp->vfc_flags; 4432 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4433 if (error != 0) { 4434 vfsconf_sunlock(); 4435 return (error); 4436 } 4437 } 4438 vfsconf_sunlock(); 4439 return (0); 4440 } 4441 4442 #endif /* 1 || COMPAT_PRELITE2 */ 4443 #endif /* !BURN_BRIDGES */ 4444 4445 #define KINFO_VNODESLOP 10 4446 #ifdef notyet 4447 /* 4448 * Dump vnode list (via sysctl). 4449 */ 4450 /* ARGSUSED */ 4451 static int 4452 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4453 { 4454 struct xvnode *xvn; 4455 struct mount *mp; 4456 struct vnode *vp; 4457 int error, len, n; 4458 4459 /* 4460 * Stale numvnodes access is not fatal here. 4461 */ 4462 req->lock = 0; 4463 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4464 if (!req->oldptr) 4465 /* Make an estimate */ 4466 return (SYSCTL_OUT(req, 0, len)); 4467 4468 error = sysctl_wire_old_buffer(req, 0); 4469 if (error != 0) 4470 return (error); 4471 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4472 n = 0; 4473 mtx_lock(&mountlist_mtx); 4474 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4475 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4476 continue; 4477 MNT_ILOCK(mp); 4478 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4479 if (n == len) 4480 break; 4481 vref(vp); 4482 xvn[n].xv_size = sizeof *xvn; 4483 xvn[n].xv_vnode = vp; 4484 xvn[n].xv_id = 0; /* XXX compat */ 4485 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4486 XV_COPY(usecount); 4487 XV_COPY(writecount); 4488 XV_COPY(holdcnt); 4489 XV_COPY(mount); 4490 XV_COPY(numoutput); 4491 XV_COPY(type); 4492 #undef XV_COPY 4493 xvn[n].xv_flag = vp->v_vflag; 4494 4495 switch (vp->v_type) { 4496 case VREG: 4497 case VDIR: 4498 case VLNK: 4499 break; 4500 case VBLK: 4501 case VCHR: 4502 if (vp->v_rdev == NULL) { 4503 vrele(vp); 4504 continue; 4505 } 4506 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4507 break; 4508 case VSOCK: 4509 xvn[n].xv_socket = vp->v_socket; 4510 break; 4511 case VFIFO: 4512 xvn[n].xv_fifo = vp->v_fifoinfo; 4513 break; 4514 case VNON: 4515 case VBAD: 4516 default: 4517 /* shouldn't happen? 
*/ 4518 vrele(vp); 4519 continue; 4520 } 4521 vrele(vp); 4522 ++n; 4523 } 4524 MNT_IUNLOCK(mp); 4525 mtx_lock(&mountlist_mtx); 4526 vfs_unbusy(mp); 4527 if (n == len) 4528 break; 4529 } 4530 mtx_unlock(&mountlist_mtx); 4531 4532 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4533 free(xvn, M_TEMP); 4534 return (error); 4535 } 4536 4537 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4538 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4539 ""); 4540 #endif 4541 4542 static void 4543 unmount_or_warn(struct mount *mp) 4544 { 4545 int error; 4546 4547 error = dounmount(mp, MNT_FORCE, curthread); 4548 if (error != 0) { 4549 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4550 if (error == EBUSY) 4551 printf("BUSY)\n"); 4552 else 4553 printf("%d)\n", error); 4554 } 4555 } 4556 4557 /* 4558 * Unmount all filesystems. The list is traversed in reverse order 4559 * of mounting to avoid dependencies. 4560 */ 4561 void 4562 vfs_unmountall(void) 4563 { 4564 struct mount *mp, *tmp; 4565 4566 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4567 4568 /* 4569 * Since this only runs when rebooting, it is not interlocked. 4570 */ 4571 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4572 vfs_ref(mp); 4573 4574 /* 4575 * Forcibly unmounting "/dev" before "/" would prevent clean 4576 * unmount of the latter. 4577 */ 4578 if (mp == rootdevmp) 4579 continue; 4580 4581 unmount_or_warn(mp); 4582 } 4583 4584 if (rootdevmp != NULL) 4585 unmount_or_warn(rootdevmp); 4586 } 4587 4588 static void 4589 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4590 { 4591 4592 ASSERT_VI_LOCKED(vp, __func__); 4593 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4594 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4595 vdropl(vp); 4596 return; 4597 } 4598 if (vn_lock(vp, lkflags) == 0) { 4599 VI_LOCK(vp); 4600 vinactive(vp); 4601 VOP_UNLOCK(vp); 4602 vdropl(vp); 4603 return; 4604 } 4605 vdefer_inactive_unlocked(vp); 4606 } 4607 4608 static int 4609 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4610 { 4611 4612 return (vp->v_iflag & VI_DEFINACT); 4613 } 4614 4615 static void __noinline 4616 vfs_periodic_inactive(struct mount *mp, int flags) 4617 { 4618 struct vnode *vp, *mvp; 4619 int lkflags; 4620 4621 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4622 if (flags != MNT_WAIT) 4623 lkflags |= LK_NOWAIT; 4624 4625 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4626 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4627 VI_UNLOCK(vp); 4628 continue; 4629 } 4630 vp->v_iflag &= ~VI_DEFINACT; 4631 vfs_deferred_inactive(vp, lkflags); 4632 } 4633 } 4634 4635 static inline bool 4636 vfs_want_msync(struct vnode *vp) 4637 { 4638 struct vm_object *obj; 4639 4640 /* 4641 * This test may be performed without any locks held. 4642 * We rely on vm_object's type stability. 
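 * A stale answer here is harmless: before any pages are actually cleaned,
 * the caller re-checks v_object and VV_NOSYNC after vget() and only calls
 * vm_object_page_clean() with the object write lock held.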
4643 */ 4644 if (vp->v_vflag & VV_NOSYNC) 4645 return (false); 4646 obj = vp->v_object; 4647 return (obj != NULL && vm_object_mightbedirty(obj)); 4648 } 4649 4650 static int 4651 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4652 { 4653 4654 if (vp->v_vflag & VV_NOSYNC) 4655 return (false); 4656 if (vp->v_iflag & VI_DEFINACT) 4657 return (true); 4658 return (vfs_want_msync(vp)); 4659 } 4660 4661 static void __noinline 4662 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4663 { 4664 struct vnode *vp, *mvp; 4665 struct vm_object *obj; 4666 int lkflags, objflags; 4667 bool seen_defer; 4668 4669 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4670 if (flags != MNT_WAIT) { 4671 lkflags |= LK_NOWAIT; 4672 objflags = OBJPC_NOSYNC; 4673 } else { 4674 objflags = OBJPC_SYNC; 4675 } 4676 4677 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4678 seen_defer = false; 4679 if (vp->v_iflag & VI_DEFINACT) { 4680 vp->v_iflag &= ~VI_DEFINACT; 4681 seen_defer = true; 4682 } 4683 if (!vfs_want_msync(vp)) { 4684 if (seen_defer) 4685 vfs_deferred_inactive(vp, lkflags); 4686 else 4687 VI_UNLOCK(vp); 4688 continue; 4689 } 4690 if (vget(vp, lkflags) == 0) { 4691 obj = vp->v_object; 4692 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4693 VM_OBJECT_WLOCK(obj); 4694 vm_object_page_clean(obj, 0, 0, objflags); 4695 VM_OBJECT_WUNLOCK(obj); 4696 } 4697 vput(vp); 4698 if (seen_defer) 4699 vdrop(vp); 4700 } else { 4701 if (seen_defer) 4702 vdefer_inactive_unlocked(vp); 4703 } 4704 } 4705 } 4706 4707 void 4708 vfs_periodic(struct mount *mp, int flags) 4709 { 4710 4711 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4712 4713 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4714 vfs_periodic_inactive(mp, flags); 4715 else 4716 vfs_periodic_msync_inactive(mp, flags); 4717 } 4718 4719 static void 4720 destroy_vpollinfo_free(struct vpollinfo *vi) 4721 { 4722 4723 knlist_destroy(&vi->vpi_selinfo.si_note); 4724 mtx_destroy(&vi->vpi_lock); 4725 uma_zfree(vnodepoll_zone, vi); 4726 } 4727 4728 static void 4729 destroy_vpollinfo(struct vpollinfo *vi) 4730 { 4731 4732 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4733 seldrain(&vi->vpi_selinfo); 4734 destroy_vpollinfo_free(vi); 4735 } 4736 4737 /* 4738 * Initialize per-vnode helper structure to hold poll-related state. 4739 */ 4740 void 4741 v_addpollinfo(struct vnode *vp) 4742 { 4743 struct vpollinfo *vi; 4744 4745 if (vp->v_pollinfo != NULL) 4746 return; 4747 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4748 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4749 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4750 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4751 VI_LOCK(vp); 4752 if (vp->v_pollinfo != NULL) { 4753 VI_UNLOCK(vp); 4754 destroy_vpollinfo_free(vi); 4755 return; 4756 } 4757 vp->v_pollinfo = vi; 4758 VI_UNLOCK(vp); 4759 } 4760 4761 /* 4762 * Record a process's interest in events which might happen to 4763 * a vnode. Because poll uses the historic select-style interface 4764 * internally, this routine serves as both the ``check for any 4765 * pending events'' and the ``record my interest in future events'' 4766 * functions. (These are done together, while the lock is held, 4767 * to avoid race conditions.) 
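 * The return value is the set of requested events that were already
 * pending; 0 means the interest was recorded via selrecord() for a later
 * wakeup.  For illustration only, a minimal VOP_POLL implementation could
 * be written roughly as:
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));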
*/ 4769 int 4770 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4771 { 4772 4773 v_addpollinfo(vp); 4774 mtx_lock(&vp->v_pollinfo->vpi_lock); 4775 if (vp->v_pollinfo->vpi_revents & events) { 4776 /* 4777 * This leaves events we are not interested 4778 * in available for the other process 4779 * which presumably had requested them 4780 * (otherwise they would never have been 4781 * recorded). 4782 */ 4783 events &= vp->v_pollinfo->vpi_revents; 4784 vp->v_pollinfo->vpi_revents &= ~events; 4785 4786 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4787 return (events); 4788 } 4789 vp->v_pollinfo->vpi_events |= events; 4790 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4791 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4792 return (0); 4793 } 4794 4795 /* 4796 * Routine to create and manage a filesystem syncer vnode. 4797 */ 4798 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4799 static int sync_fsync(struct vop_fsync_args *); 4800 static int sync_inactive(struct vop_inactive_args *); 4801 static int sync_reclaim(struct vop_reclaim_args *); 4802 4803 static struct vop_vector sync_vnodeops = { 4804 .vop_bypass = VOP_EOPNOTSUPP, 4805 .vop_close = sync_close, /* close */ 4806 .vop_fsync = sync_fsync, /* fsync */ 4807 .vop_inactive = sync_inactive, /* inactive */ 4808 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4809 .vop_reclaim = sync_reclaim, /* reclaim */ 4810 .vop_lock1 = vop_stdlock, /* lock */ 4811 .vop_unlock = vop_stdunlock, /* unlock */ 4812 .vop_islocked = vop_stdislocked, /* islocked */ 4813 }; 4814 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4815 4816 /* 4817 * Create a new filesystem syncer vnode for the specified mount point. 4818 */ 4819 void 4820 vfs_allocate_syncvnode(struct mount *mp) 4821 { 4822 struct vnode *vp; 4823 struct bufobj *bo; 4824 static long start, incr, next; 4825 int error; 4826 4827 /* Allocate a new vnode */ 4828 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4829 if (error != 0) 4830 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4831 vp->v_type = VNON; 4832 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4833 vp->v_vflag |= VV_FORCEINSMQ; 4834 error = insmntque(vp, mp); 4835 if (error != 0) 4836 panic("vfs_allocate_syncvnode: insmntque() failed"); 4837 vp->v_vflag &= ~VV_FORCEINSMQ; 4838 VOP_UNLOCK(vp); 4839 /* 4840 * Place the vnode onto the syncer worklist. We attempt to 4841 * scatter them about on the list so that they will go off 4842 * at evenly distributed times even if all the filesystems 4843 * are mounted at once. 4844 */ 4845 next += incr; 4846 if (next == 0 || next > syncer_maxdelay) { 4847 start /= 2; 4848 incr /= 2; 4849 if (start == 0) { 4850 start = syncer_maxdelay / 2; 4851 incr = syncer_maxdelay; 4852 } 4853 next = start; 4854 } 4855 bo = &vp->v_bufobj; 4856 BO_LOCK(bo); 4857 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4858 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
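 * The lock is reacquired immediately below so that bumping
 * sync_vnode_count and publishing mp->mnt_syncer happen under the same
 * lock; if a syncer was already installed for this mount, the freshly
 * allocated vnode is simply destroyed with vgone()/vput().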
*/ 4859 mtx_lock(&sync_mtx); 4860 sync_vnode_count++; 4861 if (mp->mnt_syncer == NULL) { 4862 mp->mnt_syncer = vp; 4863 vp = NULL; 4864 } 4865 mtx_unlock(&sync_mtx); 4866 BO_UNLOCK(bo); 4867 if (vp != NULL) { 4868 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4869 vgone(vp); 4870 vput(vp); 4871 } 4872 } 4873 4874 void 4875 vfs_deallocate_syncvnode(struct mount *mp) 4876 { 4877 struct vnode *vp; 4878 4879 mtx_lock(&sync_mtx); 4880 vp = mp->mnt_syncer; 4881 if (vp != NULL) 4882 mp->mnt_syncer = NULL; 4883 mtx_unlock(&sync_mtx); 4884 if (vp != NULL) 4885 vrele(vp); 4886 } 4887 4888 /* 4889 * Do a lazy sync of the filesystem. 4890 */ 4891 static int 4892 sync_fsync(struct vop_fsync_args *ap) 4893 { 4894 struct vnode *syncvp = ap->a_vp; 4895 struct mount *mp = syncvp->v_mount; 4896 int error, save; 4897 struct bufobj *bo; 4898 4899 /* 4900 * We only need to do something if this is a lazy evaluation. 4901 */ 4902 if (ap->a_waitfor != MNT_LAZY) 4903 return (0); 4904 4905 /* 4906 * Move ourselves to the back of the sync list. 4907 */ 4908 bo = &syncvp->v_bufobj; 4909 BO_LOCK(bo); 4910 vn_syncer_add_to_worklist(bo, syncdelay); 4911 BO_UNLOCK(bo); 4912 4913 /* 4914 * Walk the list of vnodes pushing all that are dirty and 4915 * not already on the sync list. 4916 */ 4917 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4918 return (0); 4919 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4920 vfs_unbusy(mp); 4921 return (0); 4922 } 4923 save = curthread_pflags_set(TDP_SYNCIO); 4924 /* 4925 * The filesystem at hand may be idle with free vnodes stored in the 4926 * batch. Return them instead of letting them stay there indefinitely. 4927 */ 4928 vfs_periodic(mp, MNT_NOWAIT); 4929 error = VFS_SYNC(mp, MNT_LAZY); 4930 curthread_pflags_restore(save); 4931 vn_finished_write(mp); 4932 vfs_unbusy(mp); 4933 return (error); 4934 } 4935 4936 /* 4937 * The syncer vnode is no longer referenced. 4938 */ 4939 static int 4940 sync_inactive(struct vop_inactive_args *ap) 4941 { 4942 4943 vgone(ap->a_vp); 4944 return (0); 4945 } 4946 4947 /* 4948 * The syncer vnode is no longer needed and is being decommissioned. 4949 * 4950 * Modifications to the worklist must be protected by sync_mtx.
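 * Reclaim is typically reached via vfs_deallocate_syncvnode() ->
 * vrele() -> sync_inactive() -> vgone(); it detaches the bufobj from the
 * syncer worklist under sync_mtx and clears mnt_syncer if it still points
 * at this vnode.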
4951 */ 4952 static int 4953 sync_reclaim(struct vop_reclaim_args *ap) 4954 { 4955 struct vnode *vp = ap->a_vp; 4956 struct bufobj *bo; 4957 4958 bo = &vp->v_bufobj; 4959 BO_LOCK(bo); 4960 mtx_lock(&sync_mtx); 4961 if (vp->v_mount->mnt_syncer == vp) 4962 vp->v_mount->mnt_syncer = NULL; 4963 if (bo->bo_flag & BO_ONWORKLST) { 4964 LIST_REMOVE(bo, bo_synclist); 4965 syncer_worklist_len--; 4966 sync_vnode_count--; 4967 bo->bo_flag &= ~BO_ONWORKLST; 4968 } 4969 mtx_unlock(&sync_mtx); 4970 BO_UNLOCK(bo); 4971 4972 return (0); 4973 } 4974 4975 int 4976 vn_need_pageq_flush(struct vnode *vp) 4977 { 4978 struct vm_object *obj; 4979 int need; 4980 4981 MPASS(mtx_owned(VI_MTX(vp))); 4982 need = 0; 4983 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4984 vm_object_mightbedirty(obj)) 4985 need = 1; 4986 return (need); 4987 } 4988 4989 /* 4990 * Check if vnode represents a disk device 4991 */ 4992 bool 4993 vn_isdisk_error(struct vnode *vp, int *errp) 4994 { 4995 int error; 4996 4997 if (vp->v_type != VCHR) { 4998 error = ENOTBLK; 4999 goto out; 5000 } 5001 error = 0; 5002 dev_lock(); 5003 if (vp->v_rdev == NULL) 5004 error = ENXIO; 5005 else if (vp->v_rdev->si_devsw == NULL) 5006 error = ENXIO; 5007 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5008 error = ENOTBLK; 5009 dev_unlock(); 5010 out: 5011 *errp = error; 5012 return (error == 0); 5013 } 5014 5015 bool 5016 vn_isdisk(struct vnode *vp) 5017 { 5018 int error; 5019 5020 return (vn_isdisk_error(vp, &error)); 5021 } 5022 5023 /* 5024 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5025 * the comment above cache_fplookup for details. 5026 */ 5027 int 5028 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5029 { 5030 int error; 5031 5032 VFS_SMR_ASSERT_ENTERED(); 5033 5034 /* Check the owner. */ 5035 if (cred->cr_uid == file_uid) { 5036 if (file_mode & S_IXUSR) 5037 return (0); 5038 goto out_error; 5039 } 5040 5041 /* Otherwise, check the groups (first match) */ 5042 if (groupmember(file_gid, cred)) { 5043 if (file_mode & S_IXGRP) 5044 return (0); 5045 goto out_error; 5046 } 5047 5048 /* Otherwise, check everyone else. */ 5049 if (file_mode & S_IXOTH) 5050 return (0); 5051 out_error: 5052 /* 5053 * Permission check failed, but it is possible the denial will be overridden 5054 * (e.g., when root is traversing through a 700 directory owned by someone 5055 * else). 5056 * 5057 * vaccess() calls priv_check_cred which in turn can descend into MAC 5058 * modules overriding this result. It is quite unclear what semantics 5059 * they are allowed to operate under, thus for safety we don't call them 5060 * from within the SMR section. This also means that if any such modules 5061 * are present, we have to let the regular lookup decide. 5062 */ 5063 error = priv_check_cred_vfs_lookup_nomac(cred); 5064 switch (error) { 5065 case 0: 5066 return (0); 5067 case EAGAIN: 5068 /* 5069 * MAC modules present. 5070 */ 5071 return (EAGAIN); 5072 case EPERM: 5073 return (EACCES); 5074 default: 5075 return (error); 5076 } 5077 } 5078 5079 /* 5080 * Common filesystem object access control check routine. Accepts a 5081 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5082 * Returns 0 on success, or an errno on failure.
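 * For illustration only (the inode field names are merely typical), a
 * filesystem's VOP_ACCESS usually ends with a call along the lines of:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_accmode, ap->a_cred));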
5083 */ 5084 int 5085 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5086 accmode_t accmode, struct ucred *cred) 5087 { 5088 accmode_t dac_granted; 5089 accmode_t priv_granted; 5090 5091 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5092 ("invalid bit in accmode")); 5093 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5094 ("VAPPEND without VWRITE")); 5095 5096 /* 5097 * Look for a normal, non-privileged way to access the file/directory 5098 * as requested. If it exists, go with that. 5099 */ 5100 5101 dac_granted = 0; 5102 5103 /* Check the owner. */ 5104 if (cred->cr_uid == file_uid) { 5105 dac_granted |= VADMIN; 5106 if (file_mode & S_IXUSR) 5107 dac_granted |= VEXEC; 5108 if (file_mode & S_IRUSR) 5109 dac_granted |= VREAD; 5110 if (file_mode & S_IWUSR) 5111 dac_granted |= (VWRITE | VAPPEND); 5112 5113 if ((accmode & dac_granted) == accmode) 5114 return (0); 5115 5116 goto privcheck; 5117 } 5118 5119 /* Otherwise, check the groups (first match) */ 5120 if (groupmember(file_gid, cred)) { 5121 if (file_mode & S_IXGRP) 5122 dac_granted |= VEXEC; 5123 if (file_mode & S_IRGRP) 5124 dac_granted |= VREAD; 5125 if (file_mode & S_IWGRP) 5126 dac_granted |= (VWRITE | VAPPEND); 5127 5128 if ((accmode & dac_granted) == accmode) 5129 return (0); 5130 5131 goto privcheck; 5132 } 5133 5134 /* Otherwise, check everyone else. */ 5135 if (file_mode & S_IXOTH) 5136 dac_granted |= VEXEC; 5137 if (file_mode & S_IROTH) 5138 dac_granted |= VREAD; 5139 if (file_mode & S_IWOTH) 5140 dac_granted |= (VWRITE | VAPPEND); 5141 if ((accmode & dac_granted) == accmode) 5142 return (0); 5143 5144 privcheck: 5145 /* 5146 * Build a privilege mask to determine if the set of privileges 5147 * satisfies the requirements when combined with the granted mask 5148 * from above. For each privilege, if the privilege is required, 5149 * bitwise or the request type onto the priv_granted mask. 5150 */ 5151 priv_granted = 0; 5152 5153 if (type == VDIR) { 5154 /* 5155 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5156 * requests, instead of PRIV_VFS_EXEC. 5157 */ 5158 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5159 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5160 priv_granted |= VEXEC; 5161 } else { 5162 /* 5163 * Ensure that at least one execute bit is on. Otherwise, 5164 * a privileged user will always succeed, and we don't want 5165 * this to happen unless the file really is executable. 5166 */ 5167 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5168 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5169 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5170 priv_granted |= VEXEC; 5171 } 5172 5173 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5174 !priv_check_cred(cred, PRIV_VFS_READ)) 5175 priv_granted |= VREAD; 5176 5177 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5178 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5179 priv_granted |= (VWRITE | VAPPEND); 5180 5181 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5182 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5183 priv_granted |= VADMIN; 5184 5185 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5186 return (0); 5187 } 5188 5189 return ((accmode & VADMIN) ? EPERM : EACCES); 5190 } 5191 5192 /* 5193 * Credential check based on process requesting service, and per-attribute 5194 * permissions. 
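 * As the switch below shows, EXTATTR_NAMESPACE_SYSTEM requires the
 * PRIV_VFS_EXTATTR_SYSTEM privilege, EXTATTR_NAMESPACE_USER falls back to
 * an ordinary VOP_ACCESS() check, and any other namespace is denied.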
5195 */ 5196 int 5197 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5198 struct thread *td, accmode_t accmode) 5199 { 5200 5201 /* 5202 * Kernel-invoked always succeeds. 5203 */ 5204 if (cred == NOCRED) 5205 return (0); 5206 5207 /* 5208 * Do not allow privileged processes in jail to directly manipulate 5209 * system attributes. 5210 */ 5211 switch (attrnamespace) { 5212 case EXTATTR_NAMESPACE_SYSTEM: 5213 /* Potentially should be: return (EPERM); */ 5214 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5215 case EXTATTR_NAMESPACE_USER: 5216 return (VOP_ACCESS(vp, accmode, cred, td)); 5217 default: 5218 return (EPERM); 5219 } 5220 } 5221 5222 #ifdef DEBUG_VFS_LOCKS 5223 /* 5224 * This only exists to suppress warnings from unlocked specfs accesses. It is 5225 * no longer ok to have an unlocked VFS. 5226 */ 5227 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5228 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5229 5230 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5231 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5232 "Drop into debugger on lock violation"); 5233 5234 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5235 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5236 0, "Check for interlock across VOPs"); 5237 5238 int vfs_badlock_print = 1; /* Print lock violations. */ 5239 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5240 0, "Print lock violations"); 5241 5242 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5243 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5244 0, "Print vnode details on lock violations"); 5245 5246 #ifdef KDB 5247 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 5248 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5249 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5250 #endif 5251 5252 static void 5253 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5254 { 5255 5256 #ifdef KDB 5257 if (vfs_badlock_backtrace) 5258 kdb_backtrace(); 5259 #endif 5260 if (vfs_badlock_vnode) 5261 vn_printf(vp, "vnode "); 5262 if (vfs_badlock_print) 5263 printf("%s: %p %s\n", str, (void *)vp, msg); 5264 if (vfs_badlock_ddb) 5265 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5266 } 5267 5268 void 5269 assert_vi_locked(struct vnode *vp, const char *str) 5270 { 5271 5272 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5273 vfs_badlock("interlock is not locked but should be", str, vp); 5274 } 5275 5276 void 5277 assert_vi_unlocked(struct vnode *vp, const char *str) 5278 { 5279 5280 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5281 vfs_badlock("interlock is locked but should not be", str, vp); 5282 } 5283 5284 void 5285 assert_vop_locked(struct vnode *vp, const char *str) 5286 { 5287 int locked; 5288 5289 if (!IGNORE_LOCK(vp)) { 5290 locked = VOP_ISLOCKED(vp); 5291 if (locked == 0 || locked == LK_EXCLOTHER) 5292 vfs_badlock("is not locked but should be", str, vp); 5293 } 5294 } 5295 5296 void 5297 assert_vop_unlocked(struct vnode *vp, const char *str) 5298 { 5299 5300 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5301 vfs_badlock("is locked but should not be", str, vp); 5302 } 5303 5304 void 5305 assert_vop_elocked(struct vnode *vp, const char *str) 5306 { 5307 5308 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5309 vfs_badlock("is not exclusive locked but should be", str, vp); 5310 } 5311 #endif /* DEBUG_VFS_LOCKS */ 5312 5313 void 5314 vop_rename_fail(struct vop_rename_args *ap) 5315 { 5316 5317 if (ap->a_tvp != NULL) 5318 vput(ap->a_tvp); 5319 if (ap->a_tdvp == ap->a_tvp) 5320 vrele(ap->a_tdvp); 5321 else 5322 vput(ap->a_tdvp); 5323 vrele(ap->a_fdvp); 5324 vrele(ap->a_fvp); 5325 } 5326 5327 void 5328 vop_rename_pre(void *ap) 5329 { 5330 struct vop_rename_args *a = ap; 5331 5332 #ifdef DEBUG_VFS_LOCKS 5333 if (a->a_tvp) 5334 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5335 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5336 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5337 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5338 5339 /* Check the source (from). */ 5340 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5341 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5342 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5343 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5344 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5345 5346 /* Check the target. */ 5347 if (a->a_tvp) 5348 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5349 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5350 #endif 5351 /* 5352 * It may be tempting to add vn_seqc_write_begin/end calls here and 5353 * in vop_rename_post but that's not going to work out since some 5354 * filesystems relookup vnodes mid-rename. This is probably a bug. 5355 * 5356 * For now filesystems are expected to do the relevant calls after they 5357 * decide what vnodes to operate on. 
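 * The vhold() calls below keep the vnodes involved from being recycled
 * while the rename is in progress; vop_rename_post() drops the matching
 * references with vdrop() once the operation has finished.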
5358 */ 5359 if (a->a_tdvp != a->a_fdvp) 5360 vhold(a->a_fdvp); 5361 if (a->a_tvp != a->a_fvp) 5362 vhold(a->a_fvp); 5363 vhold(a->a_tdvp); 5364 if (a->a_tvp) 5365 vhold(a->a_tvp); 5366 } 5367 5368 #ifdef DEBUG_VFS_LOCKS 5369 void 5370 vop_fplookup_vexec_debugpre(void *ap __unused) 5371 { 5372 5373 VFS_SMR_ASSERT_ENTERED(); 5374 } 5375 5376 void 5377 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5378 { 5379 5380 VFS_SMR_ASSERT_ENTERED(); 5381 } 5382 5383 void 5384 vop_strategy_debugpre(void *ap) 5385 { 5386 struct vop_strategy_args *a; 5387 struct buf *bp; 5388 5389 a = ap; 5390 bp = a->a_bp; 5391 5392 /* 5393 * Cluster ops lock their component buffers but not the IO container. 5394 */ 5395 if ((bp->b_flags & B_CLUSTER) != 0) 5396 return; 5397 5398 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5399 if (vfs_badlock_print) 5400 printf( 5401 "VOP_STRATEGY: bp is not locked but should be\n"); 5402 if (vfs_badlock_ddb) 5403 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5404 } 5405 } 5406 5407 void 5408 vop_lock_debugpre(void *ap) 5409 { 5410 struct vop_lock1_args *a = ap; 5411 5412 if ((a->a_flags & LK_INTERLOCK) == 0) 5413 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5414 else 5415 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5416 } 5417 5418 void 5419 vop_lock_debugpost(void *ap, int rc) 5420 { 5421 struct vop_lock1_args *a = ap; 5422 5423 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5424 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5425 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5426 } 5427 5428 void 5429 vop_unlock_debugpre(void *ap) 5430 { 5431 struct vop_unlock_args *a = ap; 5432 5433 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5434 } 5435 5436 void 5437 vop_need_inactive_debugpre(void *ap) 5438 { 5439 struct vop_need_inactive_args *a = ap; 5440 5441 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5442 } 5443 5444 void 5445 vop_need_inactive_debugpost(void *ap, int rc) 5446 { 5447 struct vop_need_inactive_args *a = ap; 5448 5449 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5450 } 5451 #endif 5452 5453 void 5454 vop_create_pre(void *ap) 5455 { 5456 struct vop_create_args *a; 5457 struct vnode *dvp; 5458 5459 a = ap; 5460 dvp = a->a_dvp; 5461 vn_seqc_write_begin(dvp); 5462 } 5463 5464 void 5465 vop_create_post(void *ap, int rc) 5466 { 5467 struct vop_create_args *a; 5468 struct vnode *dvp; 5469 5470 a = ap; 5471 dvp = a->a_dvp; 5472 vn_seqc_write_end(dvp); 5473 if (!rc) 5474 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5475 } 5476 5477 void 5478 vop_whiteout_pre(void *ap) 5479 { 5480 struct vop_whiteout_args *a; 5481 struct vnode *dvp; 5482 5483 a = ap; 5484 dvp = a->a_dvp; 5485 vn_seqc_write_begin(dvp); 5486 } 5487 5488 void 5489 vop_whiteout_post(void *ap, int rc) 5490 { 5491 struct vop_whiteout_args *a; 5492 struct vnode *dvp; 5493 5494 a = ap; 5495 dvp = a->a_dvp; 5496 vn_seqc_write_end(dvp); 5497 } 5498 5499 void 5500 vop_deleteextattr_pre(void *ap) 5501 { 5502 struct vop_deleteextattr_args *a; 5503 struct vnode *vp; 5504 5505 a = ap; 5506 vp = a->a_vp; 5507 vn_seqc_write_begin(vp); 5508 } 5509 5510 void 5511 vop_deleteextattr_post(void *ap, int rc) 5512 { 5513 struct vop_deleteextattr_args *a; 5514 struct vnode *vp; 5515 5516 a = ap; 5517 vp = a->a_vp; 5518 vn_seqc_write_end(vp); 5519 if (!rc) 5520 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5521 } 5522 5523 void 5524 vop_link_pre(void *ap) 5525 { 5526 struct vop_link_args *a; 5527 struct vnode *vp, *tdvp; 5528 5529 a = ap; 5530 vp = a->a_vp; 5531 tdvp = a->a_tdvp; 5532 vn_seqc_write_begin(vp); 5533 vn_seqc_write_begin(tdvp); 5534 } 5535 
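/*
 * vop_link_post() follows the same pattern as the other *_post hooks:
 * end the seqc write sections opened in the matching _pre hook and, on
 * success, post the kevent notes (NOTE_LINK on the hard-linked vnode,
 * NOTE_WRITE on the directory).
 */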
5536 void 5537 vop_link_post(void *ap, int rc) 5538 { 5539 struct vop_link_args *a; 5540 struct vnode *vp, *tdvp; 5541 5542 a = ap; 5543 vp = a->a_vp; 5544 tdvp = a->a_tdvp; 5545 vn_seqc_write_end(vp); 5546 vn_seqc_write_end(tdvp); 5547 if (!rc) { 5548 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5549 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5550 } 5551 } 5552 5553 void 5554 vop_mkdir_pre(void *ap) 5555 { 5556 struct vop_mkdir_args *a; 5557 struct vnode *dvp; 5558 5559 a = ap; 5560 dvp = a->a_dvp; 5561 vn_seqc_write_begin(dvp); 5562 } 5563 5564 void 5565 vop_mkdir_post(void *ap, int rc) 5566 { 5567 struct vop_mkdir_args *a; 5568 struct vnode *dvp; 5569 5570 a = ap; 5571 dvp = a->a_dvp; 5572 vn_seqc_write_end(dvp); 5573 if (!rc) 5574 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5575 } 5576 5577 #ifdef DEBUG_VFS_LOCKS 5578 void 5579 vop_mkdir_debugpost(void *ap, int rc) 5580 { 5581 struct vop_mkdir_args *a; 5582 5583 a = ap; 5584 if (!rc) 5585 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5586 } 5587 #endif 5588 5589 void 5590 vop_mknod_pre(void *ap) 5591 { 5592 struct vop_mknod_args *a; 5593 struct vnode *dvp; 5594 5595 a = ap; 5596 dvp = a->a_dvp; 5597 vn_seqc_write_begin(dvp); 5598 } 5599 5600 void 5601 vop_mknod_post(void *ap, int rc) 5602 { 5603 struct vop_mknod_args *a; 5604 struct vnode *dvp; 5605 5606 a = ap; 5607 dvp = a->a_dvp; 5608 vn_seqc_write_end(dvp); 5609 if (!rc) 5610 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5611 } 5612 5613 void 5614 vop_reclaim_post(void *ap, int rc) 5615 { 5616 struct vop_reclaim_args *a; 5617 struct vnode *vp; 5618 5619 a = ap; 5620 vp = a->a_vp; 5621 ASSERT_VOP_IN_SEQC(vp); 5622 if (!rc) 5623 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5624 } 5625 5626 void 5627 vop_remove_pre(void *ap) 5628 { 5629 struct vop_remove_args *a; 5630 struct vnode *dvp, *vp; 5631 5632 a = ap; 5633 dvp = a->a_dvp; 5634 vp = a->a_vp; 5635 vn_seqc_write_begin(dvp); 5636 vn_seqc_write_begin(vp); 5637 } 5638 5639 void 5640 vop_remove_post(void *ap, int rc) 5641 { 5642 struct vop_remove_args *a; 5643 struct vnode *dvp, *vp; 5644 5645 a = ap; 5646 dvp = a->a_dvp; 5647 vp = a->a_vp; 5648 vn_seqc_write_end(dvp); 5649 vn_seqc_write_end(vp); 5650 if (!rc) { 5651 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5652 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5653 } 5654 } 5655 5656 void 5657 vop_rename_post(void *ap, int rc) 5658 { 5659 struct vop_rename_args *a = ap; 5660 long hint; 5661 5662 if (!rc) { 5663 hint = NOTE_WRITE; 5664 if (a->a_fdvp == a->a_tdvp) { 5665 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5666 hint |= NOTE_LINK; 5667 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5668 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5669 } else { 5670 hint |= NOTE_EXTEND; 5671 if (a->a_fvp->v_type == VDIR) 5672 hint |= NOTE_LINK; 5673 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5674 5675 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5676 a->a_tvp->v_type == VDIR) 5677 hint &= ~NOTE_LINK; 5678 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5679 } 5680 5681 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5682 if (a->a_tvp) 5683 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5684 } 5685 if (a->a_tdvp != a->a_fdvp) 5686 vdrop(a->a_fdvp); 5687 if (a->a_tvp != a->a_fvp) 5688 vdrop(a->a_fvp); 5689 vdrop(a->a_tdvp); 5690 if (a->a_tvp) 5691 vdrop(a->a_tvp); 5692 } 5693 5694 void 5695 vop_rmdir_pre(void *ap) 5696 { 5697 struct vop_rmdir_args *a; 5698 struct vnode *dvp, *vp; 5699 5700 a = ap; 5701 dvp = a->a_dvp; 5702 vp = a->a_vp; 5703 vn_seqc_write_begin(dvp); 5704 vn_seqc_write_begin(vp); 5705 } 5706 5707 void 5708 vop_rmdir_post(void *ap, int rc) 5709 { 5710 struct 
vop_rmdir_args *a; 5711 struct vnode *dvp, *vp; 5712 5713 a = ap; 5714 dvp = a->a_dvp; 5715 vp = a->a_vp; 5716 vn_seqc_write_end(dvp); 5717 vn_seqc_write_end(vp); 5718 if (!rc) { 5719 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5720 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5721 } 5722 } 5723 5724 void 5725 vop_setattr_pre(void *ap) 5726 { 5727 struct vop_setattr_args *a; 5728 struct vnode *vp; 5729 5730 a = ap; 5731 vp = a->a_vp; 5732 vn_seqc_write_begin(vp); 5733 } 5734 5735 void 5736 vop_setattr_post(void *ap, int rc) 5737 { 5738 struct vop_setattr_args *a; 5739 struct vnode *vp; 5740 5741 a = ap; 5742 vp = a->a_vp; 5743 vn_seqc_write_end(vp); 5744 if (!rc) 5745 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5746 } 5747 5748 void 5749 vop_setacl_pre(void *ap) 5750 { 5751 struct vop_setacl_args *a; 5752 struct vnode *vp; 5753 5754 a = ap; 5755 vp = a->a_vp; 5756 vn_seqc_write_begin(vp); 5757 } 5758 5759 void 5760 vop_setacl_post(void *ap, int rc __unused) 5761 { 5762 struct vop_setacl_args *a; 5763 struct vnode *vp; 5764 5765 a = ap; 5766 vp = a->a_vp; 5767 vn_seqc_write_end(vp); 5768 } 5769 5770 void 5771 vop_setextattr_pre(void *ap) 5772 { 5773 struct vop_setextattr_args *a; 5774 struct vnode *vp; 5775 5776 a = ap; 5777 vp = a->a_vp; 5778 vn_seqc_write_begin(vp); 5779 } 5780 5781 void 5782 vop_setextattr_post(void *ap, int rc) 5783 { 5784 struct vop_setextattr_args *a; 5785 struct vnode *vp; 5786 5787 a = ap; 5788 vp = a->a_vp; 5789 vn_seqc_write_end(vp); 5790 if (!rc) 5791 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5792 } 5793 5794 void 5795 vop_symlink_pre(void *ap) 5796 { 5797 struct vop_symlink_args *a; 5798 struct vnode *dvp; 5799 5800 a = ap; 5801 dvp = a->a_dvp; 5802 vn_seqc_write_begin(dvp); 5803 } 5804 5805 void 5806 vop_symlink_post(void *ap, int rc) 5807 { 5808 struct vop_symlink_args *a; 5809 struct vnode *dvp; 5810 5811 a = ap; 5812 dvp = a->a_dvp; 5813 vn_seqc_write_end(dvp); 5814 if (!rc) 5815 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5816 } 5817 5818 void 5819 vop_open_post(void *ap, int rc) 5820 { 5821 struct vop_open_args *a = ap; 5822 5823 if (!rc) 5824 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5825 } 5826 5827 void 5828 vop_close_post(void *ap, int rc) 5829 { 5830 struct vop_close_args *a = ap; 5831 5832 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5833 !VN_IS_DOOMED(a->a_vp))) { 5834 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5835 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5836 } 5837 } 5838 5839 void 5840 vop_read_post(void *ap, int rc) 5841 { 5842 struct vop_read_args *a = ap; 5843 5844 if (!rc) 5845 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5846 } 5847 5848 void 5849 vop_read_pgcache_post(void *ap, int rc) 5850 { 5851 struct vop_read_pgcache_args *a = ap; 5852 5853 if (!rc) 5854 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 5855 } 5856 5857 void 5858 vop_readdir_post(void *ap, int rc) 5859 { 5860 struct vop_readdir_args *a = ap; 5861 5862 if (!rc) 5863 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5864 } 5865 5866 static struct knlist fs_knlist; 5867 5868 static void 5869 vfs_event_init(void *arg) 5870 { 5871 knlist_init_mtx(&fs_knlist, NULL); 5872 } 5873 /* XXX - correct order? 
*/ 5874 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5875 5876 void 5877 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5878 { 5879 5880 KNOTE_UNLOCKED(&fs_knlist, event); 5881 } 5882 5883 static int filt_fsattach(struct knote *kn); 5884 static void filt_fsdetach(struct knote *kn); 5885 static int filt_fsevent(struct knote *kn, long hint); 5886 5887 struct filterops fs_filtops = { 5888 .f_isfd = 0, 5889 .f_attach = filt_fsattach, 5890 .f_detach = filt_fsdetach, 5891 .f_event = filt_fsevent 5892 }; 5893 5894 static int 5895 filt_fsattach(struct knote *kn) 5896 { 5897 5898 kn->kn_flags |= EV_CLEAR; 5899 knlist_add(&fs_knlist, kn, 0); 5900 return (0); 5901 } 5902 5903 static void 5904 filt_fsdetach(struct knote *kn) 5905 { 5906 5907 knlist_remove(&fs_knlist, kn, 0); 5908 } 5909 5910 static int 5911 filt_fsevent(struct knote *kn, long hint) 5912 { 5913 5914 kn->kn_fflags |= hint; 5915 return (kn->kn_fflags != 0); 5916 } 5917 5918 static int 5919 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5920 { 5921 struct vfsidctl vc; 5922 int error; 5923 struct mount *mp; 5924 5925 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5926 if (error) 5927 return (error); 5928 if (vc.vc_vers != VFS_CTL_VERS1) 5929 return (EINVAL); 5930 mp = vfs_getvfs(&vc.vc_fsid); 5931 if (mp == NULL) 5932 return (ENOENT); 5933 /* ensure that a specific sysctl goes to the right filesystem. */ 5934 if (strcmp(vc.vc_fstypename, "*") != 0 && 5935 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5936 vfs_rel(mp); 5937 return (EINVAL); 5938 } 5939 VCTLTOREQ(&vc, req); 5940 error = VFS_SYSCTL(mp, vc.vc_op, req); 5941 vfs_rel(mp); 5942 return (error); 5943 } 5944 5945 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 5946 NULL, 0, sysctl_vfs_ctl, "", 5947 "Sysctl by fsid"); 5948 5949 /* 5950 * Function to initialize a va_filerev field sensibly. 5951 * XXX: Wouldn't a random number make a lot more sense ?? 
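 * The value below packs the seconds of the system uptime into the upper
 * 32 bits and the top half of the binary fraction into the lower 32 bits,
 * so the result is monotonically non-decreasing over time.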
5952 */ 5953 u_quad_t 5954 init_va_filerev(void) 5955 { 5956 struct bintime bt; 5957 5958 getbinuptime(&bt); 5959 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5960 } 5961 5962 static int filt_vfsread(struct knote *kn, long hint); 5963 static int filt_vfswrite(struct knote *kn, long hint); 5964 static int filt_vfsvnode(struct knote *kn, long hint); 5965 static void filt_vfsdetach(struct knote *kn); 5966 static struct filterops vfsread_filtops = { 5967 .f_isfd = 1, 5968 .f_detach = filt_vfsdetach, 5969 .f_event = filt_vfsread 5970 }; 5971 static struct filterops vfswrite_filtops = { 5972 .f_isfd = 1, 5973 .f_detach = filt_vfsdetach, 5974 .f_event = filt_vfswrite 5975 }; 5976 static struct filterops vfsvnode_filtops = { 5977 .f_isfd = 1, 5978 .f_detach = filt_vfsdetach, 5979 .f_event = filt_vfsvnode 5980 }; 5981 5982 static void 5983 vfs_knllock(void *arg) 5984 { 5985 struct vnode *vp = arg; 5986 5987 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5988 } 5989 5990 static void 5991 vfs_knlunlock(void *arg) 5992 { 5993 struct vnode *vp = arg; 5994 5995 VOP_UNLOCK(vp); 5996 } 5997 5998 static void 5999 vfs_knl_assert_locked(void *arg) 6000 { 6001 #ifdef DEBUG_VFS_LOCKS 6002 struct vnode *vp = arg; 6003 6004 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6005 #endif 6006 } 6007 6008 static void 6009 vfs_knl_assert_unlocked(void *arg) 6010 { 6011 #ifdef DEBUG_VFS_LOCKS 6012 struct vnode *vp = arg; 6013 6014 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6015 #endif 6016 } 6017 6018 int 6019 vfs_kqfilter(struct vop_kqfilter_args *ap) 6020 { 6021 struct vnode *vp = ap->a_vp; 6022 struct knote *kn = ap->a_kn; 6023 struct knlist *knl; 6024 6025 switch (kn->kn_filter) { 6026 case EVFILT_READ: 6027 kn->kn_fop = &vfsread_filtops; 6028 break; 6029 case EVFILT_WRITE: 6030 kn->kn_fop = &vfswrite_filtops; 6031 break; 6032 case EVFILT_VNODE: 6033 kn->kn_fop = &vfsvnode_filtops; 6034 break; 6035 default: 6036 return (EINVAL); 6037 } 6038 6039 kn->kn_hook = (caddr_t)vp; 6040 6041 v_addpollinfo(vp); 6042 if (vp->v_pollinfo == NULL) 6043 return (ENOMEM); 6044 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6045 vhold(vp); 6046 knlist_add(knl, kn, 0); 6047 6048 return (0); 6049 } 6050 6051 /* 6052 * Detach knote from vnode 6053 */ 6054 static void 6055 filt_vfsdetach(struct knote *kn) 6056 { 6057 struct vnode *vp = (struct vnode *)kn->kn_hook; 6058 6059 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6060 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6061 vdrop(vp); 6062 } 6063 6064 /*ARGSUSED*/ 6065 static int 6066 filt_vfsread(struct knote *kn, long hint) 6067 { 6068 struct vnode *vp = (struct vnode *)kn->kn_hook; 6069 struct vattr va; 6070 int res; 6071 6072 /* 6073 * filesystem is gone, so set the EOF flag and schedule 6074 * the knote for deletion. 6075 */ 6076 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6077 VI_LOCK(vp); 6078 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6079 VI_UNLOCK(vp); 6080 return (1); 6081 } 6082 6083 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6084 return (0); 6085 6086 VI_LOCK(vp); 6087 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6088 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6089 VI_UNLOCK(vp); 6090 return (res); 6091 } 6092 6093 /*ARGSUSED*/ 6094 static int 6095 filt_vfswrite(struct knote *kn, long hint) 6096 { 6097 struct vnode *vp = (struct vnode *)kn->kn_hook; 6098 6099 VI_LOCK(vp); 6100 6101 /* 6102 * filesystem is gone, so set the EOF flag and schedule 6103 * the knote for deletion. 
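 * Note that, unlike filt_vfsread(), writes are always reported as ready:
 * the function returns 1 unconditionally and leaves kn_data at 0.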
6104 */ 6105 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6106 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6107 6108 kn->kn_data = 0; 6109 VI_UNLOCK(vp); 6110 return (1); 6111 } 6112 6113 static int 6114 filt_vfsvnode(struct knote *kn, long hint) 6115 { 6116 struct vnode *vp = (struct vnode *)kn->kn_hook; 6117 int res; 6118 6119 VI_LOCK(vp); 6120 if (kn->kn_sfflags & hint) 6121 kn->kn_fflags |= hint; 6122 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6123 kn->kn_flags |= EV_EOF; 6124 VI_UNLOCK(vp); 6125 return (1); 6126 } 6127 res = (kn->kn_fflags != 0); 6128 VI_UNLOCK(vp); 6129 return (res); 6130 } 6131 6132 /* 6133 * Returns whether the directory is empty or not. 6134 * If it is empty, the return value is 0; otherwise 6135 * the return value is an error value (which may 6136 * be ENOTEMPTY). 6137 */ 6138 int 6139 vfs_emptydir(struct vnode *vp) 6140 { 6141 struct uio uio; 6142 struct iovec iov; 6143 struct dirent *dirent, *dp, *endp; 6144 int error, eof; 6145 6146 error = 0; 6147 eof = 0; 6148 6149 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6150 6151 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6152 iov.iov_base = dirent; 6153 iov.iov_len = sizeof(struct dirent); 6154 6155 uio.uio_iov = &iov; 6156 uio.uio_iovcnt = 1; 6157 uio.uio_offset = 0; 6158 uio.uio_resid = sizeof(struct dirent); 6159 uio.uio_segflg = UIO_SYSSPACE; 6160 uio.uio_rw = UIO_READ; 6161 uio.uio_td = curthread; 6162 6163 while (eof == 0 && error == 0) { 6164 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6165 NULL, NULL); 6166 if (error != 0) 6167 break; 6168 endp = (void *)((uint8_t *)dirent + 6169 sizeof(struct dirent) - uio.uio_resid); 6170 for (dp = dirent; dp < endp; 6171 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6172 if (dp->d_type == DT_WHT) 6173 continue; 6174 if (dp->d_namlen == 0) 6175 continue; 6176 if (dp->d_type != DT_DIR && 6177 dp->d_type != DT_UNKNOWN) { 6178 error = ENOTEMPTY; 6179 break; 6180 } 6181 if (dp->d_namlen > 2) { 6182 error = ENOTEMPTY; 6183 break; 6184 } 6185 if (dp->d_namlen == 1 && 6186 dp->d_name[0] != '.') { 6187 error = ENOTEMPTY; 6188 break; 6189 } 6190 if (dp->d_namlen == 2 && 6191 dp->d_name[1] != '.') { 6192 error = ENOTEMPTY; 6193 break; 6194 } 6195 uio.uio_resid = sizeof(struct dirent); 6196 } 6197 } 6198 free(dirent, M_TEMP); 6199 return (error); 6200 } 6201 6202 int 6203 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6204 { 6205 int error; 6206 6207 if (dp->d_reclen > ap->a_uio->uio_resid) 6208 return (ENAMETOOLONG); 6209 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6210 if (error) { 6211 if (ap->a_ncookies != NULL) { 6212 if (ap->a_cookies != NULL) 6213 free(ap->a_cookies, M_TEMP); 6214 ap->a_cookies = NULL; 6215 *ap->a_ncookies = 0; 6216 } 6217 return (error); 6218 } 6219 if (ap->a_ncookies == NULL) 6220 return (0); 6221 6222 KASSERT(ap->a_cookies, 6223 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6224 6225 *ap->a_cookies = realloc(*ap->a_cookies, 6226 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 6227 (*ap->a_cookies)[*ap->a_ncookies] = off; 6228 *ap->a_ncookies += 1; 6229 return (0); 6230 } 6231 6232 /* 6233 * The purpose of this routine is to remove granularity from accmode_t, 6234 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6235 * VADMIN and VAPPEND. 6236 * 6237 * If it returns 0, the caller is supposed to continue with the usual 6238 * access checks using 'accmode' as modified by this routine. 
If it 6239 * returns nonzero value, the caller is supposed to return that value 6240 * as errno. 6241 * 6242 * Note that after this routine runs, accmode may be zero. 6243 */ 6244 int 6245 vfs_unixify_accmode(accmode_t *accmode) 6246 { 6247 /* 6248 * There is no way to specify explicit "deny" rule using 6249 * file mode or POSIX.1e ACLs. 6250 */ 6251 if (*accmode & VEXPLICIT_DENY) { 6252 *accmode = 0; 6253 return (0); 6254 } 6255 6256 /* 6257 * None of these can be translated into usual access bits. 6258 * Also, the common case for NFSv4 ACLs is to not contain 6259 * either of these bits. Caller should check for VWRITE 6260 * on the containing directory instead. 6261 */ 6262 if (*accmode & (VDELETE_CHILD | VDELETE)) 6263 return (EPERM); 6264 6265 if (*accmode & VADMIN_PERMS) { 6266 *accmode &= ~VADMIN_PERMS; 6267 *accmode |= VADMIN; 6268 } 6269 6270 /* 6271 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6272 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6273 */ 6274 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6275 6276 return (0); 6277 } 6278 6279 /* 6280 * Clear out a doomed vnode (if any) and replace it with a new one as long 6281 * as the fs is not being unmounted. Return the root vnode to the caller. 6282 */ 6283 static int __noinline 6284 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6285 { 6286 struct vnode *vp; 6287 int error; 6288 6289 restart: 6290 if (mp->mnt_rootvnode != NULL) { 6291 MNT_ILOCK(mp); 6292 vp = mp->mnt_rootvnode; 6293 if (vp != NULL) { 6294 if (!VN_IS_DOOMED(vp)) { 6295 vrefact(vp); 6296 MNT_IUNLOCK(mp); 6297 error = vn_lock(vp, flags); 6298 if (error == 0) { 6299 *vpp = vp; 6300 return (0); 6301 } 6302 vrele(vp); 6303 goto restart; 6304 } 6305 /* 6306 * Clear the old one. 6307 */ 6308 mp->mnt_rootvnode = NULL; 6309 } 6310 MNT_IUNLOCK(mp); 6311 if (vp != NULL) { 6312 vfs_op_barrier_wait(mp); 6313 vrele(vp); 6314 } 6315 } 6316 error = VFS_CACHEDROOT(mp, flags, vpp); 6317 if (error != 0) 6318 return (error); 6319 if (mp->mnt_vfs_ops == 0) { 6320 MNT_ILOCK(mp); 6321 if (mp->mnt_vfs_ops != 0) { 6322 MNT_IUNLOCK(mp); 6323 return (0); 6324 } 6325 if (mp->mnt_rootvnode == NULL) { 6326 vrefact(*vpp); 6327 mp->mnt_rootvnode = *vpp; 6328 } else { 6329 if (mp->mnt_rootvnode != *vpp) { 6330 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6331 panic("%s: mismatch between vnode returned " 6332 " by VFS_CACHEDROOT and the one cached " 6333 " (%p != %p)", 6334 __func__, *vpp, mp->mnt_rootvnode); 6335 } 6336 } 6337 } 6338 MNT_IUNLOCK(mp); 6339 } 6340 return (0); 6341 } 6342 6343 int 6344 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6345 { 6346 struct vnode *vp; 6347 int error; 6348 6349 if (!vfs_op_thread_enter(mp)) 6350 return (vfs_cache_root_fallback(mp, flags, vpp)); 6351 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6352 if (vp == NULL || VN_IS_DOOMED(vp)) { 6353 vfs_op_thread_exit(mp); 6354 return (vfs_cache_root_fallback(mp, flags, vpp)); 6355 } 6356 vrefact(vp); 6357 vfs_op_thread_exit(mp); 6358 error = vn_lock(vp, flags); 6359 if (error != 0) { 6360 vrele(vp); 6361 return (vfs_cache_root_fallback(mp, flags, vpp)); 6362 } 6363 *vpp = vp; 6364 return (0); 6365 } 6366 6367 struct vnode * 6368 vfs_cache_root_clear(struct mount *mp) 6369 { 6370 struct vnode *vp; 6371 6372 /* 6373 * ops > 0 guarantees there is nobody who can see this vnode 6374 */ 6375 MPASS(mp->mnt_vfs_ops > 0); 6376 vp = mp->mnt_rootvnode; 6377 if (vp != NULL) 6378 vn_seqc_write_begin(vp); 6379 mp->mnt_rootvnode = NULL; 6380 return (vp); 6381 } 6382 6383 void 6384 

/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted. Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode returned "
					    "by VFS_CACHEDROOT and the one cached "
					    "(%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}

struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * ops > 0 guarantees there is nobody who can see this vnode
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}
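
/*
 * Illustrative sketch (not part of the original code): a consumer that
 * needs the root vnode of a mounted filesystem, for instance while crossing
 * a mount point, goes through vfs_cache_root() and drops the locked,
 * referenced result with vput() when done.  The LK_SHARED flag below is
 * just an example lock request.
 *
 *	struct vnode *rvp;
 *	int error;
 *
 *	error = vfs_cache_root(mp, LK_SHARED, &rvp);
 *	if (error != 0)
 *		return (error);
 *	... use rvp ...
 *	vput(rvp);
 *
 * vfs_cache_root_clear() and vfs_cache_root_set(), below, are only intended
 * to be called while mnt_vfs_ops > 0 (e.g. around unmount), as asserted.
 */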

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
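
/*
 * Illustrative sketch (not part of the original code): a filesystem walks
 * all of its vnodes with the MNT_VNODE_FOREACH_ALL() macro from
 * sys/mount.h, which is built on the helpers above.  The iterator returns
 * each vnode with its interlock held; the body must release it, and early
 * exit must go through MNT_VNODE_FOREACH_ALL_ABORT().  The do_something()
 * callback is hypothetical.
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (do_something(vp) != 0) {
 *			VI_UNLOCK(vp);
 *			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 *			break;
 *		}
 *		VI_UNLOCK(vp);
 *	}
 */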

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note we may be racing against vdrop which transitioned the hold
	 * count to 0 and now waits for the ->mnt_listmtx lock.  This is fine;
	 * if we are the only user after we get the interlock we will just
	 * vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}

static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode. Note we may encounter a
		 * long string of vnodes we don't care about and hog the list
		 * as a result. Check for it and requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		VNPASS(vp->v_mount == mp, vp);
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}
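
/*
 * Illustrative sketch (not part of the original code): the lazy list is
 * walked with MNT_VNODE_FOREACH_LAZY() from sys/mount.h, built on the
 * helpers above.  The callback is invoked without the interlock so that
 * uninteresting vnodes can be skipped cheaply; vnodes that pass are
 * returned with the interlock held.  The wants_sync() callback and the
 * loop body below are hypothetical.
 *
 *	static bool
 *	wants_sync(struct vnode *vp, void *arg __unused)
 *	{
 *		return ((vp->v_vflag & VV_NOSYNC) == 0);
 *	}
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, wants_sync, NULL) {
 *		... vp's interlock is held here ...
 *		VI_UNLOCK(vp);
 *	}
 */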

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_unheld_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	vn_seqc_write_begin_unheld_locked(vp);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_begin_unheld(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_unheld_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}
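
/*
 * Illustrative sketch (not part of the original code): the
 * vn_seqc_write_begin()/vn_seqc_write_end() pair brackets modifications of
 * vnode state that lockless consumers may inspect, so that they can detect
 * a change in progress and fall back to the locked path:
 *
 *	vn_seqc_write_begin(vp);
 *	... modify the protected vnode state ...
 *	vn_seqc_write_end(vp);
 *
 * On the reader side, the sequence counter is snapshotted and later
 * revalidated before the observed data is trusted; see the vn_seqc_*
 * accessors in sys/vnode.h and the underlying primitives in sys/seqc.h
 * for the exact interface.
 */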