/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};
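
/*
 * These tables back the IFTOVT() and VTTOIF() macros from sys/vnode.h.
 * A minimal illustrative sketch of the mapping:
 *
 *	enum vtype vt = IFTOVT(S_IFDIR | 0755);	(yields VDIR)
 *	int fmt = VTTOIF(VREG);			(yields S_IFREG)
 *
 * IFTOVT() indexes iftovt_tab with (mode & S_IFMT) >> 12, and VTTOIF()
 * indexes vttoif_tab with the vnode type.
 */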

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
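
/*
 * Illustrative sketch of hashing a delayed item into the wheel; the
 * real insertion is done by vn_syncer_add_to_worklist():
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
 *
 * Because syncer_mask is a power-of-two mask, the wheel wraps naturally
 * as syncer_delayno advances once per second.
 */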

static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	long freevnodes;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;		/* enough extras after expansion */
static u_long vlowat;		/* minimal extras before expansion */
static u_long vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");
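
/*
 * Illustrative userland sketch (assuming the standard sysctl(3) API) of
 * driving this handler, e.g. to grow the vnode cache:
 *
 *	u_long val = 500000;
 *	sysctlbyname("kern.maxvnodes", NULL, NULL, &val, sizeof(val));
 *
 * The handler then recomputes wantfreevnodes and the vnlru watermarks
 * under vnode_list_mtx.
 */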

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the
		 * caller know that the sysctl had no effect.  Return EAGAIN
		 * because a subsequent call will likely succeed (since namei
		 * will create a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");
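
/*
 * Illustrative sketch (assuming the standard sysctl(3) userland API) of
 * exercising the debug knob above:
 *
 *	const char *path = "/tmp/somefile";
 *	sysctlbyname("debug.try_reclaim_vnode", NULL, NULL,
 *	    path, strlen(path) + 1);
 *
 * A subsequent stat(2) of the path would allocate a fresh vnode.
 */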

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);
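
/*
 * PCTRIE_DEFINE_SMR() generates the BUF_PCTRIE_* accessors used below,
 * keyed on b_lblkno.  A minimal illustrative sketch of a lookup:
 *
 *	struct buf *bp;
 *
 *	bp = BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno);
 *
 * Writers must hold the bufobj lock; see bnoreuselist() at the end of
 * this section for a real user of BUF_PCTRIE_LOOKUP_GE().
 */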

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	int cpu, physvnodes, virtvnodes;
	u_int i;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
	uma_zone_set_smr(vnode_zone, vfs_smr);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof(struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	deferred_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *	VOP_LOOKUP() obtains B while A is held
 *	vfs_busy() obtains a shared lock on F while A and B are held
 *	vput() releases lock on B
 *	vput() releases lock on A
 *	VFS_ROOT() obtains lock on D while shared lock on F is held
 *	vfs_unbusy() releases shared lock on F
 *	vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *	    Attempt to lock A (instead of vp_crossmp) while D is held would
 *	    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mp, ref, 1);
		vfs_mp_count_add_pcpu(mp, lockref, 1);
		vfs_op_thread_exit(mp);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount point it tried to busy is no
	 * longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}
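
/*
 * Typical usage (illustrative sketch): busy a mount point around an
 * operation that must not race with unmount, e.g.
 *
 *	if (vfs_busy(mp, 0) != 0)
 *		return (ENOENT);
 *	... operate on mp ...
 *	vfs_unbusy(mp);
 *
 * Callers iterating mountlist instead pass MBF_MNTLSTLOCK while holding
 * mountlist_mtx, as vfs_busyfs() does below.
 */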

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mp, lockref, 1);
		vfs_mp_count_sub_pcpu(mp, ref, 1);
		vfs_op_thread_exit(mp);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even different filesystem, so we have
 * to check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
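
/*
 * Worked example of the minor-number packing above (illustrative): for
 * vfc_typenum 0x35 and mntid_base 0x1234, the minor passed to
 * makedev(255, ...) is
 *
 *	(0x35 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	    = 0x35120034
 *
 * so the filesystem type occupies the top byte and the 16-bit mount id
 * is split into bytes 2 and 0 around it.
 */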

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
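
/*
 * Typical usage (illustrative sketch) via the VATTR_NULL() macro from
 * sys/vnode.h: initialize all attributes to "unspecified", then set only
 * the fields to change before a VOP_SETATTR() call:
 *
 *	struct vattr va;
 *
 *	VATTR_NULL(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred);
 */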

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		if (!VI_TRYLOCK(vp))
			goto next_iter;

		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    VN_IS_DOOMED(vp) || vp->v_type == VNON) {
			VI_UNLOCK(vp);
			goto next_iter;
		}

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			VI_UNLOCK(vp);
			goto next_iter;
		}

		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		if (should_yield())
			kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	int ocount;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	ocount = count;
	mvp = vnode_list_free_marker;
restart:
	vp = mvp;
	while (count > 0) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * Don't recycle if our vnode is from a different type
		 * of mount point.  Note that mp is type-safe; the
		 * check does not reach an unmapped address even if
		 * the vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if (vp->v_holdcnt > 0 || (mnt_op != NULL &&
		    (mp = vp->v_mount) != NULL && mp->mnt_op != mnt_op) ||
		    !VI_TRYLOCK(vp)) {
			continue;
		}
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);
		count--;
		mtx_unlock(&vnode_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		vdrop(vp);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	return (ocount - count);
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_list_mtx);
}
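
/*
 * Illustrative sketch: a filesystem under memory pressure may ask for
 * free vnodes of its own type to be recycled (myfs_vfsops is a
 * hypothetical example):
 *
 *	vnlru_free(32, &myfs_vfsops);
 *
 * Passing a NULL vfsops, as vnlru_proc() does, recycles vnodes from any
 * filesystem.
 */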

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

/*
 * The main freevnodes counter is only updated when threads requeue their vnode
 * batches.  CPUs are conditionally walked to compute a more accurate total.
 *
 * Limit how much of a slop we are willing to tolerate.  Note: the actual
 * value at any given moment can still exceed slop, but it should not be
 * by a significant margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 128

static __inline void
vn_freevnodes_inc(void)
{
	struct vdbatch *vd;

	critical_enter();
	vd = DPCPU_PTR(vd);
	vd->freevnodes++;
	critical_exit();
}

static __inline void
vn_freevnodes_dec(void)
{
	struct vdbatch *vd;

	critical_enter();
	vd = DPCPU_PTR(vd);
	vd->freevnodes--;
	critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	struct vdbatch *vd;
	long slop;
	int cpu;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (freevnodes > freevnodes_old)
		slop = freevnodes - freevnodes_old;
	else
		slop = freevnodes_old - freevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (freevnodes >= 0 ? freevnodes : 0);
	freevnodes_old = freevnodes;
	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		freevnodes_old += vd->freevnodes;
	}
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static bool
vnlru_under_unlocked(u_long rnumvnodes, u_long limit)
{
	long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = atomic_load_long(&freevnodes);
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes, NULL);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp);
		VI_UNLOCK(vp);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if (!VN_IS_DOOMED(vp)) {
		counter_u64_add(recycles_free_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp);
	VI_UNLOCK(vp);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Allocate a new vnode.
 *
 * The operation never returns an error.  Returning an error was disabled
 * in r145385 (dated 2005) with the following comment:
 *
 * XXX Not all VFS_VGET/ffs_vget callers check returns.
 *
 * Given the age of this commit (almost 15 years at the time of writing this
 * comment) restoring the ability to fail requires a significant audit of
 * all codepaths.
 *
 * The routine can try to free a vnode or stall for up to 1 second waiting
 * for vnlru to clear things up, but ultimately always performs a M_WAITOK
 * allocation.
 */
static u_long vn_alloc_cyclecount;

static struct vnode * __noinline
vn_alloc_hard(struct mount *mp)
{
	u_long rnumvnodes, rfreevnodes;

	mtx_lock(&vnode_list_mtx);
	rnumvnodes = atomic_load_long(&numvnodes);
	if (rnumvnodes + 1 < desiredvnodes) {
		vn_alloc_cyclecount = 0;
		goto alloc;
	}
	rfreevnodes = vnlru_read_freevnodes();
	if (vn_alloc_cyclecount++ >= rfreevnodes) {
		vn_alloc_cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (vnlru_free_locked(1, NULL) > 0)
		goto alloc;
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		/*
		 * Wait for space for a new vnode.
		 */
		vnlru_kick();
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1, NULL);
	}
alloc:
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick();
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	u_long rnumvnodes;

	if (__predict_false(vn_alloc_cyclecount != 0))
		return (vn_alloc_hard(mp));
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
		atomic_subtract_long(&numvnodes, 1);
		return (vn_alloc_hard(mp));
	}

	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree_smr(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
#ifdef WITNESS
	if (lo->lo_name != tag) {
#endif
		lo->lo_name = tag;
#ifdef WITNESS
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
#endif
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

void
getnewvnode_reserve(void)
{
	struct thread *td;

	td = curthread;
	MPASS(td->td_vp_reserved == NULL);
	td->td_vp_reserved = vn_alloc(NULL);
}

void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vn_free(td->td_vp_reserved);
		td->td_vp_reserved = NULL;
	}
}
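
/*
 * Illustrative sketch of the common allocation pattern in a filesystem's
 * vget routine (the myfs names are hypothetical); see insmntque() below
 * for the second half:
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vp->v_data = ip;
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *
 * Note that insmntque() destroys the vnode via the standard destructor
 * on failure, so the caller must not vput() it again in that case.
 */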

static void __noinline
freevnode(struct vnode *vp)
{
	struct bufobj *bo;

	/*
	 * The vnode has been marked for destruction, so free it.
	 *
	 * The vnode will be returned to the zone where it will
	 * normally remain until it is needed for another vnode.  We
	 * need to cleanup (or verify that the cleanup has already
	 * been done) any residual data left from its current use
	 * so as not to contaminate the freshly allocated vnode.
	 */
	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
	/*
	 * Paired with vgone.
	 */
	vn_seqc_write_end_locked(vp);
	VNPASS(vp->v_seqc_users == 0, vp);

	bo = &vp->v_bufobj;
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
	    ("clean blk trie not empty"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
	    ("dirty blk trie not empty"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
	    ("Dangling rangelock waiters"));
	VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp,
	    ("Leaked inactivation"));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_vnode_destroy(vp);
#endif
	if (vp->v_pollinfo != NULL) {
		destroy_vpollinfo(vp->v_pollinfo);
		vp->v_pollinfo = NULL;
	}
#ifdef INVARIANTS
	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	vp->v_mountedhere = NULL;
	vp->v_unpcb = NULL;
	vp->v_rdev = NULL;
	vp->v_fifoinfo = NULL;
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	vp->v_irflag = 0;
	vp->v_iflag = 0;
	vp->v_vflag = 0;
	bo->bo_flag = 0;
	vn_free(vp);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}
1879 */ 1880 MNT_ILOCK(mp); 1881 VI_LOCK(vp); 1882 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1883 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1884 mp->mnt_nvnodelistsize == 0)) && 1885 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1886 VI_UNLOCK(vp); 1887 MNT_IUNLOCK(mp); 1888 if (dtr != NULL) 1889 dtr(vp, dtr_arg); 1890 return (EBUSY); 1891 } 1892 vp->v_mount = mp; 1893 MNT_REF(mp); 1894 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1895 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1896 ("neg mount point vnode list size")); 1897 mp->mnt_nvnodelistsize++; 1898 VI_UNLOCK(vp); 1899 MNT_IUNLOCK(mp); 1900 return (0); 1901 } 1902 1903 int 1904 insmntque(struct vnode *vp, struct mount *mp) 1905 { 1906 1907 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1908 } 1909 1910 /* 1911 * Flush out and invalidate all buffers associated with a bufobj 1912 * Called with the underlying object locked. 1913 */ 1914 int 1915 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1916 { 1917 int error; 1918 1919 BO_LOCK(bo); 1920 if (flags & V_SAVE) { 1921 error = bufobj_wwait(bo, slpflag, slptimeo); 1922 if (error) { 1923 BO_UNLOCK(bo); 1924 return (error); 1925 } 1926 if (bo->bo_dirty.bv_cnt > 0) { 1927 BO_UNLOCK(bo); 1928 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1929 return (error); 1930 /* 1931 * XXX We could save a lock/unlock if this was only 1932 * enabled under INVARIANTS 1933 */ 1934 BO_LOCK(bo); 1935 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1936 panic("vinvalbuf: dirty bufs"); 1937 } 1938 } 1939 /* 1940 * If you alter this loop please notice that interlock is dropped and 1941 * reacquired in flushbuflist. Special care is needed to ensure that 1942 * no race conditions occur from this. 1943 */ 1944 do { 1945 error = flushbuflist(&bo->bo_clean, 1946 flags, bo, slpflag, slptimeo); 1947 if (error == 0 && !(flags & V_CLEANONLY)) 1948 error = flushbuflist(&bo->bo_dirty, 1949 flags, bo, slpflag, slptimeo); 1950 if (error != 0 && error != EAGAIN) { 1951 BO_UNLOCK(bo); 1952 return (error); 1953 } 1954 } while (error != 0); 1955 1956 /* 1957 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1958 * have write I/O in-progress but if there is a VM object then the 1959 * VM object can also have read-I/O in-progress. 1960 */ 1961 do { 1962 bufobj_wwait(bo, 0, 0); 1963 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1964 BO_UNLOCK(bo); 1965 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1966 BO_LOCK(bo); 1967 } 1968 } while (bo->bo_numoutput > 0); 1969 BO_UNLOCK(bo); 1970 1971 /* 1972 * Destroy the copy in the VM cache, too. 1973 */ 1974 if (bo->bo_object != NULL && 1975 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1976 VM_OBJECT_WLOCK(bo->bo_object); 1977 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1978 OBJPR_CLEANONLY : 0); 1979 VM_OBJECT_WUNLOCK(bo->bo_object); 1980 } 1981 1982 #ifdef INVARIANTS 1983 BO_LOCK(bo); 1984 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1985 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1986 bo->bo_clean.bv_cnt > 0)) 1987 panic("vinvalbuf: flush failed"); 1988 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1989 bo->bo_dirty.bv_cnt > 0) 1990 panic("vinvalbuf: flush dirty failed"); 1991 BO_UNLOCK(bo); 1992 #endif 1993 return (0); 1994 } 1995 1996 /* 1997 * Flush out and invalidate all buffers associated with a vnode. 1998 * Called with the underlying object locked. 
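 *
 * V_SAVE asks for dirty buffers to be synced before invalidation and
 * V_CLEANONLY restricts the flush to the clean list; V_NORMAL and
 * V_ALT select buffers by their BX_ALTDATA marking (see flushbuflist).
 * A common pattern, used by vgonel() when reclaiming, is to attempt a
 * saving flush and fall back to discarding the buffers:
 *
 *	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
 *		while (vinvalbuf(vp, 0, 0, 0) != 0)
 *			;
 *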
1999 */ 2000 int 2001 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2002 { 2003 2004 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2005 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2006 if (vp->v_object != NULL && vp->v_object->handle != vp) 2007 return (0); 2008 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2009 } 2010 2011 /* 2012 * Flush out buffers on the specified list. 2013 * 2014 */ 2015 static int 2016 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2017 int slptimeo) 2018 { 2019 struct buf *bp, *nbp; 2020 int retval, error; 2021 daddr_t lblkno; 2022 b_xflags_t xflags; 2023 2024 ASSERT_BO_WLOCKED(bo); 2025 2026 retval = 0; 2027 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2028 /* 2029 * If we are flushing both V_NORMAL and V_ALT buffers then 2030 * do not skip any buffers. If we are flushing only V_NORMAL 2031 * buffers then skip buffers marked as BX_ALTDATA. If we are 2032 * flushing only V_ALT buffers then skip buffers not marked 2033 * as BX_ALTDATA. 2034 */ 2035 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2036 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2037 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2038 continue; 2039 } 2040 if (nbp != NULL) { 2041 lblkno = nbp->b_lblkno; 2042 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2043 } 2044 retval = EAGAIN; 2045 error = BUF_TIMELOCK(bp, 2046 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2047 "flushbuf", slpflag, slptimeo); 2048 if (error) { 2049 BO_LOCK(bo); 2050 return (error != ENOLCK ? error : EAGAIN); 2051 } 2052 KASSERT(bp->b_bufobj == bo, 2053 ("bp %p wrong b_bufobj %p should be %p", 2054 bp, bp->b_bufobj, bo)); 2055 /* 2056 * XXX Since there are no node locks for NFS, I 2057 * believe there is a slight chance that a delayed 2058 * write will occur while sleeping just above, so 2059 * check for it. 2060 */ 2061 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2062 (flags & V_SAVE)) { 2063 bremfree(bp); 2064 bp->b_flags |= B_ASYNC; 2065 bwrite(bp); 2066 BO_LOCK(bo); 2067 return (EAGAIN); /* XXX: why not loop ? */ 2068 } 2069 bremfree(bp); 2070 bp->b_flags |= (B_INVAL | B_RELBUF); 2071 bp->b_flags &= ~B_ASYNC; 2072 brelse(bp); 2073 BO_LOCK(bo); 2074 if (nbp == NULL) 2075 break; 2076 nbp = gbincore(bo, lblkno); 2077 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2078 != xflags) 2079 break; /* nbp invalid */ 2080 } 2081 return (retval); 2082 } 2083 2084 int 2085 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2086 { 2087 struct buf *bp; 2088 int error; 2089 daddr_t lblkno; 2090 2091 ASSERT_BO_LOCKED(bo); 2092 2093 for (lblkno = startn;;) { 2094 again: 2095 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2096 if (bp == NULL || bp->b_lblkno >= endn || 2097 bp->b_lblkno < startn) 2098 break; 2099 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2100 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2101 if (error != 0) { 2102 BO_RLOCK(bo); 2103 if (error == ENOLCK) 2104 goto again; 2105 return (error); 2106 } 2107 KASSERT(bp->b_bufobj == bo, 2108 ("bp %p wrong b_bufobj %p should be %p", 2109 bp, bp->b_bufobj, bo)); 2110 lblkno = bp->b_lblkno + 1; 2111 if ((bp->b_flags & B_MANAGED) == 0) 2112 bremfree(bp); 2113 bp->b_flags |= B_RELBUF; 2114 /* 2115 * In the VMIO case, use the B_NOREUSE flag to hint that the 2116 * pages backing each buffer in the range are unlikely to be 2117 * reused. 
Dirty buffers will have the hint applied once 2118 * they've been written. 2119 */ 2120 if ((bp->b_flags & B_VMIO) != 0) 2121 bp->b_flags |= B_NOREUSE; 2122 brelse(bp); 2123 BO_RLOCK(bo); 2124 } 2125 return (0); 2126 } 2127 2128 /* 2129 * Truncate a file's buffer and pages to a specified length. This 2130 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2131 * sync activity. 2132 */ 2133 int 2134 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2135 { 2136 struct buf *bp, *nbp; 2137 struct bufobj *bo; 2138 daddr_t startlbn; 2139 2140 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2141 vp, blksize, (uintmax_t)length); 2142 2143 /* 2144 * Round up to the *next* lbn. 2145 */ 2146 startlbn = howmany(length, blksize); 2147 2148 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2149 2150 bo = &vp->v_bufobj; 2151 restart_unlocked: 2152 BO_LOCK(bo); 2153 2154 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2155 ; 2156 2157 if (length > 0) { 2158 restartsync: 2159 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2160 if (bp->b_lblkno > 0) 2161 continue; 2162 /* 2163 * Since we hold the vnode lock this should only 2164 * fail if we're racing with the buf daemon. 2165 */ 2166 if (BUF_LOCK(bp, 2167 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2168 BO_LOCKPTR(bo)) == ENOLCK) 2169 goto restart_unlocked; 2170 2171 VNASSERT((bp->b_flags & B_DELWRI), vp, 2172 ("buf(%p) on dirty queue without DELWRI", bp)); 2173 2174 bremfree(bp); 2175 bawrite(bp); 2176 BO_LOCK(bo); 2177 goto restartsync; 2178 } 2179 } 2180 2181 bufobj_wwait(bo, 0, 0); 2182 BO_UNLOCK(bo); 2183 vnode_pager_setsize(vp, length); 2184 2185 return (0); 2186 } 2187 2188 /* 2189 * Invalidate the cached pages of a file's buffer within the range of block 2190 * numbers [startlbn, endlbn). 
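 * For example, with blksize 4096, invalidating bytes [8192, 20480)
 * means startlbn 2 and endlbn 5: start = blksize * startlbn = 8192
 * and end = blksize * endlbn = 20480 in the computation below.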
2191 */ 2192 void 2193 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2194 int blksize) 2195 { 2196 struct bufobj *bo; 2197 off_t start, end; 2198 2199 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2200 2201 start = blksize * startlbn; 2202 end = blksize * endlbn; 2203 2204 bo = &vp->v_bufobj; 2205 BO_LOCK(bo); 2206 MPASS(blksize == bo->bo_bsize); 2207 2208 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2209 ; 2210 2211 BO_UNLOCK(bo); 2212 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2213 } 2214 2215 static int 2216 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2217 daddr_t startlbn, daddr_t endlbn) 2218 { 2219 struct buf *bp, *nbp; 2220 bool anyfreed; 2221 2222 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2223 ASSERT_BO_LOCKED(bo); 2224 2225 do { 2226 anyfreed = false; 2227 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2228 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2229 continue; 2230 if (BUF_LOCK(bp, 2231 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2232 BO_LOCKPTR(bo)) == ENOLCK) { 2233 BO_LOCK(bo); 2234 return (EAGAIN); 2235 } 2236 2237 bremfree(bp); 2238 bp->b_flags |= B_INVAL | B_RELBUF; 2239 bp->b_flags &= ~B_ASYNC; 2240 brelse(bp); 2241 anyfreed = true; 2242 2243 BO_LOCK(bo); 2244 if (nbp != NULL && 2245 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2246 nbp->b_vp != vp || 2247 (nbp->b_flags & B_DELWRI) != 0)) 2248 return (EAGAIN); 2249 } 2250 2251 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2252 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2253 continue; 2254 if (BUF_LOCK(bp, 2255 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2256 BO_LOCKPTR(bo)) == ENOLCK) { 2257 BO_LOCK(bo); 2258 return (EAGAIN); 2259 } 2260 bremfree(bp); 2261 bp->b_flags |= B_INVAL | B_RELBUF; 2262 bp->b_flags &= ~B_ASYNC; 2263 brelse(bp); 2264 anyfreed = true; 2265 2266 BO_LOCK(bo); 2267 if (nbp != NULL && 2268 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2269 (nbp->b_vp != vp) || 2270 (nbp->b_flags & B_DELWRI) == 0)) 2271 return (EAGAIN); 2272 } 2273 } while (anyfreed); 2274 return (0); 2275 } 2276 2277 static void 2278 buf_vlist_remove(struct buf *bp) 2279 { 2280 struct bufv *bv; 2281 b_xflags_t flags; 2282 2283 flags = bp->b_xflags; 2284 2285 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2286 ASSERT_BO_WLOCKED(bp->b_bufobj); 2287 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2288 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2289 ("%s: buffer %p has invalid queue state", __func__, bp)); 2290 2291 if ((flags & BX_VNDIRTY) != 0) 2292 bv = &bp->b_bufobj->bo_dirty; 2293 else 2294 bv = &bp->b_bufobj->bo_clean; 2295 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2296 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2297 bv->bv_cnt--; 2298 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2299 } 2300 2301 /* 2302 * Add the buffer to the sorted clean or dirty block list. 2303 * 2304 * NOTE: xflags is passed as a constant, optimizing this inline function! 
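 *
 * bgetvp() below associates a fresh buffer using BX_VNCLEAN, while
 * reassignbuf() migrates a buffer between the two lists by removing
 * it and re-adding it with BX_VNCLEAN or BX_VNDIRTY as appropriate.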
2305 */ 2306 static void 2307 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2308 { 2309 struct bufv *bv; 2310 struct buf *n; 2311 int error; 2312 2313 ASSERT_BO_WLOCKED(bo); 2314 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2315 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2316 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2317 ("dead bo %p", bo)); 2318 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2319 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2320 bp->b_xflags |= xflags; 2321 if (xflags & BX_VNDIRTY) 2322 bv = &bo->bo_dirty; 2323 else 2324 bv = &bo->bo_clean; 2325 2326 /* 2327 * Keep the list ordered. Optimize empty list insertion. Assume 2328 * we tend to grow at the tail so lookup_le should usually be cheaper 2329 * than _ge. 2330 */ 2331 if (bv->bv_cnt == 0 || 2332 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2333 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2334 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2335 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2336 else 2337 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2338 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2339 if (error) 2340 panic("buf_vlist_add: Preallocated nodes insufficient."); 2341 bv->bv_cnt++; 2342 } 2343 2344 /* 2345 * Look up a buffer using the buffer tries. 2346 */ 2347 struct buf * 2348 gbincore(struct bufobj *bo, daddr_t lblkno) 2349 { 2350 struct buf *bp; 2351 2352 ASSERT_BO_LOCKED(bo); 2353 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2354 if (bp != NULL) 2355 return (bp); 2356 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2357 } 2358 2359 /* 2360 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2361 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2362 * stability of the result. Like other lockless lookups, the found buf may 2363 * already be invalid by the time this function returns. 2364 */ 2365 struct buf * 2366 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2367 { 2368 struct buf *bp; 2369 2370 ASSERT_BO_UNLOCKED(bo); 2371 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2372 if (bp != NULL) 2373 return (bp); 2374 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2375 } 2376 2377 /* 2378 * Associate a buffer with a vnode. 2379 */ 2380 void 2381 bgetvp(struct vnode *vp, struct buf *bp) 2382 { 2383 struct bufobj *bo; 2384 2385 bo = &vp->v_bufobj; 2386 ASSERT_BO_WLOCKED(bo); 2387 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2388 2389 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2390 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2391 ("bgetvp: bp already attached! %p", bp)); 2392 2393 vhold(vp); 2394 bp->b_vp = vp; 2395 bp->b_bufobj = bo; 2396 /* 2397 * Insert onto list for new vnode. 2398 */ 2399 buf_vlist_add(bp, bo, BX_VNCLEAN); 2400 } 2401 2402 /* 2403 * Disassociate a buffer from a vnode. 2404 */ 2405 void 2406 brelvp(struct buf *bp) 2407 { 2408 struct bufobj *bo; 2409 struct vnode *vp; 2410 2411 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2412 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2413 2414 /* 2415 * Delete from old vnode list, if on one. 
2416 */ 2417 vp = bp->b_vp; /* XXX */ 2418 bo = bp->b_bufobj; 2419 BO_LOCK(bo); 2420 buf_vlist_remove(bp); 2421 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2422 bo->bo_flag &= ~BO_ONWORKLST; 2423 mtx_lock(&sync_mtx); 2424 LIST_REMOVE(bo, bo_synclist); 2425 syncer_worklist_len--; 2426 mtx_unlock(&sync_mtx); 2427 } 2428 bp->b_vp = NULL; 2429 bp->b_bufobj = NULL; 2430 BO_UNLOCK(bo); 2431 vdrop(vp); 2432 } 2433 2434 /* 2435 * Add an item to the syncer work queue. 2436 */ 2437 static void 2438 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2439 { 2440 int slot; 2441 2442 ASSERT_BO_WLOCKED(bo); 2443 2444 mtx_lock(&sync_mtx); 2445 if (bo->bo_flag & BO_ONWORKLST) 2446 LIST_REMOVE(bo, bo_synclist); 2447 else { 2448 bo->bo_flag |= BO_ONWORKLST; 2449 syncer_worklist_len++; 2450 } 2451 2452 if (delay > syncer_maxdelay - 2) 2453 delay = syncer_maxdelay - 2; 2454 slot = (syncer_delayno + delay) & syncer_mask; 2455 2456 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2457 mtx_unlock(&sync_mtx); 2458 } 2459 2460 static int 2461 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2462 { 2463 int error, len; 2464 2465 mtx_lock(&sync_mtx); 2466 len = syncer_worklist_len - sync_vnode_count; 2467 mtx_unlock(&sync_mtx); 2468 error = SYSCTL_OUT(req, &len, sizeof(len)); 2469 return (error); 2470 } 2471 2472 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2473 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2474 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2475 2476 static struct proc *updateproc; 2477 static void sched_sync(void); 2478 static struct kproc_desc up_kp = { 2479 "syncer", 2480 sched_sync, 2481 &updateproc 2482 }; 2483 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2484 2485 static int 2486 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2487 { 2488 struct vnode *vp; 2489 struct mount *mp; 2490 2491 *bo = LIST_FIRST(slp); 2492 if (*bo == NULL) 2493 return (0); 2494 vp = bo2vnode(*bo); 2495 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2496 return (1); 2497 /* 2498 * We use vhold in case the vnode does not 2499 * successfully sync. vhold prevents the vnode from 2500 * going away when we unlock the sync_mtx so that 2501 * we can acquire the vnode interlock. 2502 */ 2503 vholdl(vp); 2504 mtx_unlock(&sync_mtx); 2505 VI_UNLOCK(vp); 2506 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2507 vdrop(vp); 2508 mtx_lock(&sync_mtx); 2509 return (*bo == LIST_FIRST(slp)); 2510 } 2511 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2512 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2513 VOP_UNLOCK(vp); 2514 vn_finished_write(mp); 2515 BO_LOCK(*bo); 2516 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2517 /* 2518 * Put us back on the worklist. The worklist 2519 * routine will remove us from our current 2520 * position and then add us back in at a later 2521 * position. 2522 */ 2523 vn_syncer_add_to_worklist(*bo, syncdelay); 2524 } 2525 BO_UNLOCK(*bo); 2526 vdrop(vp); 2527 mtx_lock(&sync_mtx); 2528 return (0); 2529 } 2530 2531 static int first_printf = 1; 2532 2533 /* 2534 * System filesystem synchronizer daemon. 
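 *
 * The worklist is a wheel of syncer_maxdelay one-second buckets:
 * vn_syncer_add_to_worklist() above hashes a bufobj into slot
 * (syncer_delayno + delay) & syncer_mask, and the loop below advances
 * syncer_delayno once per second, servicing one bucket per pass. A
 * bufobj queued with a delay of 30 is thus visited roughly 30 seconds
 * later.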
2535 */ 2536 static void 2537 sched_sync(void) 2538 { 2539 struct synclist *next, *slp; 2540 struct bufobj *bo; 2541 long starttime; 2542 struct thread *td = curthread; 2543 int last_work_seen; 2544 int net_worklist_len; 2545 int syncer_final_iter; 2546 int error; 2547 2548 last_work_seen = 0; 2549 syncer_final_iter = 0; 2550 syncer_state = SYNCER_RUNNING; 2551 starttime = time_uptime; 2552 td->td_pflags |= TDP_NORUNNINGBUF; 2553 2554 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2555 SHUTDOWN_PRI_LAST); 2556 2557 mtx_lock(&sync_mtx); 2558 for (;;) { 2559 if (syncer_state == SYNCER_FINAL_DELAY && 2560 syncer_final_iter == 0) { 2561 mtx_unlock(&sync_mtx); 2562 kproc_suspend_check(td->td_proc); 2563 mtx_lock(&sync_mtx); 2564 } 2565 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2566 if (syncer_state != SYNCER_RUNNING && 2567 starttime != time_uptime) { 2568 if (first_printf) { 2569 printf("\nSyncing disks, vnodes remaining... "); 2570 first_printf = 0; 2571 } 2572 printf("%d ", net_worklist_len); 2573 } 2574 starttime = time_uptime; 2575 2576 /* 2577 * Push files whose dirty time has expired. Be careful 2578 * of interrupt race on slp queue. 2579 * 2580 * Skip over empty worklist slots when shutting down. 2581 */ 2582 do { 2583 slp = &syncer_workitem_pending[syncer_delayno]; 2584 syncer_delayno += 1; 2585 if (syncer_delayno == syncer_maxdelay) 2586 syncer_delayno = 0; 2587 next = &syncer_workitem_pending[syncer_delayno]; 2588 /* 2589 * If the worklist has wrapped since 2590 * it was emptied of all but syncer vnodes, 2591 * switch to the FINAL_DELAY state and run 2592 * for one more second. 2593 */ 2594 if (syncer_state == SYNCER_SHUTTING_DOWN && 2595 net_worklist_len == 0 && 2596 last_work_seen == syncer_delayno) { 2597 syncer_state = SYNCER_FINAL_DELAY; 2598 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2599 } 2600 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2601 syncer_worklist_len > 0); 2602 2603 /* 2604 * Keep track of the last time there was anything 2605 * on the worklist other than syncer vnodes. 2606 * Return to the SHUTTING_DOWN state if any 2607 * new work appears. 2608 */ 2609 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2610 last_work_seen = syncer_delayno; 2611 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2612 syncer_state = SYNCER_SHUTTING_DOWN; 2613 while (!LIST_EMPTY(slp)) { 2614 error = sync_vnode(slp, &bo, td); 2615 if (error == 1) { 2616 LIST_REMOVE(bo, bo_synclist); 2617 LIST_INSERT_HEAD(next, bo, bo_synclist); 2618 continue; 2619 } 2620 2621 if (first_printf == 0) { 2622 /* 2623 * Drop the sync mutex, because some watchdog 2624 * drivers need to sleep while patting the watchdog. 2625 */ 2626 mtx_unlock(&sync_mtx); 2627 wdog_kern_pat(WD_LASTVAL); 2628 mtx_lock(&sync_mtx); 2629 } 2630 } 2631 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2632 syncer_final_iter--; 2633 /* 2634 * The variable rushjob allows the kernel to speed up the 2635 * processing of the filesystem syncer process. A rushjob 2636 * value of N tells the filesystem syncer to process the next 2637 * N seconds worth of work on its queue ASAP. Currently rushjob 2638 * is used by the soft update code to speed up the filesystem 2639 * syncer process when the incore state is getting so far 2640 * ahead of the disk that the kernel memory pool is being 2641 * threatened with exhaustion.
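 *
 * For example, once speedup_syncer() has bumped rushjob to 2, the
 * loop below skips its one-second sleep on the next two passes,
 * servicing about three seconds worth of buckets in one second.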
2642 */ 2643 if (rushjob > 0) { 2644 rushjob -= 1; 2645 continue; 2646 } 2647 /* 2648 * Just sleep for a short period of time between 2649 * iterations when shutting down to allow some I/O 2650 * to happen. 2651 * 2652 * If it has taken us less than a second to process the 2653 * current work, then wait. Otherwise start right over 2654 * again. We can still lose time if any single round 2655 * takes more than two seconds, but it does not really 2656 * matter as we are just trying to generally pace the 2657 * filesystem activity. 2658 */ 2659 if (syncer_state != SYNCER_RUNNING || 2660 time_uptime == starttime) { 2661 thread_lock(td); 2662 sched_prio(td, PPAUSE); 2663 thread_unlock(td); 2664 } 2665 if (syncer_state != SYNCER_RUNNING) 2666 cv_timedwait(&sync_wakeup, &sync_mtx, 2667 hz / SYNCER_SHUTDOWN_SPEEDUP); 2668 else if (time_uptime == starttime) 2669 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2670 } 2671 } 2672 2673 /* 2674 * Request the syncer daemon to speed up its work. 2675 * We never push it to speed up more than half of its 2676 * normal turn time; otherwise it could take over the CPU. 2677 */ 2678 int 2679 speedup_syncer(void) 2680 { 2681 int ret = 0; 2682 2683 mtx_lock(&sync_mtx); 2684 if (rushjob < syncdelay / 2) { 2685 rushjob += 1; 2686 stat_rush_requests += 1; 2687 ret = 1; 2688 } 2689 mtx_unlock(&sync_mtx); 2690 cv_broadcast(&sync_wakeup); 2691 return (ret); 2692 } 2693 2694 /* 2695 * Tell the syncer to speed up its work and run through its work 2696 * list several times, then tell it to shut down. 2697 */ 2698 static void 2699 syncer_shutdown(void *arg, int howto) 2700 { 2701 2702 if (howto & RB_NOSYNC) 2703 return; 2704 mtx_lock(&sync_mtx); 2705 syncer_state = SYNCER_SHUTTING_DOWN; 2706 rushjob = 0; 2707 mtx_unlock(&sync_mtx); 2708 cv_broadcast(&sync_wakeup); 2709 kproc_shutdown(arg, howto); 2710 } 2711 2712 void 2713 syncer_suspend(void) 2714 { 2715 2716 syncer_shutdown(updateproc, 0); 2717 } 2718 2719 void 2720 syncer_resume(void) 2721 { 2722 2723 mtx_lock(&sync_mtx); 2724 first_printf = 1; 2725 syncer_state = SYNCER_RUNNING; 2726 mtx_unlock(&sync_mtx); 2727 cv_broadcast(&sync_wakeup); 2728 kproc_resume(updateproc); 2729 } 2730 2731 /* 2732 * Move the buffer between the clean and dirty lists of its vnode. 2733 */ 2734 void 2735 reassignbuf(struct buf *bp) 2736 { 2737 struct vnode *vp; 2738 struct bufobj *bo; 2739 int delay; 2740 #ifdef INVARIANTS 2741 struct bufv *bv; 2742 #endif 2743 2744 vp = bp->b_vp; 2745 bo = bp->b_bufobj; 2746 2747 KASSERT((bp->b_flags & B_PAGING) == 0, 2748 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2749 2750 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2751 bp, bp->b_vp, bp->b_flags); 2752 2753 BO_LOCK(bo); 2754 buf_vlist_remove(bp); 2755 2756 /* 2757 * If dirty, put on list of dirty buffers; otherwise insert onto list 2758 * of clean buffers.
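 * A buffer dirtied for the first time also puts the bufobj on the
 * syncer worklist, with a delay keyed to the vnode type below:
 * dirdelay for directories, metadelay for devices (metadata) and
 * filedelay for everything else, so namespace and metadata changes
 * are paced separately from ordinary file data.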
2759 */ 2760 if (bp->b_flags & B_DELWRI) { 2761 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2762 switch (vp->v_type) { 2763 case VDIR: 2764 delay = dirdelay; 2765 break; 2766 case VCHR: 2767 delay = metadelay; 2768 break; 2769 default: 2770 delay = filedelay; 2771 } 2772 vn_syncer_add_to_worklist(bo, delay); 2773 } 2774 buf_vlist_add(bp, bo, BX_VNDIRTY); 2775 } else { 2776 buf_vlist_add(bp, bo, BX_VNCLEAN); 2777 2778 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2779 mtx_lock(&sync_mtx); 2780 LIST_REMOVE(bo, bo_synclist); 2781 syncer_worklist_len--; 2782 mtx_unlock(&sync_mtx); 2783 bo->bo_flag &= ~BO_ONWORKLST; 2784 } 2785 } 2786 #ifdef INVARIANTS 2787 bv = &bo->bo_clean; 2788 bp = TAILQ_FIRST(&bv->bv_hd); 2789 KASSERT(bp == NULL || bp->b_bufobj == bo, 2790 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2791 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2792 KASSERT(bp == NULL || bp->b_bufobj == bo, 2793 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2794 bv = &bo->bo_dirty; 2795 bp = TAILQ_FIRST(&bv->bv_hd); 2796 KASSERT(bp == NULL || bp->b_bufobj == bo, 2797 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2798 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2799 KASSERT(bp == NULL || bp->b_bufobj == bo, 2800 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2801 #endif 2802 BO_UNLOCK(bo); 2803 } 2804 2805 static void 2806 v_init_counters(struct vnode *vp) 2807 { 2808 2809 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2810 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2811 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2812 2813 refcount_init(&vp->v_holdcnt, 1); 2814 refcount_init(&vp->v_usecount, 1); 2815 } 2816 2817 /* 2818 * Grab a particular vnode from the free list, increment its 2819 * reference count and lock it. VIRF_DOOMED is set if the vnode 2820 * is being destroyed. Only callers who specify LK_RETRY will 2821 * see doomed vnodes. If inactive processing was delayed in 2822 * vput, try to do it here. 2823 * 2824 * usecount is manipulated using atomics without holding any locks. 2825 * 2826 * holdcnt can be manipulated using atomics without holding any locks, 2827 * except when transitioning 1<->0, in which case the interlock is held. 2828 * 2829 * Consumers which don't guarantee liveness of the vnode can use SMR to 2830 * try to get a reference. Note this operation can fail since the vnode 2831 * may be in the process of getting freed by the time they get to it.
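 *
 * A sketch of the expected lockless consumer pattern (the routine
 * my_smr_lookup() is hypothetical):
 *
 *	vfs_smr_enter();
 *	vp = my_smr_lookup(key);
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		goto retry;
 *	error = vget_finish(vp, LK_SHARED, vs);
 *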
2832 */ 2833 enum vgetstate 2834 vget_prep_smr(struct vnode *vp) 2835 { 2836 enum vgetstate vs; 2837 2838 VFS_SMR_ASSERT_ENTERED(); 2839 2840 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2841 vs = VGET_USECOUNT; 2842 } else { 2843 if (vhold_smr(vp)) 2844 vs = VGET_HOLDCNT; 2845 else 2846 vs = VGET_NONE; 2847 } 2848 return (vs); 2849 } 2850 2851 enum vgetstate 2852 vget_prep(struct vnode *vp) 2853 { 2854 enum vgetstate vs; 2855 2856 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2857 vs = VGET_USECOUNT; 2858 } else { 2859 vhold(vp); 2860 vs = VGET_HOLDCNT; 2861 } 2862 return (vs); 2863 } 2864 2865 void 2866 vget_abort(struct vnode *vp, enum vgetstate vs) 2867 { 2868 2869 switch (vs) { 2870 case VGET_USECOUNT: 2871 vrele(vp); 2872 break; 2873 case VGET_HOLDCNT: 2874 vdrop(vp); 2875 break; 2876 default: 2877 __assert_unreachable(); 2878 } 2879 } 2880 2881 int 2882 vget(struct vnode *vp, int flags) 2883 { 2884 enum vgetstate vs; 2885 2886 vs = vget_prep(vp); 2887 return (vget_finish(vp, flags, vs)); 2888 } 2889 2890 int 2891 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2892 { 2893 int error; 2894 2895 if ((flags & LK_INTERLOCK) != 0) 2896 ASSERT_VI_LOCKED(vp, __func__); 2897 else 2898 ASSERT_VI_UNLOCKED(vp, __func__); 2899 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2900 VNPASS(vp->v_holdcnt > 0, vp); 2901 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2902 2903 error = vn_lock(vp, flags); 2904 if (__predict_false(error != 0)) { 2905 vget_abort(vp, vs); 2906 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2907 vp); 2908 return (error); 2909 } 2910 2911 vget_finish_ref(vp, vs); 2912 return (0); 2913 } 2914 2915 void 2916 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 2917 { 2918 int old; 2919 2920 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2921 VNPASS(vp->v_holdcnt > 0, vp); 2922 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2923 2924 if (vs == VGET_USECOUNT) 2925 return; 2926 2927 /* 2928 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2929 * the vnode around. Otherwise someone else lent their hold count and 2930 * we have to drop ours. 2931 */ 2932 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2933 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 2934 if (old != 0) { 2935 #ifdef INVARIANTS 2936 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2937 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2938 #else 2939 refcount_release(&vp->v_holdcnt); 2940 #endif 2941 } 2942 } 2943 2944 void 2945 vref(struct vnode *vp) 2946 { 2947 enum vgetstate vs; 2948 2949 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2950 vs = vget_prep(vp); 2951 vget_finish_ref(vp, vs); 2952 } 2953 2954 void 2955 vrefact(struct vnode *vp) 2956 { 2957 2958 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2959 #ifdef INVARIANTS 2960 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2961 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 2962 #else 2963 refcount_acquire(&vp->v_usecount); 2964 #endif 2965 } 2966 2967 void 2968 vlazy(struct vnode *vp) 2969 { 2970 struct mount *mp; 2971 2972 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2973 2974 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 2975 return; 2976 /* 2977 * We may get here for inactive routines after the vnode got doomed.
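 * vdefer_inactive() below relies on this: a vnode which still owes
 * inactive processing is parked on the per-mount lazy list so the
 * syncer can find it and perform the deferred VOP_INACTIVE later.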
2978 */ 2979 if (VN_IS_DOOMED(vp)) 2980 return; 2981 mp = vp->v_mount; 2982 mtx_lock(&mp->mnt_listmtx); 2983 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 2984 vp->v_mflag |= VMP_LAZYLIST; 2985 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 2986 mp->mnt_lazyvnodelistsize++; 2987 } 2988 mtx_unlock(&mp->mnt_listmtx); 2989 } 2990 2991 /* 2992 * This routine is only meant to be called from vgonel prior to dooming 2993 * the vnode. 2994 */ 2995 static void 2996 vunlazy_gone(struct vnode *vp) 2997 { 2998 struct mount *mp; 2999 3000 ASSERT_VOP_ELOCKED(vp, __func__); 3001 ASSERT_VI_LOCKED(vp, __func__); 3002 VNPASS(!VN_IS_DOOMED(vp), vp); 3003 3004 if (vp->v_mflag & VMP_LAZYLIST) { 3005 mp = vp->v_mount; 3006 mtx_lock(&mp->mnt_listmtx); 3007 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3008 vp->v_mflag &= ~VMP_LAZYLIST; 3009 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3010 mp->mnt_lazyvnodelistsize--; 3011 mtx_unlock(&mp->mnt_listmtx); 3012 } 3013 } 3014 3015 static void 3016 vdefer_inactive(struct vnode *vp) 3017 { 3018 3019 ASSERT_VI_LOCKED(vp, __func__); 3020 VNASSERT(vp->v_holdcnt > 0, vp, 3021 ("%s: vnode without hold count", __func__)); 3022 if (VN_IS_DOOMED(vp)) { 3023 vdropl(vp); 3024 return; 3025 } 3026 if (vp->v_iflag & VI_DEFINACT) { 3027 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3028 vdropl(vp); 3029 return; 3030 } 3031 if (vp->v_usecount > 0) { 3032 vp->v_iflag &= ~VI_OWEINACT; 3033 vdropl(vp); 3034 return; 3035 } 3036 vlazy(vp); 3037 vp->v_iflag |= VI_DEFINACT; 3038 VI_UNLOCK(vp); 3039 counter_u64_add(deferred_inact, 1); 3040 } 3041 3042 static void 3043 vdefer_inactive_unlocked(struct vnode *vp) 3044 { 3045 3046 VI_LOCK(vp); 3047 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3048 vdropl(vp); 3049 return; 3050 } 3051 vdefer_inactive(vp); 3052 } 3053 3054 enum vput_op { VRELE, VPUT, VUNREF }; 3055 3056 /* 3057 * Handle ->v_usecount transitioning to 0. 3058 * 3059 * By releasing the last usecount we take ownership of the hold count which 3060 * provides liveness of the vnode, meaning we have to vdrop. 3061 * 3062 * For all vnodes we may need to perform inactive processing. It requires an 3063 * exclusive lock on the vnode, while it is legal to call here with only a 3064 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3065 * inactive processing gets deferred to the syncer. 3066 * 3067 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3068 * on the lock being held all the way until VOP_INACTIVE. This in particular 3069 * happens with UFS which adds half-constructed vnodes to the hash, where they 3070 * can be found by other code. 3071 */ 3072 static void 3073 vput_final(struct vnode *vp, enum vput_op func) 3074 { 3075 int error; 3076 bool want_unlock; 3077 3078 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3079 VNPASS(vp->v_holdcnt > 0, vp); 3080 3081 VI_LOCK(vp); 3082 3083 /* 3084 * By the time we got here someone else might have transitioned 3085 * the count back to > 0. 3086 */ 3087 if (vp->v_usecount > 0) 3088 goto out; 3089 3090 /* 3091 * If the vnode is doomed vgone already performed inactive processing 3092 * (if needed). 3093 */ 3094 if (VN_IS_DOOMED(vp)) 3095 goto out; 3096 3097 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3098 goto out; 3099 3100 if (vp->v_iflag & VI_DOINGINACT) 3101 goto out; 3102 3103 /* 3104 * Locking operations here will drop the interlock and possibly the 3105 * vnode lock, opening a window where the vnode can get doomed all the 3106 * while ->v_usecount is 0. 
Set VI_OWEINACT to let vgone know to 3107 * perform inactive. 3108 */ 3109 vp->v_iflag |= VI_OWEINACT; 3110 want_unlock = false; 3111 error = 0; 3112 switch (func) { 3113 case VRELE: 3114 switch (VOP_ISLOCKED(vp)) { 3115 case LK_EXCLUSIVE: 3116 break; 3117 case LK_EXCLOTHER: 3118 case 0: 3119 want_unlock = true; 3120 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3121 VI_LOCK(vp); 3122 break; 3123 default: 3124 /* 3125 * The lock has at least one sharer, but we have no way 3126 * to conclude whether this is us. Play it safe and 3127 * defer processing. 3128 */ 3129 error = EAGAIN; 3130 break; 3131 } 3132 break; 3133 case VPUT: 3134 want_unlock = true; 3135 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3136 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3137 LK_NOWAIT); 3138 VI_LOCK(vp); 3139 } 3140 break; 3141 case VUNREF: 3142 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3143 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3144 VI_LOCK(vp); 3145 } 3146 break; 3147 } 3148 if (error == 0) { 3149 vinactive(vp); 3150 if (want_unlock) 3151 VOP_UNLOCK(vp); 3152 vdropl(vp); 3153 } else { 3154 vdefer_inactive(vp); 3155 } 3156 return; 3157 out: 3158 if (func == VPUT) 3159 VOP_UNLOCK(vp); 3160 vdropl(vp); 3161 } 3162 3163 /* 3164 * Decrement ->v_usecount for a vnode. 3165 * 3166 * Releasing the last use count requires additional processing, see vput_final 3167 * above for details. 3168 * 3169 * Comment above each variant denotes lock state on entry and exit. 3170 */ 3171 3172 /* 3173 * in: any 3174 * out: same as passed in 3175 */ 3176 void 3177 vrele(struct vnode *vp) 3178 { 3179 3180 ASSERT_VI_UNLOCKED(vp, __func__); 3181 if (!refcount_release(&vp->v_usecount)) 3182 return; 3183 vput_final(vp, VRELE); 3184 } 3185 3186 /* 3187 * in: locked 3188 * out: unlocked 3189 */ 3190 void 3191 vput(struct vnode *vp) 3192 { 3193 3194 ASSERT_VOP_LOCKED(vp, __func__); 3195 ASSERT_VI_UNLOCKED(vp, __func__); 3196 if (!refcount_release(&vp->v_usecount)) { 3197 VOP_UNLOCK(vp); 3198 return; 3199 } 3200 vput_final(vp, VPUT); 3201 } 3202 3203 /* 3204 * in: locked 3205 * out: locked 3206 */ 3207 void 3208 vunref(struct vnode *vp) 3209 { 3210 3211 ASSERT_VOP_LOCKED(vp, __func__); 3212 ASSERT_VI_UNLOCKED(vp, __func__); 3213 if (!refcount_release(&vp->v_usecount)) 3214 return; 3215 vput_final(vp, VUNREF); 3216 } 3217 3218 void 3219 vhold(struct vnode *vp) 3220 { 3221 int old; 3222 3223 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3224 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3225 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3226 ("%s: wrong hold count %d", __func__, old)); 3227 if (old == 0) 3228 vn_freevnodes_dec(); 3229 } 3230 3231 void 3232 vholdnz(struct vnode *vp) 3233 { 3234 3235 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3236 #ifdef INVARIANTS 3237 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3238 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3239 ("%s: wrong hold count %d", __func__, old)); 3240 #else 3241 atomic_add_int(&vp->v_holdcnt, 1); 3242 #endif 3243 } 3244 3245 /* 3246 * Grab a hold count unless the vnode is freed. 3247 * 3248 * Only use this routine if vfs smr is the only protection you have against 3249 * freeing the vnode. 3250 * 3251 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3252 * is not set. After the flag is set the vnode becomes immutable to anyone but 3253 * the thread which managed to set the flag. 
3254 * 3255 * It may be tempting to replace the loop with: 3256 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3257 * if (count & VHOLD_NO_SMR) { 3258 * backpedal and error out; 3259 * } 3260 * 3261 * However, while this is more performant, it hinders debugging by eliminating 3262 * the previously mentioned invariant. 3263 */ 3264 bool 3265 vhold_smr(struct vnode *vp) 3266 { 3267 int count; 3268 3269 VFS_SMR_ASSERT_ENTERED(); 3270 3271 count = atomic_load_int(&vp->v_holdcnt); 3272 for (;;) { 3273 if (count & VHOLD_NO_SMR) { 3274 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3275 ("non-zero hold count with flags %d\n", count)); 3276 return (false); 3277 } 3278 3279 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3280 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3281 if (count == 0) 3282 vn_freevnodes_dec(); 3283 return (true); 3284 } 3285 } 3286 } 3287 3288 static void __noinline 3289 vdbatch_process(struct vdbatch *vd) 3290 { 3291 struct vnode *vp; 3292 int i; 3293 3294 mtx_assert(&vd->lock, MA_OWNED); 3295 MPASS(curthread->td_pinned > 0); 3296 MPASS(vd->index == VDBATCH_SIZE); 3297 3298 mtx_lock(&vnode_list_mtx); 3299 critical_enter(); 3300 freevnodes += vd->freevnodes; 3301 for (i = 0; i < VDBATCH_SIZE; i++) { 3302 vp = vd->tab[i]; 3303 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3304 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3305 MPASS(vp->v_dbatchcpu != NOCPU); 3306 vp->v_dbatchcpu = NOCPU; 3307 } 3308 mtx_unlock(&vnode_list_mtx); 3309 vd->freevnodes = 0; 3310 bzero(vd->tab, sizeof(vd->tab)); 3311 vd->index = 0; 3312 critical_exit(); 3313 } 3314 3315 static void 3316 vdbatch_enqueue(struct vnode *vp) 3317 { 3318 struct vdbatch *vd; 3319 3320 ASSERT_VI_LOCKED(vp, __func__); 3321 VNASSERT(!VN_IS_DOOMED(vp), vp, 3322 ("%s: deferring requeue of a doomed vnode", __func__)); 3323 3324 if (vp->v_dbatchcpu != NOCPU) { 3325 VI_UNLOCK(vp); 3326 return; 3327 } 3328 3329 sched_pin(); 3330 vd = DPCPU_PTR(vd); 3331 mtx_lock(&vd->lock); 3332 MPASS(vd->index < VDBATCH_SIZE); 3333 MPASS(vd->tab[vd->index] == NULL); 3334 /* 3335 * A hack: we depend on being pinned so that we know what to put in 3336 * ->v_dbatchcpu. 3337 */ 3338 vp->v_dbatchcpu = curcpu; 3339 vd->tab[vd->index] = vp; 3340 vd->index++; 3341 VI_UNLOCK(vp); 3342 if (vd->index == VDBATCH_SIZE) 3343 vdbatch_process(vd); 3344 mtx_unlock(&vd->lock); 3345 sched_unpin(); 3346 } 3347 3348 /* 3349 * This routine must only be called for vnodes which are about to be 3350 * deallocated. Supporting dequeue for arbitrary vnodes would require 3351 * validating that the locked batch matches. 3352 */ 3353 static void 3354 vdbatch_dequeue(struct vnode *vp) 3355 { 3356 struct vdbatch *vd; 3357 int i; 3358 short cpu; 3359 3360 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3361 ("%s: called for a used vnode\n", __func__)); 3362 3363 cpu = vp->v_dbatchcpu; 3364 if (cpu == NOCPU) 3365 return; 3366 3367 vd = DPCPU_ID_PTR(cpu, vd); 3368 mtx_lock(&vd->lock); 3369 for (i = 0; i < vd->index; i++) { 3370 if (vd->tab[i] != vp) 3371 continue; 3372 vp->v_dbatchcpu = NOCPU; 3373 vd->index--; 3374 vd->tab[i] = vd->tab[vd->index]; 3375 vd->tab[vd->index] = NULL; 3376 break; 3377 } 3378 mtx_unlock(&vd->lock); 3379 /* 3380 * Either we dequeued the vnode above or the target CPU beat us to it. 3381 */ 3382 MPASS(vp->v_dbatchcpu == NOCPU); 3383 } 3384 3385 /* 3386 * Drop the hold count of the vnode.
If this is the last reference to 3387 * the vnode we place it on the free list unless it has been vgone'd 3388 * (marked VIRF_DOOMED) in which case we will free it. 3389 * 3390 * Because the vnode vm object keeps a hold reference on the vnode if 3391 * there is at least one resident non-cached page, the vnode cannot 3392 * leave the active list without the page cleanup done. 3393 */ 3394 static void 3395 vdrop_deactivate(struct vnode *vp) 3396 { 3397 struct mount *mp; 3398 3399 ASSERT_VI_LOCKED(vp, __func__); 3400 /* 3401 * Mark a vnode as free: remove it from its active list 3402 * and put it up for recycling on the freelist. 3403 */ 3404 VNASSERT(!VN_IS_DOOMED(vp), vp, 3405 ("vdrop: returning doomed vnode")); 3406 VNASSERT(vp->v_op != NULL, vp, 3407 ("vdrop: vnode already reclaimed.")); 3408 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3409 ("vnode with VI_OWEINACT set")); 3410 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3411 ("vnode with VI_DEFINACT set")); 3412 if (vp->v_mflag & VMP_LAZYLIST) { 3413 mp = vp->v_mount; 3414 mtx_lock(&mp->mnt_listmtx); 3415 VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); 3416 /* 3417 * Don't remove the vnode from the lazy list if another thread 3418 * has increased the hold count. It may have re-enqueued the 3419 * vnode to the lazy list and is now responsible for its 3420 * removal. 3421 */ 3422 if (vp->v_holdcnt == 0) { 3423 vp->v_mflag &= ~VMP_LAZYLIST; 3424 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3425 mp->mnt_lazyvnodelistsize--; 3426 } 3427 mtx_unlock(&mp->mnt_listmtx); 3428 } 3429 vdbatch_enqueue(vp); 3430 } 3431 3432 static void __noinline 3433 vdropl_final(struct vnode *vp) 3434 { 3435 3436 ASSERT_VI_LOCKED(vp, __func__); 3437 VNPASS(VN_IS_DOOMED(vp), vp); 3438 /* 3439 * Set the VHOLD_NO_SMR flag. 3440 * 3441 * We may be racing against vhold_smr. If they win we can just pretend 3442 * we never got this far, they will vdrop later. 3443 */ 3444 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3445 vn_freevnodes_inc(); 3446 VI_UNLOCK(vp); 3447 /* 3448 * We lost the aforementioned race. Any subsequent access is 3449 * invalid as they might have managed to vdropl on their own. 3450 */ 3451 return; 3452 } 3453 /* 3454 * Don't bump freevnodes as this one is going away. 3455 */ 3456 freevnode(vp); 3457 } 3458 3459 void 3460 vdrop(struct vnode *vp) 3461 { 3462 3463 ASSERT_VI_UNLOCKED(vp, __func__); 3464 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3465 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3466 return; 3467 VI_LOCK(vp); 3468 vdropl(vp); 3469 } 3470 3471 void 3472 vdropl(struct vnode *vp) 3473 { 3474 3475 ASSERT_VI_LOCKED(vp, __func__); 3476 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3477 if (!refcount_release(&vp->v_holdcnt)) { 3478 VI_UNLOCK(vp); 3479 return; 3480 } 3481 if (!VN_IS_DOOMED(vp)) { 3482 vn_freevnodes_inc(); 3483 vdrop_deactivate(vp); 3484 /* 3485 * Also unlocks the interlock. We can't assert on it as we 3486 * released our hold and by now the vnode might have been 3487 * freed. 3488 */ 3489 return; 3490 } 3491 vdropl_final(vp); 3492 } 3493 3494 /* 3495 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3496 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 
3497 */ 3498 static void 3499 vinactivef(struct vnode *vp) 3500 { 3501 struct vm_object *obj; 3502 3503 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3504 ASSERT_VI_LOCKED(vp, "vinactive"); 3505 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3506 ("vinactive: recursed on VI_DOINGINACT")); 3507 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3508 vp->v_iflag |= VI_DOINGINACT; 3509 vp->v_iflag &= ~VI_OWEINACT; 3510 VI_UNLOCK(vp); 3511 /* 3512 * Before moving off the active list, we must be sure that any 3513 * modified pages are converted into the vnode's dirty 3514 * buffers, since these will no longer be checked once the 3515 * vnode is on the inactive list. 3516 * 3517 * The write-out of the dirty pages is asynchronous. At the 3518 * point that VOP_INACTIVE() is called, there could still be 3519 * pending I/O and dirty pages in the object. 3520 */ 3521 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3522 vm_object_mightbedirty(obj)) { 3523 VM_OBJECT_WLOCK(obj); 3524 vm_object_page_clean(obj, 0, 0, 0); 3525 VM_OBJECT_WUNLOCK(obj); 3526 } 3527 VOP_INACTIVE(vp, curthread); 3528 VI_LOCK(vp); 3529 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3530 ("vinactive: lost VI_DOINGINACT")); 3531 vp->v_iflag &= ~VI_DOINGINACT; 3532 } 3533 3534 void 3535 vinactive(struct vnode *vp) 3536 { 3537 3538 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3539 ASSERT_VI_LOCKED(vp, "vinactive"); 3540 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3541 3542 if ((vp->v_iflag & VI_OWEINACT) == 0) 3543 return; 3544 if (vp->v_iflag & VI_DOINGINACT) 3545 return; 3546 if (vp->v_usecount > 0) { 3547 vp->v_iflag &= ~VI_OWEINACT; 3548 return; 3549 } 3550 vinactivef(vp); 3551 } 3552 3553 /* 3554 * Remove any vnodes in the vnode table belonging to mount point mp. 3555 * 3556 * If FORCECLOSE is not specified, there should not be any active ones; 3557 * return error if any are found (nb: this is a user error, not a 3558 * system error). If FORCECLOSE is specified, detach any active vnodes 3559 * that are found. 3560 * 3561 * If WRITECLOSE is set, only flush out regular file vnodes open for 3562 * writing. 3563 * 3564 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3565 * 3566 * `rootrefs' specifies the base reference count for the root vnode 3567 * of this filesystem. The root vnode is considered busy if its 3568 * v_usecount exceeds this value. On a successful return, vflush() 3569 * will call vrele() on the root vnode exactly rootrefs times. 3570 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3571 * be zero. 3572 */ 3573 #ifdef DIAGNOSTIC 3574 static int busyprt = 0; /* print out busy vnodes */ 3575 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3576 #endif 3577 3578 int 3579 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3580 { 3581 struct vnode *vp, *mvp, *rootvp = NULL; 3582 struct vattr vattr; 3583 int busy = 0, error; 3584 3585 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3586 rootrefs, flags); 3587 if (rootrefs > 0) { 3588 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3589 ("vflush: bad args")); 3590 /* 3591 * Get the filesystem root vnode. We can vput() it 3592 * immediately, since with rootrefs > 0, it won't go away.
3593 */ 3594 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3595 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3596 __func__, error); 3597 return (error); 3598 } 3599 vput(rootvp); 3600 } 3601 loop: 3602 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3603 vholdl(vp); 3604 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3605 if (error) { 3606 vdrop(vp); 3607 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3608 goto loop; 3609 } 3610 /* 3611 * Skip over vnodes marked VV_SYSTEM. 3612 */ 3613 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3614 VOP_UNLOCK(vp); 3615 vdrop(vp); 3616 continue; 3617 } 3618 /* 3619 * If WRITECLOSE is set, flush out unlinked but still open 3620 * files (even if open only for reading) and regular file 3621 * vnodes open for writing. 3622 */ 3623 if (flags & WRITECLOSE) { 3624 if (vp->v_object != NULL) { 3625 VM_OBJECT_WLOCK(vp->v_object); 3626 vm_object_page_clean(vp->v_object, 0, 0, 0); 3627 VM_OBJECT_WUNLOCK(vp->v_object); 3628 } 3629 error = VOP_FSYNC(vp, MNT_WAIT, td); 3630 if (error != 0) { 3631 VOP_UNLOCK(vp); 3632 vdrop(vp); 3633 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3634 return (error); 3635 } 3636 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3637 VI_LOCK(vp); 3638 3639 if ((vp->v_type == VNON || 3640 (error == 0 && vattr.va_nlink > 0)) && 3641 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3642 VOP_UNLOCK(vp); 3643 vdropl(vp); 3644 continue; 3645 } 3646 } else 3647 VI_LOCK(vp); 3648 /* 3649 * With v_usecount == 0, all we need to do is clear out the 3650 * vnode data structures and we are done. 3651 * 3652 * If FORCECLOSE is set, forcibly close the vnode. 3653 */ 3654 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3655 vgonel(vp); 3656 } else { 3657 busy++; 3658 #ifdef DIAGNOSTIC 3659 if (busyprt) 3660 vn_printf(vp, "vflush: busy vnode "); 3661 #endif 3662 } 3663 VOP_UNLOCK(vp); 3664 vdropl(vp); 3665 } 3666 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3667 /* 3668 * If just the root vnode is busy, and if its refcount 3669 * is equal to `rootrefs', then go ahead and kill it. 3670 */ 3671 VI_LOCK(rootvp); 3672 KASSERT(busy > 0, ("vflush: not busy")); 3673 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3674 ("vflush: usecount %d < rootrefs %d", 3675 rootvp->v_usecount, rootrefs)); 3676 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3677 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3678 vgone(rootvp); 3679 VOP_UNLOCK(rootvp); 3680 busy = 0; 3681 } else 3682 VI_UNLOCK(rootvp); 3683 } 3684 if (busy) { 3685 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3686 busy); 3687 return (EBUSY); 3688 } 3689 for (; rootrefs > 0; rootrefs--) 3690 vrele(rootvp); 3691 return (0); 3692 } 3693 3694 /* 3695 * Recycle an unused vnode to the front of the free list. 3696 */ 3697 int 3698 vrecycle(struct vnode *vp) 3699 { 3700 int recycled; 3701 3702 VI_LOCK(vp); 3703 recycled = vrecyclel(vp); 3704 VI_UNLOCK(vp); 3705 return (recycled); 3706 } 3707 3708 /* 3709 * vrecycle, with the vp interlock held. 3710 */ 3711 int 3712 vrecyclel(struct vnode *vp) 3713 { 3714 int recycled; 3715 3716 ASSERT_VOP_ELOCKED(vp, __func__); 3717 ASSERT_VI_LOCKED(vp, __func__); 3718 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3719 recycled = 0; 3720 if (vp->v_usecount == 0) { 3721 recycled = 1; 3722 vgonel(vp); 3723 } 3724 return (recycled); 3725 } 3726 3727 /* 3728 * Eliminate all activity associated with a vnode 3729 * in preparation for reuse.
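 *
 * The caller must hold the vnode lock exclusively and have a hold
 * count on the vnode. A minimal forced-reclaim sequence, mirroring
 * what vflush() does for a busy root vnode, is:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vgone(vp);
 *	VOP_UNLOCK(vp);
 *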
3730 */ 3731 void 3732 vgone(struct vnode *vp) 3733 { 3734 VI_LOCK(vp); 3735 vgonel(vp); 3736 VI_UNLOCK(vp); 3737 } 3738 3739 static void 3740 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3741 struct vnode *lowervp __unused) 3742 { 3743 } 3744 3745 /* 3746 * Notify upper mounts about reclaimed or unlinked vnode. 3747 */ 3748 void 3749 vfs_notify_upper(struct vnode *vp, int event) 3750 { 3751 static struct vfsops vgonel_vfsops = { 3752 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3753 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3754 }; 3755 struct mount *mp, *ump, *mmp; 3756 3757 mp = vp->v_mount; 3758 if (mp == NULL) 3759 return; 3760 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3761 return; 3762 3763 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3764 mmp->mnt_op = &vgonel_vfsops; 3765 mmp->mnt_kern_flag |= MNTK_MARKER; 3766 MNT_ILOCK(mp); 3767 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3768 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3769 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3770 ump = TAILQ_NEXT(ump, mnt_upper_link); 3771 continue; 3772 } 3773 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3774 MNT_IUNLOCK(mp); 3775 switch (event) { 3776 case VFS_NOTIFY_UPPER_RECLAIM: 3777 VFS_RECLAIM_LOWERVP(ump, vp); 3778 break; 3779 case VFS_NOTIFY_UPPER_UNLINK: 3780 VFS_UNLINK_LOWERVP(ump, vp); 3781 break; 3782 default: 3783 KASSERT(0, ("invalid event %d", event)); 3784 break; 3785 } 3786 MNT_ILOCK(mp); 3787 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3788 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3789 } 3790 free(mmp, M_TEMP); 3791 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3792 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3793 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3794 wakeup(&mp->mnt_uppers); 3795 } 3796 MNT_IUNLOCK(mp); 3797 } 3798 3799 /* 3800 * vgone, with the vp interlock held. 3801 */ 3802 static void 3803 vgonel(struct vnode *vp) 3804 { 3805 struct thread *td; 3806 struct mount *mp; 3807 vm_object_t object; 3808 bool active, doinginact, oweinact; 3809 3810 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3811 ASSERT_VI_LOCKED(vp, "vgonel"); 3812 VNASSERT(vp->v_holdcnt, vp, 3813 ("vgonel: vp %p has no reference.", vp)); 3814 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3815 td = curthread; 3816 3817 /* 3818 * Don't vgonel if we're already doomed. 3819 */ 3820 if (vp->v_irflag & VIRF_DOOMED) 3821 return; 3822 /* 3823 * Paired with freevnode. 3824 */ 3825 vn_seqc_write_begin_locked(vp); 3826 vunlazy_gone(vp); 3827 vp->v_irflag |= VIRF_DOOMED; 3828 3829 /* 3830 * Check to see if the vnode is in use. If so, we have to 3831 * call VOP_CLOSE() and VOP_INACTIVE(). 3832 * 3833 * It could be that VOP_INACTIVE() requested reclamation, in 3834 * which case we should avoid recursion, so check 3835 * VI_DOINGINACT. This is not precise but good enough. 3836 */ 3837 active = vp->v_usecount > 0; 3838 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3839 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 3840 3841 /* 3842 * If we need to do inactive VI_OWEINACT will be set. 3843 */ 3844 if (vp->v_iflag & VI_DEFINACT) { 3845 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3846 vp->v_iflag &= ~VI_DEFINACT; 3847 vdropl(vp); 3848 } else { 3849 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3850 VI_UNLOCK(vp); 3851 } 3852 cache_purge_vgone(vp); 3853 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3854 3855 /* 3856 * If purging an active vnode, it must be closed and 3857 * deactivated before being reclaimed. 
3858 */ 3859 if (active) 3860 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3861 if ((oweinact || active) && !doinginact) { 3862 VI_LOCK(vp); 3863 vinactivef(vp); 3864 VI_UNLOCK(vp); 3865 } 3866 if (vp->v_type == VSOCK) 3867 vfs_unp_reclaim(vp); 3868 3869 /* 3870 * Clean out any buffers associated with the vnode. 3871 * If the flush fails, just toss the buffers. 3872 */ 3873 mp = NULL; 3874 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3875 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3876 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3877 while (vinvalbuf(vp, 0, 0, 0) != 0) 3878 ; 3879 } 3880 3881 BO_LOCK(&vp->v_bufobj); 3882 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3883 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3884 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3885 vp->v_bufobj.bo_clean.bv_cnt == 0, 3886 ("vp %p bufobj not invalidated", vp)); 3887 3888 /* 3889 * For VMIO bufobj, BO_DEAD is set later, or in 3890 * vm_object_terminate() after the object's page queue is 3891 * flushed. 3892 */ 3893 object = vp->v_bufobj.bo_object; 3894 if (object == NULL) 3895 vp->v_bufobj.bo_flag |= BO_DEAD; 3896 BO_UNLOCK(&vp->v_bufobj); 3897 3898 /* 3899 * Handle the VM part. Tmpfs handles v_object on its own (the 3900 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3901 * should not touch the object borrowed from the lower vnode 3902 * (the handle check). 3903 */ 3904 if (object != NULL && object->type == OBJT_VNODE && 3905 object->handle == vp) 3906 vnode_destroy_vobject(vp); 3907 3908 /* 3909 * Reclaim the vnode. 3910 */ 3911 if (VOP_RECLAIM(vp)) 3912 panic("vgone: cannot reclaim"); 3913 if (mp != NULL) 3914 vn_finished_secondary_write(mp); 3915 VNASSERT(vp->v_object == NULL, vp, 3916 ("vop_reclaim left v_object vp=%p", vp)); 3917 /* 3918 * Clear the advisory locks and wake up waiting threads. 3919 */ 3920 (void)VOP_ADVLOCKPURGE(vp); 3921 vp->v_lockf = NULL; 3922 /* 3923 * Delete from old mount point vnode list. 3924 */ 3925 delmntque(vp); 3926 /* 3927 * Done with purge, reset to the standard lock and invalidate 3928 * the vnode. 3929 */ 3930 VI_LOCK(vp); 3931 vp->v_vnlock = &vp->v_lock; 3932 vp->v_op = &dead_vnodeops; 3933 vp->v_type = VBAD; 3934 } 3935 3936 /* 3937 * Print out a description of a vnode. 3938 */ 3939 static const char * const typename[] = 3940 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3941 "VMARKER"}; 3942 3943 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 3944 "new hold count flag not added to vn_printf"); 3945 3946 void 3947 vn_printf(struct vnode *vp, const char *fmt, ...) 
3948 { 3949 va_list ap; 3950 char buf[256], buf2[16]; 3951 u_long flags; 3952 u_int holdcnt; 3953 3954 va_start(ap, fmt); 3955 vprintf(fmt, ap); 3956 va_end(ap); 3957 printf("%p: ", (void *)vp); 3958 printf("type %s\n", typename[vp->v_type]); 3959 holdcnt = atomic_load_int(&vp->v_holdcnt); 3960 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 3961 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 3962 vp->v_seqc_users); 3963 switch (vp->v_type) { 3964 case VDIR: 3965 printf(" mountedhere %p\n", vp->v_mountedhere); 3966 break; 3967 case VCHR: 3968 printf(" rdev %p\n", vp->v_rdev); 3969 break; 3970 case VSOCK: 3971 printf(" socket %p\n", vp->v_unpcb); 3972 break; 3973 case VFIFO: 3974 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3975 break; 3976 default: 3977 printf("\n"); 3978 break; 3979 } 3980 buf[0] = '\0'; 3981 buf[1] = '\0'; 3982 if (holdcnt & VHOLD_NO_SMR) 3983 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 3984 printf(" hold count flags (%s)\n", buf + 1); 3985 3986 buf[0] = '\0'; 3987 buf[1] = '\0'; 3988 if (vp->v_irflag & VIRF_DOOMED) 3989 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 3990 if (vp->v_irflag & VIRF_PGREAD) 3991 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 3992 flags = vp->v_irflag & ~(VIRF_DOOMED | VIRF_PGREAD); 3993 if (flags != 0) { 3994 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 3995 strlcat(buf, buf2, sizeof(buf)); 3996 } 3997 if (vp->v_vflag & VV_ROOT) 3998 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3999 if (vp->v_vflag & VV_ISTTY) 4000 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4001 if (vp->v_vflag & VV_NOSYNC) 4002 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4003 if (vp->v_vflag & VV_ETERNALDEV) 4004 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4005 if (vp->v_vflag & VV_CACHEDLABEL) 4006 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4007 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4008 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4009 if (vp->v_vflag & VV_COPYONWRITE) 4010 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4011 if (vp->v_vflag & VV_SYSTEM) 4012 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4013 if (vp->v_vflag & VV_PROCDEP) 4014 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4015 if (vp->v_vflag & VV_NOKNOTE) 4016 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4017 if (vp->v_vflag & VV_DELETED) 4018 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4019 if (vp->v_vflag & VV_MD) 4020 strlcat(buf, "|VV_MD", sizeof(buf)); 4021 if (vp->v_vflag & VV_FORCEINSMQ) 4022 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4023 if (vp->v_vflag & VV_READLINK) 4024 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4025 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4026 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 4027 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 4028 if (flags != 0) { 4029 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4030 strlcat(buf, buf2, sizeof(buf)); 4031 } 4032 if (vp->v_iflag & VI_TEXT_REF) 4033 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4034 if (vp->v_iflag & VI_MOUNT) 4035 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4036 if (vp->v_iflag & VI_DOINGINACT) 4037 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4038 if (vp->v_iflag & VI_OWEINACT) 4039 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4040 if (vp->v_iflag & VI_DEFINACT) 4041 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4042 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4043 VI_OWEINACT | VI_DEFINACT); 4044 if (flags != 0) { 4045 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4046 strlcat(buf, buf2, sizeof(buf)); 4047 } 4048 if (vp->v_mflag & 
VMP_LAZYLIST) 4049 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4050 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4051 if (flags != 0) { 4052 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4053 strlcat(buf, buf2, sizeof(buf)); 4054 } 4055 printf(" flags (%s)\n", buf + 1); 4056 if (mtx_owned(VI_MTX(vp))) 4057 printf(" VI_LOCKed"); 4058 if (vp->v_object != NULL) 4059 printf(" v_object %p ref %d pages %d " 4060 "cleanbuf %d dirtybuf %d\n", 4061 vp->v_object, vp->v_object->ref_count, 4062 vp->v_object->resident_page_count, 4063 vp->v_bufobj.bo_clean.bv_cnt, 4064 vp->v_bufobj.bo_dirty.bv_cnt); 4065 printf(" "); 4066 lockmgr_printinfo(vp->v_vnlock); 4067 if (vp->v_data != NULL) 4068 VOP_PRINT(vp); 4069 } 4070 4071 #ifdef DDB 4072 /* 4073 * List all of the locked vnodes in the system. 4074 * Called when debugging the kernel. 4075 */ 4076 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4077 { 4078 struct mount *mp; 4079 struct vnode *vp; 4080 4081 /* 4082 * Note: because this is DDB, we can't obey the locking semantics 4083 * for these structures, which means we could catch an inconsistent 4084 * state and dereference a nasty pointer. Not much to be done 4085 * about that. 4086 */ 4087 db_printf("Locked vnodes\n"); 4088 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4089 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4090 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4091 vn_printf(vp, "vnode "); 4092 } 4093 } 4094 } 4095 4096 /* 4097 * Show details about the given vnode. 4098 */ 4099 DB_SHOW_COMMAND(vnode, db_show_vnode) 4100 { 4101 struct vnode *vp; 4102 4103 if (!have_addr) 4104 return; 4105 vp = (struct vnode *)addr; 4106 vn_printf(vp, "vnode "); 4107 } 4108 4109 /* 4110 * Show details about the given mount point. 4111 */ 4112 DB_SHOW_COMMAND(mount, db_show_mount) 4113 { 4114 struct mount *mp; 4115 struct vfsopt *opt; 4116 struct statfs *sp; 4117 struct vnode *vp; 4118 char buf[512]; 4119 uint64_t mflags; 4120 u_int flags; 4121 4122 if (!have_addr) { 4123 /* No address given, print short info about all mount points. 
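 * For example, from the DDB prompt (illustrative output only; pointers
 * and device names will differ):
 *
 *	db> show mount
 *	0xfffff80003a4c000 /dev/ada0p2 on / (ufs)
 *	0xfffff80003a4d000 devfs on /dev (devfs)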
*/ 4124 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4125 db_printf("%p %s on %s (%s)\n", mp, 4126 mp->mnt_stat.f_mntfromname, 4127 mp->mnt_stat.f_mntonname, 4128 mp->mnt_stat.f_fstypename); 4129 if (db_pager_quit) 4130 break; 4131 } 4132 db_printf("\nMore info: show mount <addr>\n"); 4133 return; 4134 } 4135 4136 mp = (struct mount *)addr; 4137 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4138 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4139 4140 buf[0] = '\0'; 4141 mflags = mp->mnt_flag; 4142 #define MNT_FLAG(flag) do { \ 4143 if (mflags & (flag)) { \ 4144 if (buf[0] != '\0') \ 4145 strlcat(buf, ", ", sizeof(buf)); \ 4146 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4147 mflags &= ~(flag); \ 4148 } \ 4149 } while (0) 4150 MNT_FLAG(MNT_RDONLY); 4151 MNT_FLAG(MNT_SYNCHRONOUS); 4152 MNT_FLAG(MNT_NOEXEC); 4153 MNT_FLAG(MNT_NOSUID); 4154 MNT_FLAG(MNT_NFS4ACLS); 4155 MNT_FLAG(MNT_UNION); 4156 MNT_FLAG(MNT_ASYNC); 4157 MNT_FLAG(MNT_SUIDDIR); 4158 MNT_FLAG(MNT_SOFTDEP); 4159 MNT_FLAG(MNT_NOSYMFOLLOW); 4160 MNT_FLAG(MNT_GJOURNAL); 4161 MNT_FLAG(MNT_MULTILABEL); 4162 MNT_FLAG(MNT_ACLS); 4163 MNT_FLAG(MNT_NOATIME); 4164 MNT_FLAG(MNT_NOCLUSTERR); 4165 MNT_FLAG(MNT_NOCLUSTERW); 4166 MNT_FLAG(MNT_SUJ); 4167 MNT_FLAG(MNT_EXRDONLY); 4168 MNT_FLAG(MNT_EXPORTED); 4169 MNT_FLAG(MNT_DEFEXPORTED); 4170 MNT_FLAG(MNT_EXPORTANON); 4171 MNT_FLAG(MNT_EXKERB); 4172 MNT_FLAG(MNT_EXPUBLIC); 4173 MNT_FLAG(MNT_LOCAL); 4174 MNT_FLAG(MNT_QUOTA); 4175 MNT_FLAG(MNT_ROOTFS); 4176 MNT_FLAG(MNT_USER); 4177 MNT_FLAG(MNT_IGNORE); 4178 MNT_FLAG(MNT_UPDATE); 4179 MNT_FLAG(MNT_DELEXPORT); 4180 MNT_FLAG(MNT_RELOAD); 4181 MNT_FLAG(MNT_FORCE); 4182 MNT_FLAG(MNT_SNAPSHOT); 4183 MNT_FLAG(MNT_BYFSID); 4184 #undef MNT_FLAG 4185 if (mflags != 0) { 4186 if (buf[0] != '\0') 4187 strlcat(buf, ", ", sizeof(buf)); 4188 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4189 "0x%016jx", mflags); 4190 } 4191 db_printf(" mnt_flag = %s\n", buf); 4192 4193 buf[0] = '\0'; 4194 flags = mp->mnt_kern_flag; 4195 #define MNT_KERN_FLAG(flag) do { \ 4196 if (flags & (flag)) { \ 4197 if (buf[0] != '\0') \ 4198 strlcat(buf, ", ", sizeof(buf)); \ 4199 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4200 flags &= ~(flag); \ 4201 } \ 4202 } while (0) 4203 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4204 MNT_KERN_FLAG(MNTK_ASYNC); 4205 MNT_KERN_FLAG(MNTK_SOFTDEP); 4206 MNT_KERN_FLAG(MNTK_DRAINING); 4207 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4208 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4209 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4210 MNT_KERN_FLAG(MNTK_NO_IOPF); 4211 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4212 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4213 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4214 MNT_KERN_FLAG(MNTK_MARKER); 4215 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4216 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4217 MNT_KERN_FLAG(MNTK_NOASYNC); 4218 MNT_KERN_FLAG(MNTK_UNMOUNT); 4219 MNT_KERN_FLAG(MNTK_MWAIT); 4220 MNT_KERN_FLAG(MNTK_SUSPEND); 4221 MNT_KERN_FLAG(MNTK_SUSPEND2); 4222 MNT_KERN_FLAG(MNTK_SUSPENDED); 4223 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4224 MNT_KERN_FLAG(MNTK_NOKNOTE); 4225 #undef MNT_KERN_FLAG 4226 if (flags != 0) { 4227 if (buf[0] != '\0') 4228 strlcat(buf, ", ", sizeof(buf)); 4229 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4230 "0x%08x", flags); 4231 } 4232 db_printf(" mnt_kern_flag = %s\n", buf); 4233 4234 db_printf(" mnt_opt = "); 4235 opt = TAILQ_FIRST(mp->mnt_opt); 4236 if (opt != NULL) { 4237 db_printf("%s", opt->name); 4238 opt = TAILQ_NEXT(opt, link); 4239 while (opt != NULL) { 4240 db_printf(", %s", opt->name); 4241 opt = TAILQ_NEXT(opt, link); 4242 } 4243 
} 4244 db_printf("\n"); 4245 4246 sp = &mp->mnt_stat; 4247 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4248 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4249 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4250 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4251 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4252 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4253 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4254 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4255 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4256 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4257 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4258 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4259 4260 db_printf(" mnt_cred = { uid=%u ruid=%u", 4261 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4262 if (jailed(mp->mnt_cred)) 4263 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4264 db_printf(" }\n"); 4265 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4266 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4267 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4268 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4269 db_printf(" mnt_lazyvnodelistsize = %d\n", 4270 mp->mnt_lazyvnodelistsize); 4271 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4272 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4273 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4274 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4275 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4276 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4277 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4278 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4279 db_printf(" mnt_secondary_accwrites = %d\n", 4280 mp->mnt_secondary_accwrites); 4281 db_printf(" mnt_gjprovider = %s\n", 4282 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4283 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4284 4285 db_printf("\n\nList of active vnodes\n"); 4286 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4287 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4288 vn_printf(vp, "vnode "); 4289 if (db_pager_quit) 4290 break; 4291 } 4292 } 4293 db_printf("\n\nList of inactive vnodes\n"); 4294 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4295 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4296 vn_printf(vp, "vnode "); 4297 if (db_pager_quit) 4298 break; 4299 } 4300 } 4301 } 4302 #endif /* DDB */ 4303 4304 /* 4305 * Fill in a struct xvfsconf based on a struct vfsconf. 4306 */ 4307 static int 4308 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4309 { 4310 struct xvfsconf xvfsp; 4311 4312 bzero(&xvfsp, sizeof(xvfsp)); 4313 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4314 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4315 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4316 xvfsp.vfc_flags = vfsp->vfc_flags; 4317 /* 4318 * These are unused in userland, we keep them 4319 * to not break binary compatibility. 
4320 */ 4321 xvfsp.vfc_vfsops = NULL; 4322 xvfsp.vfc_next = NULL; 4323 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4324 } 4325 4326 #ifdef COMPAT_FREEBSD32 4327 struct xvfsconf32 { 4328 uint32_t vfc_vfsops; 4329 char vfc_name[MFSNAMELEN]; 4330 int32_t vfc_typenum; 4331 int32_t vfc_refcount; 4332 int32_t vfc_flags; 4333 uint32_t vfc_next; 4334 }; 4335 4336 static int 4337 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4338 { 4339 struct xvfsconf32 xvfsp; 4340 4341 bzero(&xvfsp, sizeof(xvfsp)); 4342 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4343 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4344 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4345 xvfsp.vfc_flags = vfsp->vfc_flags; 4346 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4347 } 4348 #endif 4349 4350 /* 4351 * Top level filesystem related information gathering. 4352 */ 4353 static int 4354 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4355 { 4356 struct vfsconf *vfsp; 4357 int error; 4358 4359 error = 0; 4360 vfsconf_slock(); 4361 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4362 #ifdef COMPAT_FREEBSD32 4363 if (req->flags & SCTL_MASK32) 4364 error = vfsconf2x32(req, vfsp); 4365 else 4366 #endif 4367 error = vfsconf2x(req, vfsp); 4368 if (error) 4369 break; 4370 } 4371 vfsconf_sunlock(); 4372 return (error); 4373 } 4374 4375 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4376 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4377 "S,xvfsconf", "List of all configured filesystems"); 4378 4379 #ifndef BURN_BRIDGES 4380 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4381 4382 static int 4383 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4384 { 4385 int *name = (int *)arg1 - 1; /* XXX */ 4386 u_int namelen = arg2 + 1; /* XXX */ 4387 struct vfsconf *vfsp; 4388 4389 log(LOG_WARNING, "userland calling deprecated sysctl, " 4390 "please rebuild world\n"); 4391 4392 #if 1 || defined(COMPAT_PRELITE2) 4393 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
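 * Both constants have the value 0, so a request with a single name
 * component cannot be told apart; treat it as an old-style VFS_VFSCONF
 * query and hand it to sysctl_ovfs_conf() below.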
*/ 4394 if (namelen == 1) 4395 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4396 #endif 4397 4398 switch (name[1]) { 4399 case VFS_MAXTYPENUM: 4400 if (namelen != 2) 4401 return (ENOTDIR); 4402 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4403 case VFS_CONF: 4404 if (namelen != 3) 4405 return (ENOTDIR); /* overloaded */ 4406 vfsconf_slock(); 4407 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4408 if (vfsp->vfc_typenum == name[2]) 4409 break; 4410 } 4411 vfsconf_sunlock(); 4412 if (vfsp == NULL) 4413 return (EOPNOTSUPP); 4414 #ifdef COMPAT_FREEBSD32 4415 if (req->flags & SCTL_MASK32) 4416 return (vfsconf2x32(req, vfsp)); 4417 else 4418 #endif 4419 return (vfsconf2x(req, vfsp)); 4420 } 4421 return (EOPNOTSUPP); 4422 } 4423 4424 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4425 CTLFLAG_MPSAFE, vfs_sysctl, 4426 "Generic filesystem"); 4427 4428 #if 1 || defined(COMPAT_PRELITE2) 4429 4430 static int 4431 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4432 { 4433 int error; 4434 struct vfsconf *vfsp; 4435 struct ovfsconf ovfs; 4436 4437 vfsconf_slock(); 4438 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4439 bzero(&ovfs, sizeof(ovfs)); 4440 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4441 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4442 ovfs.vfc_index = vfsp->vfc_typenum; 4443 ovfs.vfc_refcount = vfsp->vfc_refcount; 4444 ovfs.vfc_flags = vfsp->vfc_flags; 4445 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4446 if (error != 0) { 4447 vfsconf_sunlock(); 4448 return (error); 4449 } 4450 } 4451 vfsconf_sunlock(); 4452 return (0); 4453 } 4454 4455 #endif /* 1 || COMPAT_PRELITE2 */ 4456 #endif /* !BURN_BRIDGES */ 4457 4458 #define KINFO_VNODESLOP 10 4459 #ifdef notyet 4460 /* 4461 * Dump vnode list (via sysctl). 4462 */ 4463 /* ARGSUSED */ 4464 static int 4465 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4466 { 4467 struct xvnode *xvn; 4468 struct mount *mp; 4469 struct vnode *vp; 4470 int error, len, n; 4471 4472 /* 4473 * Stale numvnodes access is not fatal here. 4474 */ 4475 req->lock = 0; 4476 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4477 if (!req->oldptr) 4478 /* Make an estimate */ 4479 return (SYSCTL_OUT(req, 0, len)); 4480 4481 error = sysctl_wire_old_buffer(req, 0); 4482 if (error != 0) 4483 return (error); 4484 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4485 n = 0; 4486 mtx_lock(&mountlist_mtx); 4487 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4488 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4489 continue; 4490 MNT_ILOCK(mp); 4491 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4492 if (n == len) 4493 break; 4494 vref(vp); 4495 xvn[n].xv_size = sizeof *xvn; 4496 xvn[n].xv_vnode = vp; 4497 xvn[n].xv_id = 0; /* XXX compat */ 4498 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4499 XV_COPY(usecount); 4500 XV_COPY(writecount); 4501 XV_COPY(holdcnt); 4502 XV_COPY(mount); 4503 XV_COPY(numoutput); 4504 XV_COPY(type); 4505 #undef XV_COPY 4506 xvn[n].xv_flag = vp->v_vflag; 4507 4508 switch (vp->v_type) { 4509 case VREG: 4510 case VDIR: 4511 case VLNK: 4512 break; 4513 case VBLK: 4514 case VCHR: 4515 if (vp->v_rdev == NULL) { 4516 vrele(vp); 4517 continue; 4518 } 4519 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4520 break; 4521 case VSOCK: 4522 xvn[n].xv_socket = vp->v_socket; 4523 break; 4524 case VFIFO: 4525 xvn[n].xv_fifo = vp->v_fifoinfo; 4526 break; 4527 case VNON: 4528 case VBAD: 4529 default: 4530 /* shouldn't happen? 
*/ 4531 vrele(vp); 4532 continue; 4533 } 4534 vrele(vp); 4535 ++n; 4536 } 4537 MNT_IUNLOCK(mp); 4538 mtx_lock(&mountlist_mtx); 4539 vfs_unbusy(mp); 4540 if (n == len) 4541 break; 4542 } 4543 mtx_unlock(&mountlist_mtx); 4544 4545 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4546 free(xvn, M_TEMP); 4547 return (error); 4548 } 4549 4550 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4551 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4552 ""); 4553 #endif 4554 4555 static void 4556 unmount_or_warn(struct mount *mp) 4557 { 4558 int error; 4559 4560 error = dounmount(mp, MNT_FORCE, curthread); 4561 if (error != 0) { 4562 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4563 if (error == EBUSY) 4564 printf("BUSY)\n"); 4565 else 4566 printf("%d)\n", error); 4567 } 4568 } 4569 4570 /* 4571 * Unmount all filesystems. The list is traversed in reverse order 4572 * of mounting to avoid dependencies. 4573 */ 4574 void 4575 vfs_unmountall(void) 4576 { 4577 struct mount *mp, *tmp; 4578 4579 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4580 4581 /* 4582 * Since this only runs when rebooting, it is not interlocked. 4583 */ 4584 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4585 vfs_ref(mp); 4586 4587 /* 4588 * Forcibly unmounting "/dev" before "/" would prevent clean 4589 * unmount of the latter. 4590 */ 4591 if (mp == rootdevmp) 4592 continue; 4593 4594 unmount_or_warn(mp); 4595 } 4596 4597 if (rootdevmp != NULL) 4598 unmount_or_warn(rootdevmp); 4599 } 4600 4601 static void 4602 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4603 { 4604 4605 ASSERT_VI_LOCKED(vp, __func__); 4606 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4607 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4608 vdropl(vp); 4609 return; 4610 } 4611 if (vn_lock(vp, lkflags) == 0) { 4612 VI_LOCK(vp); 4613 vinactive(vp); 4614 VOP_UNLOCK(vp); 4615 vdropl(vp); 4616 return; 4617 } 4618 vdefer_inactive_unlocked(vp); 4619 } 4620 4621 static int 4622 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4623 { 4624 4625 return (vp->v_iflag & VI_DEFINACT); 4626 } 4627 4628 static void __noinline 4629 vfs_periodic_inactive(struct mount *mp, int flags) 4630 { 4631 struct vnode *vp, *mvp; 4632 int lkflags; 4633 4634 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4635 if (flags != MNT_WAIT) 4636 lkflags |= LK_NOWAIT; 4637 4638 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4639 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4640 VI_UNLOCK(vp); 4641 continue; 4642 } 4643 vp->v_iflag &= ~VI_DEFINACT; 4644 vfs_deferred_inactive(vp, lkflags); 4645 } 4646 } 4647 4648 static inline bool 4649 vfs_want_msync(struct vnode *vp) 4650 { 4651 struct vm_object *obj; 4652 4653 /* 4654 * This test may be performed without any locks held. 4655 * We rely on vm_object's type stability. 
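 * The worst that can happen is a stale answer; callers re-check the
 * object and VV_NOSYNC under proper locks (see
 * vfs_periodic_msync_inactive()) before acting on the result.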
4656 */ 4657 if (vp->v_vflag & VV_NOSYNC) 4658 return (false); 4659 obj = vp->v_object; 4660 return (obj != NULL && vm_object_mightbedirty(obj)); 4661 } 4662 4663 static int 4664 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4665 { 4666 4667 if (vp->v_vflag & VV_NOSYNC) 4668 return (false); 4669 if (vp->v_iflag & VI_DEFINACT) 4670 return (true); 4671 return (vfs_want_msync(vp)); 4672 } 4673 4674 static void __noinline 4675 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4676 { 4677 struct vnode *vp, *mvp; 4678 struct vm_object *obj; 4679 int lkflags, objflags; 4680 bool seen_defer; 4681 4682 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4683 if (flags != MNT_WAIT) { 4684 lkflags |= LK_NOWAIT; 4685 objflags = OBJPC_NOSYNC; 4686 } else { 4687 objflags = OBJPC_SYNC; 4688 } 4689 4690 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4691 seen_defer = false; 4692 if (vp->v_iflag & VI_DEFINACT) { 4693 vp->v_iflag &= ~VI_DEFINACT; 4694 seen_defer = true; 4695 } 4696 if (!vfs_want_msync(vp)) { 4697 if (seen_defer) 4698 vfs_deferred_inactive(vp, lkflags); 4699 else 4700 VI_UNLOCK(vp); 4701 continue; 4702 } 4703 if (vget(vp, lkflags) == 0) { 4704 obj = vp->v_object; 4705 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4706 VM_OBJECT_WLOCK(obj); 4707 vm_object_page_clean(obj, 0, 0, objflags); 4708 VM_OBJECT_WUNLOCK(obj); 4709 } 4710 vput(vp); 4711 if (seen_defer) 4712 vdrop(vp); 4713 } else { 4714 if (seen_defer) 4715 vdefer_inactive_unlocked(vp); 4716 } 4717 } 4718 } 4719 4720 void 4721 vfs_periodic(struct mount *mp, int flags) 4722 { 4723 4724 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4725 4726 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4727 vfs_periodic_inactive(mp, flags); 4728 else 4729 vfs_periodic_msync_inactive(mp, flags); 4730 } 4731 4732 static void 4733 destroy_vpollinfo_free(struct vpollinfo *vi) 4734 { 4735 4736 knlist_destroy(&vi->vpi_selinfo.si_note); 4737 mtx_destroy(&vi->vpi_lock); 4738 uma_zfree(vnodepoll_zone, vi); 4739 } 4740 4741 static void 4742 destroy_vpollinfo(struct vpollinfo *vi) 4743 { 4744 4745 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4746 seldrain(&vi->vpi_selinfo); 4747 destroy_vpollinfo_free(vi); 4748 } 4749 4750 /* 4751 * Initialize per-vnode helper structure to hold poll-related state. 4752 */ 4753 void 4754 v_addpollinfo(struct vnode *vp) 4755 { 4756 struct vpollinfo *vi; 4757 4758 if (vp->v_pollinfo != NULL) 4759 return; 4760 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4761 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4762 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4763 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4764 VI_LOCK(vp); 4765 if (vp->v_pollinfo != NULL) { 4766 VI_UNLOCK(vp); 4767 destroy_vpollinfo_free(vi); 4768 return; 4769 } 4770 vp->v_pollinfo = vi; 4771 VI_UNLOCK(vp); 4772 } 4773 4774 /* 4775 * Record a process's interest in events which might happen to 4776 * a vnode. Because poll uses the historic select-style interface 4777 * internally, this routine serves as both the ``check for any 4778 * pending events'' and the ``record my interest in future events'' 4779 * functions. (These are done together, while the lock is held, 4780 * to avoid race conditions.) 
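 *
 * A minimal sketch of how a filesystem's VOP_POLL implementation might
 * use this helper (illustrative only, not taken from any in-tree
 * filesystem; "myfs" is a hypothetical name):
 *
 *	static int
 *	myfs_poll(struct vop_poll_args *ap)
 *	{
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}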
*/ 4782 int 4783 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4784 { 4785 4786 v_addpollinfo(vp); 4787 mtx_lock(&vp->v_pollinfo->vpi_lock); 4788 if (vp->v_pollinfo->vpi_revents & events) { 4789 /* 4790 * This leaves events we are not interested 4791 * in available for the other process, which 4792 * presumably had requested them 4793 * (otherwise they would never have been 4794 * recorded). 4795 */ 4796 events &= vp->v_pollinfo->vpi_revents; 4797 vp->v_pollinfo->vpi_revents &= ~events; 4798 4799 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4800 return (events); 4801 } 4802 vp->v_pollinfo->vpi_events |= events; 4803 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4804 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4805 return (0); 4806 } 4807 4808 /* 4809 * Routine to create and manage a filesystem syncer vnode. 4810 */ 4811 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4812 static int sync_fsync(struct vop_fsync_args *); 4813 static int sync_inactive(struct vop_inactive_args *); 4814 static int sync_reclaim(struct vop_reclaim_args *); 4815 4816 static struct vop_vector sync_vnodeops = { 4817 .vop_bypass = VOP_EOPNOTSUPP, 4818 .vop_close = sync_close, /* close */ 4819 .vop_fsync = sync_fsync, /* fsync */ 4820 .vop_inactive = sync_inactive, /* inactive */ 4821 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4822 .vop_reclaim = sync_reclaim, /* reclaim */ 4823 .vop_lock1 = vop_stdlock, /* lock */ 4824 .vop_unlock = vop_stdunlock, /* unlock */ 4825 .vop_islocked = vop_stdislocked, /* islocked */ 4826 }; 4827 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4828 4829 /* 4830 * Create a new filesystem syncer vnode for the specified mount point. 4831 */ 4832 void 4833 vfs_allocate_syncvnode(struct mount *mp) 4834 { 4835 struct vnode *vp; 4836 struct bufobj *bo; 4837 static long start, incr, next; 4838 int error; 4839 4840 /* Allocate a new vnode */ 4841 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4842 if (error != 0) 4843 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4844 vp->v_type = VNON; 4845 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4846 vp->v_vflag |= VV_FORCEINSMQ; 4847 error = insmntque(vp, mp); 4848 if (error != 0) 4849 panic("vfs_allocate_syncvnode: insmntque() failed"); 4850 vp->v_vflag &= ~VV_FORCEINSMQ; 4851 VOP_UNLOCK(vp); 4852 /* 4853 * Place the vnode onto the syncer worklist. We attempt to 4854 * scatter them about on the list so that they will go off 4855 * at evenly distributed times even if all the filesystems 4856 * are mounted at once. 4857 */ 4858 next += incr; 4859 if (next == 0 || next > syncer_maxdelay) { 4860 start /= 2; 4861 incr /= 2; 4862 if (start == 0) { 4863 start = syncer_maxdelay / 2; 4864 incr = syncer_maxdelay; 4865 } 4866 next = start; 4867 } 4868 bo = &vp->v_bufobj; 4869 BO_LOCK(bo); 4870 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4871 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
*/ 4872 mtx_lock(&sync_mtx); 4873 sync_vnode_count++; 4874 if (mp->mnt_syncer == NULL) { 4875 mp->mnt_syncer = vp; 4876 vp = NULL; 4877 } 4878 mtx_unlock(&sync_mtx); 4879 BO_UNLOCK(bo); 4880 if (vp != NULL) { 4881 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4882 vgone(vp); 4883 vput(vp); 4884 } 4885 } 4886 4887 void 4888 vfs_deallocate_syncvnode(struct mount *mp) 4889 { 4890 struct vnode *vp; 4891 4892 mtx_lock(&sync_mtx); 4893 vp = mp->mnt_syncer; 4894 if (vp != NULL) 4895 mp->mnt_syncer = NULL; 4896 mtx_unlock(&sync_mtx); 4897 if (vp != NULL) 4898 vrele(vp); 4899 } 4900 4901 /* 4902 * Do a lazy sync of the filesystem. 4903 */ 4904 static int 4905 sync_fsync(struct vop_fsync_args *ap) 4906 { 4907 struct vnode *syncvp = ap->a_vp; 4908 struct mount *mp = syncvp->v_mount; 4909 int error, save; 4910 struct bufobj *bo; 4911 4912 /* 4913 * We only need to do something if this is a lazy evaluation. 4914 */ 4915 if (ap->a_waitfor != MNT_LAZY) 4916 return (0); 4917 4918 /* 4919 * Move ourselves to the back of the sync list. 4920 */ 4921 bo = &syncvp->v_bufobj; 4922 BO_LOCK(bo); 4923 vn_syncer_add_to_worklist(bo, syncdelay); 4924 BO_UNLOCK(bo); 4925 4926 /* 4927 * Walk the list of vnodes pushing all that are dirty and 4928 * not already on the sync list. 4929 */ 4930 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4931 return (0); 4932 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4933 vfs_unbusy(mp); 4934 return (0); 4935 } 4936 save = curthread_pflags_set(TDP_SYNCIO); 4937 /* 4938 * The filesystem at hand may be idle with free vnodes stored in the 4939 * batch. Return them instead of letting them stay there indefinitely. 4940 */ 4941 vfs_periodic(mp, MNT_NOWAIT); 4942 error = VFS_SYNC(mp, MNT_LAZY); 4943 curthread_pflags_restore(save); 4944 vn_finished_write(mp); 4945 vfs_unbusy(mp); 4946 return (error); 4947 } 4948 4949 /* 4950 * The syncer vnode is no longer referenced. 4951 */ 4952 static int 4953 sync_inactive(struct vop_inactive_args *ap) 4954 { 4955 4956 vgone(ap->a_vp); 4957 return (0); 4958 } 4959 4960 /* 4961 * The syncer vnode is no longer needed and is being decommissioned. 4962 * 4963 * Modifications to the worklist must be protected by sync_mtx.
4964 */ 4965 static int 4966 sync_reclaim(struct vop_reclaim_args *ap) 4967 { 4968 struct vnode *vp = ap->a_vp; 4969 struct bufobj *bo; 4970 4971 bo = &vp->v_bufobj; 4972 BO_LOCK(bo); 4973 mtx_lock(&sync_mtx); 4974 if (vp->v_mount->mnt_syncer == vp) 4975 vp->v_mount->mnt_syncer = NULL; 4976 if (bo->bo_flag & BO_ONWORKLST) { 4977 LIST_REMOVE(bo, bo_synclist); 4978 syncer_worklist_len--; 4979 sync_vnode_count--; 4980 bo->bo_flag &= ~BO_ONWORKLST; 4981 } 4982 mtx_unlock(&sync_mtx); 4983 BO_UNLOCK(bo); 4984 4985 return (0); 4986 } 4987 4988 int 4989 vn_need_pageq_flush(struct vnode *vp) 4990 { 4991 struct vm_object *obj; 4992 int need; 4993 4994 MPASS(mtx_owned(VI_MTX(vp))); 4995 need = 0; 4996 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4997 vm_object_mightbedirty(obj)) 4998 need = 1; 4999 return (need); 5000 } 5001 5002 /* 5003 * Check if vnode represents a disk device 5004 */ 5005 bool 5006 vn_isdisk_error(struct vnode *vp, int *errp) 5007 { 5008 int error; 5009 5010 if (vp->v_type != VCHR) { 5011 error = ENOTBLK; 5012 goto out; 5013 } 5014 error = 0; 5015 dev_lock(); 5016 if (vp->v_rdev == NULL) 5017 error = ENXIO; 5018 else if (vp->v_rdev->si_devsw == NULL) 5019 error = ENXIO; 5020 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5021 error = ENOTBLK; 5022 dev_unlock(); 5023 out: 5024 *errp = error; 5025 return (error == 0); 5026 } 5027 5028 bool 5029 vn_isdisk(struct vnode *vp) 5030 { 5031 int error; 5032 5033 return (vn_isdisk_error(vp, &error)); 5034 } 5035 5036 /* 5037 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5038 * the comment above cache_fplookup for details. 5039 */ 5040 int 5041 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5042 { 5043 int error; 5044 5045 VFS_SMR_ASSERT_ENTERED(); 5046 5047 /* Check the owner. */ 5048 if (cred->cr_uid == file_uid) { 5049 if (file_mode & S_IXUSR) 5050 return (0); 5051 goto out_error; 5052 } 5053 5054 /* Otherwise, check the groups (first match) */ 5055 if (groupmember(file_gid, cred)) { 5056 if (file_mode & S_IXGRP) 5057 return (0); 5058 goto out_error; 5059 } 5060 5061 /* Otherwise, check everyone else. */ 5062 if (file_mode & S_IXOTH) 5063 return (0); 5064 out_error: 5065 /* 5066 * Permission check failed, but it is possible denial will get overwritten 5067 * (e.g., when root is traversing through a 700 directory owned by someone 5068 * else). 5069 * 5070 * vaccess() calls priv_check_cred which in turn can descent into MAC 5071 * modules overriding this result. It's quite unclear what semantics 5072 * are allowed for them to operate, thus for safety we don't call them 5073 * from within the SMR section. This also means if any such modules 5074 * are present, we have to let the regular lookup decide. 5075 */ 5076 error = priv_check_cred_vfs_lookup_nomac(cred); 5077 switch (error) { 5078 case 0: 5079 return (0); 5080 case EAGAIN: 5081 /* 5082 * MAC modules present. 5083 */ 5084 return (EAGAIN); 5085 case EPERM: 5086 return (EACCES); 5087 default: 5088 return (error); 5089 } 5090 } 5091 5092 /* 5093 * Common filesystem object access control check routine. Accepts a 5094 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5095 * Returns 0 on success, or an errno on failure. 
5096 */ 5097 int 5098 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5099 accmode_t accmode, struct ucred *cred) 5100 { 5101 accmode_t dac_granted; 5102 accmode_t priv_granted; 5103 5104 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5105 ("invalid bit in accmode")); 5106 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5107 ("VAPPEND without VWRITE")); 5108 5109 /* 5110 * Look for a normal, non-privileged way to access the file/directory 5111 * as requested. If it exists, go with that. 5112 */ 5113 5114 dac_granted = 0; 5115 5116 /* Check the owner. */ 5117 if (cred->cr_uid == file_uid) { 5118 dac_granted |= VADMIN; 5119 if (file_mode & S_IXUSR) 5120 dac_granted |= VEXEC; 5121 if (file_mode & S_IRUSR) 5122 dac_granted |= VREAD; 5123 if (file_mode & S_IWUSR) 5124 dac_granted |= (VWRITE | VAPPEND); 5125 5126 if ((accmode & dac_granted) == accmode) 5127 return (0); 5128 5129 goto privcheck; 5130 } 5131 5132 /* Otherwise, check the groups (first match) */ 5133 if (groupmember(file_gid, cred)) { 5134 if (file_mode & S_IXGRP) 5135 dac_granted |= VEXEC; 5136 if (file_mode & S_IRGRP) 5137 dac_granted |= VREAD; 5138 if (file_mode & S_IWGRP) 5139 dac_granted |= (VWRITE | VAPPEND); 5140 5141 if ((accmode & dac_granted) == accmode) 5142 return (0); 5143 5144 goto privcheck; 5145 } 5146 5147 /* Otherwise, check everyone else. */ 5148 if (file_mode & S_IXOTH) 5149 dac_granted |= VEXEC; 5150 if (file_mode & S_IROTH) 5151 dac_granted |= VREAD; 5152 if (file_mode & S_IWOTH) 5153 dac_granted |= (VWRITE | VAPPEND); 5154 if ((accmode & dac_granted) == accmode) 5155 return (0); 5156 5157 privcheck: 5158 /* 5159 * Build a privilege mask to determine if the set of privileges 5160 * satisfies the requirements when combined with the granted mask 5161 * from above. For each privilege, if the privilege is required, 5162 * bitwise or the request type onto the priv_granted mask. 5163 */ 5164 priv_granted = 0; 5165 5166 if (type == VDIR) { 5167 /* 5168 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5169 * requests, instead of PRIV_VFS_EXEC. 5170 */ 5171 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5172 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5173 priv_granted |= VEXEC; 5174 } else { 5175 /* 5176 * Ensure that at least one execute bit is on. Otherwise, 5177 * a privileged user will always succeed, and we don't want 5178 * this to happen unless the file really is executable. 5179 */ 5180 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5181 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5182 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5183 priv_granted |= VEXEC; 5184 } 5185 5186 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5187 !priv_check_cred(cred, PRIV_VFS_READ)) 5188 priv_granted |= VREAD; 5189 5190 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5191 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5192 priv_granted |= (VWRITE | VAPPEND); 5193 5194 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5195 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5196 priv_granted |= VADMIN; 5197 5198 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5199 return (0); 5200 } 5201 5202 return ((accmode & VADMIN) ? EPERM : EACCES); 5203 } 5204 5205 /* 5206 * Credential check based on process requesting service, and per-attribute 5207 * permissions. 
5208 */ 5209 int 5210 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5211 struct thread *td, accmode_t accmode) 5212 { 5213 5214 /* 5215 * Kernel-invoked always succeeds. 5216 */ 5217 if (cred == NOCRED) 5218 return (0); 5219 5220 /* 5221 * Do not allow privileged processes in jail to directly manipulate 5222 * system attributes. 5223 */ 5224 switch (attrnamespace) { 5225 case EXTATTR_NAMESPACE_SYSTEM: 5226 /* Potentially should be: return (EPERM); */ 5227 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5228 case EXTATTR_NAMESPACE_USER: 5229 return (VOP_ACCESS(vp, accmode, cred, td)); 5230 default: 5231 return (EPERM); 5232 } 5233 } 5234 5235 #ifdef DEBUG_VFS_LOCKS 5236 /* 5237 * This only exists to suppress warnings from unlocked specfs accesses. It is 5238 * no longer ok to have an unlocked VFS. 5239 */ 5240 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5241 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5242 5243 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5244 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5245 "Drop into debugger on lock violation"); 5246 5247 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5248 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5249 0, "Check for interlock across VOPs"); 5250 5251 int vfs_badlock_print = 1; /* Print lock violations. */ 5252 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5253 0, "Print lock violations"); 5254 5255 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5256 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5257 0, "Print vnode details on lock violations"); 5258 5259 #ifdef KDB 5260 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 5261 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5262 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5263 #endif 5264 5265 static void 5266 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5267 { 5268 5269 #ifdef KDB 5270 if (vfs_badlock_backtrace) 5271 kdb_backtrace(); 5272 #endif 5273 if (vfs_badlock_vnode) 5274 vn_printf(vp, "vnode "); 5275 if (vfs_badlock_print) 5276 printf("%s: %p %s\n", str, (void *)vp, msg); 5277 if (vfs_badlock_ddb) 5278 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5279 } 5280 5281 void 5282 assert_vi_locked(struct vnode *vp, const char *str) 5283 { 5284 5285 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5286 vfs_badlock("interlock is not locked but should be", str, vp); 5287 } 5288 5289 void 5290 assert_vi_unlocked(struct vnode *vp, const char *str) 5291 { 5292 5293 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5294 vfs_badlock("interlock is locked but should not be", str, vp); 5295 } 5296 5297 void 5298 assert_vop_locked(struct vnode *vp, const char *str) 5299 { 5300 int locked; 5301 5302 if (!IGNORE_LOCK(vp)) { 5303 locked = VOP_ISLOCKED(vp); 5304 if (locked == 0 || locked == LK_EXCLOTHER) 5305 vfs_badlock("is not locked but should be", str, vp); 5306 } 5307 } 5308 5309 void 5310 assert_vop_unlocked(struct vnode *vp, const char *str) 5311 { 5312 5313 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5314 vfs_badlock("is locked but should not be", str, vp); 5315 } 5316 5317 void 5318 assert_vop_elocked(struct vnode *vp, const char *str) 5319 { 5320 5321 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5322 vfs_badlock("is not exclusive locked but should be", str, vp); 5323 } 5324 #endif /* DEBUG_VFS_LOCKS */ 5325 5326 void 5327 vop_rename_fail(struct vop_rename_args *ap) 5328 { 5329 5330 if (ap->a_tvp != NULL) 5331 vput(ap->a_tvp); 5332 if (ap->a_tdvp == ap->a_tvp) 5333 vrele(ap->a_tdvp); 5334 else 5335 vput(ap->a_tdvp); 5336 vrele(ap->a_fdvp); 5337 vrele(ap->a_fvp); 5338 } 5339 5340 void 5341 vop_rename_pre(void *ap) 5342 { 5343 struct vop_rename_args *a = ap; 5344 5345 #ifdef DEBUG_VFS_LOCKS 5346 if (a->a_tvp) 5347 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5348 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5349 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5350 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5351 5352 /* Check the source (from). */ 5353 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5354 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5355 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5356 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5357 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5358 5359 /* Check the target. */ 5360 if (a->a_tvp) 5361 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5362 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5363 #endif 5364 /* 5365 * It may be tempting to add vn_seqc_write_begin/end calls here and 5366 * in vop_rename_post but that's not going to work out since some 5367 * filesystems relookup vnodes mid-rename. This is probably a bug. 5368 * 5369 * For now filesystems are expected to do the relevant calls after they 5370 * decide what vnodes to operate on. 
5371 */ 5372 if (a->a_tdvp != a->a_fdvp) 5373 vhold(a->a_fdvp); 5374 if (a->a_tvp != a->a_fvp) 5375 vhold(a->a_fvp); 5376 vhold(a->a_tdvp); 5377 if (a->a_tvp) 5378 vhold(a->a_tvp); 5379 } 5380 5381 #ifdef DEBUG_VFS_LOCKS 5382 void 5383 vop_fplookup_vexec_debugpre(void *ap __unused) 5384 { 5385 5386 VFS_SMR_ASSERT_ENTERED(); 5387 } 5388 5389 void 5390 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5391 { 5392 5393 VFS_SMR_ASSERT_ENTERED(); 5394 } 5395 5396 void 5397 vop_strategy_debugpre(void *ap) 5398 { 5399 struct vop_strategy_args *a; 5400 struct buf *bp; 5401 5402 a = ap; 5403 bp = a->a_bp; 5404 5405 /* 5406 * Cluster ops lock their component buffers but not the IO container. 5407 */ 5408 if ((bp->b_flags & B_CLUSTER) != 0) 5409 return; 5410 5411 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5412 if (vfs_badlock_print) 5413 printf( 5414 "VOP_STRATEGY: bp is not locked but should be\n"); 5415 if (vfs_badlock_ddb) 5416 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5417 } 5418 } 5419 5420 void 5421 vop_lock_debugpre(void *ap) 5422 { 5423 struct vop_lock1_args *a = ap; 5424 5425 if ((a->a_flags & LK_INTERLOCK) == 0) 5426 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5427 else 5428 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5429 } 5430 5431 void 5432 vop_lock_debugpost(void *ap, int rc) 5433 { 5434 struct vop_lock1_args *a = ap; 5435 5436 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5437 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5438 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5439 } 5440 5441 void 5442 vop_unlock_debugpre(void *ap) 5443 { 5444 struct vop_unlock_args *a = ap; 5445 5446 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5447 } 5448 5449 void 5450 vop_need_inactive_debugpre(void *ap) 5451 { 5452 struct vop_need_inactive_args *a = ap; 5453 5454 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5455 } 5456 5457 void 5458 vop_need_inactive_debugpost(void *ap, int rc) 5459 { 5460 struct vop_need_inactive_args *a = ap; 5461 5462 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5463 } 5464 #endif 5465 5466 void 5467 vop_create_pre(void *ap) 5468 { 5469 struct vop_create_args *a; 5470 struct vnode *dvp; 5471 5472 a = ap; 5473 dvp = a->a_dvp; 5474 vn_seqc_write_begin(dvp); 5475 } 5476 5477 void 5478 vop_create_post(void *ap, int rc) 5479 { 5480 struct vop_create_args *a; 5481 struct vnode *dvp; 5482 5483 a = ap; 5484 dvp = a->a_dvp; 5485 vn_seqc_write_end(dvp); 5486 if (!rc) 5487 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5488 } 5489 5490 void 5491 vop_whiteout_pre(void *ap) 5492 { 5493 struct vop_whiteout_args *a; 5494 struct vnode *dvp; 5495 5496 a = ap; 5497 dvp = a->a_dvp; 5498 vn_seqc_write_begin(dvp); 5499 } 5500 5501 void 5502 vop_whiteout_post(void *ap, int rc) 5503 { 5504 struct vop_whiteout_args *a; 5505 struct vnode *dvp; 5506 5507 a = ap; 5508 dvp = a->a_dvp; 5509 vn_seqc_write_end(dvp); 5510 } 5511 5512 void 5513 vop_deleteextattr_pre(void *ap) 5514 { 5515 struct vop_deleteextattr_args *a; 5516 struct vnode *vp; 5517 5518 a = ap; 5519 vp = a->a_vp; 5520 vn_seqc_write_begin(vp); 5521 } 5522 5523 void 5524 vop_deleteextattr_post(void *ap, int rc) 5525 { 5526 struct vop_deleteextattr_args *a; 5527 struct vnode *vp; 5528 5529 a = ap; 5530 vp = a->a_vp; 5531 vn_seqc_write_end(vp); 5532 if (!rc) 5533 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5534 } 5535 5536 void 5537 vop_link_pre(void *ap) 5538 { 5539 struct vop_link_args *a; 5540 struct vnode *vp, *tdvp; 5541 5542 a = ap; 5543 vp = a->a_vp; 5544 tdvp = a->a_tdvp; 5545 vn_seqc_write_begin(vp); 5546 vn_seqc_write_begin(tdvp); 5547 } 5548 
5549 void 5550 vop_link_post(void *ap, int rc) 5551 { 5552 struct vop_link_args *a; 5553 struct vnode *vp, *tdvp; 5554 5555 a = ap; 5556 vp = a->a_vp; 5557 tdvp = a->a_tdvp; 5558 vn_seqc_write_end(vp); 5559 vn_seqc_write_end(tdvp); 5560 if (!rc) { 5561 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5562 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5563 } 5564 } 5565 5566 void 5567 vop_mkdir_pre(void *ap) 5568 { 5569 struct vop_mkdir_args *a; 5570 struct vnode *dvp; 5571 5572 a = ap; 5573 dvp = a->a_dvp; 5574 vn_seqc_write_begin(dvp); 5575 } 5576 5577 void 5578 vop_mkdir_post(void *ap, int rc) 5579 { 5580 struct vop_mkdir_args *a; 5581 struct vnode *dvp; 5582 5583 a = ap; 5584 dvp = a->a_dvp; 5585 vn_seqc_write_end(dvp); 5586 if (!rc) 5587 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5588 } 5589 5590 void 5591 vop_mknod_pre(void *ap) 5592 { 5593 struct vop_mknod_args *a; 5594 struct vnode *dvp; 5595 5596 a = ap; 5597 dvp = a->a_dvp; 5598 vn_seqc_write_begin(dvp); 5599 } 5600 5601 void 5602 vop_mknod_post(void *ap, int rc) 5603 { 5604 struct vop_mknod_args *a; 5605 struct vnode *dvp; 5606 5607 a = ap; 5608 dvp = a->a_dvp; 5609 vn_seqc_write_end(dvp); 5610 if (!rc) 5611 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5612 } 5613 5614 void 5615 vop_reclaim_post(void *ap, int rc) 5616 { 5617 struct vop_reclaim_args *a; 5618 struct vnode *vp; 5619 5620 a = ap; 5621 vp = a->a_vp; 5622 ASSERT_VOP_IN_SEQC(vp); 5623 if (!rc) 5624 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5625 } 5626 5627 void 5628 vop_remove_pre(void *ap) 5629 { 5630 struct vop_remove_args *a; 5631 struct vnode *dvp, *vp; 5632 5633 a = ap; 5634 dvp = a->a_dvp; 5635 vp = a->a_vp; 5636 vn_seqc_write_begin(dvp); 5637 vn_seqc_write_begin(vp); 5638 } 5639 5640 void 5641 vop_remove_post(void *ap, int rc) 5642 { 5643 struct vop_remove_args *a; 5644 struct vnode *dvp, *vp; 5645 5646 a = ap; 5647 dvp = a->a_dvp; 5648 vp = a->a_vp; 5649 vn_seqc_write_end(dvp); 5650 vn_seqc_write_end(vp); 5651 if (!rc) { 5652 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5653 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5654 } 5655 } 5656 5657 void 5658 vop_rename_post(void *ap, int rc) 5659 { 5660 struct vop_rename_args *a = ap; 5661 long hint; 5662 5663 if (!rc) { 5664 hint = NOTE_WRITE; 5665 if (a->a_fdvp == a->a_tdvp) { 5666 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5667 hint |= NOTE_LINK; 5668 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5669 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5670 } else { 5671 hint |= NOTE_EXTEND; 5672 if (a->a_fvp->v_type == VDIR) 5673 hint |= NOTE_LINK; 5674 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5675 5676 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5677 a->a_tvp->v_type == VDIR) 5678 hint &= ~NOTE_LINK; 5679 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5680 } 5681 5682 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5683 if (a->a_tvp) 5684 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5685 } 5686 if (a->a_tdvp != a->a_fdvp) 5687 vdrop(a->a_fdvp); 5688 if (a->a_tvp != a->a_fvp) 5689 vdrop(a->a_fvp); 5690 vdrop(a->a_tdvp); 5691 if (a->a_tvp) 5692 vdrop(a->a_tvp); 5693 } 5694 5695 void 5696 vop_rmdir_pre(void *ap) 5697 { 5698 struct vop_rmdir_args *a; 5699 struct vnode *dvp, *vp; 5700 5701 a = ap; 5702 dvp = a->a_dvp; 5703 vp = a->a_vp; 5704 vn_seqc_write_begin(dvp); 5705 vn_seqc_write_begin(vp); 5706 } 5707 5708 void 5709 vop_rmdir_post(void *ap, int rc) 5710 { 5711 struct vop_rmdir_args *a; 5712 struct vnode *dvp, *vp; 5713 5714 a = ap; 5715 dvp = a->a_dvp; 5716 vp = a->a_vp; 5717 vn_seqc_write_end(dvp); 5718 vn_seqc_write_end(vp); 5719 if (!rc) { 5720 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5721 
VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5722 } 5723 } 5724 5725 void 5726 vop_setattr_pre(void *ap) 5727 { 5728 struct vop_setattr_args *a; 5729 struct vnode *vp; 5730 5731 a = ap; 5732 vp = a->a_vp; 5733 vn_seqc_write_begin(vp); 5734 } 5735 5736 void 5737 vop_setattr_post(void *ap, int rc) 5738 { 5739 struct vop_setattr_args *a; 5740 struct vnode *vp; 5741 5742 a = ap; 5743 vp = a->a_vp; 5744 vn_seqc_write_end(vp); 5745 if (!rc) 5746 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5747 } 5748 5749 void 5750 vop_setacl_pre(void *ap) 5751 { 5752 struct vop_setacl_args *a; 5753 struct vnode *vp; 5754 5755 a = ap; 5756 vp = a->a_vp; 5757 vn_seqc_write_begin(vp); 5758 } 5759 5760 void 5761 vop_setacl_post(void *ap, int rc __unused) 5762 { 5763 struct vop_setacl_args *a; 5764 struct vnode *vp; 5765 5766 a = ap; 5767 vp = a->a_vp; 5768 vn_seqc_write_end(vp); 5769 } 5770 5771 void 5772 vop_setextattr_pre(void *ap) 5773 { 5774 struct vop_setextattr_args *a; 5775 struct vnode *vp; 5776 5777 a = ap; 5778 vp = a->a_vp; 5779 vn_seqc_write_begin(vp); 5780 } 5781 5782 void 5783 vop_setextattr_post(void *ap, int rc) 5784 { 5785 struct vop_setextattr_args *a; 5786 struct vnode *vp; 5787 5788 a = ap; 5789 vp = a->a_vp; 5790 vn_seqc_write_end(vp); 5791 if (!rc) 5792 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5793 } 5794 5795 void 5796 vop_symlink_pre(void *ap) 5797 { 5798 struct vop_symlink_args *a; 5799 struct vnode *dvp; 5800 5801 a = ap; 5802 dvp = a->a_dvp; 5803 vn_seqc_write_begin(dvp); 5804 } 5805 5806 void 5807 vop_symlink_post(void *ap, int rc) 5808 { 5809 struct vop_symlink_args *a; 5810 struct vnode *dvp; 5811 5812 a = ap; 5813 dvp = a->a_dvp; 5814 vn_seqc_write_end(dvp); 5815 if (!rc) 5816 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5817 } 5818 5819 void 5820 vop_open_post(void *ap, int rc) 5821 { 5822 struct vop_open_args *a = ap; 5823 5824 if (!rc) 5825 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5826 } 5827 5828 void 5829 vop_close_post(void *ap, int rc) 5830 { 5831 struct vop_close_args *a = ap; 5832 5833 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5834 !VN_IS_DOOMED(a->a_vp))) { 5835 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5836 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5837 } 5838 } 5839 5840 void 5841 vop_read_post(void *ap, int rc) 5842 { 5843 struct vop_read_args *a = ap; 5844 5845 if (!rc) 5846 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5847 } 5848 5849 void 5850 vop_read_pgcache_post(void *ap, int rc) 5851 { 5852 struct vop_read_pgcache_args *a = ap; 5853 5854 if (!rc) 5855 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 5856 } 5857 5858 void 5859 vop_readdir_post(void *ap, int rc) 5860 { 5861 struct vop_readdir_args *a = ap; 5862 5863 if (!rc) 5864 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5865 } 5866 5867 static struct knlist fs_knlist; 5868 5869 static void 5870 vfs_event_init(void *arg) 5871 { 5872 knlist_init_mtx(&fs_knlist, NULL); 5873 } 5874 /* XXX - correct order? 
*/ 5875 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5876 5877 void 5878 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5879 { 5880 5881 KNOTE_UNLOCKED(&fs_knlist, event); 5882 } 5883 5884 static int filt_fsattach(struct knote *kn); 5885 static void filt_fsdetach(struct knote *kn); 5886 static int filt_fsevent(struct knote *kn, long hint); 5887 5888 struct filterops fs_filtops = { 5889 .f_isfd = 0, 5890 .f_attach = filt_fsattach, 5891 .f_detach = filt_fsdetach, 5892 .f_event = filt_fsevent 5893 }; 5894 5895 static int 5896 filt_fsattach(struct knote *kn) 5897 { 5898 5899 kn->kn_flags |= EV_CLEAR; 5900 knlist_add(&fs_knlist, kn, 0); 5901 return (0); 5902 } 5903 5904 static void 5905 filt_fsdetach(struct knote *kn) 5906 { 5907 5908 knlist_remove(&fs_knlist, kn, 0); 5909 } 5910 5911 static int 5912 filt_fsevent(struct knote *kn, long hint) 5913 { 5914 5915 kn->kn_fflags |= hint; 5916 return (kn->kn_fflags != 0); 5917 } 5918 5919 static int 5920 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5921 { 5922 struct vfsidctl vc; 5923 int error; 5924 struct mount *mp; 5925 5926 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5927 if (error) 5928 return (error); 5929 if (vc.vc_vers != VFS_CTL_VERS1) 5930 return (EINVAL); 5931 mp = vfs_getvfs(&vc.vc_fsid); 5932 if (mp == NULL) 5933 return (ENOENT); 5934 /* ensure that a specific sysctl goes to the right filesystem. */ 5935 if (strcmp(vc.vc_fstypename, "*") != 0 && 5936 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5937 vfs_rel(mp); 5938 return (EINVAL); 5939 } 5940 VCTLTOREQ(&vc, req); 5941 error = VFS_SYSCTL(mp, vc.vc_op, req); 5942 vfs_rel(mp); 5943 return (error); 5944 } 5945 5946 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 5947 NULL, 0, sysctl_vfs_ctl, "", 5948 "Sysctl by fsid"); 5949 5950 /* 5951 * Function to initialize a va_filerev field sensibly. 5952 * XXX: Wouldn't a random number make a lot more sense ?? 
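 *
 * The returned value packs the seconds of the time since boot into the
 * upper 32 bits and the top half of the fractional part into the lower
 * 32 bits, so successive calls produce non-decreasing values.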
5953 */ 5954 u_quad_t 5955 init_va_filerev(void) 5956 { 5957 struct bintime bt; 5958 5959 getbinuptime(&bt); 5960 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5961 } 5962 5963 static int filt_vfsread(struct knote *kn, long hint); 5964 static int filt_vfswrite(struct knote *kn, long hint); 5965 static int filt_vfsvnode(struct knote *kn, long hint); 5966 static void filt_vfsdetach(struct knote *kn); 5967 static struct filterops vfsread_filtops = { 5968 .f_isfd = 1, 5969 .f_detach = filt_vfsdetach, 5970 .f_event = filt_vfsread 5971 }; 5972 static struct filterops vfswrite_filtops = { 5973 .f_isfd = 1, 5974 .f_detach = filt_vfsdetach, 5975 .f_event = filt_vfswrite 5976 }; 5977 static struct filterops vfsvnode_filtops = { 5978 .f_isfd = 1, 5979 .f_detach = filt_vfsdetach, 5980 .f_event = filt_vfsvnode 5981 }; 5982 5983 static void 5984 vfs_knllock(void *arg) 5985 { 5986 struct vnode *vp = arg; 5987 5988 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5989 } 5990 5991 static void 5992 vfs_knlunlock(void *arg) 5993 { 5994 struct vnode *vp = arg; 5995 5996 VOP_UNLOCK(vp); 5997 } 5998 5999 static void 6000 vfs_knl_assert_locked(void *arg) 6001 { 6002 #ifdef DEBUG_VFS_LOCKS 6003 struct vnode *vp = arg; 6004 6005 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6006 #endif 6007 } 6008 6009 static void 6010 vfs_knl_assert_unlocked(void *arg) 6011 { 6012 #ifdef DEBUG_VFS_LOCKS 6013 struct vnode *vp = arg; 6014 6015 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6016 #endif 6017 } 6018 6019 int 6020 vfs_kqfilter(struct vop_kqfilter_args *ap) 6021 { 6022 struct vnode *vp = ap->a_vp; 6023 struct knote *kn = ap->a_kn; 6024 struct knlist *knl; 6025 6026 switch (kn->kn_filter) { 6027 case EVFILT_READ: 6028 kn->kn_fop = &vfsread_filtops; 6029 break; 6030 case EVFILT_WRITE: 6031 kn->kn_fop = &vfswrite_filtops; 6032 break; 6033 case EVFILT_VNODE: 6034 kn->kn_fop = &vfsvnode_filtops; 6035 break; 6036 default: 6037 return (EINVAL); 6038 } 6039 6040 kn->kn_hook = (caddr_t)vp; 6041 6042 v_addpollinfo(vp); 6043 if (vp->v_pollinfo == NULL) 6044 return (ENOMEM); 6045 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6046 vhold(vp); 6047 knlist_add(knl, kn, 0); 6048 6049 return (0); 6050 } 6051 6052 /* 6053 * Detach knote from vnode 6054 */ 6055 static void 6056 filt_vfsdetach(struct knote *kn) 6057 { 6058 struct vnode *vp = (struct vnode *)kn->kn_hook; 6059 6060 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6061 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6062 vdrop(vp); 6063 } 6064 6065 /*ARGSUSED*/ 6066 static int 6067 filt_vfsread(struct knote *kn, long hint) 6068 { 6069 struct vnode *vp = (struct vnode *)kn->kn_hook; 6070 struct vattr va; 6071 int res; 6072 6073 /* 6074 * filesystem is gone, so set the EOF flag and schedule 6075 * the knote for deletion. 6076 */ 6077 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6078 VI_LOCK(vp); 6079 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6080 VI_UNLOCK(vp); 6081 return (1); 6082 } 6083 6084 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6085 return (0); 6086 6087 VI_LOCK(vp); 6088 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6089 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6090 VI_UNLOCK(vp); 6091 return (res); 6092 } 6093 6094 /*ARGSUSED*/ 6095 static int 6096 filt_vfswrite(struct knote *kn, long hint) 6097 { 6098 struct vnode *vp = (struct vnode *)kn->kn_hook; 6099 6100 VI_LOCK(vp); 6101 6102 /* 6103 * filesystem is gone, so set the EOF flag and schedule 6104 * the knote for deletion. 
6105 */ 6106 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6107 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6108 6109 kn->kn_data = 0; 6110 VI_UNLOCK(vp); 6111 return (1); 6112 } 6113 6114 static int 6115 filt_vfsvnode(struct knote *kn, long hint) 6116 { 6117 struct vnode *vp = (struct vnode *)kn->kn_hook; 6118 int res; 6119 6120 VI_LOCK(vp); 6121 if (kn->kn_sfflags & hint) 6122 kn->kn_fflags |= hint; 6123 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6124 kn->kn_flags |= EV_EOF; 6125 VI_UNLOCK(vp); 6126 return (1); 6127 } 6128 res = (kn->kn_fflags != 0); 6129 VI_UNLOCK(vp); 6130 return (res); 6131 } 6132 6133 /* 6134 * Returns whether the directory is empty or not. 6135 * If it is empty, the return value is 0; otherwise 6136 * the return value is an error value (which may 6137 * be ENOTEMPTY). 6138 */ 6139 int 6140 vfs_emptydir(struct vnode *vp) 6141 { 6142 struct uio uio; 6143 struct iovec iov; 6144 struct dirent *dirent, *dp, *endp; 6145 int error, eof; 6146 6147 error = 0; 6148 eof = 0; 6149 6150 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6151 6152 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6153 iov.iov_base = dirent; 6154 iov.iov_len = sizeof(struct dirent); 6155 6156 uio.uio_iov = &iov; 6157 uio.uio_iovcnt = 1; 6158 uio.uio_offset = 0; 6159 uio.uio_resid = sizeof(struct dirent); 6160 uio.uio_segflg = UIO_SYSSPACE; 6161 uio.uio_rw = UIO_READ; 6162 uio.uio_td = curthread; 6163 6164 while (eof == 0 && error == 0) { 6165 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6166 NULL, NULL); 6167 if (error != 0) 6168 break; 6169 endp = (void *)((uint8_t *)dirent + 6170 sizeof(struct dirent) - uio.uio_resid); 6171 for (dp = dirent; dp < endp; 6172 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6173 if (dp->d_type == DT_WHT) 6174 continue; 6175 if (dp->d_namlen == 0) 6176 continue; 6177 if (dp->d_type != DT_DIR && 6178 dp->d_type != DT_UNKNOWN) { 6179 error = ENOTEMPTY; 6180 break; 6181 } 6182 if (dp->d_namlen > 2) { 6183 error = ENOTEMPTY; 6184 break; 6185 } 6186 if (dp->d_namlen == 1 && 6187 dp->d_name[0] != '.') { 6188 error = ENOTEMPTY; 6189 break; 6190 } 6191 if (dp->d_namlen == 2 && 6192 dp->d_name[1] != '.') { 6193 error = ENOTEMPTY; 6194 break; 6195 } 6196 uio.uio_resid = sizeof(struct dirent); 6197 } 6198 } 6199 free(dirent, M_TEMP); 6200 return (error); 6201 } 6202 6203 int 6204 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6205 { 6206 int error; 6207 6208 if (dp->d_reclen > ap->a_uio->uio_resid) 6209 return (ENAMETOOLONG); 6210 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6211 if (error) { 6212 if (ap->a_ncookies != NULL) { 6213 if (ap->a_cookies != NULL) 6214 free(ap->a_cookies, M_TEMP); 6215 ap->a_cookies = NULL; 6216 *ap->a_ncookies = 0; 6217 } 6218 return (error); 6219 } 6220 if (ap->a_ncookies == NULL) 6221 return (0); 6222 6223 KASSERT(ap->a_cookies, 6224 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6225 6226 *ap->a_cookies = realloc(*ap->a_cookies, 6227 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 6228 (*ap->a_cookies)[*ap->a_ncookies] = off; 6229 *ap->a_ncookies += 1; 6230 return (0); 6231 } 6232 6233 /* 6234 * The purpose of this routine is to remove granularity from accmode_t, 6235 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6236 * VADMIN and VAPPEND. 6237 * 6238 * If it returns 0, the caller is supposed to continue with the usual 6239 * access checks using 'accmode' as modified by this routine. 
If it 6240 * returns nonzero value, the caller is supposed to return that value 6241 * as errno. 6242 * 6243 * Note that after this routine runs, accmode may be zero. 6244 */ 6245 int 6246 vfs_unixify_accmode(accmode_t *accmode) 6247 { 6248 /* 6249 * There is no way to specify explicit "deny" rule using 6250 * file mode or POSIX.1e ACLs. 6251 */ 6252 if (*accmode & VEXPLICIT_DENY) { 6253 *accmode = 0; 6254 return (0); 6255 } 6256 6257 /* 6258 * None of these can be translated into usual access bits. 6259 * Also, the common case for NFSv4 ACLs is to not contain 6260 * either of these bits. Caller should check for VWRITE 6261 * on the containing directory instead. 6262 */ 6263 if (*accmode & (VDELETE_CHILD | VDELETE)) 6264 return (EPERM); 6265 6266 if (*accmode & VADMIN_PERMS) { 6267 *accmode &= ~VADMIN_PERMS; 6268 *accmode |= VADMIN; 6269 } 6270 6271 /* 6272 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6273 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6274 */ 6275 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6276 6277 return (0); 6278 } 6279 6280 /* 6281 * Clear out a doomed vnode (if any) and replace it with a new one as long 6282 * as the fs is not being unmounted. Return the root vnode to the caller. 6283 */ 6284 static int __noinline 6285 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6286 { 6287 struct vnode *vp; 6288 int error; 6289 6290 restart: 6291 if (mp->mnt_rootvnode != NULL) { 6292 MNT_ILOCK(mp); 6293 vp = mp->mnt_rootvnode; 6294 if (vp != NULL) { 6295 if (!VN_IS_DOOMED(vp)) { 6296 vrefact(vp); 6297 MNT_IUNLOCK(mp); 6298 error = vn_lock(vp, flags); 6299 if (error == 0) { 6300 *vpp = vp; 6301 return (0); 6302 } 6303 vrele(vp); 6304 goto restart; 6305 } 6306 /* 6307 * Clear the old one. 6308 */ 6309 mp->mnt_rootvnode = NULL; 6310 } 6311 MNT_IUNLOCK(mp); 6312 if (vp != NULL) { 6313 vfs_op_barrier_wait(mp); 6314 vrele(vp); 6315 } 6316 } 6317 error = VFS_CACHEDROOT(mp, flags, vpp); 6318 if (error != 0) 6319 return (error); 6320 if (mp->mnt_vfs_ops == 0) { 6321 MNT_ILOCK(mp); 6322 if (mp->mnt_vfs_ops != 0) { 6323 MNT_IUNLOCK(mp); 6324 return (0); 6325 } 6326 if (mp->mnt_rootvnode == NULL) { 6327 vrefact(*vpp); 6328 mp->mnt_rootvnode = *vpp; 6329 } else { 6330 if (mp->mnt_rootvnode != *vpp) { 6331 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6332 panic("%s: mismatch between vnode returned " 6333 " by VFS_CACHEDROOT and the one cached " 6334 " (%p != %p)", 6335 __func__, *vpp, mp->mnt_rootvnode); 6336 } 6337 } 6338 } 6339 MNT_IUNLOCK(mp); 6340 } 6341 return (0); 6342 } 6343 6344 int 6345 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6346 { 6347 struct vnode *vp; 6348 int error; 6349 6350 if (!vfs_op_thread_enter(mp)) 6351 return (vfs_cache_root_fallback(mp, flags, vpp)); 6352 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6353 if (vp == NULL || VN_IS_DOOMED(vp)) { 6354 vfs_op_thread_exit(mp); 6355 return (vfs_cache_root_fallback(mp, flags, vpp)); 6356 } 6357 vrefact(vp); 6358 vfs_op_thread_exit(mp); 6359 error = vn_lock(vp, flags); 6360 if (error != 0) { 6361 vrele(vp); 6362 return (vfs_cache_root_fallback(mp, flags, vpp)); 6363 } 6364 *vpp = vp; 6365 return (0); 6366 } 6367 6368 struct vnode * 6369 vfs_cache_root_clear(struct mount *mp) 6370 { 6371 struct vnode *vp; 6372 6373 /* 6374 * ops > 0 guarantees there is nobody who can see this vnode 6375 */ 6376 MPASS(mp->mnt_vfs_ops > 0); 6377 vp = mp->mnt_rootvnode; 6378 if (vp != NULL) 6379 vn_seqc_write_begin(vp); 6380 mp->mnt_rootvnode = NULL; 6381 return (vp); 6382 } 6383 6384 void 6385 
void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
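/*
 * Illustrative sketch (not compiled in): the intended consumer of the
 * helpers above is the MNT_VNODE_FOREACH_ALL() loop.  Each vnode is
 * returned with its interlock held; a caller breaking out of the loop
 * early is expected to use MNT_VNODE_FOREACH_ALL_ABORT() so the marker
 * gets removed.  The filtering and the work done in the body are
 * invented for the example.
 */
#if 0
static void
example_walk_all_vnodes(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);		/* keep vp alive across the work */
		VI_UNLOCK(vp);
		/* ... inspect or flush the vnode here ... */
		vdrop(vp);
	}
}
#endif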
/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h.
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note that we may be racing against vdrop(), which transitioned
	 * the hold count to 0 and now waits for the ->mnt_listmtx lock.
	 * This is fine: if we turn out to be the only user once we get
	 * the interlock, we will simply vdrop the vnode ourselves.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * Since we dropped all locks for a moment, we may now be the
	 * last user; in that case there is nothing left to iterate on
	 * and the vnode is dropped below.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}
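/*
 * Illustrative sketch (not compiled in): a walk over the lazy list via
 * MNT_VNODE_FOREACH_LAZY(), which is built on the iterator functions in
 * this section.  The callback filters vnodes while only the list lock
 * is held, so it must be cheap and tolerate races; accepted vnodes are
 * handed to the loop body with the interlock held.  The filter and the
 * body here are invented for the example.
 */
#if 0
static int
example_lazy_cb(struct vnode *vp, void *cbarg __unused)
{

	/* Lock-free filter; false positives are harmless. */
	return (vp->v_type == VREG);
}

static void
example_lazy_walk(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, example_lazy_cb, NULL) {
		vholdl(vp);		/* keep vp alive across the work */
		VI_UNLOCK(vp);
		/* ... e.g. sync the vnode's dirty pages ... */
		vdrop(vp);
	}
}
#endif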
static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note that we may
		 * encounter a long string of vnodes we don't care about
		 * and hog the list lock as a result.  Check for it and
		 * requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		VNPASS(vp->v_mount == mp,
		    vp);	/* no alien vnodes on the lazy list */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread));
}
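/*
 * Illustrative sketch (not compiled in): how a lookup-style consumer
 * might use vn_dir_check_exec() while descending a path.  The helper
 * name is invented; the real consumer is the VFS lookup code, which
 * performs this check for each directory it crosses and may have set
 * NOEXECCHECK when an earlier lockless pass already verified access.
 */
#if 0
static int
example_descend(struct vnode *dvp, struct componentname *cnp)
{
	int error;

	/* Search permission on the directory, unless already verified. */
	error = vn_dir_check_exec(dvp, cnp);
	if (error != 0)
		return (error);
	/* ... proceed with VOP_LOOKUP() on dvp ... */
	return (0);
}
#endif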
/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_unheld_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	vn_seqc_write_begin_unheld_locked(vp);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_begin_unheld(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_unheld_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}
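/*
 * Illustrative sketch (not compiled in): the seqc write side brackets
 * any modification which must not be observed half-done by lockless
 * readers (for example the name cache sampling v_seqc).  The helper
 * name and the elided update stand in for real consumers such as the
 * rename path.
 */
#if 0
static void
example_modify_for_lockless_readers(struct vnode *vp)
{

	vn_seqc_write_begin(vp);
	/* ... update state consumed by seqc-protected readers ... */
	vn_seqc_write_end(vp);
}
#endif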