1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 #include "opt_ddb.h" 45 #include "opt_watchdog.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/asan.h> 50 #include <sys/bio.h> 51 #include <sys/buf.h> 52 #include <sys/capsicum.h> 53 #include <sys/condvar.h> 54 #include <sys/conf.h> 55 #include <sys/counter.h> 56 #include <sys/dirent.h> 57 #include <sys/event.h> 58 #include <sys/eventhandler.h> 59 #include <sys/extattr.h> 60 #include <sys/file.h> 61 #include <sys/fcntl.h> 62 #include <sys/jail.h> 63 #include <sys/kdb.h> 64 #include <sys/kernel.h> 65 #include <sys/kthread.h> 66 #include <sys/ktr.h> 67 #include <sys/limits.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smr.h> 80 #include <sys/smp.h> 81 #include <sys/stat.h> 82 #include <sys/sysctl.h> 83 #include <sys/syslog.h> 84 #include <sys/vmmeter.h> 85 #include <sys/vnode.h> 86 #include <sys/watchdog.h> 87 88 #include <machine/stdarg.h> 89 90 #include <security/mac/mac_framework.h> 91 92 #include <vm/vm.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_extern.h> 95 #include <vm/pmap.h> 96 #include <vm/vm_map.h> 97 #include <vm/vm_page.h> 98 #include <vm/vm_kern.h> 99 #include <vm/uma.h> 100 101 #if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS)) 102 #error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS 103 #endif 104 105 #ifdef DDB 106 #include <ddb/ddb.h> 107 #endif 108 109 static void delmntque(struct vnode *vp); 110 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 111 int slpflag, int slptimeo); 112 static void syncer_shutdown(void *arg, int howto); 113 static int vtryrecycle(struct vnode *vp); 114 static void v_init_counters(struct vnode *); 115 static void vn_seqc_init(struct vnode *); 116 static void vn_seqc_write_end_free(struct vnode *vp); 117 static void vgonel(struct vnode *); 118 static bool vhold_recycle_free(struct vnode *); 119 static void vdropl_recycle(struct vnode *vp); 120 static void vdrop_recycle(struct vnode *vp); 121 static void vfs_knllock(void *arg); 122 static void vfs_knlunlock(void *arg); 123 static void vfs_knl_assert_lock(void *arg, int what); 124 static void destroy_vpollinfo(struct vpollinfo *vi); 125 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 126 daddr_t startlbn, daddr_t endlbn); 127 static void vnlru_recalc(void); 128 129 /* 130 * Number of vnodes in existence. Increased whenever getnewvnode() 131 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 132 */ 133 static u_long __exclusive_cache_line numvnodes; 134 135 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 136 "Number of vnodes in existence"); 137 138 static counter_u64_t vnodes_created; 139 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 140 "Number of vnodes created by getnewvnode"); 141 142 /* 143 * Conversion tables for conversion from vnode types to inode formats 144 * and back. 
145 */ 146 __enum_uint8(vtype) iftovt_tab[16] = { 147 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 148 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 149 }; 150 int vttoif_tab[10] = { 151 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 152 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 153 }; 154 155 /* 156 * List of allocates vnodes in the system. 157 */ 158 static TAILQ_HEAD(freelst, vnode) vnode_list; 159 static struct vnode *vnode_list_free_marker; 160 static struct vnode *vnode_list_reclaim_marker; 161 162 /* 163 * "Free" vnode target. Free vnodes are rarely completely free, but are 164 * just ones that are cheap to recycle. Usually they are for files which 165 * have been stat'd but not read; these usually have inode and namecache 166 * data attached to them. This target is the preferred minimum size of a 167 * sub-cache consisting mostly of such files. The system balances the size 168 * of this sub-cache with its complement to try to prevent either from 169 * thrashing while the other is relatively inactive. The targets express 170 * a preference for the best balance. 171 * 172 * "Above" this target there are 2 further targets (watermarks) related 173 * to recyling of free vnodes. In the best-operating case, the cache is 174 * exactly full, the free list has size between vlowat and vhiwat above the 175 * free target, and recycling from it and normal use maintains this state. 176 * Sometimes the free list is below vlowat or even empty, but this state 177 * is even better for immediate use provided the cache is not full. 178 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 179 * ones) to reach one of these states. The watermarks are currently hard- 180 * coded as 4% and 9% of the available space higher. These and the default 181 * of 25% for wantfreevnodes are too large if the memory size is large. 182 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 183 * whenever vnlru_proc() becomes active. 
184 */ 185 static long wantfreevnodes; 186 static long __exclusive_cache_line freevnodes; 187 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 188 &freevnodes, 0, "Number of \"free\" vnodes"); 189 static long freevnodes_old; 190 191 static counter_u64_t recycles_count; 192 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 193 "Number of vnodes recycled to meet vnode cache targets"); 194 195 static counter_u64_t recycles_free_count; 196 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 197 "Number of free vnodes recycled to meet vnode cache targets"); 198 199 static counter_u64_t vnode_skipped_requeues; 200 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnode_skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues, 201 "Number of times LRU requeue was skipped due to lock contention"); 202 203 static u_long deferred_inact; 204 SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, 205 &deferred_inact, 0, "Number of times inactive processing was deferred"); 206 207 /* To keep more than one thread at a time from running vfs_getnewfsid */ 208 static struct mtx mntid_mtx; 209 210 /* 211 * Lock for any access to the following: 212 * vnode_list 213 * numvnodes 214 * freevnodes 215 */ 216 static struct mtx __exclusive_cache_line vnode_list_mtx; 217 218 /* Publicly exported FS */ 219 struct nfs_public nfs_pub; 220 221 static uma_zone_t buf_trie_zone; 222 static smr_t buf_trie_smr; 223 224 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 225 static uma_zone_t vnode_zone; 226 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 227 228 __read_frequently smr_t vfs_smr; 229 230 /* 231 * The workitem queue. 232 * 233 * It is useful to delay writes of file data and filesystem metadata 234 * for tens of seconds so that quickly created and deleted files need 235 * not waste disk bandwidth being created and removed. To realize this, 236 * we append vnodes to a "workitem" queue. When running with a soft 237 * updates implementation, most pending metadata dependencies should 238 * not wait for more than a few seconds. Thus, mounted on block devices 239 * are delayed only about a half the time that file data is delayed. 240 * Similarly, directory updates are more critical, so are only delayed 241 * about a third the time that file data is delayed. Thus, there are 242 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 243 * one each second (driven off the filesystem syncer process). The 244 * syncer_delayno variable indicates the next queue that is to be processed. 
245 * Items that need to be processed soon are placed in this queue: 246 * 247 * syncer_workitem_pending[syncer_delayno] 248 * 249 * A delay of fifteen seconds is done by placing the request fifteen 250 * entries later in the queue: 251 * 252 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 253 * 254 */ 255 static int syncer_delayno; 256 static long syncer_mask; 257 LIST_HEAD(synclist, bufobj); 258 static struct synclist *syncer_workitem_pending; 259 /* 260 * The sync_mtx protects: 261 * bo->bo_synclist 262 * sync_vnode_count 263 * syncer_delayno 264 * syncer_state 265 * syncer_workitem_pending 266 * syncer_worklist_len 267 * rushjob 268 */ 269 static struct mtx sync_mtx; 270 static struct cv sync_wakeup; 271 272 #define SYNCER_MAXDELAY 32 273 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 274 static int syncdelay = 30; /* max time to delay syncing data */ 275 static int filedelay = 30; /* time to delay syncing files */ 276 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 277 "Time to delay syncing files (in seconds)"); 278 static int dirdelay = 29; /* time to delay syncing directories */ 279 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 280 "Time to delay syncing directories (in seconds)"); 281 static int metadelay = 28; /* time to delay syncing metadata */ 282 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 283 "Time to delay syncing metadata (in seconds)"); 284 static int rushjob; /* number of slots to run ASAP */ 285 static int stat_rush_requests; /* number of times I/O speeded up */ 286 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 287 "Number of times I/O speeded up (rush requests)"); 288 289 #define VDBATCH_SIZE 8 290 struct vdbatch { 291 u_int index; 292 struct mtx lock; 293 struct vnode *tab[VDBATCH_SIZE]; 294 }; 295 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 296 297 static void vdbatch_dequeue(struct vnode *vp); 298 299 /* 300 * When shutting down the syncer, run it at four times normal speed. 301 */ 302 #define SYNCER_SHUTDOWN_SPEEDUP 4 303 static int sync_vnode_count; 304 static int syncer_worklist_len; 305 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 306 syncer_state; 307 308 /* Target for maximum number of vnodes. */ 309 u_long desiredvnodes; 310 static u_long gapvnodes; /* gap between wanted and desired */ 311 static u_long vhiwat; /* enough extras after expansion */ 312 static u_long vlowat; /* minimal extras before expansion */ 313 static u_long vstir; /* nonzero to stir non-free vnodes */ 314 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 315 316 static u_long vnlru_read_freevnodes(void); 317 318 /* 319 * Note that no attempt is made to sanitize these parameters. 320 */ 321 static int 322 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 323 { 324 u_long val; 325 int error; 326 327 val = desiredvnodes; 328 error = sysctl_handle_long(oidp, &val, 0, req); 329 if (error != 0 || req->newptr == NULL) 330 return (error); 331 332 if (val == desiredvnodes) 333 return (0); 334 mtx_lock(&vnode_list_mtx); 335 desiredvnodes = val; 336 wantfreevnodes = desiredvnodes / 4; 337 vnlru_recalc(); 338 mtx_unlock(&vnode_list_mtx); 339 /* 340 * XXX There is no protection against multiple threads changing 341 * desiredvnodes at the same time. Locking above only helps vnlru and 342 * getnewvnode. 
343 */ 344 vfs_hash_changesize(desiredvnodes); 345 cache_changesize(desiredvnodes); 346 return (0); 347 } 348 349 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 350 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 351 "LU", "Target for maximum number of vnodes"); 352 353 static int 354 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 355 { 356 u_long val; 357 int error; 358 359 val = wantfreevnodes; 360 error = sysctl_handle_long(oidp, &val, 0, req); 361 if (error != 0 || req->newptr == NULL) 362 return (error); 363 364 if (val == wantfreevnodes) 365 return (0); 366 mtx_lock(&vnode_list_mtx); 367 wantfreevnodes = val; 368 vnlru_recalc(); 369 mtx_unlock(&vnode_list_mtx); 370 return (0); 371 } 372 373 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 374 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 375 "LU", "Target for minimum number of \"free\" vnodes"); 376 377 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 378 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 379 static int vnlru_nowhere; 380 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW | CTLFLAG_STATS, 381 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 382 383 static int 384 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 385 { 386 struct vnode *vp; 387 struct nameidata nd; 388 char *buf; 389 unsigned long ndflags; 390 int error; 391 392 if (req->newptr == NULL) 393 return (EINVAL); 394 if (req->newlen >= PATH_MAX) 395 return (E2BIG); 396 397 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 398 error = SYSCTL_IN(req, buf, req->newlen); 399 if (error != 0) 400 goto out; 401 402 buf[req->newlen] = '\0'; 403 404 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1; 405 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf); 406 if ((error = namei(&nd)) != 0) 407 goto out; 408 vp = nd.ni_vp; 409 410 if (VN_IS_DOOMED(vp)) { 411 /* 412 * This vnode is being recycled. Return != 0 to let the caller 413 * know that the sysctl had no effect. Return EAGAIN because a 414 * subsequent call will likely succeed (since namei will create 415 * a new vnode if necessary) 416 */ 417 error = EAGAIN; 418 goto putvnode; 419 } 420 421 counter_u64_add(recycles_count, 1); 422 vgone(vp); 423 putvnode: 424 vput(vp); 425 NDFREE_PNBUF(&nd); 426 out: 427 free(buf, M_TEMP); 428 return (error); 429 } 430 431 static int 432 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 433 { 434 struct thread *td = curthread; 435 struct vnode *vp; 436 struct file *fp; 437 int error; 438 int fd; 439 440 if (req->newptr == NULL) 441 return (EBADF); 442 443 error = sysctl_handle_int(oidp, &fd, 0, req); 444 if (error != 0) 445 return (error); 446 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 447 if (error != 0) 448 return (error); 449 vp = fp->f_vnode; 450 451 error = vn_lock(vp, LK_EXCLUSIVE); 452 if (error != 0) 453 goto drop; 454 455 counter_u64_add(recycles_count, 1); 456 vgone(vp); 457 VOP_UNLOCK(vp); 458 drop: 459 fdrop(fp, td); 460 return (error); 461 } 462 463 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 464 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 465 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 466 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 467 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 468 sysctl_ftry_reclaim_vnode, "I", 469 "Try to reclaim a vnode by its file descriptor"); 470 471 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. 
*/ 472 #define vnsz2log 8 473 #ifndef DEBUG_LOCKS 474 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log && 475 sizeof(struct vnode) < 1UL << (vnsz2log + 1), 476 "vnsz2log needs to be updated"); 477 #endif 478 479 /* 480 * Support for the bufobj clean & dirty pctrie. 481 */ 482 static void * 483 buf_trie_alloc(struct pctrie *ptree) 484 { 485 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 486 } 487 488 static void 489 buf_trie_free(struct pctrie *ptree, void *node) 490 { 491 uma_zfree_smr(buf_trie_zone, node); 492 } 493 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 494 buf_trie_smr); 495 496 /* 497 * Initialize the vnode management data structures. 498 * 499 * Reevaluate the following cap on the number of vnodes after the physical 500 * memory size exceeds 512GB. In the limit, as the physical memory size 501 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 502 */ 503 #ifndef MAXVNODES_MAX 504 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 505 #endif 506 507 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 508 509 static struct vnode * 510 vn_alloc_marker(struct mount *mp) 511 { 512 struct vnode *vp; 513 514 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 515 vp->v_type = VMARKER; 516 vp->v_mount = mp; 517 518 return (vp); 519 } 520 521 static void 522 vn_free_marker(struct vnode *vp) 523 { 524 525 MPASS(vp->v_type == VMARKER); 526 free(vp, M_VNODE_MARKER); 527 } 528 529 #ifdef KASAN 530 static int 531 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 532 { 533 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 534 return (0); 535 } 536 537 static void 538 vnode_dtor(void *mem, int size, void *arg __unused) 539 { 540 size_t end1, end2, off1, off2; 541 542 _Static_assert(offsetof(struct vnode, v_vnodelist) < 543 offsetof(struct vnode, v_dbatchcpu), 544 "KASAN marks require updating"); 545 546 off1 = offsetof(struct vnode, v_vnodelist); 547 off2 = offsetof(struct vnode, v_dbatchcpu); 548 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 549 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 550 551 /* 552 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 553 * after the vnode has been freed. Try to get some KASAN coverage by 554 * marking everything except those two fields as invalid. Because 555 * KASAN's tracking is not byte-granular, any preceding fields sharing 556 * the same 8-byte aligned word must also be marked valid. 557 */ 558 559 /* Handle the area from the start until v_vnodelist... */ 560 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 561 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 562 563 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 564 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 565 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 566 if (off2 > off1) 567 kasan_mark((void *)((char *)mem + off1), off2 - off1, 568 off2 - off1, KASAN_UMA_FREED); 569 570 /* ... and finally the area from v_dbatchcpu to the end. */ 571 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 572 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 573 KASAN_UMA_FREED); 574 } 575 #endif /* KASAN */ 576 577 /* 578 * Initialize a vnode as it first enters the zone. 579 */ 580 static int 581 vnode_init(void *mem, int size, int flags) 582 { 583 struct vnode *vp; 584 585 vp = mem; 586 bzero(vp, size); 587 /* 588 * Setup locks. 
589 */ 590 vp->v_vnlock = &vp->v_lock; 591 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 592 /* 593 * By default, don't allow shared locks unless filesystems opt-in. 594 */ 595 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 596 LK_NOSHARE | LK_IS_VNODE); 597 /* 598 * Initialize bufobj. 599 */ 600 bufobj_init(&vp->v_bufobj, vp); 601 /* 602 * Initialize namecache. 603 */ 604 cache_vnode_init(vp); 605 /* 606 * Initialize rangelocks. 607 */ 608 rangelock_init(&vp->v_rl); 609 610 vp->v_dbatchcpu = NOCPU; 611 612 vp->v_state = VSTATE_DEAD; 613 614 /* 615 * Check vhold_recycle_free for an explanation. 616 */ 617 vp->v_holdcnt = VHOLD_NO_SMR; 618 vp->v_type = VNON; 619 mtx_lock(&vnode_list_mtx); 620 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 621 mtx_unlock(&vnode_list_mtx); 622 return (0); 623 } 624 625 /* 626 * Free a vnode when it is cleared from the zone. 627 */ 628 static void 629 vnode_fini(void *mem, int size) 630 { 631 struct vnode *vp; 632 struct bufobj *bo; 633 634 vp = mem; 635 vdbatch_dequeue(vp); 636 mtx_lock(&vnode_list_mtx); 637 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 638 mtx_unlock(&vnode_list_mtx); 639 rangelock_destroy(&vp->v_rl); 640 lockdestroy(vp->v_vnlock); 641 mtx_destroy(&vp->v_interlock); 642 bo = &vp->v_bufobj; 643 rw_destroy(BO_LOCKPTR(bo)); 644 645 kasan_mark(mem, size, size, 0); 646 } 647 648 /* 649 * Provide the size of NFS nclnode and NFS fh for calculation of the 650 * vnode memory consumption. The size is specified directly to 651 * eliminate dependency on NFS-private header. 652 * 653 * Other filesystems may use bigger or smaller (like UFS and ZFS) 654 * private inode data, but the NFS-based estimation is ample enough. 655 * Still, we care about differences in the size between 64- and 32-bit 656 * platforms. 657 * 658 * Namecache structure size is heuristically 659 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 660 */ 661 #ifdef _LP64 662 #define NFS_NCLNODE_SZ (528 + 64) 663 #define NC_SZ 148 664 #else 665 #define NFS_NCLNODE_SZ (360 + 32) 666 #define NC_SZ 92 667 #endif 668 669 static void 670 vntblinit(void *dummy __unused) 671 { 672 struct vdbatch *vd; 673 uma_ctor ctor; 674 uma_dtor dtor; 675 int cpu, physvnodes, virtvnodes; 676 677 /* 678 * Desiredvnodes is a function of the physical memory size and the 679 * kernel's heap size. Generally speaking, it scales with the 680 * physical memory size. The ratio of desiredvnodes to the physical 681 * memory size is 1:16 until desiredvnodes exceeds 98,304. 682 * Thereafter, the 683 * marginal ratio of desiredvnodes to the physical memory size is 684 * 1:64. However, desiredvnodes is limited by the kernel's heap 685 * size. The memory required by desiredvnodes vnodes and vm objects 686 * must not exceed 1/10th of the kernel's heap size. 687 */ 688 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 689 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 690 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 691 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 692 desiredvnodes = min(physvnodes, virtvnodes); 693 if (desiredvnodes > MAXVNODES_MAX) { 694 if (bootverbose) 695 printf("Reducing kern.maxvnodes %lu -> %lu\n", 696 desiredvnodes, MAXVNODES_MAX); 697 desiredvnodes = MAXVNODES_MAX; 698 } 699 wantfreevnodes = desiredvnodes / 4; 700 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 701 TAILQ_INIT(&vnode_list); 702 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 703 /* 704 * The lock is taken to appease WITNESS. 
705 */ 706 mtx_lock(&vnode_list_mtx); 707 vnlru_recalc(); 708 mtx_unlock(&vnode_list_mtx); 709 vnode_list_free_marker = vn_alloc_marker(NULL); 710 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 711 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 712 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 713 714 #ifdef KASAN 715 ctor = vnode_ctor; 716 dtor = vnode_dtor; 717 #else 718 ctor = NULL; 719 dtor = NULL; 720 #endif 721 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 722 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 723 uma_zone_set_smr(vnode_zone, vfs_smr); 724 725 /* 726 * Preallocate enough nodes to support one-per buf so that 727 * we can not fail an insert. reassignbuf() callers can not 728 * tolerate the insertion failure. 729 */ 730 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 731 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 732 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 733 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 734 uma_prealloc(buf_trie_zone, nbuf); 735 736 vnodes_created = counter_u64_alloc(M_WAITOK); 737 recycles_count = counter_u64_alloc(M_WAITOK); 738 recycles_free_count = counter_u64_alloc(M_WAITOK); 739 vnode_skipped_requeues = counter_u64_alloc(M_WAITOK); 740 741 /* 742 * Initialize the filesystem syncer. 743 */ 744 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 745 &syncer_mask); 746 syncer_maxdelay = syncer_mask + 1; 747 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 748 cv_init(&sync_wakeup, "syncer"); 749 750 CPU_FOREACH(cpu) { 751 vd = DPCPU_ID_PTR((cpu), vd); 752 bzero(vd, sizeof(*vd)); 753 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 754 } 755 } 756 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 757 758 /* 759 * Mark a mount point as busy. Used to synchronize access and to delay 760 * unmounting. Eventually, mountlist_mtx is not released on failure. 761 * 762 * vfs_busy() is a custom lock, it can block the caller. 763 * vfs_busy() only sleeps if the unmount is active on the mount point. 764 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 765 * vnode belonging to mp. 766 * 767 * Lookup uses vfs_busy() to traverse mount points. 768 * root fs var fs 769 * / vnode lock A / vnode lock (/var) D 770 * /var vnode lock B /log vnode lock(/var/log) E 771 * vfs_busy lock C vfs_busy lock F 772 * 773 * Within each file system, the lock order is C->A->B and F->D->E. 774 * 775 * When traversing across mounts, the system follows that lock order: 776 * 777 * C->A->B 778 * | 779 * +->F->D->E 780 * 781 * The lookup() process for namei("/var") illustrates the process: 782 * 1. VOP_LOOKUP() obtains B while A is held 783 * 2. vfs_busy() obtains a shared lock on F while A and B are held 784 * 3. vput() releases lock on B 785 * 4. vput() releases lock on A 786 * 5. VFS_ROOT() obtains lock on D while shared lock on F is held 787 * 6. vfs_unbusy() releases shared lock on F 788 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 789 * Attempt to lock A (instead of vp_crossmp) while D is held would 790 * violate the global order, causing deadlocks. 791 * 792 * dounmount() locks B while F is drained. Note that for stacked 793 * filesystems, D and B in the example above may be the same lock, 794 * which introdues potential lock order reversal deadlock between 795 * dounmount() and step 5 above. 
These filesystems may avoid the LOR 796 * by setting VV_CROSSLOCK on the covered vnode so that lock B will 797 * remain held until after step 5. 798 */ 799 int 800 vfs_busy(struct mount *mp, int flags) 801 { 802 struct mount_pcpu *mpcpu; 803 804 MPASS((flags & ~MBF_MASK) == 0); 805 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 806 807 if (vfs_op_thread_enter(mp, mpcpu)) { 808 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 809 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 810 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 811 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 812 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 813 vfs_op_thread_exit(mp, mpcpu); 814 if (flags & MBF_MNTLSTLOCK) 815 mtx_unlock(&mountlist_mtx); 816 return (0); 817 } 818 819 MNT_ILOCK(mp); 820 vfs_assert_mount_counters(mp); 821 MNT_REF(mp); 822 /* 823 * If mount point is currently being unmounted, sleep until the 824 * mount point fate is decided. If thread doing the unmounting fails, 825 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 826 * that this mount point has survived the unmount attempt and vfs_busy 827 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 828 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 829 * about to be really destroyed. vfs_busy needs to release its 830 * reference on the mount point in this case and return with ENOENT, 831 * telling the caller the mount it tried to busy is no longer valid. 832 */ 833 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 834 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), 835 ("%s: non-empty upper mount list with pending unmount", 836 __func__)); 837 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 838 MNT_REL(mp); 839 MNT_IUNLOCK(mp); 840 CTR1(KTR_VFS, "%s: failed busying before sleeping", 841 __func__); 842 return (ENOENT); 843 } 844 if (flags & MBF_MNTLSTLOCK) 845 mtx_unlock(&mountlist_mtx); 846 mp->mnt_kern_flag |= MNTK_MWAIT; 847 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 848 if (flags & MBF_MNTLSTLOCK) 849 mtx_lock(&mountlist_mtx); 850 MNT_ILOCK(mp); 851 } 852 if (flags & MBF_MNTLSTLOCK) 853 mtx_unlock(&mountlist_mtx); 854 mp->mnt_lockref++; 855 MNT_IUNLOCK(mp); 856 return (0); 857 } 858 859 /* 860 * Free a busy filesystem. 861 */ 862 void 863 vfs_unbusy(struct mount *mp) 864 { 865 struct mount_pcpu *mpcpu; 866 int c; 867 868 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 869 870 if (vfs_op_thread_enter(mp, mpcpu)) { 871 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 872 vfs_mp_count_sub_pcpu(mpcpu, lockref, 1); 873 vfs_mp_count_sub_pcpu(mpcpu, ref, 1); 874 vfs_op_thread_exit(mp, mpcpu); 875 return; 876 } 877 878 MNT_ILOCK(mp); 879 vfs_assert_mount_counters(mp); 880 MNT_REL(mp); 881 c = --mp->mnt_lockref; 882 if (mp->mnt_vfs_ops == 0) { 883 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 884 MNT_IUNLOCK(mp); 885 return; 886 } 887 if (c < 0) 888 vfs_dump_mount_counters(mp); 889 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 890 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 891 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 892 mp->mnt_kern_flag &= ~MNTK_DRAINING; 893 wakeup(&mp->mnt_lockref); 894 } 895 MNT_IUNLOCK(mp); 896 } 897 898 /* 899 * Lookup a mount point by filesystem identifier. 
900 */ 901 struct mount * 902 vfs_getvfs(fsid_t *fsid) 903 { 904 struct mount *mp; 905 906 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 907 mtx_lock(&mountlist_mtx); 908 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 909 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 910 vfs_ref(mp); 911 mtx_unlock(&mountlist_mtx); 912 return (mp); 913 } 914 } 915 mtx_unlock(&mountlist_mtx); 916 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 917 return ((struct mount *) 0); 918 } 919 920 /* 921 * Lookup a mount point by filesystem identifier, busying it before 922 * returning. 923 * 924 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 925 * cache for popular filesystem identifiers. The cache is lockess, using 926 * the fact that struct mount's are never freed. In worst case we may 927 * get pointer to unmounted or even different filesystem, so we have to 928 * check what we got, and go slow way if so. 929 */ 930 struct mount * 931 vfs_busyfs(fsid_t *fsid) 932 { 933 #define FSID_CACHE_SIZE 256 934 typedef struct mount * volatile vmp_t; 935 static vmp_t cache[FSID_CACHE_SIZE]; 936 struct mount *mp; 937 int error; 938 uint32_t hash; 939 940 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 941 hash = fsid->val[0] ^ fsid->val[1]; 942 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 943 mp = cache[hash]; 944 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 945 goto slow; 946 if (vfs_busy(mp, 0) != 0) { 947 cache[hash] = NULL; 948 goto slow; 949 } 950 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 951 return (mp); 952 else 953 vfs_unbusy(mp); 954 955 slow: 956 mtx_lock(&mountlist_mtx); 957 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 958 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 959 error = vfs_busy(mp, MBF_MNTLSTLOCK); 960 if (error) { 961 cache[hash] = NULL; 962 mtx_unlock(&mountlist_mtx); 963 return (NULL); 964 } 965 cache[hash] = mp; 966 return (mp); 967 } 968 } 969 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 970 mtx_unlock(&mountlist_mtx); 971 return ((struct mount *) 0); 972 } 973 974 /* 975 * Check if a user can access privileged mount options. 976 */ 977 int 978 vfs_suser(struct mount *mp, struct thread *td) 979 { 980 int error; 981 982 if (jailed(td->td_ucred)) { 983 /* 984 * If the jail of the calling thread lacks permission for 985 * this type of file system, deny immediately. 986 */ 987 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 988 return (EPERM); 989 990 /* 991 * If the file system was mounted outside the jail of the 992 * calling thread, deny immediately. 993 */ 994 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 995 return (EPERM); 996 } 997 998 /* 999 * If file system supports delegated administration, we don't check 1000 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 1001 * by the file system itself. 1002 * If this is not the user that did original mount, we check for 1003 * the PRIV_VFS_MOUNT_OWNER privilege. 1004 */ 1005 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 1006 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 1007 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 1008 return (error); 1009 } 1010 return (0); 1011 } 1012 1013 /* 1014 * Get a new unique fsid. Try to make its val[0] unique, since this value 1015 * will be used to create fake device numbers for stat(). Also try (but 1016 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1017 * support 16-bit device numbers. 
We end up with unique val[0]'s for the 1018 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1019 * 1020 * Keep in mind that several mounts may be running in parallel. Starting 1021 * the search one past where the previous search terminated is both a 1022 * micro-optimization and a defense against returning the same fsid to 1023 * different mounts. 1024 */ 1025 void 1026 vfs_getnewfsid(struct mount *mp) 1027 { 1028 static uint16_t mntid_base; 1029 struct mount *nmp; 1030 fsid_t tfsid; 1031 int mtype; 1032 1033 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1034 mtx_lock(&mntid_mtx); 1035 mtype = mp->mnt_vfc->vfc_typenum; 1036 tfsid.val[1] = mtype; 1037 mtype = (mtype & 0xFF) << 24; 1038 for (;;) { 1039 tfsid.val[0] = makedev(255, 1040 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1041 mntid_base++; 1042 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1043 break; 1044 vfs_rel(nmp); 1045 } 1046 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1047 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1048 mtx_unlock(&mntid_mtx); 1049 } 1050 1051 /* 1052 * Knob to control the precision of file timestamps: 1053 * 1054 * 0 = seconds only; nanoseconds zeroed. 1055 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1056 * 2 = seconds and nanoseconds, truncated to microseconds. 1057 * >=3 = seconds and nanoseconds, maximum precision. 1058 */ 1059 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1060 1061 static int timestamp_precision = TSP_USEC; 1062 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1063 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1064 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1065 "3+: sec + ns (max. precision))"); 1066 1067 /* 1068 * Get a current timestamp. 1069 */ 1070 void 1071 vfs_timestamp(struct timespec *tsp) 1072 { 1073 struct timeval tv; 1074 1075 switch (timestamp_precision) { 1076 case TSP_SEC: 1077 tsp->tv_sec = time_second; 1078 tsp->tv_nsec = 0; 1079 break; 1080 case TSP_HZ: 1081 getnanotime(tsp); 1082 break; 1083 case TSP_USEC: 1084 microtime(&tv); 1085 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1086 break; 1087 case TSP_NSEC: 1088 default: 1089 nanotime(tsp); 1090 break; 1091 } 1092 } 1093 1094 /* 1095 * Set vnode attributes to VNOVAL 1096 */ 1097 void 1098 vattr_null(struct vattr *vap) 1099 { 1100 1101 vap->va_type = VNON; 1102 vap->va_size = VNOVAL; 1103 vap->va_bytes = VNOVAL; 1104 vap->va_mode = VNOVAL; 1105 vap->va_nlink = VNOVAL; 1106 vap->va_uid = VNOVAL; 1107 vap->va_gid = VNOVAL; 1108 vap->va_fsid = VNOVAL; 1109 vap->va_fileid = VNOVAL; 1110 vap->va_blocksize = VNOVAL; 1111 vap->va_rdev = VNOVAL; 1112 vap->va_atime.tv_sec = VNOVAL; 1113 vap->va_atime.tv_nsec = VNOVAL; 1114 vap->va_mtime.tv_sec = VNOVAL; 1115 vap->va_mtime.tv_nsec = VNOVAL; 1116 vap->va_ctime.tv_sec = VNOVAL; 1117 vap->va_ctime.tv_nsec = VNOVAL; 1118 vap->va_birthtime.tv_sec = VNOVAL; 1119 vap->va_birthtime.tv_nsec = VNOVAL; 1120 vap->va_flags = VNOVAL; 1121 vap->va_gen = VNOVAL; 1122 vap->va_vaflags = 0; 1123 } 1124 1125 /* 1126 * Try to reduce the total number of vnodes. 
1127 * 1128 * This routine (and its user) are buggy in at least the following ways: 1129 * - all parameters were picked years ago when RAM sizes were significantly 1130 * smaller 1131 * - it can pick vnodes based on pages used by the vm object, but filesystems 1132 * like ZFS don't use it making the pick broken 1133 * - since ZFS has its own aging policy it gets partially combated by this one 1134 * - a dedicated method should be provided for filesystems to let them decide 1135 * whether the vnode should be recycled 1136 * 1137 * This routine is called when we have too many vnodes. It attempts 1138 * to free <count> vnodes and will potentially free vnodes that still 1139 * have VM backing store (VM backing store is typically the cause 1140 * of a vnode blowout so we want to do this). Therefore, this operation 1141 * is not considered cheap. 1142 * 1143 * A number of conditions may prevent a vnode from being reclaimed. 1144 * the buffer cache may have references on the vnode, a directory 1145 * vnode may still have references due to the namei cache representing 1146 * underlying files, or the vnode may be in active use. It is not 1147 * desirable to reuse such vnodes. These conditions may cause the 1148 * number of vnodes to reach some minimum value regardless of what 1149 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 1150 * 1151 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1152 * entries if this argument is strue 1153 * @param trigger Only reclaim vnodes with fewer than this many resident 1154 * pages. 1155 * @param target How many vnodes to reclaim. 1156 * @return The number of vnodes that were reclaimed. 1157 */ 1158 static int 1159 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1160 { 1161 struct vnode *vp, *mvp; 1162 struct mount *mp; 1163 struct vm_object *object; 1164 u_long done; 1165 bool retried; 1166 1167 mtx_assert(&vnode_list_mtx, MA_OWNED); 1168 1169 retried = false; 1170 done = 0; 1171 1172 mvp = vnode_list_reclaim_marker; 1173 restart: 1174 vp = mvp; 1175 while (done < target) { 1176 vp = TAILQ_NEXT(vp, v_vnodelist); 1177 if (__predict_false(vp == NULL)) 1178 break; 1179 1180 if (__predict_false(vp->v_type == VMARKER)) 1181 continue; 1182 1183 /* 1184 * If it's been deconstructed already, it's still 1185 * referenced, or it exceeds the trigger, skip it. 1186 * Also skip free vnodes. We are trying to make space 1187 * to expand the free list, not reduce it. 1188 */ 1189 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1190 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1191 goto next_iter; 1192 1193 if (vp->v_type == VBAD || vp->v_type == VNON) 1194 goto next_iter; 1195 1196 object = atomic_load_ptr(&vp->v_object); 1197 if (object == NULL || object->resident_page_count > trigger) { 1198 goto next_iter; 1199 } 1200 1201 /* 1202 * Handle races against vnode allocation. Filesystems lock the 1203 * vnode some time after it gets returned from getnewvnode, 1204 * despite type and hold count being manipulated earlier. 1205 * Resorting to checking v_mount restores guarantees present 1206 * before the global list was reworked to contain all vnodes. 
1207 */ 1208 if (!VI_TRYLOCK(vp)) 1209 goto next_iter; 1210 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1211 VI_UNLOCK(vp); 1212 goto next_iter; 1213 } 1214 if (vp->v_mount == NULL) { 1215 VI_UNLOCK(vp); 1216 goto next_iter; 1217 } 1218 vholdl(vp); 1219 VI_UNLOCK(vp); 1220 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1221 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1222 mtx_unlock(&vnode_list_mtx); 1223 1224 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1225 vdrop_recycle(vp); 1226 goto next_iter_unlocked; 1227 } 1228 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1229 vdrop_recycle(vp); 1230 vn_finished_write(mp); 1231 goto next_iter_unlocked; 1232 } 1233 1234 VI_LOCK(vp); 1235 if (vp->v_usecount > 0 || 1236 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1237 (vp->v_object != NULL && vp->v_object->handle == vp && 1238 vp->v_object->resident_page_count > trigger)) { 1239 VOP_UNLOCK(vp); 1240 vdropl_recycle(vp); 1241 vn_finished_write(mp); 1242 goto next_iter_unlocked; 1243 } 1244 counter_u64_add(recycles_count, 1); 1245 vgonel(vp); 1246 VOP_UNLOCK(vp); 1247 vdropl_recycle(vp); 1248 vn_finished_write(mp); 1249 done++; 1250 next_iter_unlocked: 1251 maybe_yield(); 1252 mtx_lock(&vnode_list_mtx); 1253 goto restart; 1254 next_iter: 1255 MPASS(vp->v_type != VMARKER); 1256 if (!should_yield()) 1257 continue; 1258 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1259 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1260 mtx_unlock(&vnode_list_mtx); 1261 kern_yield(PRI_USER); 1262 mtx_lock(&vnode_list_mtx); 1263 goto restart; 1264 } 1265 if (done == 0 && !retried) { 1266 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1267 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1268 retried = true; 1269 goto restart; 1270 } 1271 return (done); 1272 } 1273 1274 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1275 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1276 0, 1277 "limit on vnode free requests per call to the vnlru_free routine"); 1278 1279 /* 1280 * Attempt to reduce the free list by the requested amount. 1281 */ 1282 static int 1283 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1284 { 1285 struct vnode *vp; 1286 struct mount *mp; 1287 int ocount; 1288 bool retried; 1289 1290 mtx_assert(&vnode_list_mtx, MA_OWNED); 1291 if (count > max_vnlru_free) 1292 count = max_vnlru_free; 1293 ocount = count; 1294 retried = false; 1295 vp = mvp; 1296 for (;;) { 1297 if (count == 0) { 1298 break; 1299 } 1300 vp = TAILQ_NEXT(vp, v_vnodelist); 1301 if (__predict_false(vp == NULL)) { 1302 /* 1303 * The free vnode marker can be past eligible vnodes: 1304 * 1. if vdbatch_process trylock failed 1305 * 2. if vtryrecycle failed 1306 * 1307 * If so, start the scan from scratch. 1308 */ 1309 if (!retried && vnlru_read_freevnodes() > 0) { 1310 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1311 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1312 vp = mvp; 1313 retried = true; 1314 continue; 1315 } 1316 1317 /* 1318 * Give up 1319 */ 1320 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1321 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1322 break; 1323 } 1324 if (__predict_false(vp->v_type == VMARKER)) 1325 continue; 1326 if (vp->v_holdcnt > 0) 1327 continue; 1328 /* 1329 * Don't recycle if our vnode is from different type 1330 * of mount point. Note that mp is type-safe, the 1331 * check does not reach unmapped address even if 1332 * vnode is reclaimed. 
1333 */ 1334 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1335 mp->mnt_op != mnt_op) { 1336 continue; 1337 } 1338 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1339 continue; 1340 } 1341 if (!vhold_recycle_free(vp)) 1342 continue; 1343 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1344 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1345 mtx_unlock(&vnode_list_mtx); 1346 /* 1347 * FIXME: ignores the return value, meaning it may be nothing 1348 * got recycled but it claims otherwise to the caller. 1349 * 1350 * Originally the value started being ignored in 2005 with 1351 * 114a1006a8204aa156e1f9ad6476cdff89cada7f . 1352 * 1353 * Respecting the value can run into significant stalls if most 1354 * vnodes belong to one file system and it has writes 1355 * suspended. In presence of many threads and millions of 1356 * vnodes they keep contending on the vnode_list_mtx lock only 1357 * to find vnodes they can't recycle. 1358 * 1359 * The solution would be to pre-check if the vnode is likely to 1360 * be recycle-able, but it needs to happen with the 1361 * vnode_list_mtx lock held. This runs into a problem where 1362 * VOP_GETWRITEMOUNT (currently needed to find out about if 1363 * writes are frozen) can take locks which LOR against it. 1364 * 1365 * Check nullfs for one example (null_getwritemount). 1366 */ 1367 vtryrecycle(vp); 1368 count--; 1369 mtx_lock(&vnode_list_mtx); 1370 vp = mvp; 1371 } 1372 return (ocount - count); 1373 } 1374 1375 static int 1376 vnlru_free_locked(int count) 1377 { 1378 1379 mtx_assert(&vnode_list_mtx, MA_OWNED); 1380 return (vnlru_free_impl(count, NULL, vnode_list_free_marker)); 1381 } 1382 1383 void 1384 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1385 { 1386 1387 MPASS(mnt_op != NULL); 1388 MPASS(mvp != NULL); 1389 VNPASS(mvp->v_type == VMARKER, mvp); 1390 mtx_lock(&vnode_list_mtx); 1391 vnlru_free_impl(count, mnt_op, mvp); 1392 mtx_unlock(&vnode_list_mtx); 1393 } 1394 1395 struct vnode * 1396 vnlru_alloc_marker(void) 1397 { 1398 struct vnode *mvp; 1399 1400 mvp = vn_alloc_marker(NULL); 1401 mtx_lock(&vnode_list_mtx); 1402 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1403 mtx_unlock(&vnode_list_mtx); 1404 return (mvp); 1405 } 1406 1407 void 1408 vnlru_free_marker(struct vnode *mvp) 1409 { 1410 mtx_lock(&vnode_list_mtx); 1411 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1412 mtx_unlock(&vnode_list_mtx); 1413 vn_free_marker(mvp); 1414 } 1415 1416 static void 1417 vnlru_recalc(void) 1418 { 1419 1420 mtx_assert(&vnode_list_mtx, MA_OWNED); 1421 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1422 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1423 vlowat = vhiwat / 2; 1424 } 1425 1426 /* 1427 * Attempt to recycle vnodes in a context that is always safe to block. 1428 * Calling vlrurecycle() from the bowels of filesystem code has some 1429 * interesting deadlock problems. 1430 */ 1431 static struct proc *vnlruproc; 1432 static int vnlruproc_sig; 1433 1434 /* 1435 * The main freevnodes counter is only updated when threads requeue their vnode 1436 * batches. CPUs are conditionally walked to compute a more accurate total. 1437 * 1438 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1439 * at any given moment can still exceed slop, but it should not be by significant 1440 * margin in practice. 
1441 */ 1442 #define VNLRU_FREEVNODES_SLOP 126 1443 1444 static void __noinline 1445 vfs_freevnodes_rollup(int8_t *lfreevnodes) 1446 { 1447 1448 atomic_add_long(&freevnodes, *lfreevnodes); 1449 *lfreevnodes = 0; 1450 critical_exit(); 1451 } 1452 1453 static __inline void 1454 vfs_freevnodes_inc(void) 1455 { 1456 int8_t *lfreevnodes; 1457 1458 critical_enter(); 1459 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1460 (*lfreevnodes)++; 1461 if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP)) 1462 vfs_freevnodes_rollup(lfreevnodes); 1463 else 1464 critical_exit(); 1465 } 1466 1467 static __inline void 1468 vfs_freevnodes_dec(void) 1469 { 1470 int8_t *lfreevnodes; 1471 1472 critical_enter(); 1473 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1474 (*lfreevnodes)--; 1475 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP)) 1476 vfs_freevnodes_rollup(lfreevnodes); 1477 else 1478 critical_exit(); 1479 } 1480 1481 static u_long 1482 vnlru_read_freevnodes(void) 1483 { 1484 long slop, rfreevnodes; 1485 int cpu; 1486 1487 rfreevnodes = atomic_load_long(&freevnodes); 1488 1489 if (rfreevnodes > freevnodes_old) 1490 slop = rfreevnodes - freevnodes_old; 1491 else 1492 slop = freevnodes_old - rfreevnodes; 1493 if (slop < VNLRU_FREEVNODES_SLOP) 1494 return (rfreevnodes >= 0 ? rfreevnodes : 0); 1495 freevnodes_old = rfreevnodes; 1496 CPU_FOREACH(cpu) { 1497 freevnodes_old += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes; 1498 } 1499 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1500 } 1501 1502 static bool 1503 vnlru_under(u_long rnumvnodes, u_long limit) 1504 { 1505 u_long rfreevnodes, space; 1506 1507 if (__predict_false(rnumvnodes > desiredvnodes)) 1508 return (true); 1509 1510 space = desiredvnodes - rnumvnodes; 1511 if (space < limit) { 1512 rfreevnodes = vnlru_read_freevnodes(); 1513 if (rfreevnodes > wantfreevnodes) 1514 space += rfreevnodes - wantfreevnodes; 1515 } 1516 return (space < limit); 1517 } 1518 1519 static bool 1520 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1521 { 1522 long rfreevnodes, space; 1523 1524 if (__predict_false(rnumvnodes > desiredvnodes)) 1525 return (true); 1526 1527 space = desiredvnodes - rnumvnodes; 1528 if (space < limit) { 1529 rfreevnodes = atomic_load_long(&freevnodes); 1530 if (rfreevnodes > wantfreevnodes) 1531 space += rfreevnodes - wantfreevnodes; 1532 } 1533 return (space < limit); 1534 } 1535 1536 static void 1537 vnlru_kick(void) 1538 { 1539 1540 mtx_assert(&vnode_list_mtx, MA_OWNED); 1541 if (vnlruproc_sig == 0) { 1542 vnlruproc_sig = 1; 1543 wakeup(vnlruproc); 1544 } 1545 } 1546 1547 static void 1548 vnlru_proc(void) 1549 { 1550 u_long rnumvnodes, rfreevnodes, target; 1551 unsigned long onumvnodes; 1552 int done, force, trigger, usevnodes; 1553 bool reclaim_nc_src, want_reread; 1554 1555 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1556 SHUTDOWN_PRI_FIRST); 1557 1558 force = 0; 1559 want_reread = false; 1560 for (;;) { 1561 kproc_suspend_check(vnlruproc); 1562 mtx_lock(&vnode_list_mtx); 1563 rnumvnodes = atomic_load_long(&numvnodes); 1564 1565 if (want_reread) { 1566 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1567 want_reread = false; 1568 } 1569 1570 /* 1571 * If numvnodes is too large (due to desiredvnodes being 1572 * adjusted using its sysctl, or emergency growth), first 1573 * try to reduce it by discarding from the free list. 
1574 */ 1575 if (rnumvnodes > desiredvnodes) { 1576 vnlru_free_locked(rnumvnodes - desiredvnodes); 1577 rnumvnodes = atomic_load_long(&numvnodes); 1578 } 1579 /* 1580 * Sleep if the vnode cache is in a good state. This is 1581 * when it is not over-full and has space for about a 4% 1582 * or 9% expansion (by growing its size or inexcessively 1583 * reducing its free list). Otherwise, try to reclaim 1584 * space for a 10% expansion. 1585 */ 1586 if (vstir && force == 0) { 1587 force = 1; 1588 vstir = 0; 1589 } 1590 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1591 vnlruproc_sig = 0; 1592 wakeup(&vnlruproc_sig); 1593 msleep(vnlruproc, &vnode_list_mtx, 1594 PVFS|PDROP, "vlruwt", hz); 1595 continue; 1596 } 1597 rfreevnodes = vnlru_read_freevnodes(); 1598 1599 onumvnodes = rnumvnodes; 1600 /* 1601 * Calculate parameters for recycling. These are the same 1602 * throughout the loop to give some semblance of fairness. 1603 * The trigger point is to avoid recycling vnodes with lots 1604 * of resident pages. We aren't trying to free memory; we 1605 * are trying to recycle or at least free vnodes. 1606 */ 1607 if (rnumvnodes <= desiredvnodes) 1608 usevnodes = rnumvnodes - rfreevnodes; 1609 else 1610 usevnodes = rnumvnodes; 1611 if (usevnodes <= 0) 1612 usevnodes = 1; 1613 /* 1614 * The trigger value is chosen to give a conservatively 1615 * large value to ensure that it alone doesn't prevent 1616 * making progress. The value can easily be so large that 1617 * it is effectively infinite in some congested and 1618 * misconfigured cases, and this is necessary. Normally 1619 * it is about 8 to 100 (pages), which is quite large. 1620 */ 1621 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1622 if (force < 2) 1623 trigger = vsmalltrigger; 1624 reclaim_nc_src = force >= 3; 1625 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1626 target = target / 10 + 1; 1627 done = vlrureclaim(reclaim_nc_src, trigger, target); 1628 mtx_unlock(&vnode_list_mtx); 1629 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1630 uma_reclaim(UMA_RECLAIM_DRAIN); 1631 if (done == 0) { 1632 if (force == 0 || force == 1) { 1633 force = 2; 1634 continue; 1635 } 1636 if (force == 2) { 1637 force = 3; 1638 continue; 1639 } 1640 want_reread = true; 1641 force = 0; 1642 vnlru_nowhere++; 1643 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1644 } else { 1645 want_reread = true; 1646 kern_yield(PRI_USER); 1647 } 1648 } 1649 } 1650 1651 static struct kproc_desc vnlru_kp = { 1652 "vnlru", 1653 vnlru_proc, 1654 &vnlruproc 1655 }; 1656 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1657 &vnlru_kp); 1658 1659 /* 1660 * Routines having to do with the management of the vnode table. 1661 */ 1662 1663 /* 1664 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1665 * before we actually vgone(). This function must be called with the vnode 1666 * held to prevent the vnode from being returned to the free list midway 1667 * through vgone(). 1668 */ 1669 static int 1670 vtryrecycle(struct vnode *vp) 1671 { 1672 struct mount *vnmp; 1673 1674 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1675 VNPASS(vp->v_holdcnt > 0, vp); 1676 /* 1677 * This vnode may found and locked via some other list, if so we 1678 * can't recycle it yet. 
1679 */ 1680 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1681 CTR2(KTR_VFS, 1682 "%s: impossible to recycle, vp %p lock is already held", 1683 __func__, vp); 1684 vdrop_recycle(vp); 1685 return (EWOULDBLOCK); 1686 } 1687 /* 1688 * Don't recycle if its filesystem is being suspended. 1689 */ 1690 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1691 VOP_UNLOCK(vp); 1692 CTR2(KTR_VFS, 1693 "%s: impossible to recycle, cannot start the write for %p", 1694 __func__, vp); 1695 vdrop_recycle(vp); 1696 return (EBUSY); 1697 } 1698 /* 1699 * If we got this far, we need to acquire the interlock and see if 1700 * anyone picked up this vnode from another list. If not, we will 1701 * mark it with DOOMED via vgonel() so that anyone who does find it 1702 * will skip over it. 1703 */ 1704 VI_LOCK(vp); 1705 if (vp->v_usecount) { 1706 VOP_UNLOCK(vp); 1707 vdropl_recycle(vp); 1708 vn_finished_write(vnmp); 1709 CTR2(KTR_VFS, 1710 "%s: impossible to recycle, %p is already referenced", 1711 __func__, vp); 1712 return (EBUSY); 1713 } 1714 if (!VN_IS_DOOMED(vp)) { 1715 counter_u64_add(recycles_free_count, 1); 1716 vgonel(vp); 1717 } 1718 VOP_UNLOCK(vp); 1719 vdropl_recycle(vp); 1720 vn_finished_write(vnmp); 1721 return (0); 1722 } 1723 1724 /* 1725 * Allocate a new vnode. 1726 * 1727 * The operation never returns an error. Returning an error was disabled 1728 * in r145385 (dated 2005) with the following comment: 1729 * 1730 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1731 * 1732 * Given the age of this commit (almost 15 years at the time of writing this 1733 * comment) restoring the ability to fail requires a significant audit of 1734 * all codepaths. 1735 * 1736 * The routine can try to free a vnode or stall for up to 1 second waiting for 1737 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1738 */ 1739 static u_long vn_alloc_cyclecount; 1740 static u_long vn_alloc_sleeps; 1741 1742 SYSCTL_ULONG(_vfs, OID_AUTO, vnode_alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1743 "Number of times vnode allocation blocked waiting on vnlru"); 1744 1745 static struct vnode * __noinline 1746 vn_alloc_hard(struct mount *mp) 1747 { 1748 u_long rnumvnodes, rfreevnodes; 1749 1750 mtx_lock(&vnode_list_mtx); 1751 rnumvnodes = atomic_load_long(&numvnodes); 1752 if (rnumvnodes + 1 < desiredvnodes) { 1753 vn_alloc_cyclecount = 0; 1754 goto alloc; 1755 } 1756 rfreevnodes = vnlru_read_freevnodes(); 1757 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1758 vn_alloc_cyclecount = 0; 1759 vstir = 1; 1760 } 1761 /* 1762 * Grow the vnode cache if it will not be above its target max 1763 * after growing. Otherwise, if the free list is nonempty, try 1764 * to reclaim 1 item from it before growing the cache (possibly 1765 * above its target max if the reclamation failed or is delayed). 1766 * Otherwise, wait for some space. In all cases, schedule 1767 * vnlru_proc() if we are getting short of space. The watermarks 1768 * should be chosen so that we never wait or even reclaim from 1769 * the free list to below its target minimum. 1770 */ 1771 if (vnlru_free_locked(1) > 0) 1772 goto alloc; 1773 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1774 /* 1775 * Wait for space for a new vnode. 
1776 		 */
1777 		vnlru_kick();
1778 		vn_alloc_sleeps++;
1779 		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
1780 		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
1781 		    vnlru_read_freevnodes() > 1)
1782 			vnlru_free_locked(1);
1783 	}
1784 alloc:
1785 	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
1786 	if (vnlru_under(rnumvnodes, vlowat))
1787 		vnlru_kick();
1788 	mtx_unlock(&vnode_list_mtx);
1789 	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
1790 }
1791 
1792 static struct vnode *
1793 vn_alloc(struct mount *mp)
1794 {
1795 	u_long rnumvnodes;
1796 
1797 	if (__predict_false(vn_alloc_cyclecount != 0))
1798 		return (vn_alloc_hard(mp));
1799 	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
1800 	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
1801 		atomic_subtract_long(&numvnodes, 1);
1802 		return (vn_alloc_hard(mp));
1803 	}
1804 
1805 	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
1806 }
1807 
1808 static void
1809 vn_free(struct vnode *vp)
1810 {
1811 
1812 	atomic_subtract_long(&numvnodes, 1);
1813 	uma_zfree_smr(vnode_zone, vp);
1814 }
1815 
1816 /*
1817  * Return the next vnode from the free list.
1818  */
1819 int
1820 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
1821     struct vnode **vpp)
1822 {
1823 	struct vnode *vp;
1824 	struct thread *td;
1825 	struct lock_object *lo;
1826 
1827 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
1828 
1829 	KASSERT(vops->registered,
1830 	    ("%s: not registered vector op %p\n", __func__, vops));
1831 	cache_validate_vop_vector(mp, vops);
1832 
1833 	td = curthread;
1834 	if (td->td_vp_reserved != NULL) {
1835 		vp = td->td_vp_reserved;
1836 		td->td_vp_reserved = NULL;
1837 	} else {
1838 		vp = vn_alloc(mp);
1839 	}
1840 	counter_u64_add(vnodes_created, 1);
1841 
1842 	vn_set_state(vp, VSTATE_UNINITIALIZED);
1843 
1844 	/*
1845 	 * Locks are given the generic name "vnode" when created.
1846 	 * Follow the historic practice of using the filesystem
1847 	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
1848 	 *
1849 	 * Locks live in a witness group keyed on their name. Thus,
1850 	 * when a lock is renamed, it must also move from the witness
1851 	 * group of its old name to the witness group of its new name.
1852 	 *
1853 	 * The change only needs to be made when the vnode moves
1854 	 * from one filesystem type to another. We ensure that each
1855 	 * filesystem uses a single static name pointer for its tag so
1856 	 * that we can compare pointers rather than doing a strcmp().
1857 	 */
1858 	lo = &vp->v_vnlock->lock_object;
1859 #ifdef WITNESS
1860 	if (lo->lo_name != tag) {
1861 #endif
1862 		lo->lo_name = tag;
1863 #ifdef WITNESS
1864 		WITNESS_DESTROY(lo);
1865 		WITNESS_INIT(lo, tag);
1866 	}
1867 #endif
1868 	/*
1869 	 * By default, don't allow shared locks unless filesystems opt-in.
1870 	 */
1871 	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
1872 	/*
1873 	 * Finalize various vnode identity bits.
1874 */ 1875 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1876 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1877 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1878 vp->v_type = VNON; 1879 vp->v_op = vops; 1880 vp->v_irflag = 0; 1881 v_init_counters(vp); 1882 vn_seqc_init(vp); 1883 vp->v_bufobj.bo_ops = &buf_ops_bio; 1884 #ifdef DIAGNOSTIC 1885 if (mp == NULL && vops != &dead_vnodeops) 1886 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1887 #endif 1888 #ifdef MAC 1889 mac_vnode_init(vp); 1890 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1891 mac_vnode_associate_singlelabel(mp, vp); 1892 #endif 1893 if (mp != NULL) { 1894 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1895 } 1896 1897 /* 1898 * For the filesystems which do not use vfs_hash_insert(), 1899 * still initialize v_hash to have vfs_hash_index() useful. 1900 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1901 * its own hashing. 1902 */ 1903 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1904 1905 *vpp = vp; 1906 return (0); 1907 } 1908 1909 void 1910 getnewvnode_reserve(void) 1911 { 1912 struct thread *td; 1913 1914 td = curthread; 1915 MPASS(td->td_vp_reserved == NULL); 1916 td->td_vp_reserved = vn_alloc(NULL); 1917 } 1918 1919 void 1920 getnewvnode_drop_reserve(void) 1921 { 1922 struct thread *td; 1923 1924 td = curthread; 1925 if (td->td_vp_reserved != NULL) { 1926 vn_free(td->td_vp_reserved); 1927 td->td_vp_reserved = NULL; 1928 } 1929 } 1930 1931 static void __noinline 1932 freevnode(struct vnode *vp) 1933 { 1934 struct bufobj *bo; 1935 1936 /* 1937 * The vnode has been marked for destruction, so free it. 1938 * 1939 * The vnode will be returned to the zone where it will 1940 * normally remain until it is needed for another vnode. We 1941 * need to cleanup (or verify that the cleanup has already 1942 * been done) any residual data left from its current use 1943 * so as not to contaminate the freshly allocated vnode. 1944 */ 1945 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1946 /* 1947 * Paired with vgone. 1948 */ 1949 vn_seqc_write_end_free(vp); 1950 1951 bo = &vp->v_bufobj; 1952 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1953 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1954 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1955 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1956 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1957 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1958 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1959 ("clean blk trie not empty")); 1960 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1961 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1962 ("dirty blk trie not empty")); 1963 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1964 ("Dangling rangelock waiters")); 1965 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1966 ("Leaked inactivation")); 1967 VI_UNLOCK(vp); 1968 cache_assert_no_entries(vp); 1969 1970 #ifdef MAC 1971 mac_vnode_destroy(vp); 1972 #endif 1973 if (vp->v_pollinfo != NULL) { 1974 /* 1975 * Use LK_NOWAIT to shut up witness about the lock. We may get 1976 * here while having another vnode locked when trying to 1977 * satisfy a lookup and needing to recycle. 
1978 */ 1979 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 1980 destroy_vpollinfo(vp->v_pollinfo); 1981 VOP_UNLOCK(vp); 1982 vp->v_pollinfo = NULL; 1983 } 1984 vp->v_mountedhere = NULL; 1985 vp->v_unpcb = NULL; 1986 vp->v_rdev = NULL; 1987 vp->v_fifoinfo = NULL; 1988 vp->v_iflag = 0; 1989 vp->v_vflag = 0; 1990 bo->bo_flag = 0; 1991 vn_free(vp); 1992 } 1993 1994 /* 1995 * Delete from old mount point vnode list, if on one. 1996 */ 1997 static void 1998 delmntque(struct vnode *vp) 1999 { 2000 struct mount *mp; 2001 2002 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2003 2004 mp = vp->v_mount; 2005 MNT_ILOCK(mp); 2006 VI_LOCK(vp); 2007 vp->v_mount = NULL; 2008 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2009 ("bad mount point vnode list size")); 2010 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2011 mp->mnt_nvnodelistsize--; 2012 MNT_REL(mp); 2013 MNT_IUNLOCK(mp); 2014 /* 2015 * The caller expects the interlock to be still held. 2016 */ 2017 ASSERT_VI_LOCKED(vp, __func__); 2018 } 2019 2020 static int 2021 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2022 { 2023 2024 KASSERT(vp->v_mount == NULL, 2025 ("insmntque: vnode already on per mount vnode list")); 2026 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2027 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2028 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2029 } else { 2030 KASSERT(!dtr, 2031 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2032 __func__)); 2033 } 2034 2035 /* 2036 * We acquire the vnode interlock early to ensure that the 2037 * vnode cannot be recycled by another process releasing a 2038 * holdcnt on it before we get it on both the vnode list 2039 * and the active vnode list. The mount mutex protects only 2040 * manipulation of the vnode list and the vnode freelist 2041 * mutex protects only manipulation of the active vnode list. 2042 * Hence the need to hold the vnode interlock throughout. 2043 */ 2044 MNT_ILOCK(mp); 2045 VI_LOCK(vp); 2046 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2047 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2048 mp->mnt_nvnodelistsize == 0)) && 2049 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2050 VI_UNLOCK(vp); 2051 MNT_IUNLOCK(mp); 2052 if (dtr) { 2053 vp->v_data = NULL; 2054 vp->v_op = &dead_vnodeops; 2055 vgone(vp); 2056 vput(vp); 2057 } 2058 return (EBUSY); 2059 } 2060 vp->v_mount = mp; 2061 MNT_REF(mp); 2062 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2063 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2064 ("neg mount point vnode list size")); 2065 mp->mnt_nvnodelistsize++; 2066 VI_UNLOCK(vp); 2067 MNT_IUNLOCK(mp); 2068 return (0); 2069 } 2070 2071 /* 2072 * Insert into list of vnodes for the new mount point, if available. 2073 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2074 * leaves handling of the vnode to the caller. 2075 */ 2076 int 2077 insmntque(struct vnode *vp, struct mount *mp) 2078 { 2079 return (insmntque1_int(vp, mp, true)); 2080 } 2081 2082 int 2083 insmntque1(struct vnode *vp, struct mount *mp) 2084 { 2085 return (insmntque1_int(vp, mp, false)); 2086 } 2087 2088 /* 2089 * Flush out and invalidate all buffers associated with a bufobj 2090 * Called with the underlying object locked. 
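 *
 * As an illustration, a caller which wants dirty data preserved passes
 * V_SAVE (e.g. vinvalbuf(vp, V_SAVE, 0, 0)), which writes dirty buffers
 * back before invalidating them; passing 0 for flags throws the buffers
 * away without the write-back.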
2091 */ 2092 int 2093 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2094 { 2095 int error; 2096 2097 BO_LOCK(bo); 2098 if (flags & V_SAVE) { 2099 error = bufobj_wwait(bo, slpflag, slptimeo); 2100 if (error) { 2101 BO_UNLOCK(bo); 2102 return (error); 2103 } 2104 if (bo->bo_dirty.bv_cnt > 0) { 2105 BO_UNLOCK(bo); 2106 do { 2107 error = BO_SYNC(bo, MNT_WAIT); 2108 } while (error == ERELOOKUP); 2109 if (error != 0) 2110 return (error); 2111 BO_LOCK(bo); 2112 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2113 BO_UNLOCK(bo); 2114 return (EBUSY); 2115 } 2116 } 2117 } 2118 /* 2119 * If you alter this loop please notice that interlock is dropped and 2120 * reacquired in flushbuflist. Special care is needed to ensure that 2121 * no race conditions occur from this. 2122 */ 2123 do { 2124 error = flushbuflist(&bo->bo_clean, 2125 flags, bo, slpflag, slptimeo); 2126 if (error == 0 && !(flags & V_CLEANONLY)) 2127 error = flushbuflist(&bo->bo_dirty, 2128 flags, bo, slpflag, slptimeo); 2129 if (error != 0 && error != EAGAIN) { 2130 BO_UNLOCK(bo); 2131 return (error); 2132 } 2133 } while (error != 0); 2134 2135 /* 2136 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2137 * have write I/O in-progress but if there is a VM object then the 2138 * VM object can also have read-I/O in-progress. 2139 */ 2140 do { 2141 bufobj_wwait(bo, 0, 0); 2142 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2143 BO_UNLOCK(bo); 2144 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2145 BO_LOCK(bo); 2146 } 2147 } while (bo->bo_numoutput > 0); 2148 BO_UNLOCK(bo); 2149 2150 /* 2151 * Destroy the copy in the VM cache, too. 2152 */ 2153 if (bo->bo_object != NULL && 2154 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2155 VM_OBJECT_WLOCK(bo->bo_object); 2156 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2157 OBJPR_CLEANONLY : 0); 2158 VM_OBJECT_WUNLOCK(bo->bo_object); 2159 } 2160 2161 #ifdef INVARIANTS 2162 BO_LOCK(bo); 2163 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2164 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2165 bo->bo_clean.bv_cnt > 0)) 2166 panic("vinvalbuf: flush failed"); 2167 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2168 bo->bo_dirty.bv_cnt > 0) 2169 panic("vinvalbuf: flush dirty failed"); 2170 BO_UNLOCK(bo); 2171 #endif 2172 return (0); 2173 } 2174 2175 /* 2176 * Flush out and invalidate all buffers associated with a vnode. 2177 * Called with the underlying object locked. 2178 */ 2179 int 2180 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2181 { 2182 2183 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2184 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2185 if (vp->v_object != NULL && vp->v_object->handle != vp) 2186 return (0); 2187 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2188 } 2189 2190 /* 2191 * Flush out buffers on the specified list. 2192 * 2193 */ 2194 static int 2195 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2196 int slptimeo) 2197 { 2198 struct buf *bp, *nbp; 2199 int retval, error; 2200 daddr_t lblkno; 2201 b_xflags_t xflags; 2202 2203 ASSERT_BO_WLOCKED(bo); 2204 2205 retval = 0; 2206 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2207 /* 2208 * If we are flushing both V_NORMAL and V_ALT buffers then 2209 * do not skip any buffers. If we are flushing only V_NORMAL 2210 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2211 * flushing only V_ALT buffers then skip buffers not marked 2212 * as BX_ALTDATA. 2213 */ 2214 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2215 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2216 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2217 continue; 2218 } 2219 if (nbp != NULL) { 2220 lblkno = nbp->b_lblkno; 2221 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2222 } 2223 retval = EAGAIN; 2224 error = BUF_TIMELOCK(bp, 2225 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2226 "flushbuf", slpflag, slptimeo); 2227 if (error) { 2228 BO_LOCK(bo); 2229 return (error != ENOLCK ? error : EAGAIN); 2230 } 2231 KASSERT(bp->b_bufobj == bo, 2232 ("bp %p wrong b_bufobj %p should be %p", 2233 bp, bp->b_bufobj, bo)); 2234 /* 2235 * XXX Since there are no node locks for NFS, I 2236 * believe there is a slight chance that a delayed 2237 * write will occur while sleeping just above, so 2238 * check for it. 2239 */ 2240 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2241 (flags & V_SAVE)) { 2242 bremfree(bp); 2243 bp->b_flags |= B_ASYNC; 2244 bwrite(bp); 2245 BO_LOCK(bo); 2246 return (EAGAIN); /* XXX: why not loop ? */ 2247 } 2248 bremfree(bp); 2249 bp->b_flags |= (B_INVAL | B_RELBUF); 2250 bp->b_flags &= ~B_ASYNC; 2251 brelse(bp); 2252 BO_LOCK(bo); 2253 if (nbp == NULL) 2254 break; 2255 nbp = gbincore(bo, lblkno); 2256 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2257 != xflags) 2258 break; /* nbp invalid */ 2259 } 2260 return (retval); 2261 } 2262 2263 int 2264 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2265 { 2266 struct buf *bp; 2267 int error; 2268 daddr_t lblkno; 2269 2270 ASSERT_BO_LOCKED(bo); 2271 2272 for (lblkno = startn;;) { 2273 again: 2274 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2275 if (bp == NULL || bp->b_lblkno >= endn || 2276 bp->b_lblkno < startn) 2277 break; 2278 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2279 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2280 if (error != 0) { 2281 BO_RLOCK(bo); 2282 if (error == ENOLCK) 2283 goto again; 2284 return (error); 2285 } 2286 KASSERT(bp->b_bufobj == bo, 2287 ("bp %p wrong b_bufobj %p should be %p", 2288 bp, bp->b_bufobj, bo)); 2289 lblkno = bp->b_lblkno + 1; 2290 if ((bp->b_flags & B_MANAGED) == 0) 2291 bremfree(bp); 2292 bp->b_flags |= B_RELBUF; 2293 /* 2294 * In the VMIO case, use the B_NOREUSE flag to hint that the 2295 * pages backing each buffer in the range are unlikely to be 2296 * reused. Dirty buffers will have the hint applied once 2297 * they've been written. 2298 */ 2299 if ((bp->b_flags & B_VMIO) != 0) 2300 bp->b_flags |= B_NOREUSE; 2301 brelse(bp); 2302 BO_RLOCK(bo); 2303 } 2304 return (0); 2305 } 2306 2307 /* 2308 * Truncate a file's buffer and pages to a specified length. This 2309 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2310 * sync activity. 2311 */ 2312 int 2313 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2314 { 2315 struct buf *bp, *nbp; 2316 struct bufobj *bo; 2317 daddr_t startlbn; 2318 2319 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2320 vp, blksize, (uintmax_t)length); 2321 2322 /* 2323 * Round up to the *next* lbn. 
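 * For example (illustrative numbers): with blksize 16384 and length 20000,
 * howmany(20000, 16384) == 2, so invalidation starts at lbn 2 and the
 * partially valid block at lbn 1 is left in place.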
2324 */ 2325 startlbn = howmany(length, blksize); 2326 2327 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2328 2329 bo = &vp->v_bufobj; 2330 restart_unlocked: 2331 BO_LOCK(bo); 2332 2333 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2334 ; 2335 2336 if (length > 0) { 2337 restartsync: 2338 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2339 if (bp->b_lblkno > 0) 2340 continue; 2341 /* 2342 * Since we hold the vnode lock this should only 2343 * fail if we're racing with the buf daemon. 2344 */ 2345 if (BUF_LOCK(bp, 2346 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2347 BO_LOCKPTR(bo)) == ENOLCK) 2348 goto restart_unlocked; 2349 2350 VNASSERT((bp->b_flags & B_DELWRI), vp, 2351 ("buf(%p) on dirty queue without DELWRI", bp)); 2352 2353 bremfree(bp); 2354 bawrite(bp); 2355 BO_LOCK(bo); 2356 goto restartsync; 2357 } 2358 } 2359 2360 bufobj_wwait(bo, 0, 0); 2361 BO_UNLOCK(bo); 2362 vnode_pager_setsize(vp, length); 2363 2364 return (0); 2365 } 2366 2367 /* 2368 * Invalidate the cached pages of a file's buffer within the range of block 2369 * numbers [startlbn, endlbn). 2370 */ 2371 void 2372 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2373 int blksize) 2374 { 2375 struct bufobj *bo; 2376 off_t start, end; 2377 2378 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2379 2380 start = blksize * startlbn; 2381 end = blksize * endlbn; 2382 2383 bo = &vp->v_bufobj; 2384 BO_LOCK(bo); 2385 MPASS(blksize == bo->bo_bsize); 2386 2387 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2388 ; 2389 2390 BO_UNLOCK(bo); 2391 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2392 } 2393 2394 static int 2395 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2396 daddr_t startlbn, daddr_t endlbn) 2397 { 2398 struct buf *bp, *nbp; 2399 bool anyfreed; 2400 2401 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2402 ASSERT_BO_LOCKED(bo); 2403 2404 do { 2405 anyfreed = false; 2406 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2407 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2408 continue; 2409 if (BUF_LOCK(bp, 2410 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2411 BO_LOCKPTR(bo)) == ENOLCK) { 2412 BO_LOCK(bo); 2413 return (EAGAIN); 2414 } 2415 2416 bremfree(bp); 2417 bp->b_flags |= B_INVAL | B_RELBUF; 2418 bp->b_flags &= ~B_ASYNC; 2419 brelse(bp); 2420 anyfreed = true; 2421 2422 BO_LOCK(bo); 2423 if (nbp != NULL && 2424 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2425 nbp->b_vp != vp || 2426 (nbp->b_flags & B_DELWRI) != 0)) 2427 return (EAGAIN); 2428 } 2429 2430 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2431 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2432 continue; 2433 if (BUF_LOCK(bp, 2434 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2435 BO_LOCKPTR(bo)) == ENOLCK) { 2436 BO_LOCK(bo); 2437 return (EAGAIN); 2438 } 2439 bremfree(bp); 2440 bp->b_flags |= B_INVAL | B_RELBUF; 2441 bp->b_flags &= ~B_ASYNC; 2442 brelse(bp); 2443 anyfreed = true; 2444 2445 BO_LOCK(bo); 2446 if (nbp != NULL && 2447 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2448 (nbp->b_vp != vp) || 2449 (nbp->b_flags & B_DELWRI) == 0)) 2450 return (EAGAIN); 2451 } 2452 } while (anyfreed); 2453 return (0); 2454 } 2455 2456 static void 2457 buf_vlist_remove(struct buf *bp) 2458 { 2459 struct bufv *bv; 2460 b_xflags_t flags; 2461 2462 flags = bp->b_xflags; 2463 2464 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2465 ASSERT_BO_WLOCKED(bp->b_bufobj); 2466 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2467 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2468 ("%s: buffer %p has invalid queue state", __func__, bp)); 2469 2470 if ((flags & BX_VNDIRTY) != 0) 2471 bv = &bp->b_bufobj->bo_dirty; 2472 else 2473 bv = &bp->b_bufobj->bo_clean; 2474 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2475 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2476 bv->bv_cnt--; 2477 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2478 } 2479 2480 /* 2481 * Add the buffer to the sorted clean or dirty block list. 2482 * 2483 * NOTE: xflags is passed as a constant, optimizing this inline function! 2484 */ 2485 static void 2486 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2487 { 2488 struct bufv *bv; 2489 struct buf *n; 2490 int error; 2491 2492 ASSERT_BO_WLOCKED(bo); 2493 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2494 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2495 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2496 ("dead bo %p", bo)); 2497 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2498 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2499 bp->b_xflags |= xflags; 2500 if (xflags & BX_VNDIRTY) 2501 bv = &bo->bo_dirty; 2502 else 2503 bv = &bo->bo_clean; 2504 2505 /* 2506 * Keep the list ordered. Optimize empty list insertion. Assume 2507 * we tend to grow at the tail so lookup_le should usually be cheaper 2508 * than _ge. 2509 */ 2510 if (bv->bv_cnt == 0 || 2511 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2512 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2513 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2514 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2515 else 2516 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2517 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2518 if (error) 2519 panic("buf_vlist_add: Preallocated nodes insufficient."); 2520 bv->bv_cnt++; 2521 } 2522 2523 /* 2524 * Look up a buffer using the buffer tries. 2525 */ 2526 struct buf * 2527 gbincore(struct bufobj *bo, daddr_t lblkno) 2528 { 2529 struct buf *bp; 2530 2531 ASSERT_BO_LOCKED(bo); 2532 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2533 if (bp != NULL) 2534 return (bp); 2535 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2536 } 2537 2538 /* 2539 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2540 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2541 * stability of the result. Like other lockless lookups, the found buf may 2542 * already be invalid by the time this function returns. 2543 */ 2544 struct buf * 2545 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2546 { 2547 struct buf *bp; 2548 2549 ASSERT_BO_UNLOCKED(bo); 2550 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2551 if (bp != NULL) 2552 return (bp); 2553 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2554 } 2555 2556 /* 2557 * Associate a buffer with a vnode. 2558 */ 2559 void 2560 bgetvp(struct vnode *vp, struct buf *bp) 2561 { 2562 struct bufobj *bo; 2563 2564 bo = &vp->v_bufobj; 2565 ASSERT_BO_WLOCKED(bo); 2566 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2567 2568 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2569 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2570 ("bgetvp: bp already attached! %p", bp)); 2571 2572 vhold(vp); 2573 bp->b_vp = vp; 2574 bp->b_bufobj = bo; 2575 /* 2576 * Insert onto list for new vnode. 
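 * A freshly associated buffer always starts out on the clean list;
 * reassignbuf() moves it to the dirty list later if B_DELWRI gets set.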
2577 */ 2578 buf_vlist_add(bp, bo, BX_VNCLEAN); 2579 } 2580 2581 /* 2582 * Disassociate a buffer from a vnode. 2583 */ 2584 void 2585 brelvp(struct buf *bp) 2586 { 2587 struct bufobj *bo; 2588 struct vnode *vp; 2589 2590 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2591 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2592 2593 /* 2594 * Delete from old vnode list, if on one. 2595 */ 2596 vp = bp->b_vp; /* XXX */ 2597 bo = bp->b_bufobj; 2598 BO_LOCK(bo); 2599 buf_vlist_remove(bp); 2600 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2601 bo->bo_flag &= ~BO_ONWORKLST; 2602 mtx_lock(&sync_mtx); 2603 LIST_REMOVE(bo, bo_synclist); 2604 syncer_worklist_len--; 2605 mtx_unlock(&sync_mtx); 2606 } 2607 bp->b_vp = NULL; 2608 bp->b_bufobj = NULL; 2609 BO_UNLOCK(bo); 2610 vdrop(vp); 2611 } 2612 2613 /* 2614 * Add an item to the syncer work queue. 2615 */ 2616 static void 2617 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2618 { 2619 int slot; 2620 2621 ASSERT_BO_WLOCKED(bo); 2622 2623 mtx_lock(&sync_mtx); 2624 if (bo->bo_flag & BO_ONWORKLST) 2625 LIST_REMOVE(bo, bo_synclist); 2626 else { 2627 bo->bo_flag |= BO_ONWORKLST; 2628 syncer_worklist_len++; 2629 } 2630 2631 if (delay > syncer_maxdelay - 2) 2632 delay = syncer_maxdelay - 2; 2633 slot = (syncer_delayno + delay) & syncer_mask; 2634 2635 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2636 mtx_unlock(&sync_mtx); 2637 } 2638 2639 static int 2640 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2641 { 2642 int error, len; 2643 2644 mtx_lock(&sync_mtx); 2645 len = syncer_worklist_len - sync_vnode_count; 2646 mtx_unlock(&sync_mtx); 2647 error = SYSCTL_OUT(req, &len, sizeof(len)); 2648 return (error); 2649 } 2650 2651 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2652 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2653 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2654 2655 static struct proc *updateproc; 2656 static void sched_sync(void); 2657 static struct kproc_desc up_kp = { 2658 "syncer", 2659 sched_sync, 2660 &updateproc 2661 }; 2662 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2663 2664 static int 2665 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2666 { 2667 struct vnode *vp; 2668 struct mount *mp; 2669 2670 *bo = LIST_FIRST(slp); 2671 if (*bo == NULL) 2672 return (0); 2673 vp = bo2vnode(*bo); 2674 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2675 return (1); 2676 /* 2677 * We use vhold in case the vnode does not 2678 * successfully sync. vhold prevents the vnode from 2679 * going away when we unlock the sync_mtx so that 2680 * we can acquire the vnode interlock. 2681 */ 2682 vholdl(vp); 2683 mtx_unlock(&sync_mtx); 2684 VI_UNLOCK(vp); 2685 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2686 vdrop(vp); 2687 mtx_lock(&sync_mtx); 2688 return (*bo == LIST_FIRST(slp)); 2689 } 2690 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2691 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2692 ("suspended mp syncing vp %p", vp)); 2693 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2694 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2695 VOP_UNLOCK(vp); 2696 vn_finished_write(mp); 2697 BO_LOCK(*bo); 2698 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2699 /* 2700 * Put us back on the worklist. The worklist 2701 * routine will remove us from our current 2702 * position and then add us back in at a later 2703 * position. 
2704 		 */
2705 		vn_syncer_add_to_worklist(*bo, syncdelay);
2706 	}
2707 	BO_UNLOCK(*bo);
2708 	vdrop(vp);
2709 	mtx_lock(&sync_mtx);
2710 	return (0);
2711 }
2712 
2713 static int first_printf = 1;
2714 
2715 /*
2716  * System filesystem synchronizer daemon.
2717  */
2718 static void
2719 sched_sync(void)
2720 {
2721 	struct synclist *next, *slp;
2722 	struct bufobj *bo;
2723 	long starttime;
2724 	struct thread *td = curthread;
2725 	int last_work_seen;
2726 	int net_worklist_len;
2727 	int syncer_final_iter;
2728 	int error;
2729 
2730 	last_work_seen = 0;
2731 	syncer_final_iter = 0;
2732 	syncer_state = SYNCER_RUNNING;
2733 	starttime = time_uptime;
2734 	td->td_pflags |= TDP_NORUNNINGBUF;
2735 
2736 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2737 	    SHUTDOWN_PRI_LAST);
2738 
2739 	mtx_lock(&sync_mtx);
2740 	for (;;) {
2741 		if (syncer_state == SYNCER_FINAL_DELAY &&
2742 		    syncer_final_iter == 0) {
2743 			mtx_unlock(&sync_mtx);
2744 			kproc_suspend_check(td->td_proc);
2745 			mtx_lock(&sync_mtx);
2746 		}
2747 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
2748 		if (syncer_state != SYNCER_RUNNING &&
2749 		    starttime != time_uptime) {
2750 			if (first_printf) {
2751 				printf("\nSyncing disks, vnodes remaining... ");
2752 				first_printf = 0;
2753 			}
2754 			printf("%d ", net_worklist_len);
2755 		}
2756 		starttime = time_uptime;
2757 
2758 		/*
2759 		 * Push files whose dirty time has expired. Be careful
2760 		 * of interrupt race on slp queue.
2761 		 *
2762 		 * Skip over empty worklist slots when shutting down.
2763 		 */
2764 		do {
2765 			slp = &syncer_workitem_pending[syncer_delayno];
2766 			syncer_delayno += 1;
2767 			if (syncer_delayno == syncer_maxdelay)
2768 				syncer_delayno = 0;
2769 			next = &syncer_workitem_pending[syncer_delayno];
2770 			/*
2771 			 * If the worklist has wrapped since
2772 			 * it was emptied of all but syncer vnodes,
2773 			 * switch to the FINAL_DELAY state and run
2774 			 * for one more second.
2775 			 */
2776 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
2777 			    net_worklist_len == 0 &&
2778 			    last_work_seen == syncer_delayno) {
2779 				syncer_state = SYNCER_FINAL_DELAY;
2780 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2781 			}
2782 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2783 		    syncer_worklist_len > 0);
2784 
2785 		/*
2786 		 * Keep track of the last time there was anything
2787 		 * on the worklist other than syncer vnodes.
2788 		 * Return to the SHUTTING_DOWN state if any
2789 		 * new work appears.
2790 		 */
2791 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2792 			last_work_seen = syncer_delayno;
2793 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2794 			syncer_state = SYNCER_SHUTTING_DOWN;
2795 		while (!LIST_EMPTY(slp)) {
2796 			error = sync_vnode(slp, &bo, td);
2797 			if (error == 1) {
2798 				LIST_REMOVE(bo, bo_synclist);
2799 				LIST_INSERT_HEAD(next, bo, bo_synclist);
2800 				continue;
2801 			}
2802 
2803 			if (first_printf == 0) {
2804 				/*
2805 				 * Drop the sync mutex, because some watchdog
2806 				 * drivers need to sleep while patting the
2807 				 * watchdog.
2808 				 */
2808 				mtx_unlock(&sync_mtx);
2809 				wdog_kern_pat(WD_LASTVAL);
2810 				mtx_lock(&sync_mtx);
2811 			}
2812 		}
2813 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2814 			syncer_final_iter--;
2815 		/*
2816 		 * The variable rushjob allows the kernel to speed up the
2817 		 * processing of the filesystem syncer process. A rushjob
2818 		 * value of N tells the filesystem syncer to process the next
2819 		 * N seconds worth of work on its queue ASAP.
Currently rushjob 2820 * is used by the soft update code to speed up the filesystem 2821 * syncer process when the incore state is getting so far 2822 * ahead of the disk that the kernel memory pool is being 2823 * threatened with exhaustion. 2824 */ 2825 if (rushjob > 0) { 2826 rushjob -= 1; 2827 continue; 2828 } 2829 /* 2830 * Just sleep for a short period of time between 2831 * iterations when shutting down to allow some I/O 2832 * to happen. 2833 * 2834 * If it has taken us less than a second to process the 2835 * current work, then wait. Otherwise start right over 2836 * again. We can still lose time if any single round 2837 * takes more than two seconds, but it does not really 2838 * matter as we are just trying to generally pace the 2839 * filesystem activity. 2840 */ 2841 if (syncer_state != SYNCER_RUNNING || 2842 time_uptime == starttime) { 2843 thread_lock(td); 2844 sched_prio(td, PPAUSE); 2845 thread_unlock(td); 2846 } 2847 if (syncer_state != SYNCER_RUNNING) 2848 cv_timedwait(&sync_wakeup, &sync_mtx, 2849 hz / SYNCER_SHUTDOWN_SPEEDUP); 2850 else if (time_uptime == starttime) 2851 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2852 } 2853 } 2854 2855 /* 2856 * Request the syncer daemon to speed up its work. 2857 * We never push it to speed up more than half of its 2858 * normal turn time, otherwise it could take over the cpu. 2859 */ 2860 int 2861 speedup_syncer(void) 2862 { 2863 int ret = 0; 2864 2865 mtx_lock(&sync_mtx); 2866 if (rushjob < syncdelay / 2) { 2867 rushjob += 1; 2868 stat_rush_requests += 1; 2869 ret = 1; 2870 } 2871 mtx_unlock(&sync_mtx); 2872 cv_broadcast(&sync_wakeup); 2873 return (ret); 2874 } 2875 2876 /* 2877 * Tell the syncer to speed up its work and run though its work 2878 * list several times, then tell it to shut down. 2879 */ 2880 static void 2881 syncer_shutdown(void *arg, int howto) 2882 { 2883 2884 if (howto & RB_NOSYNC) 2885 return; 2886 mtx_lock(&sync_mtx); 2887 syncer_state = SYNCER_SHUTTING_DOWN; 2888 rushjob = 0; 2889 mtx_unlock(&sync_mtx); 2890 cv_broadcast(&sync_wakeup); 2891 kproc_shutdown(arg, howto); 2892 } 2893 2894 void 2895 syncer_suspend(void) 2896 { 2897 2898 syncer_shutdown(updateproc, 0); 2899 } 2900 2901 void 2902 syncer_resume(void) 2903 { 2904 2905 mtx_lock(&sync_mtx); 2906 first_printf = 1; 2907 syncer_state = SYNCER_RUNNING; 2908 mtx_unlock(&sync_mtx); 2909 cv_broadcast(&sync_wakeup); 2910 kproc_resume(updateproc); 2911 } 2912 2913 /* 2914 * Move the buffer between the clean and dirty lists of its vnode. 2915 */ 2916 void 2917 reassignbuf(struct buf *bp) 2918 { 2919 struct vnode *vp; 2920 struct bufobj *bo; 2921 int delay; 2922 #ifdef INVARIANTS 2923 struct bufv *bv; 2924 #endif 2925 2926 vp = bp->b_vp; 2927 bo = bp->b_bufobj; 2928 2929 KASSERT((bp->b_flags & B_PAGING) == 0, 2930 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2931 2932 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2933 bp, bp->b_vp, bp->b_flags); 2934 2935 BO_LOCK(bo); 2936 buf_vlist_remove(bp); 2937 2938 /* 2939 * If dirty, put on list of dirty buffers; otherwise insert onto list 2940 * of clean buffers. 
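 * If the bufobj is not yet on the syncer worklist, the delay used to queue
 * it depends on what the vnode backs: directories use dirdelay, device
 * vnodes (VCHR) use metadelay and everything else uses filedelay.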
2941 */ 2942 if (bp->b_flags & B_DELWRI) { 2943 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2944 switch (vp->v_type) { 2945 case VDIR: 2946 delay = dirdelay; 2947 break; 2948 case VCHR: 2949 delay = metadelay; 2950 break; 2951 default: 2952 delay = filedelay; 2953 } 2954 vn_syncer_add_to_worklist(bo, delay); 2955 } 2956 buf_vlist_add(bp, bo, BX_VNDIRTY); 2957 } else { 2958 buf_vlist_add(bp, bo, BX_VNCLEAN); 2959 2960 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2961 mtx_lock(&sync_mtx); 2962 LIST_REMOVE(bo, bo_synclist); 2963 syncer_worklist_len--; 2964 mtx_unlock(&sync_mtx); 2965 bo->bo_flag &= ~BO_ONWORKLST; 2966 } 2967 } 2968 #ifdef INVARIANTS 2969 bv = &bo->bo_clean; 2970 bp = TAILQ_FIRST(&bv->bv_hd); 2971 KASSERT(bp == NULL || bp->b_bufobj == bo, 2972 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2973 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2974 KASSERT(bp == NULL || bp->b_bufobj == bo, 2975 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2976 bv = &bo->bo_dirty; 2977 bp = TAILQ_FIRST(&bv->bv_hd); 2978 KASSERT(bp == NULL || bp->b_bufobj == bo, 2979 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2980 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2981 KASSERT(bp == NULL || bp->b_bufobj == bo, 2982 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2983 #endif 2984 BO_UNLOCK(bo); 2985 } 2986 2987 static void 2988 v_init_counters(struct vnode *vp) 2989 { 2990 2991 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2992 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2993 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2994 2995 refcount_init(&vp->v_holdcnt, 1); 2996 refcount_init(&vp->v_usecount, 1); 2997 } 2998 2999 /* 3000 * Grab a particular vnode from the free list, increment its 3001 * reference count and lock it. VIRF_DOOMED is set if the vnode 3002 * is being destroyed. Only callers who specify LK_RETRY will 3003 * see doomed vnodes. If inactive processing was delayed in 3004 * vput try to do it here. 3005 * 3006 * usecount is manipulated using atomics without holding any locks. 3007 * 3008 * holdcnt can be manipulated using atomics without holding any locks, 3009 * except when transitioning 1<->0, in which case the interlock is held. 3010 * 3011 * Consumers which don't guarantee liveness of the vnode can use SMR to 3012 * try to get a reference. Note this operation can fail since the vnode 3013 * may be awaiting getting freed by the time they get to it. 
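 *
 * A minimal sketch of the expected calling pattern (the lockless lookup
 * itself is the caller's business and only illustrative here):
 *
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<restart the lookup>;
 *	error = vget_finish(vp, LK_SHARED, vs);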
3014 */ 3015 enum vgetstate 3016 vget_prep_smr(struct vnode *vp) 3017 { 3018 enum vgetstate vs; 3019 3020 VFS_SMR_ASSERT_ENTERED(); 3021 3022 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3023 vs = VGET_USECOUNT; 3024 } else { 3025 if (vhold_smr(vp)) 3026 vs = VGET_HOLDCNT; 3027 else 3028 vs = VGET_NONE; 3029 } 3030 return (vs); 3031 } 3032 3033 enum vgetstate 3034 vget_prep(struct vnode *vp) 3035 { 3036 enum vgetstate vs; 3037 3038 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3039 vs = VGET_USECOUNT; 3040 } else { 3041 vhold(vp); 3042 vs = VGET_HOLDCNT; 3043 } 3044 return (vs); 3045 } 3046 3047 void 3048 vget_abort(struct vnode *vp, enum vgetstate vs) 3049 { 3050 3051 switch (vs) { 3052 case VGET_USECOUNT: 3053 vrele(vp); 3054 break; 3055 case VGET_HOLDCNT: 3056 vdrop(vp); 3057 break; 3058 default: 3059 __assert_unreachable(); 3060 } 3061 } 3062 3063 int 3064 vget(struct vnode *vp, int flags) 3065 { 3066 enum vgetstate vs; 3067 3068 vs = vget_prep(vp); 3069 return (vget_finish(vp, flags, vs)); 3070 } 3071 3072 int 3073 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3074 { 3075 int error; 3076 3077 if ((flags & LK_INTERLOCK) != 0) 3078 ASSERT_VI_LOCKED(vp, __func__); 3079 else 3080 ASSERT_VI_UNLOCKED(vp, __func__); 3081 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3082 VNPASS(vp->v_holdcnt > 0, vp); 3083 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3084 3085 error = vn_lock(vp, flags); 3086 if (__predict_false(error != 0)) { 3087 vget_abort(vp, vs); 3088 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3089 vp); 3090 return (error); 3091 } 3092 3093 vget_finish_ref(vp, vs); 3094 return (0); 3095 } 3096 3097 void 3098 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3099 { 3100 int old; 3101 3102 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3103 VNPASS(vp->v_holdcnt > 0, vp); 3104 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3105 3106 if (vs == VGET_USECOUNT) 3107 return; 3108 3109 /* 3110 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3111 * the vnode around. Otherwise someone else lended their hold count and 3112 * we have to drop ours. 3113 */ 3114 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3115 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3116 if (old != 0) { 3117 #ifdef INVARIANTS 3118 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3119 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3120 #else 3121 refcount_release(&vp->v_holdcnt); 3122 #endif 3123 } 3124 } 3125 3126 void 3127 vref(struct vnode *vp) 3128 { 3129 enum vgetstate vs; 3130 3131 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3132 vs = vget_prep(vp); 3133 vget_finish_ref(vp, vs); 3134 } 3135 3136 void 3137 vrefact(struct vnode *vp) 3138 { 3139 3140 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3141 #ifdef INVARIANTS 3142 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3143 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3144 #else 3145 refcount_acquire(&vp->v_usecount); 3146 #endif 3147 } 3148 3149 void 3150 vlazy(struct vnode *vp) 3151 { 3152 struct mount *mp; 3153 3154 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3155 3156 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3157 return; 3158 /* 3159 * We may get here for inactive routines after the vnode got doomed. 
3160 */ 3161 if (VN_IS_DOOMED(vp)) 3162 return; 3163 mp = vp->v_mount; 3164 mtx_lock(&mp->mnt_listmtx); 3165 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3166 vp->v_mflag |= VMP_LAZYLIST; 3167 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3168 mp->mnt_lazyvnodelistsize++; 3169 } 3170 mtx_unlock(&mp->mnt_listmtx); 3171 } 3172 3173 static void 3174 vunlazy(struct vnode *vp) 3175 { 3176 struct mount *mp; 3177 3178 ASSERT_VI_LOCKED(vp, __func__); 3179 VNPASS(!VN_IS_DOOMED(vp), vp); 3180 3181 mp = vp->v_mount; 3182 mtx_lock(&mp->mnt_listmtx); 3183 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3184 /* 3185 * Don't remove the vnode from the lazy list if another thread 3186 * has increased the hold count. It may have re-enqueued the 3187 * vnode to the lazy list and is now responsible for its 3188 * removal. 3189 */ 3190 if (vp->v_holdcnt == 0) { 3191 vp->v_mflag &= ~VMP_LAZYLIST; 3192 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3193 mp->mnt_lazyvnodelistsize--; 3194 } 3195 mtx_unlock(&mp->mnt_listmtx); 3196 } 3197 3198 /* 3199 * This routine is only meant to be called from vgonel prior to dooming 3200 * the vnode. 3201 */ 3202 static void 3203 vunlazy_gone(struct vnode *vp) 3204 { 3205 struct mount *mp; 3206 3207 ASSERT_VOP_ELOCKED(vp, __func__); 3208 ASSERT_VI_LOCKED(vp, __func__); 3209 VNPASS(!VN_IS_DOOMED(vp), vp); 3210 3211 if (vp->v_mflag & VMP_LAZYLIST) { 3212 mp = vp->v_mount; 3213 mtx_lock(&mp->mnt_listmtx); 3214 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3215 vp->v_mflag &= ~VMP_LAZYLIST; 3216 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3217 mp->mnt_lazyvnodelistsize--; 3218 mtx_unlock(&mp->mnt_listmtx); 3219 } 3220 } 3221 3222 static void 3223 vdefer_inactive(struct vnode *vp) 3224 { 3225 3226 ASSERT_VI_LOCKED(vp, __func__); 3227 VNPASS(vp->v_holdcnt > 0, vp); 3228 if (VN_IS_DOOMED(vp)) { 3229 vdropl(vp); 3230 return; 3231 } 3232 if (vp->v_iflag & VI_DEFINACT) { 3233 VNPASS(vp->v_holdcnt > 1, vp); 3234 vdropl(vp); 3235 return; 3236 } 3237 if (vp->v_usecount > 0) { 3238 vp->v_iflag &= ~VI_OWEINACT; 3239 vdropl(vp); 3240 return; 3241 } 3242 vlazy(vp); 3243 vp->v_iflag |= VI_DEFINACT; 3244 VI_UNLOCK(vp); 3245 atomic_add_long(&deferred_inact, 1); 3246 } 3247 3248 static void 3249 vdefer_inactive_unlocked(struct vnode *vp) 3250 { 3251 3252 VI_LOCK(vp); 3253 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3254 vdropl(vp); 3255 return; 3256 } 3257 vdefer_inactive(vp); 3258 } 3259 3260 enum vput_op { VRELE, VPUT, VUNREF }; 3261 3262 /* 3263 * Handle ->v_usecount transitioning to 0. 3264 * 3265 * By releasing the last usecount we take ownership of the hold count which 3266 * provides liveness of the vnode, meaning we have to vdrop. 3267 * 3268 * For all vnodes we may need to perform inactive processing. It requires an 3269 * exclusive lock on the vnode, while it is legal to call here with only a 3270 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3271 * inactive processing gets deferred to the syncer. 3272 * 3273 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3274 * on the lock being held all the way until VOP_INACTIVE. This in particular 3275 * happens with UFS which adds half-constructed vnodes to the hash, where they 3276 * can be found by other code. 
3277 */ 3278 static void 3279 vput_final(struct vnode *vp, enum vput_op func) 3280 { 3281 int error; 3282 bool want_unlock; 3283 3284 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3285 VNPASS(vp->v_holdcnt > 0, vp); 3286 3287 VI_LOCK(vp); 3288 3289 /* 3290 * By the time we got here someone else might have transitioned 3291 * the count back to > 0. 3292 */ 3293 if (vp->v_usecount > 0) 3294 goto out; 3295 3296 /* 3297 * If the vnode is doomed vgone already performed inactive processing 3298 * (if needed). 3299 */ 3300 if (VN_IS_DOOMED(vp)) 3301 goto out; 3302 3303 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3304 goto out; 3305 3306 if (vp->v_iflag & VI_DOINGINACT) 3307 goto out; 3308 3309 /* 3310 * Locking operations here will drop the interlock and possibly the 3311 * vnode lock, opening a window where the vnode can get doomed all the 3312 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3313 * perform inactive. 3314 */ 3315 vp->v_iflag |= VI_OWEINACT; 3316 want_unlock = false; 3317 error = 0; 3318 switch (func) { 3319 case VRELE: 3320 switch (VOP_ISLOCKED(vp)) { 3321 case LK_EXCLUSIVE: 3322 break; 3323 case LK_EXCLOTHER: 3324 case 0: 3325 want_unlock = true; 3326 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3327 VI_LOCK(vp); 3328 break; 3329 default: 3330 /* 3331 * The lock has at least one sharer, but we have no way 3332 * to conclude whether this is us. Play it safe and 3333 * defer processing. 3334 */ 3335 error = EAGAIN; 3336 break; 3337 } 3338 break; 3339 case VPUT: 3340 want_unlock = true; 3341 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3342 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3343 LK_NOWAIT); 3344 VI_LOCK(vp); 3345 } 3346 break; 3347 case VUNREF: 3348 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3349 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3350 VI_LOCK(vp); 3351 } 3352 break; 3353 } 3354 if (error == 0) { 3355 if (func == VUNREF) { 3356 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3357 ("recursive vunref")); 3358 vp->v_vflag |= VV_UNREF; 3359 } 3360 for (;;) { 3361 error = vinactive(vp); 3362 if (want_unlock) 3363 VOP_UNLOCK(vp); 3364 if (error != ERELOOKUP || !want_unlock) 3365 break; 3366 VOP_LOCK(vp, LK_EXCLUSIVE); 3367 } 3368 if (func == VUNREF) 3369 vp->v_vflag &= ~VV_UNREF; 3370 vdropl(vp); 3371 } else { 3372 vdefer_inactive(vp); 3373 } 3374 return; 3375 out: 3376 if (func == VPUT) 3377 VOP_UNLOCK(vp); 3378 vdropl(vp); 3379 } 3380 3381 /* 3382 * Decrement ->v_usecount for a vnode. 3383 * 3384 * Releasing the last use count requires additional processing, see vput_final 3385 * above for details. 3386 * 3387 * Comment above each variant denotes lock state on entry and exit. 
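 *
 * For instance (illustrative only): a caller that did vget(vp, LK_EXCLUSIVE)
 * and is done with both the reference and the lock uses vput(vp); one that
 * only holds a reference uses vrele(vp); one that wants to keep the vnode
 * locked while giving up its use reference uses vunref(vp).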
3388 */ 3389 3390 /* 3391 * in: any 3392 * out: same as passed in 3393 */ 3394 void 3395 vrele(struct vnode *vp) 3396 { 3397 3398 ASSERT_VI_UNLOCKED(vp, __func__); 3399 if (!refcount_release(&vp->v_usecount)) 3400 return; 3401 vput_final(vp, VRELE); 3402 } 3403 3404 /* 3405 * in: locked 3406 * out: unlocked 3407 */ 3408 void 3409 vput(struct vnode *vp) 3410 { 3411 3412 ASSERT_VOP_LOCKED(vp, __func__); 3413 ASSERT_VI_UNLOCKED(vp, __func__); 3414 if (!refcount_release(&vp->v_usecount)) { 3415 VOP_UNLOCK(vp); 3416 return; 3417 } 3418 vput_final(vp, VPUT); 3419 } 3420 3421 /* 3422 * in: locked 3423 * out: locked 3424 */ 3425 void 3426 vunref(struct vnode *vp) 3427 { 3428 3429 ASSERT_VOP_LOCKED(vp, __func__); 3430 ASSERT_VI_UNLOCKED(vp, __func__); 3431 if (!refcount_release(&vp->v_usecount)) 3432 return; 3433 vput_final(vp, VUNREF); 3434 } 3435 3436 void 3437 vhold(struct vnode *vp) 3438 { 3439 int old; 3440 3441 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3442 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3443 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3444 ("%s: wrong hold count %d", __func__, old)); 3445 if (old == 0) 3446 vfs_freevnodes_dec(); 3447 } 3448 3449 void 3450 vholdnz(struct vnode *vp) 3451 { 3452 3453 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3454 #ifdef INVARIANTS 3455 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3456 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3457 ("%s: wrong hold count %d", __func__, old)); 3458 #else 3459 atomic_add_int(&vp->v_holdcnt, 1); 3460 #endif 3461 } 3462 3463 /* 3464 * Grab a hold count unless the vnode is freed. 3465 * 3466 * Only use this routine if vfs smr is the only protection you have against 3467 * freeing the vnode. 3468 * 3469 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3470 * is not set. After the flag is set the vnode becomes immutable to anyone but 3471 * the thread which managed to set the flag. 3472 * 3473 * It may be tempting to replace the loop with: 3474 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3475 * if (count & VHOLD_NO_SMR) { 3476 * backpedal and error out; 3477 * } 3478 * 3479 * However, while this is more performant, it hinders debugging by eliminating 3480 * the previously mentioned invariant. 3481 */ 3482 bool 3483 vhold_smr(struct vnode *vp) 3484 { 3485 int count; 3486 3487 VFS_SMR_ASSERT_ENTERED(); 3488 3489 count = atomic_load_int(&vp->v_holdcnt); 3490 for (;;) { 3491 if (count & VHOLD_NO_SMR) { 3492 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3493 ("non-zero hold count with flags %d\n", count)); 3494 return (false); 3495 } 3496 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3497 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3498 if (count == 0) 3499 vfs_freevnodes_dec(); 3500 return (true); 3501 } 3502 } 3503 } 3504 3505 /* 3506 * Hold a free vnode for recycling. 3507 * 3508 * Note: vnode_init references this comment. 3509 * 3510 * Attempts to recycle only need the global vnode list lock and have no use for 3511 * SMR. 3512 * 3513 * However, vnodes get inserted into the global list before they get fully 3514 * initialized and stay there until UMA decides to free the memory. This in 3515 * particular means the target can be found before it becomes usable and after 3516 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3517 * VHOLD_NO_SMR. 3518 * 3519 * Note: the vnode may gain more references after we transition the count 0->1. 
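 * This is tolerated because the recycling path re-checks v_usecount under
 * the vnode interlock (see vtryrecycle()) and backs off with EBUSY if
 * someone else picked the vnode up in the meantime.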
3520  */
3521 static bool
3522 vhold_recycle_free(struct vnode *vp)
3523 {
3524 	int count;
3525 
3526 	mtx_assert(&vnode_list_mtx, MA_OWNED);
3527 
3528 	count = atomic_load_int(&vp->v_holdcnt);
3529 	for (;;) {
3530 		if (count & VHOLD_NO_SMR) {
3531 			VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3532 			    ("non-zero hold count with flags %d\n", count));
3533 			return (false);
3534 		}
3535 		VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3536 		if (count > 0) {
3537 			return (false);
3538 		}
3539 		if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3540 			vfs_freevnodes_dec();
3541 			return (true);
3542 		}
3543 	}
3544 }
3545 
3546 static void __noinline
3547 vdbatch_process(struct vdbatch *vd)
3548 {
3549 	struct vnode *vp;
3550 	int i;
3551 
3552 	mtx_assert(&vd->lock, MA_OWNED);
3553 	MPASS(curthread->td_pinned > 0);
3554 	MPASS(vd->index == VDBATCH_SIZE);
3555 
3556 	/*
3557 	 * Attempt to requeue the passed batch, but give up easily.
3558 	 *
3559 	 * Despite batching, the mechanism is prone to transient *significant*
3560 	 * lock contention, where vnode_list_mtx becomes the primary bottleneck
3561 	 * if multiple CPUs get here (one real-world example is highly parallel
3562 	 * do-nothing make, which will stat *tons* of vnodes). Since it is
3563 	 * quasi-LRU (read: not that great even if fully honoured), just dodge
3564 	 * the problem. Parties which don't like it are welcome to implement
3565 	 * something better.
3566 	 */
3567 	critical_enter();
3568 	if (mtx_trylock(&vnode_list_mtx)) {
3569 		for (i = 0; i < VDBATCH_SIZE; i++) {
3570 			vp = vd->tab[i];
3571 			vd->tab[i] = NULL;
3572 			TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3573 			TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3574 			MPASS(vp->v_dbatchcpu != NOCPU);
3575 			vp->v_dbatchcpu = NOCPU;
3576 		}
3577 		mtx_unlock(&vnode_list_mtx);
3578 	} else {
3579 		counter_u64_add(vnode_skipped_requeues, 1);
3580 
3581 		for (i = 0; i < VDBATCH_SIZE; i++) {
3582 			vp = vd->tab[i];
3583 			vd->tab[i] = NULL;
3584 			MPASS(vp->v_dbatchcpu != NOCPU);
3585 			vp->v_dbatchcpu = NOCPU;
3586 		}
3587 	}
3588 	vd->index = 0;
3589 	critical_exit();
3590 }
3591 
3592 static void
3593 vdbatch_enqueue(struct vnode *vp)
3594 {
3595 	struct vdbatch *vd;
3596 
3597 	ASSERT_VI_LOCKED(vp, __func__);
3598 	VNPASS(!VN_IS_DOOMED(vp), vp);
3599 
3600 	if (vp->v_dbatchcpu != NOCPU) {
3601 		VI_UNLOCK(vp);
3602 		return;
3603 	}
3604 
3605 	sched_pin();
3606 	vd = DPCPU_PTR(vd);
3607 	mtx_lock(&vd->lock);
3608 	MPASS(vd->index < VDBATCH_SIZE);
3609 	MPASS(vd->tab[vd->index] == NULL);
3610 	/*
3611 	 * A hack: we depend on being pinned so that we know what to put in
3612 	 * ->v_dbatchcpu.
3613 	 */
3614 	vp->v_dbatchcpu = curcpu;
3615 	vd->tab[vd->index] = vp;
3616 	vd->index++;
3617 	VI_UNLOCK(vp);
3618 	if (vd->index == VDBATCH_SIZE)
3619 		vdbatch_process(vd);
3620 	mtx_unlock(&vd->lock);
3621 	sched_unpin();
3622 }
3623 
3624 /*
3625  * This routine must only be called for vnodes which are about to be
3626  * deallocated. Supporting dequeue for arbitrary vnodes would require
3627  * validating that the locked batch matches.
3628 */ 3629 static void 3630 vdbatch_dequeue(struct vnode *vp) 3631 { 3632 struct vdbatch *vd; 3633 int i; 3634 short cpu; 3635 3636 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3637 3638 cpu = vp->v_dbatchcpu; 3639 if (cpu == NOCPU) 3640 return; 3641 3642 vd = DPCPU_ID_PTR(cpu, vd); 3643 mtx_lock(&vd->lock); 3644 for (i = 0; i < vd->index; i++) { 3645 if (vd->tab[i] != vp) 3646 continue; 3647 vp->v_dbatchcpu = NOCPU; 3648 vd->index--; 3649 vd->tab[i] = vd->tab[vd->index]; 3650 vd->tab[vd->index] = NULL; 3651 break; 3652 } 3653 mtx_unlock(&vd->lock); 3654 /* 3655 * Either we dequeued the vnode above or the target CPU beat us to it. 3656 */ 3657 MPASS(vp->v_dbatchcpu == NOCPU); 3658 } 3659 3660 /* 3661 * Drop the hold count of the vnode. If this is the last reference to 3662 * the vnode we place it on the free list unless it has been vgone'd 3663 * (marked VIRF_DOOMED) in which case we will free it. 3664 * 3665 * Because the vnode vm object keeps a hold reference on the vnode if 3666 * there is at least one resident non-cached page, the vnode cannot 3667 * leave the active list without the page cleanup done. 3668 */ 3669 static void __noinline 3670 vdropl_final(struct vnode *vp) 3671 { 3672 3673 ASSERT_VI_LOCKED(vp, __func__); 3674 VNPASS(VN_IS_DOOMED(vp), vp); 3675 /* 3676 * Set the VHOLD_NO_SMR flag. 3677 * 3678 * We may be racing against vhold_smr. If they win we can just pretend 3679 * we never got this far, they will vdrop later. 3680 */ 3681 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3682 vfs_freevnodes_inc(); 3683 VI_UNLOCK(vp); 3684 /* 3685 * We lost the aforementioned race. Any subsequent access is 3686 * invalid as they might have managed to vdropl on their own. 3687 */ 3688 return; 3689 } 3690 /* 3691 * Don't bump freevnodes as this one is going away. 3692 */ 3693 freevnode(vp); 3694 } 3695 3696 void 3697 vdrop(struct vnode *vp) 3698 { 3699 3700 ASSERT_VI_UNLOCKED(vp, __func__); 3701 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3702 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3703 return; 3704 VI_LOCK(vp); 3705 vdropl(vp); 3706 } 3707 3708 static void __always_inline 3709 vdropl_impl(struct vnode *vp, bool enqueue) 3710 { 3711 3712 ASSERT_VI_LOCKED(vp, __func__); 3713 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3714 if (!refcount_release(&vp->v_holdcnt)) { 3715 VI_UNLOCK(vp); 3716 return; 3717 } 3718 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3719 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3720 if (VN_IS_DOOMED(vp)) { 3721 vdropl_final(vp); 3722 return; 3723 } 3724 3725 vfs_freevnodes_inc(); 3726 if (vp->v_mflag & VMP_LAZYLIST) { 3727 vunlazy(vp); 3728 } 3729 3730 if (!enqueue) { 3731 VI_UNLOCK(vp); 3732 return; 3733 } 3734 3735 /* 3736 * Also unlocks the interlock. We can't assert on it as we 3737 * released our hold and by now the vnode might have been 3738 * freed. 3739 */ 3740 vdbatch_enqueue(vp); 3741 } 3742 3743 void 3744 vdropl(struct vnode *vp) 3745 { 3746 3747 vdropl_impl(vp, true); 3748 } 3749 3750 /* 3751 * vdrop a vnode when recycling 3752 * 3753 * This is a special case routine only to be used when recycling, differs from 3754 * regular vdrop by not requeieing the vnode on LRU. 3755 * 3756 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3757 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3758 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3759 * loop which can last for as long as writes are frozen. 
3760 */ 3761 static void 3762 vdropl_recycle(struct vnode *vp) 3763 { 3764 3765 vdropl_impl(vp, false); 3766 } 3767 3768 static void 3769 vdrop_recycle(struct vnode *vp) 3770 { 3771 3772 VI_LOCK(vp); 3773 vdropl_recycle(vp); 3774 } 3775 3776 /* 3777 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3778 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3779 */ 3780 static int 3781 vinactivef(struct vnode *vp) 3782 { 3783 struct vm_object *obj; 3784 int error; 3785 3786 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3787 ASSERT_VI_LOCKED(vp, "vinactive"); 3788 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 3789 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3790 vp->v_iflag |= VI_DOINGINACT; 3791 vp->v_iflag &= ~VI_OWEINACT; 3792 VI_UNLOCK(vp); 3793 /* 3794 * Before moving off the active list, we must be sure that any 3795 * modified pages are converted into the vnode's dirty 3796 * buffers, since these will no longer be checked once the 3797 * vnode is on the inactive list. 3798 * 3799 * The write-out of the dirty pages is asynchronous. At the 3800 * point that VOP_INACTIVE() is called, there could still be 3801 * pending I/O and dirty pages in the object. 3802 */ 3803 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3804 vm_object_mightbedirty(obj)) { 3805 VM_OBJECT_WLOCK(obj); 3806 vm_object_page_clean(obj, 0, 0, 0); 3807 VM_OBJECT_WUNLOCK(obj); 3808 } 3809 error = VOP_INACTIVE(vp); 3810 VI_LOCK(vp); 3811 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 3812 vp->v_iflag &= ~VI_DOINGINACT; 3813 return (error); 3814 } 3815 3816 int 3817 vinactive(struct vnode *vp) 3818 { 3819 3820 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3821 ASSERT_VI_LOCKED(vp, "vinactive"); 3822 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3823 3824 if ((vp->v_iflag & VI_OWEINACT) == 0) 3825 return (0); 3826 if (vp->v_iflag & VI_DOINGINACT) 3827 return (0); 3828 if (vp->v_usecount > 0) { 3829 vp->v_iflag &= ~VI_OWEINACT; 3830 return (0); 3831 } 3832 return (vinactivef(vp)); 3833 } 3834 3835 /* 3836 * Remove any vnodes in the vnode table belonging to mount point mp. 3837 * 3838 * If FORCECLOSE is not specified, there should not be any active ones, 3839 * return error if any are found (nb: this is a user error, not a 3840 * system error). If FORCECLOSE is specified, detach any active vnodes 3841 * that are found. 3842 * 3843 * If WRITECLOSE is set, only flush out regular file vnodes open for 3844 * writing. 3845 * 3846 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3847 * 3848 * `rootrefs' specifies the base reference count for the root vnode 3849 * of this filesystem. The root vnode is considered busy if its 3850 * v_usecount exceeds this value. On a successful return, vflush(, td) 3851 * will call vrele() on the root vnode exactly rootrefs times. 3852 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3853 * be zero. 3854 */ 3855 #ifdef DIAGNOSTIC 3856 static int busyprt = 0; /* print out busy vnodes */ 3857 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3858 #endif 3859 3860 int 3861 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3862 { 3863 struct vnode *vp, *mvp, *rootvp = NULL; 3864 struct vattr vattr; 3865 int busy = 0, error; 3866 3867 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3868 rootrefs, flags); 3869 if (rootrefs > 0) { 3870 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3871 ("vflush: bad args")); 3872 /* 3873 * Get the filesystem root vnode. 
We can vput() it 3874 * immediately, since with rootrefs > 0, it won't go away. 3875 */ 3876 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3877 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3878 __func__, error); 3879 return (error); 3880 } 3881 vput(rootvp); 3882 } 3883 loop: 3884 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3885 vholdl(vp); 3886 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3887 if (error) { 3888 vdrop(vp); 3889 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3890 goto loop; 3891 } 3892 /* 3893 * Skip over a vnodes marked VV_SYSTEM. 3894 */ 3895 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3896 VOP_UNLOCK(vp); 3897 vdrop(vp); 3898 continue; 3899 } 3900 /* 3901 * If WRITECLOSE is set, flush out unlinked but still open 3902 * files (even if open only for reading) and regular file 3903 * vnodes open for writing. 3904 */ 3905 if (flags & WRITECLOSE) { 3906 if (vp->v_object != NULL) { 3907 VM_OBJECT_WLOCK(vp->v_object); 3908 vm_object_page_clean(vp->v_object, 0, 0, 0); 3909 VM_OBJECT_WUNLOCK(vp->v_object); 3910 } 3911 do { 3912 error = VOP_FSYNC(vp, MNT_WAIT, td); 3913 } while (error == ERELOOKUP); 3914 if (error != 0) { 3915 VOP_UNLOCK(vp); 3916 vdrop(vp); 3917 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3918 return (error); 3919 } 3920 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3921 VI_LOCK(vp); 3922 3923 if ((vp->v_type == VNON || 3924 (error == 0 && vattr.va_nlink > 0)) && 3925 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3926 VOP_UNLOCK(vp); 3927 vdropl(vp); 3928 continue; 3929 } 3930 } else 3931 VI_LOCK(vp); 3932 /* 3933 * With v_usecount == 0, all we need to do is clear out the 3934 * vnode data structures and we are done. 3935 * 3936 * If FORCECLOSE is set, forcibly close the vnode. 3937 */ 3938 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3939 vgonel(vp); 3940 } else { 3941 busy++; 3942 #ifdef DIAGNOSTIC 3943 if (busyprt) 3944 vn_printf(vp, "vflush: busy vnode "); 3945 #endif 3946 } 3947 VOP_UNLOCK(vp); 3948 vdropl(vp); 3949 } 3950 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3951 /* 3952 * If just the root vnode is busy, and if its refcount 3953 * is equal to `rootrefs', then go ahead and kill it. 3954 */ 3955 VI_LOCK(rootvp); 3956 KASSERT(busy > 0, ("vflush: not busy")); 3957 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3958 ("vflush: usecount %d < rootrefs %d", 3959 rootvp->v_usecount, rootrefs)); 3960 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3961 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3962 vgone(rootvp); 3963 VOP_UNLOCK(rootvp); 3964 busy = 0; 3965 } else 3966 VI_UNLOCK(rootvp); 3967 } 3968 if (busy) { 3969 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3970 busy); 3971 return (EBUSY); 3972 } 3973 for (; rootrefs > 0; rootrefs--) 3974 vrele(rootvp); 3975 return (0); 3976 } 3977 3978 /* 3979 * Recycle an unused vnode to the front of the free list. 3980 */ 3981 int 3982 vrecycle(struct vnode *vp) 3983 { 3984 int recycled; 3985 3986 VI_LOCK(vp); 3987 recycled = vrecyclel(vp); 3988 VI_UNLOCK(vp); 3989 return (recycled); 3990 } 3991 3992 /* 3993 * vrecycle, with the vp interlock held. 3994 */ 3995 int 3996 vrecyclel(struct vnode *vp) 3997 { 3998 int recycled; 3999 4000 ASSERT_VOP_ELOCKED(vp, __func__); 4001 ASSERT_VI_LOCKED(vp, __func__); 4002 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4003 recycled = 0; 4004 if (vp->v_usecount == 0) { 4005 recycled = 1; 4006 vgonel(vp); 4007 } 4008 return (recycled); 4009 } 4010 4011 /* 4012 * Eliminate all activity associated with a vnode 4013 * in preparation for reuse. 
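 *
 * Illustrative call pattern (an assumed caller, shown only as a sketch;
 * real callers such as vflush() below hold the vnode lock exclusively and
 * a hold reference, as asserted in vgonel()):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vhold(vp);
 *	vgone(vp);		the vnode is now VIRF_DOOMED and uses
 *				dead_vnodeops
 *	VOP_UNLOCK(vp);
 *	vdrop(vp);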
4014 */ 4015 void 4016 vgone(struct vnode *vp) 4017 { 4018 VI_LOCK(vp); 4019 vgonel(vp); 4020 VI_UNLOCK(vp); 4021 } 4022 4023 /* 4024 * Notify upper mounts about reclaimed or unlinked vnode. 4025 */ 4026 void 4027 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4028 { 4029 struct mount *mp; 4030 struct mount_upper_node *ump; 4031 4032 mp = atomic_load_ptr(&vp->v_mount); 4033 if (mp == NULL) 4034 return; 4035 if (TAILQ_EMPTY(&mp->mnt_notify)) 4036 return; 4037 4038 MNT_ILOCK(mp); 4039 mp->mnt_upper_pending++; 4040 KASSERT(mp->mnt_upper_pending > 0, 4041 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4042 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4043 MNT_IUNLOCK(mp); 4044 switch (event) { 4045 case VFS_NOTIFY_UPPER_RECLAIM: 4046 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4047 break; 4048 case VFS_NOTIFY_UPPER_UNLINK: 4049 VFS_UNLINK_LOWERVP(ump->mp, vp); 4050 break; 4051 } 4052 MNT_ILOCK(mp); 4053 } 4054 mp->mnt_upper_pending--; 4055 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4056 mp->mnt_upper_pending == 0) { 4057 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4058 wakeup(&mp->mnt_uppers); 4059 } 4060 MNT_IUNLOCK(mp); 4061 } 4062 4063 /* 4064 * vgone, with the vp interlock held. 4065 */ 4066 static void 4067 vgonel(struct vnode *vp) 4068 { 4069 struct thread *td; 4070 struct mount *mp; 4071 vm_object_t object; 4072 bool active, doinginact, oweinact; 4073 4074 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4075 ASSERT_VI_LOCKED(vp, "vgonel"); 4076 VNASSERT(vp->v_holdcnt, vp, 4077 ("vgonel: vp %p has no reference.", vp)); 4078 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4079 td = curthread; 4080 4081 /* 4082 * Don't vgonel if we're already doomed. 4083 */ 4084 if (VN_IS_DOOMED(vp)) { 4085 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4086 vn_get_state(vp) == VSTATE_DEAD, vp); 4087 return; 4088 } 4089 /* 4090 * Paired with freevnode. 4091 */ 4092 vn_seqc_write_begin_locked(vp); 4093 vunlazy_gone(vp); 4094 vn_irflag_set_locked(vp, VIRF_DOOMED); 4095 vn_set_state(vp, VSTATE_DESTROYING); 4096 4097 /* 4098 * Check to see if the vnode is in use. If so, we have to 4099 * call VOP_CLOSE() and VOP_INACTIVE(). 4100 * 4101 * It could be that VOP_INACTIVE() requested reclamation, in 4102 * which case we should avoid recursion, so check 4103 * VI_DOINGINACT. This is not precise but good enough. 4104 */ 4105 active = vp->v_usecount > 0; 4106 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4107 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4108 4109 /* 4110 * If we need to do inactive VI_OWEINACT will be set. 4111 */ 4112 if (vp->v_iflag & VI_DEFINACT) { 4113 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4114 vp->v_iflag &= ~VI_DEFINACT; 4115 vdropl(vp); 4116 } else { 4117 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4118 VI_UNLOCK(vp); 4119 } 4120 cache_purge_vgone(vp); 4121 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4122 4123 /* 4124 * If purging an active vnode, it must be closed and 4125 * deactivated before being reclaimed. 4126 */ 4127 if (active) 4128 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4129 if (!doinginact) { 4130 do { 4131 if (oweinact || active) { 4132 VI_LOCK(vp); 4133 vinactivef(vp); 4134 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4135 VI_UNLOCK(vp); 4136 } 4137 } while (oweinact); 4138 } 4139 if (vp->v_type == VSOCK) 4140 vfs_unp_reclaim(vp); 4141 4142 /* 4143 * Clean out any buffers associated with the vnode. 4144 * If the flush fails, just toss the buffers. 
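 * (The first vinvalbuf() call below passes V_SAVE, asking for dirty buffers
 * to be written out before invalidation; the 0-flag retry simply discards
 * whatever could not be flushed.)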
4145 */ 4146 mp = NULL; 4147 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4148 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4149 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4150 while (vinvalbuf(vp, 0, 0, 0) != 0) 4151 ; 4152 } 4153 4154 BO_LOCK(&vp->v_bufobj); 4155 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4156 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4157 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4158 vp->v_bufobj.bo_clean.bv_cnt == 0, 4159 ("vp %p bufobj not invalidated", vp)); 4160 4161 /* 4162 * For VMIO bufobj, BO_DEAD is set later, or in 4163 * vm_object_terminate() after the object's page queue is 4164 * flushed. 4165 */ 4166 object = vp->v_bufobj.bo_object; 4167 if (object == NULL) 4168 vp->v_bufobj.bo_flag |= BO_DEAD; 4169 BO_UNLOCK(&vp->v_bufobj); 4170 4171 /* 4172 * Handle the VM part. Tmpfs handles v_object on its own (the 4173 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4174 * should not touch the object borrowed from the lower vnode 4175 * (the handle check). 4176 */ 4177 if (object != NULL && object->type == OBJT_VNODE && 4178 object->handle == vp) 4179 vnode_destroy_vobject(vp); 4180 4181 /* 4182 * Reclaim the vnode. 4183 */ 4184 if (VOP_RECLAIM(vp)) 4185 panic("vgone: cannot reclaim"); 4186 if (mp != NULL) 4187 vn_finished_secondary_write(mp); 4188 VNASSERT(vp->v_object == NULL, vp, 4189 ("vop_reclaim left v_object vp=%p", vp)); 4190 /* 4191 * Clear the advisory locks and wake up waiting threads. 4192 */ 4193 if (vp->v_lockf != NULL) { 4194 (void)VOP_ADVLOCKPURGE(vp); 4195 vp->v_lockf = NULL; 4196 } 4197 /* 4198 * Delete from old mount point vnode list. 4199 */ 4200 if (vp->v_mount == NULL) { 4201 VI_LOCK(vp); 4202 } else { 4203 delmntque(vp); 4204 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4205 } 4206 /* 4207 * Done with purge, reset to the standard lock and invalidate 4208 * the vnode. 4209 */ 4210 vp->v_vnlock = &vp->v_lock; 4211 vp->v_op = &dead_vnodeops; 4212 vp->v_type = VBAD; 4213 vn_set_state(vp, VSTATE_DEAD); 4214 } 4215 4216 /* 4217 * Print out a description of a vnode. 4218 */ 4219 static const char *const vtypename[] = { 4220 [VNON] = "VNON", 4221 [VREG] = "VREG", 4222 [VDIR] = "VDIR", 4223 [VBLK] = "VBLK", 4224 [VCHR] = "VCHR", 4225 [VLNK] = "VLNK", 4226 [VSOCK] = "VSOCK", 4227 [VFIFO] = "VFIFO", 4228 [VBAD] = "VBAD", 4229 [VMARKER] = "VMARKER", 4230 }; 4231 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4232 "vnode type name not added to vtypename"); 4233 4234 static const char *const vstatename[] = { 4235 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4236 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4237 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4238 [VSTATE_DEAD] = "VSTATE_DEAD", 4239 }; 4240 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4241 "vnode state name not added to vstatename"); 4242 4243 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4244 "new hold count flag not added to vn_printf"); 4245 4246 void 4247 vn_printf(struct vnode *vp, const char *fmt, ...) 
4248 { 4249 va_list ap; 4250 char buf[256], buf2[16]; 4251 u_long flags; 4252 u_int holdcnt; 4253 short irflag; 4254 4255 va_start(ap, fmt); 4256 vprintf(fmt, ap); 4257 va_end(ap); 4258 printf("%p: ", (void *)vp); 4259 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4260 vstatename[vp->v_state], vp->v_op); 4261 holdcnt = atomic_load_int(&vp->v_holdcnt); 4262 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4263 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4264 vp->v_seqc_users); 4265 switch (vp->v_type) { 4266 case VDIR: 4267 printf(" mountedhere %p\n", vp->v_mountedhere); 4268 break; 4269 case VCHR: 4270 printf(" rdev %p\n", vp->v_rdev); 4271 break; 4272 case VSOCK: 4273 printf(" socket %p\n", vp->v_unpcb); 4274 break; 4275 case VFIFO: 4276 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4277 break; 4278 default: 4279 printf("\n"); 4280 break; 4281 } 4282 buf[0] = '\0'; 4283 buf[1] = '\0'; 4284 if (holdcnt & VHOLD_NO_SMR) 4285 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4286 printf(" hold count flags (%s)\n", buf + 1); 4287 4288 buf[0] = '\0'; 4289 buf[1] = '\0'; 4290 irflag = vn_irflag_read(vp); 4291 if (irflag & VIRF_DOOMED) 4292 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4293 if (irflag & VIRF_PGREAD) 4294 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4295 if (irflag & VIRF_MOUNTPOINT) 4296 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4297 if (irflag & VIRF_TEXT_REF) 4298 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4299 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4300 if (flags != 0) { 4301 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4302 strlcat(buf, buf2, sizeof(buf)); 4303 } 4304 if (vp->v_vflag & VV_ROOT) 4305 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4306 if (vp->v_vflag & VV_ISTTY) 4307 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4308 if (vp->v_vflag & VV_NOSYNC) 4309 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4310 if (vp->v_vflag & VV_ETERNALDEV) 4311 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4312 if (vp->v_vflag & VV_CACHEDLABEL) 4313 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4314 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4315 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4316 if (vp->v_vflag & VV_COPYONWRITE) 4317 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4318 if (vp->v_vflag & VV_SYSTEM) 4319 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4320 if (vp->v_vflag & VV_PROCDEP) 4321 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4322 if (vp->v_vflag & VV_DELETED) 4323 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4324 if (vp->v_vflag & VV_MD) 4325 strlcat(buf, "|VV_MD", sizeof(buf)); 4326 if (vp->v_vflag & VV_FORCEINSMQ) 4327 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4328 if (vp->v_vflag & VV_READLINK) 4329 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4330 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4331 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4332 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4333 if (flags != 0) { 4334 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4335 strlcat(buf, buf2, sizeof(buf)); 4336 } 4337 if (vp->v_iflag & VI_MOUNT) 4338 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4339 if (vp->v_iflag & VI_DOINGINACT) 4340 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4341 if (vp->v_iflag & VI_OWEINACT) 4342 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4343 if (vp->v_iflag & VI_DEFINACT) 4344 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4345 if (vp->v_iflag & VI_FOPENING) 4346 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4347 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4348 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4349 if (flags != 0) { 4350 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4351 strlcat(buf, buf2, sizeof(buf)); 4352 } 4353 if (vp->v_mflag & VMP_LAZYLIST) 4354 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4355 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4356 if (flags != 0) { 4357 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4358 strlcat(buf, buf2, sizeof(buf)); 4359 } 4360 printf(" flags (%s)", buf + 1); 4361 if (mtx_owned(VI_MTX(vp))) 4362 printf(" VI_LOCKed"); 4363 printf("\n"); 4364 if (vp->v_object != NULL) 4365 printf(" v_object %p ref %d pages %d " 4366 "cleanbuf %d dirtybuf %d\n", 4367 vp->v_object, vp->v_object->ref_count, 4368 vp->v_object->resident_page_count, 4369 vp->v_bufobj.bo_clean.bv_cnt, 4370 vp->v_bufobj.bo_dirty.bv_cnt); 4371 printf(" "); 4372 lockmgr_printinfo(vp->v_vnlock); 4373 if (vp->v_data != NULL) 4374 VOP_PRINT(vp); 4375 } 4376 4377 #ifdef DDB 4378 /* 4379 * List all of the locked vnodes in the system. 4380 * Called when debugging the kernel. 4381 */ 4382 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4383 { 4384 struct mount *mp; 4385 struct vnode *vp; 4386 4387 /* 4388 * Note: because this is DDB, we can't obey the locking semantics 4389 * for these structures, which means we could catch an inconsistent 4390 * state and dereference a nasty pointer. Not much to be done 4391 * about that. 4392 */ 4393 db_printf("Locked vnodes\n"); 4394 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4395 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4396 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4397 vn_printf(vp, "vnode "); 4398 } 4399 } 4400 } 4401 4402 /* 4403 * Show details about the given vnode. 4404 */ 4405 DB_SHOW_COMMAND(vnode, db_show_vnode) 4406 { 4407 struct vnode *vp; 4408 4409 if (!have_addr) 4410 return; 4411 vp = (struct vnode *)addr; 4412 vn_printf(vp, "vnode "); 4413 } 4414 4415 /* 4416 * Show details about the given mount point. 4417 */ 4418 DB_SHOW_COMMAND(mount, db_show_mount) 4419 { 4420 struct mount *mp; 4421 struct vfsopt *opt; 4422 struct statfs *sp; 4423 struct vnode *vp; 4424 char buf[512]; 4425 uint64_t mflags; 4426 u_int flags; 4427 4428 if (!have_addr) { 4429 /* No address given, print short info about all mount points. 
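 * Example ddb(4) interaction (addresses and output are illustrative only):
 *	db> show mount
 *	0xfffff80003abc000 /dev/ada0p2 on / (ufs)
 *	db> show mount 0xfffff80003abc000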
*/ 4430 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4431 db_printf("%p %s on %s (%s)\n", mp, 4432 mp->mnt_stat.f_mntfromname, 4433 mp->mnt_stat.f_mntonname, 4434 mp->mnt_stat.f_fstypename); 4435 if (db_pager_quit) 4436 break; 4437 } 4438 db_printf("\nMore info: show mount <addr>\n"); 4439 return; 4440 } 4441 4442 mp = (struct mount *)addr; 4443 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4444 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4445 4446 buf[0] = '\0'; 4447 mflags = mp->mnt_flag; 4448 #define MNT_FLAG(flag) do { \ 4449 if (mflags & (flag)) { \ 4450 if (buf[0] != '\0') \ 4451 strlcat(buf, ", ", sizeof(buf)); \ 4452 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4453 mflags &= ~(flag); \ 4454 } \ 4455 } while (0) 4456 MNT_FLAG(MNT_RDONLY); 4457 MNT_FLAG(MNT_SYNCHRONOUS); 4458 MNT_FLAG(MNT_NOEXEC); 4459 MNT_FLAG(MNT_NOSUID); 4460 MNT_FLAG(MNT_NFS4ACLS); 4461 MNT_FLAG(MNT_UNION); 4462 MNT_FLAG(MNT_ASYNC); 4463 MNT_FLAG(MNT_SUIDDIR); 4464 MNT_FLAG(MNT_SOFTDEP); 4465 MNT_FLAG(MNT_NOSYMFOLLOW); 4466 MNT_FLAG(MNT_GJOURNAL); 4467 MNT_FLAG(MNT_MULTILABEL); 4468 MNT_FLAG(MNT_ACLS); 4469 MNT_FLAG(MNT_NOATIME); 4470 MNT_FLAG(MNT_NOCLUSTERR); 4471 MNT_FLAG(MNT_NOCLUSTERW); 4472 MNT_FLAG(MNT_SUJ); 4473 MNT_FLAG(MNT_EXRDONLY); 4474 MNT_FLAG(MNT_EXPORTED); 4475 MNT_FLAG(MNT_DEFEXPORTED); 4476 MNT_FLAG(MNT_EXPORTANON); 4477 MNT_FLAG(MNT_EXKERB); 4478 MNT_FLAG(MNT_EXPUBLIC); 4479 MNT_FLAG(MNT_LOCAL); 4480 MNT_FLAG(MNT_QUOTA); 4481 MNT_FLAG(MNT_ROOTFS); 4482 MNT_FLAG(MNT_USER); 4483 MNT_FLAG(MNT_IGNORE); 4484 MNT_FLAG(MNT_UPDATE); 4485 MNT_FLAG(MNT_DELEXPORT); 4486 MNT_FLAG(MNT_RELOAD); 4487 MNT_FLAG(MNT_FORCE); 4488 MNT_FLAG(MNT_SNAPSHOT); 4489 MNT_FLAG(MNT_BYFSID); 4490 #undef MNT_FLAG 4491 if (mflags != 0) { 4492 if (buf[0] != '\0') 4493 strlcat(buf, ", ", sizeof(buf)); 4494 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4495 "0x%016jx", mflags); 4496 } 4497 db_printf(" mnt_flag = %s\n", buf); 4498 4499 buf[0] = '\0'; 4500 flags = mp->mnt_kern_flag; 4501 #define MNT_KERN_FLAG(flag) do { \ 4502 if (flags & (flag)) { \ 4503 if (buf[0] != '\0') \ 4504 strlcat(buf, ", ", sizeof(buf)); \ 4505 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4506 flags &= ~(flag); \ 4507 } \ 4508 } while (0) 4509 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4510 MNT_KERN_FLAG(MNTK_ASYNC); 4511 MNT_KERN_FLAG(MNTK_SOFTDEP); 4512 MNT_KERN_FLAG(MNTK_NOMSYNC); 4513 MNT_KERN_FLAG(MNTK_DRAINING); 4514 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4515 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4516 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4517 MNT_KERN_FLAG(MNTK_NO_IOPF); 4518 MNT_KERN_FLAG(MNTK_RECURSE); 4519 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4520 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4521 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4522 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4523 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4524 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4525 MNT_KERN_FLAG(MNTK_NOASYNC); 4526 MNT_KERN_FLAG(MNTK_UNMOUNT); 4527 MNT_KERN_FLAG(MNTK_MWAIT); 4528 MNT_KERN_FLAG(MNTK_SUSPEND); 4529 MNT_KERN_FLAG(MNTK_SUSPEND2); 4530 MNT_KERN_FLAG(MNTK_SUSPENDED); 4531 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4532 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4533 #undef MNT_KERN_FLAG 4534 if (flags != 0) { 4535 if (buf[0] != '\0') 4536 strlcat(buf, ", ", sizeof(buf)); 4537 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4538 "0x%08x", flags); 4539 } 4540 db_printf(" mnt_kern_flag = %s\n", buf); 4541 4542 db_printf(" mnt_opt = "); 4543 opt = TAILQ_FIRST(mp->mnt_opt); 4544 if (opt != NULL) { 4545 db_printf("%s", opt->name); 4546 opt = TAILQ_NEXT(opt, link); 4547 while (opt != 
NULL) { 4548 db_printf(", %s", opt->name); 4549 opt = TAILQ_NEXT(opt, link); 4550 } 4551 } 4552 db_printf("\n"); 4553 4554 sp = &mp->mnt_stat; 4555 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4556 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4557 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4558 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4559 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4560 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4561 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4562 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4563 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4564 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4565 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4566 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4567 4568 db_printf(" mnt_cred = { uid=%u ruid=%u", 4569 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4570 if (jailed(mp->mnt_cred)) 4571 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4572 db_printf(" }\n"); 4573 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4574 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4575 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4576 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4577 db_printf(" mnt_lazyvnodelistsize = %d\n", 4578 mp->mnt_lazyvnodelistsize); 4579 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4580 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4581 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4582 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4583 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4584 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4585 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4586 db_printf(" mnt_secondary_accwrites = %d\n", 4587 mp->mnt_secondary_accwrites); 4588 db_printf(" mnt_gjprovider = %s\n", 4589 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4590 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4591 4592 db_printf("\n\nList of active vnodes\n"); 4593 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4594 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4595 vn_printf(vp, "vnode "); 4596 if (db_pager_quit) 4597 break; 4598 } 4599 } 4600 db_printf("\n\nList of inactive vnodes\n"); 4601 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4602 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4603 vn_printf(vp, "vnode "); 4604 if (db_pager_quit) 4605 break; 4606 } 4607 } 4608 } 4609 #endif /* DDB */ 4610 4611 /* 4612 * Fill in a struct xvfsconf based on a struct vfsconf. 4613 */ 4614 static int 4615 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4616 { 4617 struct xvfsconf xvfsp; 4618 4619 bzero(&xvfsp, sizeof(xvfsp)); 4620 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4621 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4622 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4623 xvfsp.vfc_flags = vfsp->vfc_flags; 4624 /* 4625 * These are unused in userland, we keep them 4626 * to not break binary compatibility. 
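 * (They are kernel pointers with no meaning across the kernel/user boundary,
 * so they are simply reported as NULL below.)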
4627 */ 4628 xvfsp.vfc_vfsops = NULL; 4629 xvfsp.vfc_next = NULL; 4630 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4631 } 4632 4633 #ifdef COMPAT_FREEBSD32 4634 struct xvfsconf32 { 4635 uint32_t vfc_vfsops; 4636 char vfc_name[MFSNAMELEN]; 4637 int32_t vfc_typenum; 4638 int32_t vfc_refcount; 4639 int32_t vfc_flags; 4640 uint32_t vfc_next; 4641 }; 4642 4643 static int 4644 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4645 { 4646 struct xvfsconf32 xvfsp; 4647 4648 bzero(&xvfsp, sizeof(xvfsp)); 4649 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4650 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4651 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4652 xvfsp.vfc_flags = vfsp->vfc_flags; 4653 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4654 } 4655 #endif 4656 4657 /* 4658 * Top level filesystem related information gathering. 4659 */ 4660 static int 4661 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4662 { 4663 struct vfsconf *vfsp; 4664 int error; 4665 4666 error = 0; 4667 vfsconf_slock(); 4668 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4669 #ifdef COMPAT_FREEBSD32 4670 if (req->flags & SCTL_MASK32) 4671 error = vfsconf2x32(req, vfsp); 4672 else 4673 #endif 4674 error = vfsconf2x(req, vfsp); 4675 if (error) 4676 break; 4677 } 4678 vfsconf_sunlock(); 4679 return (error); 4680 } 4681 4682 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4683 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4684 "S,xvfsconf", "List of all configured filesystems"); 4685 4686 #ifndef BURN_BRIDGES 4687 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4688 4689 static int 4690 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4691 { 4692 int *name = (int *)arg1 - 1; /* XXX */ 4693 u_int namelen = arg2 + 1; /* XXX */ 4694 struct vfsconf *vfsp; 4695 4696 log(LOG_WARNING, "userland calling deprecated sysctl, " 4697 "please rebuild world\n"); 4698 4699 #if 1 || defined(COMPAT_PRELITE2) 4700 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4701 if (namelen == 1) 4702 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4703 #endif 4704 4705 switch (name[1]) { 4706 case VFS_MAXTYPENUM: 4707 if (namelen != 2) 4708 return (ENOTDIR); 4709 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4710 case VFS_CONF: 4711 if (namelen != 3) 4712 return (ENOTDIR); /* overloaded */ 4713 vfsconf_slock(); 4714 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4715 if (vfsp->vfc_typenum == name[2]) 4716 break; 4717 } 4718 vfsconf_sunlock(); 4719 if (vfsp == NULL) 4720 return (EOPNOTSUPP); 4721 #ifdef COMPAT_FREEBSD32 4722 if (req->flags & SCTL_MASK32) 4723 return (vfsconf2x32(req, vfsp)); 4724 else 4725 #endif 4726 return (vfsconf2x(req, vfsp)); 4727 } 4728 return (EOPNOTSUPP); 4729 } 4730 4731 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4732 CTLFLAG_MPSAFE, vfs_sysctl, 4733 "Generic filesystem"); 4734 4735 #if 1 || defined(COMPAT_PRELITE2) 4736 4737 static int 4738 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4739 { 4740 int error; 4741 struct vfsconf *vfsp; 4742 struct ovfsconf ovfs; 4743 4744 vfsconf_slock(); 4745 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4746 bzero(&ovfs, sizeof(ovfs)); 4747 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4748 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4749 ovfs.vfc_index = vfsp->vfc_typenum; 4750 ovfs.vfc_refcount = vfsp->vfc_refcount; 4751 ovfs.vfc_flags = vfsp->vfc_flags; 4752 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4753 if (error != 0) { 4754 vfsconf_sunlock(); 4755 return (error); 4756 } 4757 } 4758 vfsconf_sunlock(); 4759 return (0); 4760 } 4761 4762 #endif /* 1 || COMPAT_PRELITE2 */ 4763 #endif /* !BURN_BRIDGES */ 4764 4765 static void 4766 unmount_or_warn(struct mount *mp) 4767 { 4768 int error; 4769 4770 error = dounmount(mp, MNT_FORCE, curthread); 4771 if (error != 0) { 4772 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4773 if (error == EBUSY) 4774 printf("BUSY)\n"); 4775 else 4776 printf("%d)\n", error); 4777 } 4778 } 4779 4780 /* 4781 * Unmount all filesystems. The list is traversed in reverse order 4782 * of mounting to avoid dependencies. 4783 */ 4784 void 4785 vfs_unmountall(void) 4786 { 4787 struct mount *mp, *tmp; 4788 4789 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4790 4791 /* 4792 * Since this only runs when rebooting, it is not interlocked. 4793 */ 4794 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4795 vfs_ref(mp); 4796 4797 /* 4798 * Forcibly unmounting "/dev" before "/" would prevent clean 4799 * unmount of the latter. 
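 * (rootdevmp is therefore skipped in this pass and unmounted last, after the
 * loop below.)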
4800 */ 4801 if (mp == rootdevmp) 4802 continue; 4803 4804 unmount_or_warn(mp); 4805 } 4806 4807 if (rootdevmp != NULL) 4808 unmount_or_warn(rootdevmp); 4809 } 4810 4811 static void 4812 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4813 { 4814 4815 ASSERT_VI_LOCKED(vp, __func__); 4816 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4817 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4818 vdropl(vp); 4819 return; 4820 } 4821 if (vn_lock(vp, lkflags) == 0) { 4822 VI_LOCK(vp); 4823 vinactive(vp); 4824 VOP_UNLOCK(vp); 4825 vdropl(vp); 4826 return; 4827 } 4828 vdefer_inactive_unlocked(vp); 4829 } 4830 4831 static int 4832 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4833 { 4834 4835 return (vp->v_iflag & VI_DEFINACT); 4836 } 4837 4838 static void __noinline 4839 vfs_periodic_inactive(struct mount *mp, int flags) 4840 { 4841 struct vnode *vp, *mvp; 4842 int lkflags; 4843 4844 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4845 if (flags != MNT_WAIT) 4846 lkflags |= LK_NOWAIT; 4847 4848 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4849 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4850 VI_UNLOCK(vp); 4851 continue; 4852 } 4853 vp->v_iflag &= ~VI_DEFINACT; 4854 vfs_deferred_inactive(vp, lkflags); 4855 } 4856 } 4857 4858 static inline bool 4859 vfs_want_msync(struct vnode *vp) 4860 { 4861 struct vm_object *obj; 4862 4863 /* 4864 * This test may be performed without any locks held. 4865 * We rely on vm_object's type stability. 4866 */ 4867 if (vp->v_vflag & VV_NOSYNC) 4868 return (false); 4869 obj = vp->v_object; 4870 return (obj != NULL && vm_object_mightbedirty(obj)); 4871 } 4872 4873 static int 4874 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4875 { 4876 4877 if (vp->v_vflag & VV_NOSYNC) 4878 return (false); 4879 if (vp->v_iflag & VI_DEFINACT) 4880 return (true); 4881 return (vfs_want_msync(vp)); 4882 } 4883 4884 static void __noinline 4885 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4886 { 4887 struct vnode *vp, *mvp; 4888 struct vm_object *obj; 4889 int lkflags, objflags; 4890 bool seen_defer; 4891 4892 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4893 if (flags != MNT_WAIT) { 4894 lkflags |= LK_NOWAIT; 4895 objflags = OBJPC_NOSYNC; 4896 } else { 4897 objflags = OBJPC_SYNC; 4898 } 4899 4900 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4901 seen_defer = false; 4902 if (vp->v_iflag & VI_DEFINACT) { 4903 vp->v_iflag &= ~VI_DEFINACT; 4904 seen_defer = true; 4905 } 4906 if (!vfs_want_msync(vp)) { 4907 if (seen_defer) 4908 vfs_deferred_inactive(vp, lkflags); 4909 else 4910 VI_UNLOCK(vp); 4911 continue; 4912 } 4913 if (vget(vp, lkflags) == 0) { 4914 obj = vp->v_object; 4915 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4916 VM_OBJECT_WLOCK(obj); 4917 vm_object_page_clean(obj, 0, 0, objflags); 4918 VM_OBJECT_WUNLOCK(obj); 4919 } 4920 vput(vp); 4921 if (seen_defer) 4922 vdrop(vp); 4923 } else { 4924 if (seen_defer) 4925 vdefer_inactive_unlocked(vp); 4926 } 4927 } 4928 } 4929 4930 void 4931 vfs_periodic(struct mount *mp, int flags) 4932 { 4933 4934 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4935 4936 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4937 vfs_periodic_inactive(mp, flags); 4938 else 4939 vfs_periodic_msync_inactive(mp, flags); 4940 } 4941 4942 static void 4943 destroy_vpollinfo_free(struct vpollinfo *vi) 4944 { 4945 4946 knlist_destroy(&vi->vpi_selinfo.si_note); 4947 mtx_destroy(&vi->vpi_lock); 4948 free(vi, M_VNODEPOLL); 4949 } 4950 4951 static void 4952 destroy_vpollinfo(struct vpollinfo 
*vi) 4953 { 4954 4955 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4956 seldrain(&vi->vpi_selinfo); 4957 destroy_vpollinfo_free(vi); 4958 } 4959 4960 /* 4961 * Initialize per-vnode helper structure to hold poll-related state. 4962 */ 4963 void 4964 v_addpollinfo(struct vnode *vp) 4965 { 4966 struct vpollinfo *vi; 4967 4968 if (vp->v_pollinfo != NULL) 4969 return; 4970 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4971 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4972 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4973 vfs_knlunlock, vfs_knl_assert_lock); 4974 VI_LOCK(vp); 4975 if (vp->v_pollinfo != NULL) { 4976 VI_UNLOCK(vp); 4977 destroy_vpollinfo_free(vi); 4978 return; 4979 } 4980 vp->v_pollinfo = vi; 4981 VI_UNLOCK(vp); 4982 } 4983 4984 /* 4985 * Record a process's interest in events which might happen to 4986 * a vnode. Because poll uses the historic select-style interface 4987 * internally, this routine serves as both the ``check for any 4988 * pending events'' and the ``record my interest in future events'' 4989 * functions. (These are done together, while the lock is held, 4990 * to avoid race conditions.) 4991 */ 4992 int 4993 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4994 { 4995 4996 v_addpollinfo(vp); 4997 mtx_lock(&vp->v_pollinfo->vpi_lock); 4998 if (vp->v_pollinfo->vpi_revents & events) { 4999 /* 5000 * This leaves events we are not interested 5001 * in available for the other process which 5002 * which presumably had requested them 5003 * (otherwise they would never have been 5004 * recorded). 5005 */ 5006 events &= vp->v_pollinfo->vpi_revents; 5007 vp->v_pollinfo->vpi_revents &= ~events; 5008 5009 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5010 return (events); 5011 } 5012 vp->v_pollinfo->vpi_events |= events; 5013 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5014 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5015 return (0); 5016 } 5017 5018 /* 5019 * Routine to create and manage a filesystem syncer vnode. 5020 */ 5021 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5022 static int sync_fsync(struct vop_fsync_args *); 5023 static int sync_inactive(struct vop_inactive_args *); 5024 static int sync_reclaim(struct vop_reclaim_args *); 5025 5026 static struct vop_vector sync_vnodeops = { 5027 .vop_bypass = VOP_EOPNOTSUPP, 5028 .vop_close = sync_close, 5029 .vop_fsync = sync_fsync, 5030 .vop_getwritemount = vop_stdgetwritemount, 5031 .vop_inactive = sync_inactive, 5032 .vop_need_inactive = vop_stdneed_inactive, 5033 .vop_reclaim = sync_reclaim, 5034 .vop_lock1 = vop_stdlock, 5035 .vop_unlock = vop_stdunlock, 5036 .vop_islocked = vop_stdislocked, 5037 .vop_fplookup_vexec = VOP_EAGAIN, 5038 .vop_fplookup_symlink = VOP_EAGAIN, 5039 }; 5040 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5041 5042 /* 5043 * Create a new filesystem syncer vnode for the specified mount point. 
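 *
 * Typical call site, shown only as an illustrative sketch (individual
 * filesystems differ; read-only mounts do not need a syncer vnode):
 *
 *	if ((mp->mnt_flag & MNT_RDONLY) == 0)
 *		vfs_allocate_syncvnode(mp);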
5044 */ 5045 void 5046 vfs_allocate_syncvnode(struct mount *mp) 5047 { 5048 struct vnode *vp; 5049 struct bufobj *bo; 5050 static long start, incr, next; 5051 int error; 5052 5053 /* Allocate a new vnode */ 5054 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5055 if (error != 0) 5056 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5057 vp->v_type = VNON; 5058 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5059 vp->v_vflag |= VV_FORCEINSMQ; 5060 error = insmntque1(vp, mp); 5061 if (error != 0) 5062 panic("vfs_allocate_syncvnode: insmntque() failed"); 5063 vp->v_vflag &= ~VV_FORCEINSMQ; 5064 vn_set_state(vp, VSTATE_CONSTRUCTED); 5065 VOP_UNLOCK(vp); 5066 /* 5067 * Place the vnode onto the syncer worklist. We attempt to 5068 * scatter them about on the list so that they will go off 5069 * at evenly distributed times even if all the filesystems 5070 * are mounted at once. 5071 */ 5072 next += incr; 5073 if (next == 0 || next > syncer_maxdelay) { 5074 start /= 2; 5075 incr /= 2; 5076 if (start == 0) { 5077 start = syncer_maxdelay / 2; 5078 incr = syncer_maxdelay; 5079 } 5080 next = start; 5081 } 5082 bo = &vp->v_bufobj; 5083 BO_LOCK(bo); 5084 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5085 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5086 mtx_lock(&sync_mtx); 5087 sync_vnode_count++; 5088 if (mp->mnt_syncer == NULL) { 5089 mp->mnt_syncer = vp; 5090 vp = NULL; 5091 } 5092 mtx_unlock(&sync_mtx); 5093 BO_UNLOCK(bo); 5094 if (vp != NULL) { 5095 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5096 vgone(vp); 5097 vput(vp); 5098 } 5099 } 5100 5101 void 5102 vfs_deallocate_syncvnode(struct mount *mp) 5103 { 5104 struct vnode *vp; 5105 5106 mtx_lock(&sync_mtx); 5107 vp = mp->mnt_syncer; 5108 if (vp != NULL) 5109 mp->mnt_syncer = NULL; 5110 mtx_unlock(&sync_mtx); 5111 if (vp != NULL) 5112 vrele(vp); 5113 } 5114 5115 /* 5116 * Do a lazy sync of the filesystem. 5117 */ 5118 static int 5119 sync_fsync(struct vop_fsync_args *ap) 5120 { 5121 struct vnode *syncvp = ap->a_vp; 5122 struct mount *mp = syncvp->v_mount; 5123 int error, save; 5124 struct bufobj *bo; 5125 5126 /* 5127 * We only need to do something if this is a lazy evaluation. 5128 */ 5129 if (ap->a_waitfor != MNT_LAZY) 5130 return (0); 5131 5132 /* 5133 * Move ourselves to the back of the sync list. 5134 */ 5135 bo = &syncvp->v_bufobj; 5136 BO_LOCK(bo); 5137 vn_syncer_add_to_worklist(bo, syncdelay); 5138 BO_UNLOCK(bo); 5139 5140 /* 5141 * Walk the list of vnodes pushing all that are dirty and 5142 * not already on the sync list. 5143 */ 5144 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5145 return (0); 5146 VOP_UNLOCK(syncvp); 5147 save = curthread_pflags_set(TDP_SYNCIO); 5148 /* 5149 * The filesystem at hand may be idle with free vnodes stored in the 5150 * batch. Return them instead of letting them stay there indefinitely. 5151 */ 5152 vfs_periodic(mp, MNT_NOWAIT); 5153 error = VFS_SYNC(mp, MNT_LAZY); 5154 curthread_pflags_restore(save); 5155 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5156 vfs_unbusy(mp); 5157 return (error); 5158 } 5159 5160 /* 5161 * The syncer vnode is no longer referenced. 5162 */ 5163 static int 5164 sync_inactive(struct vop_inactive_args *ap) 5165 { 5166 5167 vgone(ap->a_vp); 5168 return (0); 5169 } 5170 5171 /* 5172 * The syncer vnode is no longer needed and is being decommissioned. 5173 * 5174 * Modifications to the worklist must be protected by sync_mtx.
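 * (This is normally reached at unmount time via vfs_deallocate_syncvnode()
 * -> vrele() -> sync_inactive() -> vgone(), or from vfs_allocate_syncvnode()
 * above when a freshly created syncer vnode lost the race to become
 * mp->mnt_syncer.)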
5175 */ 5176 static int 5177 sync_reclaim(struct vop_reclaim_args *ap) 5178 { 5179 struct vnode *vp = ap->a_vp; 5180 struct bufobj *bo; 5181 5182 bo = &vp->v_bufobj; 5183 BO_LOCK(bo); 5184 mtx_lock(&sync_mtx); 5185 if (vp->v_mount->mnt_syncer == vp) 5186 vp->v_mount->mnt_syncer = NULL; 5187 if (bo->bo_flag & BO_ONWORKLST) { 5188 LIST_REMOVE(bo, bo_synclist); 5189 syncer_worklist_len--; 5190 sync_vnode_count--; 5191 bo->bo_flag &= ~BO_ONWORKLST; 5192 } 5193 mtx_unlock(&sync_mtx); 5194 BO_UNLOCK(bo); 5195 5196 return (0); 5197 } 5198 5199 int 5200 vn_need_pageq_flush(struct vnode *vp) 5201 { 5202 struct vm_object *obj; 5203 5204 obj = vp->v_object; 5205 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5206 vm_object_mightbedirty(obj)); 5207 } 5208 5209 /* 5210 * Check if vnode represents a disk device 5211 */ 5212 bool 5213 vn_isdisk_error(struct vnode *vp, int *errp) 5214 { 5215 int error; 5216 5217 if (vp->v_type != VCHR) { 5218 error = ENOTBLK; 5219 goto out; 5220 } 5221 error = 0; 5222 dev_lock(); 5223 if (vp->v_rdev == NULL) 5224 error = ENXIO; 5225 else if (vp->v_rdev->si_devsw == NULL) 5226 error = ENXIO; 5227 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5228 error = ENOTBLK; 5229 dev_unlock(); 5230 out: 5231 *errp = error; 5232 return (error == 0); 5233 } 5234 5235 bool 5236 vn_isdisk(struct vnode *vp) 5237 { 5238 int error; 5239 5240 return (vn_isdisk_error(vp, &error)); 5241 } 5242 5243 /* 5244 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5245 * the comment above cache_fplookup for details. 5246 */ 5247 int 5248 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5249 { 5250 int error; 5251 5252 VFS_SMR_ASSERT_ENTERED(); 5253 5254 /* Check the owner. */ 5255 if (cred->cr_uid == file_uid) { 5256 if (file_mode & S_IXUSR) 5257 return (0); 5258 goto out_error; 5259 } 5260 5261 /* Otherwise, check the groups (first match) */ 5262 if (groupmember(file_gid, cred)) { 5263 if (file_mode & S_IXGRP) 5264 return (0); 5265 goto out_error; 5266 } 5267 5268 /* Otherwise, check everyone else. */ 5269 if (file_mode & S_IXOTH) 5270 return (0); 5271 out_error: 5272 /* 5273 * Permission check failed, but it is possible denial will get overwritten 5274 * (e.g., when root is traversing through a 700 directory owned by someone 5275 * else). 5276 * 5277 * vaccess() calls priv_check_cred which in turn can descent into MAC 5278 * modules overriding this result. It's quite unclear what semantics 5279 * are allowed for them to operate, thus for safety we don't call them 5280 * from within the SMR section. This also means if any such modules 5281 * are present, we have to let the regular lookup decide. 5282 */ 5283 error = priv_check_cred_vfs_lookup_nomac(cred); 5284 switch (error) { 5285 case 0: 5286 return (0); 5287 case EAGAIN: 5288 /* 5289 * MAC modules present. 5290 */ 5291 return (EAGAIN); 5292 case EPERM: 5293 return (EACCES); 5294 default: 5295 return (error); 5296 } 5297 } 5298 5299 /* 5300 * Common filesystem object access control check routine. Accepts a 5301 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5302 * Returns 0 on success, or an errno on failure. 
5303 */ 5304 int 5305 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5306 accmode_t accmode, struct ucred *cred) 5307 { 5308 accmode_t dac_granted; 5309 accmode_t priv_granted; 5310 5311 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5312 ("invalid bit in accmode")); 5313 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5314 ("VAPPEND without VWRITE")); 5315 5316 /* 5317 * Look for a normal, non-privileged way to access the file/directory 5318 * as requested. If it exists, go with that. 5319 */ 5320 5321 dac_granted = 0; 5322 5323 /* Check the owner. */ 5324 if (cred->cr_uid == file_uid) { 5325 dac_granted |= VADMIN; 5326 if (file_mode & S_IXUSR) 5327 dac_granted |= VEXEC; 5328 if (file_mode & S_IRUSR) 5329 dac_granted |= VREAD; 5330 if (file_mode & S_IWUSR) 5331 dac_granted |= (VWRITE | VAPPEND); 5332 5333 if ((accmode & dac_granted) == accmode) 5334 return (0); 5335 5336 goto privcheck; 5337 } 5338 5339 /* Otherwise, check the groups (first match) */ 5340 if (groupmember(file_gid, cred)) { 5341 if (file_mode & S_IXGRP) 5342 dac_granted |= VEXEC; 5343 if (file_mode & S_IRGRP) 5344 dac_granted |= VREAD; 5345 if (file_mode & S_IWGRP) 5346 dac_granted |= (VWRITE | VAPPEND); 5347 5348 if ((accmode & dac_granted) == accmode) 5349 return (0); 5350 5351 goto privcheck; 5352 } 5353 5354 /* Otherwise, check everyone else. */ 5355 if (file_mode & S_IXOTH) 5356 dac_granted |= VEXEC; 5357 if (file_mode & S_IROTH) 5358 dac_granted |= VREAD; 5359 if (file_mode & S_IWOTH) 5360 dac_granted |= (VWRITE | VAPPEND); 5361 if ((accmode & dac_granted) == accmode) 5362 return (0); 5363 5364 privcheck: 5365 /* 5366 * Build a privilege mask to determine if the set of privileges 5367 * satisfies the requirements when combined with the granted mask 5368 * from above. For each privilege, if the privilege is required, 5369 * bitwise or the request type onto the priv_granted mask. 5370 */ 5371 priv_granted = 0; 5372 5373 if (type == VDIR) { 5374 /* 5375 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5376 * requests, instead of PRIV_VFS_EXEC. 5377 */ 5378 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5379 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5380 priv_granted |= VEXEC; 5381 } else { 5382 /* 5383 * Ensure that at least one execute bit is on. Otherwise, 5384 * a privileged user will always succeed, and we don't want 5385 * this to happen unless the file really is executable. 5386 */ 5387 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5388 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5389 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5390 priv_granted |= VEXEC; 5391 } 5392 5393 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5394 !priv_check_cred(cred, PRIV_VFS_READ)) 5395 priv_granted |= VREAD; 5396 5397 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5398 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5399 priv_granted |= (VWRITE | VAPPEND); 5400 5401 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5402 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5403 priv_granted |= VADMIN; 5404 5405 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5406 return (0); 5407 } 5408 5409 return ((accmode & VADMIN) ? EPERM : EACCES); 5410 } 5411 5412 /* 5413 * Credential check based on process requesting service, and per-attribute 5414 * permissions. 
5415 */ 5416 int 5417 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5418 struct thread *td, accmode_t accmode) 5419 { 5420 5421 /* 5422 * Kernel-invoked always succeeds. 5423 */ 5424 if (cred == NOCRED) 5425 return (0); 5426 5427 /* 5428 * Do not allow privileged processes in jail to directly manipulate 5429 * system attributes. 5430 */ 5431 switch (attrnamespace) { 5432 case EXTATTR_NAMESPACE_SYSTEM: 5433 /* Potentially should be: return (EPERM); */ 5434 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5435 case EXTATTR_NAMESPACE_USER: 5436 return (VOP_ACCESS(vp, accmode, cred, td)); 5437 default: 5438 return (EPERM); 5439 } 5440 } 5441 5442 #ifdef DEBUG_VFS_LOCKS 5443 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5444 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5445 "Drop into debugger on lock violation"); 5446 5447 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5448 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5449 0, "Check for interlock across VOPs"); 5450 5451 int vfs_badlock_print = 1; /* Print lock violations. */ 5452 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5453 0, "Print lock violations"); 5454 5455 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5456 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5457 0, "Print vnode details on lock violations"); 5458 5459 #ifdef KDB 5460 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5461 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5462 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5463 #endif 5464 5465 static void 5466 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5467 { 5468 5469 #ifdef KDB 5470 if (vfs_badlock_backtrace) 5471 kdb_backtrace(); 5472 #endif 5473 if (vfs_badlock_vnode) 5474 vn_printf(vp, "vnode "); 5475 if (vfs_badlock_print) 5476 printf("%s: %p %s\n", str, (void *)vp, msg); 5477 if (vfs_badlock_ddb) 5478 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5479 } 5480 5481 void 5482 assert_vi_locked(struct vnode *vp, const char *str) 5483 { 5484 5485 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5486 vfs_badlock("interlock is not locked but should be", str, vp); 5487 } 5488 5489 void 5490 assert_vi_unlocked(struct vnode *vp, const char *str) 5491 { 5492 5493 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5494 vfs_badlock("interlock is locked but should not be", str, vp); 5495 } 5496 5497 void 5498 assert_vop_locked(struct vnode *vp, const char *str) 5499 { 5500 if (KERNEL_PANICKED() || vp == NULL) 5501 return; 5502 5503 #ifdef WITNESS 5504 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5505 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5506 #else 5507 int locked = VOP_ISLOCKED(vp); 5508 if (locked == 0 || locked == LK_EXCLOTHER) 5509 #endif 5510 vfs_badlock("is not locked but should be", str, vp); 5511 } 5512 5513 void 5514 assert_vop_unlocked(struct vnode *vp, const char *str) 5515 { 5516 if (KERNEL_PANICKED() || vp == NULL) 5517 return; 5518 5519 #ifdef WITNESS 5520 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5521 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5522 #else 5523 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5524 #endif 5525 vfs_badlock("is locked but should not be", str, vp); 5526 } 5527 5528 void 5529 assert_vop_elocked(struct vnode *vp, const char *str) 5530 { 5531 if (KERNEL_PANICKED() || vp == 
NULL) 5532 return; 5533 5534 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5535 vfs_badlock("is not exclusive locked but should be", str, vp); 5536 } 5537 #endif /* DEBUG_VFS_LOCKS */ 5538 5539 void 5540 vop_rename_fail(struct vop_rename_args *ap) 5541 { 5542 5543 if (ap->a_tvp != NULL) 5544 vput(ap->a_tvp); 5545 if (ap->a_tdvp == ap->a_tvp) 5546 vrele(ap->a_tdvp); 5547 else 5548 vput(ap->a_tdvp); 5549 vrele(ap->a_fdvp); 5550 vrele(ap->a_fvp); 5551 } 5552 5553 void 5554 vop_rename_pre(void *ap) 5555 { 5556 struct vop_rename_args *a = ap; 5557 5558 #ifdef DEBUG_VFS_LOCKS 5559 if (a->a_tvp) 5560 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5561 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5562 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5563 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5564 5565 /* Check the source (from). */ 5566 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5567 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5568 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5569 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5570 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5571 5572 /* Check the target. */ 5573 if (a->a_tvp) 5574 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5575 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5576 #endif 5577 /* 5578 * It may be tempting to add vn_seqc_write_begin/end calls here and 5579 * in vop_rename_post but that's not going to work out since some 5580 * filesystems relookup vnodes mid-rename. This is probably a bug. 5581 * 5582 * For now filesystems are expected to do the relevant calls after they 5583 * decide what vnodes to operate on. 5584 */ 5585 if (a->a_tdvp != a->a_fdvp) 5586 vhold(a->a_fdvp); 5587 if (a->a_tvp != a->a_fvp) 5588 vhold(a->a_fvp); 5589 vhold(a->a_tdvp); 5590 if (a->a_tvp) 5591 vhold(a->a_tvp); 5592 } 5593 5594 #ifdef DEBUG_VFS_LOCKS 5595 void 5596 vop_fplookup_vexec_debugpre(void *ap __unused) 5597 { 5598 5599 VFS_SMR_ASSERT_ENTERED(); 5600 } 5601 5602 void 5603 vop_fplookup_vexec_debugpost(void *ap, int rc) 5604 { 5605 struct vop_fplookup_vexec_args *a; 5606 struct vnode *vp; 5607 5608 a = ap; 5609 vp = a->a_vp; 5610 5611 VFS_SMR_ASSERT_ENTERED(); 5612 if (rc == EOPNOTSUPP) 5613 VNPASS(VN_IS_DOOMED(vp), vp); 5614 } 5615 5616 void 5617 vop_fplookup_symlink_debugpre(void *ap __unused) 5618 { 5619 5620 VFS_SMR_ASSERT_ENTERED(); 5621 } 5622 5623 void 5624 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5625 { 5626 5627 VFS_SMR_ASSERT_ENTERED(); 5628 } 5629 5630 static void 5631 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5632 { 5633 if (vp->v_type == VCHR) 5634 ; 5635 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5636 ASSERT_VOP_LOCKED(vp, name); 5637 else 5638 ASSERT_VOP_ELOCKED(vp, name); 5639 } 5640 5641 void 5642 vop_fsync_debugpre(void *a) 5643 { 5644 struct vop_fsync_args *ap; 5645 5646 ap = a; 5647 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5648 } 5649 5650 void 5651 vop_fsync_debugpost(void *a, int rc __unused) 5652 { 5653 struct vop_fsync_args *ap; 5654 5655 ap = a; 5656 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5657 } 5658 5659 void 5660 vop_fdatasync_debugpre(void *a) 5661 { 5662 struct vop_fdatasync_args *ap; 5663 5664 ap = a; 5665 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5666 } 5667 5668 void 5669 vop_fdatasync_debugpost(void *a, int rc __unused) 5670 { 5671 struct vop_fdatasync_args *ap; 5672 5673 ap = a; 5674 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5675 } 5676 5677 void 5678 
vop_strategy_debugpre(void *ap) 5679 { 5680 struct vop_strategy_args *a; 5681 struct buf *bp; 5682 5683 a = ap; 5684 bp = a->a_bp; 5685 5686 /* 5687 * Cluster ops lock their component buffers but not the IO container. 5688 */ 5689 if ((bp->b_flags & B_CLUSTER) != 0) 5690 return; 5691 5692 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5693 if (vfs_badlock_print) 5694 printf( 5695 "VOP_STRATEGY: bp is not locked but should be\n"); 5696 if (vfs_badlock_ddb) 5697 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5698 } 5699 } 5700 5701 void 5702 vop_lock_debugpre(void *ap) 5703 { 5704 struct vop_lock1_args *a = ap; 5705 5706 if ((a->a_flags & LK_INTERLOCK) == 0) 5707 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5708 else 5709 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5710 } 5711 5712 void 5713 vop_lock_debugpost(void *ap, int rc) 5714 { 5715 struct vop_lock1_args *a = ap; 5716 5717 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5718 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5719 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5720 } 5721 5722 void 5723 vop_unlock_debugpre(void *ap) 5724 { 5725 struct vop_unlock_args *a = ap; 5726 struct vnode *vp = a->a_vp; 5727 5728 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5729 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5730 } 5731 5732 void 5733 vop_need_inactive_debugpre(void *ap) 5734 { 5735 struct vop_need_inactive_args *a = ap; 5736 5737 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5738 } 5739 5740 void 5741 vop_need_inactive_debugpost(void *ap, int rc) 5742 { 5743 struct vop_need_inactive_args *a = ap; 5744 5745 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5746 } 5747 #endif 5748 5749 void 5750 vop_create_pre(void *ap) 5751 { 5752 struct vop_create_args *a; 5753 struct vnode *dvp; 5754 5755 a = ap; 5756 dvp = a->a_dvp; 5757 vn_seqc_write_begin(dvp); 5758 } 5759 5760 void 5761 vop_create_post(void *ap, int rc) 5762 { 5763 struct vop_create_args *a; 5764 struct vnode *dvp; 5765 5766 a = ap; 5767 dvp = a->a_dvp; 5768 vn_seqc_write_end(dvp); 5769 if (!rc) 5770 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5771 } 5772 5773 void 5774 vop_whiteout_pre(void *ap) 5775 { 5776 struct vop_whiteout_args *a; 5777 struct vnode *dvp; 5778 5779 a = ap; 5780 dvp = a->a_dvp; 5781 vn_seqc_write_begin(dvp); 5782 } 5783 5784 void 5785 vop_whiteout_post(void *ap, int rc) 5786 { 5787 struct vop_whiteout_args *a; 5788 struct vnode *dvp; 5789 5790 a = ap; 5791 dvp = a->a_dvp; 5792 vn_seqc_write_end(dvp); 5793 } 5794 5795 void 5796 vop_deleteextattr_pre(void *ap) 5797 { 5798 struct vop_deleteextattr_args *a; 5799 struct vnode *vp; 5800 5801 a = ap; 5802 vp = a->a_vp; 5803 vn_seqc_write_begin(vp); 5804 } 5805 5806 void 5807 vop_deleteextattr_post(void *ap, int rc) 5808 { 5809 struct vop_deleteextattr_args *a; 5810 struct vnode *vp; 5811 5812 a = ap; 5813 vp = a->a_vp; 5814 vn_seqc_write_end(vp); 5815 if (!rc) 5816 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5817 } 5818 5819 void 5820 vop_link_pre(void *ap) 5821 { 5822 struct vop_link_args *a; 5823 struct vnode *vp, *tdvp; 5824 5825 a = ap; 5826 vp = a->a_vp; 5827 tdvp = a->a_tdvp; 5828 vn_seqc_write_begin(vp); 5829 vn_seqc_write_begin(tdvp); 5830 } 5831 5832 void 5833 vop_link_post(void *ap, int rc) 5834 { 5835 struct vop_link_args *a; 5836 struct vnode *vp, *tdvp; 5837 5838 a = ap; 5839 vp = a->a_vp; 5840 tdvp = a->a_tdvp; 5841 vn_seqc_write_end(vp); 5842 vn_seqc_write_end(tdvp); 5843 if (!rc) { 5844 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5845 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5846 } 5847 } 5848 5849 void 5850 vop_mkdir_pre(void *ap) 5851 { 5852 struct 
vop_mkdir_args *a; 5853 struct vnode *dvp; 5854 5855 a = ap; 5856 dvp = a->a_dvp; 5857 vn_seqc_write_begin(dvp); 5858 } 5859 5860 void 5861 vop_mkdir_post(void *ap, int rc) 5862 { 5863 struct vop_mkdir_args *a; 5864 struct vnode *dvp; 5865 5866 a = ap; 5867 dvp = a->a_dvp; 5868 vn_seqc_write_end(dvp); 5869 if (!rc) 5870 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5871 } 5872 5873 #ifdef DEBUG_VFS_LOCKS 5874 void 5875 vop_mkdir_debugpost(void *ap, int rc) 5876 { 5877 struct vop_mkdir_args *a; 5878 5879 a = ap; 5880 if (!rc) 5881 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5882 } 5883 #endif 5884 5885 void 5886 vop_mknod_pre(void *ap) 5887 { 5888 struct vop_mknod_args *a; 5889 struct vnode *dvp; 5890 5891 a = ap; 5892 dvp = a->a_dvp; 5893 vn_seqc_write_begin(dvp); 5894 } 5895 5896 void 5897 vop_mknod_post(void *ap, int rc) 5898 { 5899 struct vop_mknod_args *a; 5900 struct vnode *dvp; 5901 5902 a = ap; 5903 dvp = a->a_dvp; 5904 vn_seqc_write_end(dvp); 5905 if (!rc) 5906 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5907 } 5908 5909 void 5910 vop_reclaim_post(void *ap, int rc) 5911 { 5912 struct vop_reclaim_args *a; 5913 struct vnode *vp; 5914 5915 a = ap; 5916 vp = a->a_vp; 5917 ASSERT_VOP_IN_SEQC(vp); 5918 if (!rc) 5919 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5920 } 5921 5922 void 5923 vop_remove_pre(void *ap) 5924 { 5925 struct vop_remove_args *a; 5926 struct vnode *dvp, *vp; 5927 5928 a = ap; 5929 dvp = a->a_dvp; 5930 vp = a->a_vp; 5931 vn_seqc_write_begin(dvp); 5932 vn_seqc_write_begin(vp); 5933 } 5934 5935 void 5936 vop_remove_post(void *ap, int rc) 5937 { 5938 struct vop_remove_args *a; 5939 struct vnode *dvp, *vp; 5940 5941 a = ap; 5942 dvp = a->a_dvp; 5943 vp = a->a_vp; 5944 vn_seqc_write_end(dvp); 5945 vn_seqc_write_end(vp); 5946 if (!rc) { 5947 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5948 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5949 } 5950 } 5951 5952 void 5953 vop_rename_post(void *ap, int rc) 5954 { 5955 struct vop_rename_args *a = ap; 5956 long hint; 5957 5958 if (!rc) { 5959 hint = NOTE_WRITE; 5960 if (a->a_fdvp == a->a_tdvp) { 5961 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5962 hint |= NOTE_LINK; 5963 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5964 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5965 } else { 5966 hint |= NOTE_EXTEND; 5967 if (a->a_fvp->v_type == VDIR) 5968 hint |= NOTE_LINK; 5969 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5970 5971 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5972 a->a_tvp->v_type == VDIR) 5973 hint &= ~NOTE_LINK; 5974 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5975 } 5976 5977 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5978 if (a->a_tvp) 5979 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5980 } 5981 if (a->a_tdvp != a->a_fdvp) 5982 vdrop(a->a_fdvp); 5983 if (a->a_tvp != a->a_fvp) 5984 vdrop(a->a_fvp); 5985 vdrop(a->a_tdvp); 5986 if (a->a_tvp) 5987 vdrop(a->a_tvp); 5988 } 5989 5990 void 5991 vop_rmdir_pre(void *ap) 5992 { 5993 struct vop_rmdir_args *a; 5994 struct vnode *dvp, *vp; 5995 5996 a = ap; 5997 dvp = a->a_dvp; 5998 vp = a->a_vp; 5999 vn_seqc_write_begin(dvp); 6000 vn_seqc_write_begin(vp); 6001 } 6002 6003 void 6004 vop_rmdir_post(void *ap, int rc) 6005 { 6006 struct vop_rmdir_args *a; 6007 struct vnode *dvp, *vp; 6008 6009 a = ap; 6010 dvp = a->a_dvp; 6011 vp = a->a_vp; 6012 vn_seqc_write_end(dvp); 6013 vn_seqc_write_end(vp); 6014 if (!rc) { 6015 vp->v_vflag |= VV_UNLINKED; 6016 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6017 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6018 } 6019 } 6020 6021 void 6022 vop_setattr_pre(void *ap) 6023 { 6024 struct vop_setattr_args *a; 6025 
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_setacl_pre(void *ap)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setacl_post(void *ap, int rc __unused)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
}

void
vop_setextattr_pre(void *ap)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setextattr_post(void *ap, int rc)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_symlink_pre(void *ap)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_open_post(void *ap, int rc)
{
	struct vop_open_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
}

void
vop_close_post(void *ap, int rc)
{
	struct vop_close_args *a = ap;

	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
	    !VN_IS_DOOMED(a->a_vp))) {
		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
	}
}

void
vop_read_post(void *ap, int rc)
{
	struct vop_read_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

void
vop_read_pgcache_post(void *ap, int rc)
{
	struct vop_read_pgcache_args *a = ap;

	if (!rc)
		VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
}

void
vop_readdir_post(void *ap, int rc)
{
	struct vop_readdir_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}
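
/*
 * Illustrative sketch, not part of this file's compiled code: fs_knlist
 * above backs the EVFILT_FS kqueue filter, so a userspace consumer could
 * register for filesystem-wide events roughly like this (assuming an
 * already created kqueue descriptor kq; the VQ_* mask is whatever the
 * consumer cares about, since filt_fsevent() above only reports hints
 * that were requested in fflags):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR,
 *	    VQ_MOUNT | VQ_UNMOUNT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */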

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");

/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}
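
/*
 * Illustrative sketch, not taken from this file: a filesystem would
 * typically stamp a freshly created file with this value and bump the
 * field on every modification so that va_filerev changes whenever the
 * file does, e.g. something along the lines of
 *
 *	ip->i_modrev = init_va_filerev();
 *
 * where "ip" and "i_modrev" are hypothetical per-filesystem names.
 */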

static int filt_vfsread(struct knote *kn, long hint);
static int filt_vfswrite(struct knote *kn, long hint);
static int filt_vfsvnode(struct knote *kn, long hint);
static void filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_lock(void *arg, int what)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	if (what == LA_LOCKED)
		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
	else
		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
	    kn->kn_filter != EVFILT_WRITE),
	    ("READ/WRITE filter on a FIFO leaked through"));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}
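
/*
 * Illustrative sketch, not part of this file: the filters above are
 * reached when a process registers a vnode-backed descriptor with
 * kqueue(2), for example (userspace, error handling omitted, "kq" and
 * the path are placeholders):
 *
 *	struct kevent kev;
 *	int fd = open("/some/file", O_RDONLY);
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The NOTE_* hints posted by the vop_*_post() handlers earlier in this
 * file are what eventually satisfy such a registration.
 */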
6369 */ 6370 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6371 VI_LOCK(vp); 6372 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6373 VI_UNLOCK(vp); 6374 return (1); 6375 } 6376 6377 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6378 return (0); 6379 6380 VI_LOCK(vp); 6381 kn->kn_data = size - kn->kn_fp->f_offset; 6382 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6383 VI_UNLOCK(vp); 6384 return (res); 6385 } 6386 6387 /*ARGSUSED*/ 6388 static int 6389 filt_vfswrite(struct knote *kn, long hint) 6390 { 6391 struct vnode *vp = (struct vnode *)kn->kn_hook; 6392 6393 VI_LOCK(vp); 6394 6395 /* 6396 * filesystem is gone, so set the EOF flag and schedule 6397 * the knote for deletion. 6398 */ 6399 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6400 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6401 6402 kn->kn_data = 0; 6403 VI_UNLOCK(vp); 6404 return (1); 6405 } 6406 6407 static int 6408 filt_vfsvnode(struct knote *kn, long hint) 6409 { 6410 struct vnode *vp = (struct vnode *)kn->kn_hook; 6411 int res; 6412 6413 VI_LOCK(vp); 6414 if (kn->kn_sfflags & hint) 6415 kn->kn_fflags |= hint; 6416 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6417 kn->kn_flags |= EV_EOF; 6418 VI_UNLOCK(vp); 6419 return (1); 6420 } 6421 res = (kn->kn_fflags != 0); 6422 VI_UNLOCK(vp); 6423 return (res); 6424 } 6425 6426 int 6427 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6428 { 6429 int error; 6430 6431 if (dp->d_reclen > ap->a_uio->uio_resid) 6432 return (ENAMETOOLONG); 6433 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6434 if (error) { 6435 if (ap->a_ncookies != NULL) { 6436 if (ap->a_cookies != NULL) 6437 free(ap->a_cookies, M_TEMP); 6438 ap->a_cookies = NULL; 6439 *ap->a_ncookies = 0; 6440 } 6441 return (error); 6442 } 6443 if (ap->a_ncookies == NULL) 6444 return (0); 6445 6446 KASSERT(ap->a_cookies, 6447 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6448 6449 *ap->a_cookies = realloc(*ap->a_cookies, 6450 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6451 (*ap->a_cookies)[*ap->a_ncookies] = off; 6452 *ap->a_ncookies += 1; 6453 return (0); 6454 } 6455 6456 /* 6457 * The purpose of this routine is to remove granularity from accmode_t, 6458 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6459 * VADMIN and VAPPEND. 6460 * 6461 * If it returns 0, the caller is supposed to continue with the usual 6462 * access checks using 'accmode' as modified by this routine. If it 6463 * returns nonzero value, the caller is supposed to return that value 6464 * as errno. 6465 * 6466 * Note that after this routine runs, accmode may be zero. 6467 */ 6468 int 6469 vfs_unixify_accmode(accmode_t *accmode) 6470 { 6471 /* 6472 * There is no way to specify explicit "deny" rule using 6473 * file mode or POSIX.1e ACLs. 6474 */ 6475 if (*accmode & VEXPLICIT_DENY) { 6476 *accmode = 0; 6477 return (0); 6478 } 6479 6480 /* 6481 * None of these can be translated into usual access bits. 6482 * Also, the common case for NFSv4 ACLs is to not contain 6483 * either of these bits. Caller should check for VWRITE 6484 * on the containing directory instead. 6485 */ 6486 if (*accmode & (VDELETE_CHILD | VDELETE)) 6487 return (EPERM); 6488 6489 if (*accmode & VADMIN_PERMS) { 6490 *accmode &= ~VADMIN_PERMS; 6491 *accmode |= VADMIN; 6492 } 6493 6494 /* 6495 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6496 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
6497 */ 6498 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6499 6500 return (0); 6501 } 6502 6503 /* 6504 * Clear out a doomed vnode (if any) and replace it with a new one as long 6505 * as the fs is not being unmounted. Return the root vnode to the caller. 6506 */ 6507 static int __noinline 6508 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6509 { 6510 struct vnode *vp; 6511 int error; 6512 6513 restart: 6514 if (mp->mnt_rootvnode != NULL) { 6515 MNT_ILOCK(mp); 6516 vp = mp->mnt_rootvnode; 6517 if (vp != NULL) { 6518 if (!VN_IS_DOOMED(vp)) { 6519 vrefact(vp); 6520 MNT_IUNLOCK(mp); 6521 error = vn_lock(vp, flags); 6522 if (error == 0) { 6523 *vpp = vp; 6524 return (0); 6525 } 6526 vrele(vp); 6527 goto restart; 6528 } 6529 /* 6530 * Clear the old one. 6531 */ 6532 mp->mnt_rootvnode = NULL; 6533 } 6534 MNT_IUNLOCK(mp); 6535 if (vp != NULL) { 6536 vfs_op_barrier_wait(mp); 6537 vrele(vp); 6538 } 6539 } 6540 error = VFS_CACHEDROOT(mp, flags, vpp); 6541 if (error != 0) 6542 return (error); 6543 if (mp->mnt_vfs_ops == 0) { 6544 MNT_ILOCK(mp); 6545 if (mp->mnt_vfs_ops != 0) { 6546 MNT_IUNLOCK(mp); 6547 return (0); 6548 } 6549 if (mp->mnt_rootvnode == NULL) { 6550 vrefact(*vpp); 6551 mp->mnt_rootvnode = *vpp; 6552 } else { 6553 if (mp->mnt_rootvnode != *vpp) { 6554 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6555 panic("%s: mismatch between vnode returned " 6556 " by VFS_CACHEDROOT and the one cached " 6557 " (%p != %p)", 6558 __func__, *vpp, mp->mnt_rootvnode); 6559 } 6560 } 6561 } 6562 MNT_IUNLOCK(mp); 6563 } 6564 return (0); 6565 } 6566 6567 int 6568 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6569 { 6570 struct mount_pcpu *mpcpu; 6571 struct vnode *vp; 6572 int error; 6573 6574 if (!vfs_op_thread_enter(mp, mpcpu)) 6575 return (vfs_cache_root_fallback(mp, flags, vpp)); 6576 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6577 if (vp == NULL || VN_IS_DOOMED(vp)) { 6578 vfs_op_thread_exit(mp, mpcpu); 6579 return (vfs_cache_root_fallback(mp, flags, vpp)); 6580 } 6581 vrefact(vp); 6582 vfs_op_thread_exit(mp, mpcpu); 6583 error = vn_lock(vp, flags); 6584 if (error != 0) { 6585 vrele(vp); 6586 return (vfs_cache_root_fallback(mp, flags, vpp)); 6587 } 6588 *vpp = vp; 6589 return (0); 6590 } 6591 6592 struct vnode * 6593 vfs_cache_root_clear(struct mount *mp) 6594 { 6595 struct vnode *vp; 6596 6597 /* 6598 * ops > 0 guarantees there is nobody who can see this vnode 6599 */ 6600 MPASS(mp->mnt_vfs_ops > 0); 6601 vp = mp->mnt_rootvnode; 6602 if (vp != NULL) 6603 vn_seqc_write_begin(vp); 6604 mp->mnt_rootvnode = NULL; 6605 return (vp); 6606 } 6607 6608 void 6609 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6610 { 6611 6612 MPASS(mp->mnt_vfs_ops > 0); 6613 vrefact(vp); 6614 mp->mnt_rootvnode = vp; 6615 } 6616 6617 /* 6618 * These are helper functions for filesystems to traverse all 6619 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6620 * 6621 * This interface replaces MNT_VNODE_FOREACH. 6622 */ 6623 6624 struct vnode * 6625 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6626 { 6627 struct vnode *vp; 6628 6629 maybe_yield(); 6630 MNT_ILOCK(mp); 6631 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6632 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6633 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6634 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	maybe_yield();
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
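
/*
 * Illustrative sketch of MNT_VNODE_FOREACH_LAZY() usage with a
 * hypothetical callback (see mnt_lazy_cb_t in sys/mount.h for the exact
 * signature): the callback filters vnodes without the interlock held,
 * and vnodes that pass the filter are returned with their interlock
 * held, as with the "all" iterator:
 *
 *	static bool
 *	wants_vnode(struct vnode *vp, void *arg __unused)
 *	{
 *		return (vp->v_object != NULL);
 *	}
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, wants_vnode, NULL) {
 *		... inspect vp ...
 *		VI_UNLOCK(vp);
 *	}
 */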
6754 */ 6755 vhold(vp); 6756 mtx_unlock(&mp->mnt_listmtx); 6757 VI_LOCK(vp); 6758 if (VN_IS_DOOMED(vp)) { 6759 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6760 goto out_lost; 6761 } 6762 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6763 /* 6764 * There is nothing to do if we are the last user. 6765 */ 6766 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6767 goto out_lost; 6768 mtx_lock(&mp->mnt_listmtx); 6769 return (true); 6770 out_lost: 6771 vdropl(vp); 6772 maybe_yield(); 6773 mtx_lock(&mp->mnt_listmtx); 6774 return (false); 6775 } 6776 6777 static struct vnode * 6778 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6779 void *cbarg) 6780 { 6781 struct vnode *vp; 6782 6783 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6784 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6785 restart: 6786 vp = TAILQ_NEXT(*mvp, v_lazylist); 6787 while (vp != NULL) { 6788 if (vp->v_type == VMARKER) { 6789 vp = TAILQ_NEXT(vp, v_lazylist); 6790 continue; 6791 } 6792 /* 6793 * See if we want to process the vnode. Note we may encounter a 6794 * long string of vnodes we don't care about and hog the list 6795 * as a result. Check for it and requeue the marker. 6796 */ 6797 VNPASS(!VN_IS_DOOMED(vp), vp); 6798 if (!cb(vp, cbarg)) { 6799 if (!should_yield()) { 6800 vp = TAILQ_NEXT(vp, v_lazylist); 6801 continue; 6802 } 6803 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6804 v_lazylist); 6805 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6806 v_lazylist); 6807 mtx_unlock(&mp->mnt_listmtx); 6808 kern_yield(PRI_USER); 6809 mtx_lock(&mp->mnt_listmtx); 6810 goto restart; 6811 } 6812 /* 6813 * Try-lock because this is the wrong lock order. 6814 */ 6815 if (!VI_TRYLOCK(vp) && 6816 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6817 goto restart; 6818 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6819 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6820 ("alien vnode on the lazy list %p %p", vp, mp)); 6821 VNPASS(vp->v_mount == mp, vp); 6822 VNPASS(!VN_IS_DOOMED(vp), vp); 6823 break; 6824 } 6825 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6826 6827 /* Check if we are done */ 6828 if (vp == NULL) { 6829 mtx_unlock(&mp->mnt_listmtx); 6830 mnt_vnode_markerfree_lazy(mvp, mp); 6831 return (NULL); 6832 } 6833 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6834 mtx_unlock(&mp->mnt_listmtx); 6835 ASSERT_VI_LOCKED(vp, "lazy iter"); 6836 return (vp); 6837 } 6838 6839 struct vnode * 6840 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6841 void *cbarg) 6842 { 6843 6844 maybe_yield(); 6845 mtx_lock(&mp->mnt_listmtx); 6846 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6847 } 6848 6849 struct vnode * 6850 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6851 void *cbarg) 6852 { 6853 struct vnode *vp; 6854 6855 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6856 return (NULL); 6857 6858 *mvp = vn_alloc_marker(mp); 6859 MNT_ILOCK(mp); 6860 MNT_REF(mp); 6861 MNT_IUNLOCK(mp); 6862 6863 mtx_lock(&mp->mnt_listmtx); 6864 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6865 if (vp == NULL) { 6866 mtx_unlock(&mp->mnt_listmtx); 6867 mnt_vnode_markerfree_lazy(mvp, mp); 6868 return (NULL); 6869 } 6870 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6871 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6872 } 6873 6874 void 6875 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6876 { 6877 6878 if (*mvp == NULL) 6879 return; 6880 6881 mtx_lock(&mp->mnt_listmtx); 6882 

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as in modify as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}

int
vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, __func__);
	error = VOP_GETATTR(vp, &vattr, cred);
	if (__predict_true(error == 0)) {
		if (vattr.va_size <= OFF_MAX)
			*size = vattr.va_size;
		else
			error = EFBIG;
	}
	return (error);
}

int
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
{
	int error;

	VOP_LOCK(vp, LK_SHARED);
	error = vn_getsize_locked(vp, size, cred);
	VOP_UNLOCK(vp);
	return (error);
}

#ifdef INVARIANTS
void
vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
{

	switch (vp->v_state) {
	case VSTATE_UNINITIALIZED:
		switch (state) {
		case VSTATE_CONSTRUCTED:
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_CONSTRUCTED:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DESTROYING:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DEAD:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DEAD:
		switch (state) {
		case VSTATE_UNINITIALIZED:
			return;
		default:
			break;
		}
		break;
	}

	vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
	panic("invalid state transition %d -> %d\n", vp->v_state, state);
}
#endif