1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/asan.h> 52 #include <sys/bio.h> 53 #include <sys/buf.h> 54 #include <sys/capsicum.h> 55 #include <sys/condvar.h> 56 #include <sys/conf.h> 57 #include <sys/counter.h> 58 #include <sys/dirent.h> 59 #include <sys/event.h> 60 #include <sys/eventhandler.h> 61 #include <sys/extattr.h> 62 #include <sys/file.h> 63 #include <sys/fcntl.h> 64 #include <sys/jail.h> 65 #include <sys/kdb.h> 66 #include <sys/kernel.h> 67 #include <sys/kthread.h> 68 #include <sys/ktr.h> 69 #include <sys/lockf.h> 70 #include <sys/malloc.h> 71 #include <sys/mount.h> 72 #include <sys/namei.h> 73 #include <sys/pctrie.h> 74 #include <sys/priv.h> 75 #include <sys/reboot.h> 76 #include <sys/refcount.h> 77 #include <sys/rwlock.h> 78 #include <sys/sched.h> 79 #include <sys/sleepqueue.h> 80 #include <sys/smr.h> 81 #include <sys/smp.h> 82 #include <sys/stat.h> 83 #include <sys/sysctl.h> 84 #include <sys/syslog.h> 85 #include <sys/vmmeter.h> 86 #include <sys/vnode.h> 87 #include <sys/watchdog.h> 88 89 #include <machine/stdarg.h> 90 91 #include <security/mac/mac_framework.h> 92 93 #include <vm/vm.h> 94 #include <vm/vm_object.h> 95 #include <vm/vm_extern.h> 96 #include <vm/pmap.h> 97 #include <vm/vm_map.h> 98 #include <vm/vm_page.h> 99 #include <vm/vm_kern.h> 100 #include <vm/uma.h> 101 102 #ifdef DDB 103 #include <ddb/ddb.h> 104 #endif 105 106 static void delmntque(struct vnode *vp); 107 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 108 int slpflag, int slptimeo); 109 static void syncer_shutdown(void *arg, int howto); 110 static int vtryrecycle(struct vnode *vp); 111 static void v_init_counters(struct vnode *); 112 static void vn_seqc_init(struct vnode *); 113 static void vn_seqc_write_end_free(struct vnode *vp); 114 static void vgonel(struct vnode *); 115 static bool vhold_recycle_free(struct vnode *); 116 static void vdropl_recycle(struct vnode *vp); 117 static void vdrop_recycle(struct vnode *vp); 118 static void vfs_knllock(void *arg); 119 static void vfs_knlunlock(void *arg); 120 static void vfs_knl_assert_lock(void *arg, int what); 121 static void destroy_vpollinfo(struct vpollinfo *vi); 122 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 123 daddr_t startlbn, daddr_t endlbn); 124 static void vnlru_recalc(void); 125 126 /* 127 * Number of vnodes in existence. Increased whenever getnewvnode() 128 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 129 */ 130 static u_long __exclusive_cache_line numvnodes; 131 132 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 133 "Number of vnodes in existence"); 134 135 static counter_u64_t vnodes_created; 136 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 137 "Number of vnodes created by getnewvnode"); 138 139 /* 140 * Conversion tables for conversion from vnode types to inode formats 141 * and back. 142 */ 143 enum vtype iftovt_tab[16] = { 144 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 145 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 146 }; 147 int vttoif_tab[10] = { 148 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 149 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 150 }; 151 152 /* 153 * List of allocates vnodes in the system. 
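 *
 * New vnodes are linked in just before vnode_list_free_marker by
 * vnode_init().  The two markers below serve as restartable scan cursors:
 * vnlru_free_locked() walks from the free marker and vlrureclaim() from the
 * reclaim marker, each re-inserting its marker after the vnode it last
 * visited so that the scan can resume after vnode_list_mtx is dropped.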
154 */ 155 static TAILQ_HEAD(freelst, vnode) vnode_list; 156 static struct vnode *vnode_list_free_marker; 157 static struct vnode *vnode_list_reclaim_marker; 158 159 /* 160 * "Free" vnode target. Free vnodes are rarely completely free, but are 161 * just ones that are cheap to recycle. Usually they are for files which 162 * have been stat'd but not read; these usually have inode and namecache 163 * data attached to them. This target is the preferred minimum size of a 164 * sub-cache consisting mostly of such files. The system balances the size 165 * of this sub-cache with its complement to try to prevent either from 166 * thrashing while the other is relatively inactive. The targets express 167 * a preference for the best balance. 168 * 169 * "Above" this target there are 2 further targets (watermarks) related 170 * to recyling of free vnodes. In the best-operating case, the cache is 171 * exactly full, the free list has size between vlowat and vhiwat above the 172 * free target, and recycling from it and normal use maintains this state. 173 * Sometimes the free list is below vlowat or even empty, but this state 174 * is even better for immediate use provided the cache is not full. 175 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 176 * ones) to reach one of these states. The watermarks are currently hard- 177 * coded as 4% and 9% of the available space higher. These and the default 178 * of 25% for wantfreevnodes are too large if the memory size is large. 179 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 180 * whenever vnlru_proc() becomes active. 181 */ 182 static long wantfreevnodes; 183 static long __exclusive_cache_line freevnodes; 184 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 185 &freevnodes, 0, "Number of \"free\" vnodes"); 186 static long freevnodes_old; 187 188 static counter_u64_t recycles_count; 189 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 190 "Number of vnodes recycled to meet vnode cache targets"); 191 192 static counter_u64_t recycles_free_count; 193 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 194 "Number of free vnodes recycled to meet vnode cache targets"); 195 196 static counter_u64_t deferred_inact; 197 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact, 198 "Number of times inactive processing was deferred"); 199 200 /* To keep more than one thread at a time from running vfs_getnewfsid */ 201 static struct mtx mntid_mtx; 202 203 /* 204 * Lock for any access to the following: 205 * vnode_list 206 * numvnodes 207 * freevnodes 208 */ 209 static struct mtx __exclusive_cache_line vnode_list_mtx; 210 211 /* Publicly exported FS */ 212 struct nfs_public nfs_pub; 213 214 static uma_zone_t buf_trie_zone; 215 static smr_t buf_trie_smr; 216 217 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 218 static uma_zone_t vnode_zone; 219 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 220 221 __read_frequently smr_t vfs_smr; 222 223 /* 224 * The workitem queue. 225 * 226 * It is useful to delay writes of file data and filesystem metadata 227 * for tens of seconds so that quickly created and deleted files need 228 * not waste disk bandwidth being created and removed. To realize this, 229 * we append vnodes to a "workitem" queue. When running with a soft 230 * updates implementation, most pending metadata dependencies should 231 * not wait for more than a few seconds. 
Thus, mounted on block devices 232 * are delayed only about a half the time that file data is delayed. 233 * Similarly, directory updates are more critical, so are only delayed 234 * about a third the time that file data is delayed. Thus, there are 235 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 236 * one each second (driven off the filesystem syncer process). The 237 * syncer_delayno variable indicates the next queue that is to be processed. 238 * Items that need to be processed soon are placed in this queue: 239 * 240 * syncer_workitem_pending[syncer_delayno] 241 * 242 * A delay of fifteen seconds is done by placing the request fifteen 243 * entries later in the queue: 244 * 245 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 246 * 247 */ 248 static int syncer_delayno; 249 static long syncer_mask; 250 LIST_HEAD(synclist, bufobj); 251 static struct synclist *syncer_workitem_pending; 252 /* 253 * The sync_mtx protects: 254 * bo->bo_synclist 255 * sync_vnode_count 256 * syncer_delayno 257 * syncer_state 258 * syncer_workitem_pending 259 * syncer_worklist_len 260 * rushjob 261 */ 262 static struct mtx sync_mtx; 263 static struct cv sync_wakeup; 264 265 #define SYNCER_MAXDELAY 32 266 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 267 static int syncdelay = 30; /* max time to delay syncing data */ 268 static int filedelay = 30; /* time to delay syncing files */ 269 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 270 "Time to delay syncing files (in seconds)"); 271 static int dirdelay = 29; /* time to delay syncing directories */ 272 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 273 "Time to delay syncing directories (in seconds)"); 274 static int metadelay = 28; /* time to delay syncing metadata */ 275 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 276 "Time to delay syncing metadata (in seconds)"); 277 static int rushjob; /* number of slots to run ASAP */ 278 static int stat_rush_requests; /* number of times I/O speeded up */ 279 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 280 "Number of times I/O speeded up (rush requests)"); 281 282 #define VDBATCH_SIZE 8 283 struct vdbatch { 284 u_int index; 285 long freevnodes; 286 struct mtx lock; 287 struct vnode *tab[VDBATCH_SIZE]; 288 }; 289 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 290 291 static void vdbatch_dequeue(struct vnode *vp); 292 293 /* 294 * When shutting down the syncer, run it at four times normal speed. 295 */ 296 #define SYNCER_SHUTDOWN_SPEEDUP 4 297 static int sync_vnode_count; 298 static int syncer_worklist_len; 299 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 300 syncer_state; 301 302 /* Target for maximum number of vnodes. */ 303 u_long desiredvnodes; 304 static u_long gapvnodes; /* gap between wanted and desired */ 305 static u_long vhiwat; /* enough extras after expansion */ 306 static u_long vlowat; /* minimal extras before expansion */ 307 static u_long vstir; /* nonzero to stir non-free vnodes */ 308 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 309 310 static u_long vnlru_read_freevnodes(void); 311 312 /* 313 * Note that no attempt is made to sanitize these parameters. 
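 *
 * For illustration, these knobs are normally driven from userland via
 * sysctl(8); the values below are placeholders, not recommendations:
 *
 *	sysctl kern.maxvnodes=1000000
 *	sysctl vfs.wantfreevnodes=250000
 *
 * Both requests end up in sysctl_maxvnodes() and sysctl_wantfreevnodes()
 * below.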
314 */ 315 static int 316 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 317 { 318 u_long val; 319 int error; 320 321 val = desiredvnodes; 322 error = sysctl_handle_long(oidp, &val, 0, req); 323 if (error != 0 || req->newptr == NULL) 324 return (error); 325 326 if (val == desiredvnodes) 327 return (0); 328 mtx_lock(&vnode_list_mtx); 329 desiredvnodes = val; 330 wantfreevnodes = desiredvnodes / 4; 331 vnlru_recalc(); 332 mtx_unlock(&vnode_list_mtx); 333 /* 334 * XXX There is no protection against multiple threads changing 335 * desiredvnodes at the same time. Locking above only helps vnlru and 336 * getnewvnode. 337 */ 338 vfs_hash_changesize(desiredvnodes); 339 cache_changesize(desiredvnodes); 340 return (0); 341 } 342 343 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 344 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 345 "LU", "Target for maximum number of vnodes"); 346 347 static int 348 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 349 { 350 u_long val; 351 int error; 352 353 val = wantfreevnodes; 354 error = sysctl_handle_long(oidp, &val, 0, req); 355 if (error != 0 || req->newptr == NULL) 356 return (error); 357 358 if (val == wantfreevnodes) 359 return (0); 360 mtx_lock(&vnode_list_mtx); 361 wantfreevnodes = val; 362 vnlru_recalc(); 363 mtx_unlock(&vnode_list_mtx); 364 return (0); 365 } 366 367 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 368 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 369 "LU", "Target for minimum number of \"free\" vnodes"); 370 371 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 372 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 373 static int vnlru_nowhere; 374 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 375 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 376 377 static int 378 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 379 { 380 struct vnode *vp; 381 struct nameidata nd; 382 char *buf; 383 unsigned long ndflags; 384 int error; 385 386 if (req->newptr == NULL) 387 return (EINVAL); 388 if (req->newlen >= PATH_MAX) 389 return (E2BIG); 390 391 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 392 error = SYSCTL_IN(req, buf, req->newlen); 393 if (error != 0) 394 goto out; 395 396 buf[req->newlen] = '\0'; 397 398 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1; 399 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf); 400 if ((error = namei(&nd)) != 0) 401 goto out; 402 vp = nd.ni_vp; 403 404 if (VN_IS_DOOMED(vp)) { 405 /* 406 * This vnode is being recycled. Return != 0 to let the caller 407 * know that the sysctl had no effect. 
Return EAGAIN because a 408 * subsequent call will likely succeed (since namei will create 409 * a new vnode if necessary) 410 */ 411 error = EAGAIN; 412 goto putvnode; 413 } 414 415 counter_u64_add(recycles_count, 1); 416 vgone(vp); 417 putvnode: 418 NDFREE(&nd, 0); 419 out: 420 free(buf, M_TEMP); 421 return (error); 422 } 423 424 static int 425 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 426 { 427 struct thread *td = curthread; 428 struct vnode *vp; 429 struct file *fp; 430 int error; 431 int fd; 432 433 if (req->newptr == NULL) 434 return (EBADF); 435 436 error = sysctl_handle_int(oidp, &fd, 0, req); 437 if (error != 0) 438 return (error); 439 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 440 if (error != 0) 441 return (error); 442 vp = fp->f_vnode; 443 444 error = vn_lock(vp, LK_EXCLUSIVE); 445 if (error != 0) 446 goto drop; 447 448 counter_u64_add(recycles_count, 1); 449 vgone(vp); 450 VOP_UNLOCK(vp); 451 drop: 452 fdrop(fp, td); 453 return (error); 454 } 455 456 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 457 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 458 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 459 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 460 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 461 sysctl_ftry_reclaim_vnode, "I", 462 "Try to reclaim a vnode by its file descriptor"); 463 464 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 465 #define vnsz2log 8 466 #ifndef DEBUG_LOCKS 467 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log && 468 sizeof(struct vnode) < 1UL << (vnsz2log + 1), 469 "vnsz2log needs to be updated"); 470 #endif 471 472 /* 473 * Support for the bufobj clean & dirty pctrie. 474 */ 475 static void * 476 buf_trie_alloc(struct pctrie *ptree) 477 { 478 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 479 } 480 481 static void 482 buf_trie_free(struct pctrie *ptree, void *node) 483 { 484 uma_zfree_smr(buf_trie_zone, node); 485 } 486 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 487 buf_trie_smr); 488 489 /* 490 * Initialize the vnode management data structures. 491 * 492 * Reevaluate the following cap on the number of vnodes after the physical 493 * memory size exceeds 512GB. In the limit, as the physical memory size 494 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 
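 *
 * As a worked example of that ratio: 512GB is 512 * 1024 * 1024 KB, and at
 * 64 KB per vnode that yields 8388608 vnodes, which is the "8M" cap encoded
 * in MAXVNODES_MAX below.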
495 */ 496 #ifndef MAXVNODES_MAX 497 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 498 #endif 499 500 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 501 502 static struct vnode * 503 vn_alloc_marker(struct mount *mp) 504 { 505 struct vnode *vp; 506 507 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 508 vp->v_type = VMARKER; 509 vp->v_mount = mp; 510 511 return (vp); 512 } 513 514 static void 515 vn_free_marker(struct vnode *vp) 516 { 517 518 MPASS(vp->v_type == VMARKER); 519 free(vp, M_VNODE_MARKER); 520 } 521 522 #ifdef KASAN 523 static int 524 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 525 { 526 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 527 return (0); 528 } 529 530 static void 531 vnode_dtor(void *mem, int size, void *arg __unused) 532 { 533 size_t end1, end2, off1, off2; 534 535 _Static_assert(offsetof(struct vnode, v_vnodelist) < 536 offsetof(struct vnode, v_dbatchcpu), 537 "KASAN marks require updating"); 538 539 off1 = offsetof(struct vnode, v_vnodelist); 540 off2 = offsetof(struct vnode, v_dbatchcpu); 541 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 542 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 543 544 /* 545 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 546 * after the vnode has been freed. Try to get some KASAN coverage by 547 * marking everything except those two fields as invalid. Because 548 * KASAN's tracking is not byte-granular, any preceding fields sharing 549 * the same 8-byte aligned word must also be marked valid. 550 */ 551 552 /* Handle the area from the start until v_vnodelist... */ 553 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 554 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 555 556 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 557 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 558 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 559 if (off2 > off1) 560 kasan_mark((void *)((char *)mem + off1), off2 - off1, 561 off2 - off1, KASAN_UMA_FREED); 562 563 /* ... and finally the area from v_dbatchcpu to the end. */ 564 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 565 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 566 KASAN_UMA_FREED); 567 } 568 #endif /* KASAN */ 569 570 /* 571 * Initialize a vnode as it first enters the zone. 572 */ 573 static int 574 vnode_init(void *mem, int size, int flags) 575 { 576 struct vnode *vp; 577 578 vp = mem; 579 bzero(vp, size); 580 /* 581 * Setup locks. 582 */ 583 vp->v_vnlock = &vp->v_lock; 584 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 585 /* 586 * By default, don't allow shared locks unless filesystems opt-in. 587 */ 588 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 589 LK_NOSHARE | LK_IS_VNODE); 590 /* 591 * Initialize bufobj. 592 */ 593 bufobj_init(&vp->v_bufobj, vp); 594 /* 595 * Initialize namecache. 596 */ 597 cache_vnode_init(vp); 598 /* 599 * Initialize rangelocks. 600 */ 601 rangelock_init(&vp->v_rl); 602 603 vp->v_dbatchcpu = NOCPU; 604 605 /* 606 * Check vhold_recycle_free for an explanation. 607 */ 608 vp->v_holdcnt = VHOLD_NO_SMR; 609 vp->v_type = VNON; 610 mtx_lock(&vnode_list_mtx); 611 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 612 mtx_unlock(&vnode_list_mtx); 613 return (0); 614 } 615 616 /* 617 * Free a vnode when it is cleared from the zone. 
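 *
 * Note that vnode_init() and vnode_fini() are the UMA init/fini hooks: they
 * run when an item is imported into or released from the zone, not on every
 * uma_zalloc_smr()/uma_zfree_smr().  The per-allocation KASAN ctor/dtor
 * above are separate, which is why a "freed" vnode legitimately stays linked
 * on vnode_list until the zone item itself is torn down here.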
618 */ 619 static void 620 vnode_fini(void *mem, int size) 621 { 622 struct vnode *vp; 623 struct bufobj *bo; 624 625 vp = mem; 626 vdbatch_dequeue(vp); 627 mtx_lock(&vnode_list_mtx); 628 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 629 mtx_unlock(&vnode_list_mtx); 630 rangelock_destroy(&vp->v_rl); 631 lockdestroy(vp->v_vnlock); 632 mtx_destroy(&vp->v_interlock); 633 bo = &vp->v_bufobj; 634 rw_destroy(BO_LOCKPTR(bo)); 635 636 kasan_mark(mem, size, size, 0); 637 } 638 639 /* 640 * Provide the size of NFS nclnode and NFS fh for calculation of the 641 * vnode memory consumption. The size is specified directly to 642 * eliminate dependency on NFS-private header. 643 * 644 * Other filesystems may use bigger or smaller (like UFS and ZFS) 645 * private inode data, but the NFS-based estimation is ample enough. 646 * Still, we care about differences in the size between 64- and 32-bit 647 * platforms. 648 * 649 * Namecache structure size is heuristically 650 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 651 */ 652 #ifdef _LP64 653 #define NFS_NCLNODE_SZ (528 + 64) 654 #define NC_SZ 148 655 #else 656 #define NFS_NCLNODE_SZ (360 + 32) 657 #define NC_SZ 92 658 #endif 659 660 static void 661 vntblinit(void *dummy __unused) 662 { 663 struct vdbatch *vd; 664 uma_ctor ctor; 665 uma_dtor dtor; 666 int cpu, physvnodes, virtvnodes; 667 668 /* 669 * Desiredvnodes is a function of the physical memory size and the 670 * kernel's heap size. Generally speaking, it scales with the 671 * physical memory size. The ratio of desiredvnodes to the physical 672 * memory size is 1:16 until desiredvnodes exceeds 98,304. 673 * Thereafter, the 674 * marginal ratio of desiredvnodes to the physical memory size is 675 * 1:64. However, desiredvnodes is limited by the kernel's heap 676 * size. The memory required by desiredvnodes vnodes and vm objects 677 * must not exceed 1/10th of the kernel's heap size. 678 */ 679 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 680 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 681 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 682 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 683 desiredvnodes = min(physvnodes, virtvnodes); 684 if (desiredvnodes > MAXVNODES_MAX) { 685 if (bootverbose) 686 printf("Reducing kern.maxvnodes %lu -> %lu\n", 687 desiredvnodes, MAXVNODES_MAX); 688 desiredvnodes = MAXVNODES_MAX; 689 } 690 wantfreevnodes = desiredvnodes / 4; 691 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 692 TAILQ_INIT(&vnode_list); 693 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 694 /* 695 * The lock is taken to appease WITNESS. 696 */ 697 mtx_lock(&vnode_list_mtx); 698 vnlru_recalc(); 699 mtx_unlock(&vnode_list_mtx); 700 vnode_list_free_marker = vn_alloc_marker(NULL); 701 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 702 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 703 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 704 705 #ifdef KASAN 706 ctor = vnode_ctor; 707 dtor = vnode_dtor; 708 #else 709 ctor = NULL; 710 dtor = NULL; 711 #endif 712 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 713 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 714 uma_zone_set_smr(vnode_zone, vfs_smr); 715 716 /* 717 * Preallocate enough nodes to support one-per buf so that 718 * we can not fail an insert. reassignbuf() callers can not 719 * tolerate the insertion failure. 
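 *
 * buf_trie_alloc() above only ever asks for M_NOWAIT memory from an SMR
 * zone, so the uma_prealloc() of one node per buf below is what turns those
 * opportunistic allocations into ones that do not fail in practice.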
720 */ 721 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 722 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 723 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 724 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 725 uma_prealloc(buf_trie_zone, nbuf); 726 727 vnodes_created = counter_u64_alloc(M_WAITOK); 728 recycles_count = counter_u64_alloc(M_WAITOK); 729 recycles_free_count = counter_u64_alloc(M_WAITOK); 730 deferred_inact = counter_u64_alloc(M_WAITOK); 731 732 /* 733 * Initialize the filesystem syncer. 734 */ 735 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 736 &syncer_mask); 737 syncer_maxdelay = syncer_mask + 1; 738 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 739 cv_init(&sync_wakeup, "syncer"); 740 741 CPU_FOREACH(cpu) { 742 vd = DPCPU_ID_PTR((cpu), vd); 743 bzero(vd, sizeof(*vd)); 744 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 745 } 746 } 747 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 748 749 /* 750 * Mark a mount point as busy. Used to synchronize access and to delay 751 * unmounting. Eventually, mountlist_mtx is not released on failure. 752 * 753 * vfs_busy() is a custom lock, it can block the caller. 754 * vfs_busy() only sleeps if the unmount is active on the mount point. 755 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 756 * vnode belonging to mp. 757 * 758 * Lookup uses vfs_busy() to traverse mount points. 759 * root fs var fs 760 * / vnode lock A / vnode lock (/var) D 761 * /var vnode lock B /log vnode lock(/var/log) E 762 * vfs_busy lock C vfs_busy lock F 763 * 764 * Within each file system, the lock order is C->A->B and F->D->E. 765 * 766 * When traversing across mounts, the system follows that lock order: 767 * 768 * C->A->B 769 * | 770 * +->F->D->E 771 * 772 * The lookup() process for namei("/var") illustrates the process: 773 * VOP_LOOKUP() obtains B while A is held 774 * vfs_busy() obtains a shared lock on F while A and B are held 775 * vput() releases lock on B 776 * vput() releases lock on A 777 * VFS_ROOT() obtains lock on D while shared lock on F is held 778 * vfs_unbusy() releases shared lock on F 779 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 780 * Attempt to lock A (instead of vp_crossmp) while D is held would 781 * violate the global order, causing deadlocks. 782 * 783 * dounmount() locks B while F is drained. 784 */ 785 int 786 vfs_busy(struct mount *mp, int flags) 787 { 788 struct mount_pcpu *mpcpu; 789 790 MPASS((flags & ~MBF_MASK) == 0); 791 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 792 793 if (vfs_op_thread_enter(mp, mpcpu)) { 794 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 795 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 796 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 797 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 798 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 799 vfs_op_thread_exit(mp, mpcpu); 800 if (flags & MBF_MNTLSTLOCK) 801 mtx_unlock(&mountlist_mtx); 802 return (0); 803 } 804 805 MNT_ILOCK(mp); 806 vfs_assert_mount_counters(mp); 807 MNT_REF(mp); 808 /* 809 * If mount point is currently being unmounted, sleep until the 810 * mount point fate is decided. If thread doing the unmounting fails, 811 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 812 * that this mount point has survived the unmount attempt and vfs_busy 813 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 814 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 815 * about to be really destroyed. 
vfs_busy needs to release its 816 * reference on the mount point in this case and return with ENOENT, 817 * telling the caller that the mount point it tried to busy is no longer 818 * valid. 819 */ 820 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 821 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), 822 ("%s: non-empty upper mount list with pending unmount", 823 __func__)); 824 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 825 MNT_REL(mp); 826 MNT_IUNLOCK(mp); 827 CTR1(KTR_VFS, "%s: failed busying before sleeping", 828 __func__); 829 return (ENOENT); 830 } 831 if (flags & MBF_MNTLSTLOCK) 832 mtx_unlock(&mountlist_mtx); 833 mp->mnt_kern_flag |= MNTK_MWAIT; 834 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 835 if (flags & MBF_MNTLSTLOCK) 836 mtx_lock(&mountlist_mtx); 837 MNT_ILOCK(mp); 838 } 839 if (flags & MBF_MNTLSTLOCK) 840 mtx_unlock(&mountlist_mtx); 841 mp->mnt_lockref++; 842 MNT_IUNLOCK(mp); 843 return (0); 844 } 845 846 /* 847 * Free a busy filesystem. 848 */ 849 void 850 vfs_unbusy(struct mount *mp) 851 { 852 struct mount_pcpu *mpcpu; 853 int c; 854 855 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 856 857 if (vfs_op_thread_enter(mp, mpcpu)) { 858 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 859 vfs_mp_count_sub_pcpu(mpcpu, lockref, 1); 860 vfs_mp_count_sub_pcpu(mpcpu, ref, 1); 861 vfs_op_thread_exit(mp, mpcpu); 862 return; 863 } 864 865 MNT_ILOCK(mp); 866 vfs_assert_mount_counters(mp); 867 MNT_REL(mp); 868 c = --mp->mnt_lockref; 869 if (mp->mnt_vfs_ops == 0) { 870 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 871 MNT_IUNLOCK(mp); 872 return; 873 } 874 if (c < 0) 875 vfs_dump_mount_counters(mp); 876 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 877 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 878 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 879 mp->mnt_kern_flag &= ~MNTK_DRAINING; 880 wakeup(&mp->mnt_lockref); 881 } 882 MNT_IUNLOCK(mp); 883 } 884 885 /* 886 * Lookup a mount point by filesystem identifier. 887 */ 888 struct mount * 889 vfs_getvfs(fsid_t *fsid) 890 { 891 struct mount *mp; 892 893 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 894 mtx_lock(&mountlist_mtx); 895 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 896 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 897 vfs_ref(mp); 898 mtx_unlock(&mountlist_mtx); 899 return (mp); 900 } 901 } 902 mtx_unlock(&mountlist_mtx); 903 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 904 return ((struct mount *) 0); 905 } 906 907 /* 908 * Lookup a mount point by filesystem identifier, busying it before 909 * returning. 910 * 911 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 912 * cache for popular filesystem identifiers. The cache is lockless, using 913 * the fact that struct mount's are never freed. In the worst case we may 914 * get a pointer to an unmounted or even a different filesystem, so we have to 915 * check what we got, and go the slow way if so.
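 *
 * As an illustration of the interface described above (a sketch, not code
 * from this file), a caller holding a reference typically brackets its work
 * as:
 *
 *	if (vfs_busy(mp, 0) == 0) {
 *		/* operate on the mount, e.g. VFS_ROOT(mp, LK_SHARED, &vp) */
 *		vfs_unbusy(mp);
 *	}
 *
 * passing MBF_MNTLSTLOCK instead when mountlist_mtx is held, exactly as the
 * slow path below does.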
916 */ 917 struct mount * 918 vfs_busyfs(fsid_t *fsid) 919 { 920 #define FSID_CACHE_SIZE 256 921 typedef struct mount * volatile vmp_t; 922 static vmp_t cache[FSID_CACHE_SIZE]; 923 struct mount *mp; 924 int error; 925 uint32_t hash; 926 927 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 928 hash = fsid->val[0] ^ fsid->val[1]; 929 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 930 mp = cache[hash]; 931 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 932 goto slow; 933 if (vfs_busy(mp, 0) != 0) { 934 cache[hash] = NULL; 935 goto slow; 936 } 937 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 938 return (mp); 939 else 940 vfs_unbusy(mp); 941 942 slow: 943 mtx_lock(&mountlist_mtx); 944 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 945 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 946 error = vfs_busy(mp, MBF_MNTLSTLOCK); 947 if (error) { 948 cache[hash] = NULL; 949 mtx_unlock(&mountlist_mtx); 950 return (NULL); 951 } 952 cache[hash] = mp; 953 return (mp); 954 } 955 } 956 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 957 mtx_unlock(&mountlist_mtx); 958 return ((struct mount *) 0); 959 } 960 961 /* 962 * Check if a user can access privileged mount options. 963 */ 964 int 965 vfs_suser(struct mount *mp, struct thread *td) 966 { 967 int error; 968 969 if (jailed(td->td_ucred)) { 970 /* 971 * If the jail of the calling thread lacks permission for 972 * this type of file system, deny immediately. 973 */ 974 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 975 return (EPERM); 976 977 /* 978 * If the file system was mounted outside the jail of the 979 * calling thread, deny immediately. 980 */ 981 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 982 return (EPERM); 983 } 984 985 /* 986 * If file system supports delegated administration, we don't check 987 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 988 * by the file system itself. 989 * If this is not the user that did original mount, we check for 990 * the PRIV_VFS_MOUNT_OWNER privilege. 991 */ 992 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 993 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 994 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 995 return (error); 996 } 997 return (0); 998 } 999 1000 /* 1001 * Get a new unique fsid. Try to make its val[0] unique, since this value 1002 * will be used to create fake device numbers for stat(). Also try (but 1003 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1004 * support 16-bit device numbers. We end up with unique val[0]'s for the 1005 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1006 * 1007 * Keep in mind that several mounts may be running in parallel. Starting 1008 * the search one past where the previous search terminated is both a 1009 * micro-optimization and a defense against returning the same fsid to 1010 * different mounts. 
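 *
 * Concretely, val[1] is simply the vfs type number, while val[0] is built
 * below as
 *
 *	val[0] = makedev(255, ((mtype & 0xFF) << 24) |
 *	    ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
 *
 * so the low byte of the minor number tracks mntid_base directly, giving the
 * uniqueness properties described above.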
1011 */ 1012 void 1013 vfs_getnewfsid(struct mount *mp) 1014 { 1015 static uint16_t mntid_base; 1016 struct mount *nmp; 1017 fsid_t tfsid; 1018 int mtype; 1019 1020 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1021 mtx_lock(&mntid_mtx); 1022 mtype = mp->mnt_vfc->vfc_typenum; 1023 tfsid.val[1] = mtype; 1024 mtype = (mtype & 0xFF) << 24; 1025 for (;;) { 1026 tfsid.val[0] = makedev(255, 1027 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1028 mntid_base++; 1029 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1030 break; 1031 vfs_rel(nmp); 1032 } 1033 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1034 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1035 mtx_unlock(&mntid_mtx); 1036 } 1037 1038 /* 1039 * Knob to control the precision of file timestamps: 1040 * 1041 * 0 = seconds only; nanoseconds zeroed. 1042 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1043 * 2 = seconds and nanoseconds, truncated to microseconds. 1044 * >=3 = seconds and nanoseconds, maximum precision. 1045 */ 1046 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1047 1048 static int timestamp_precision = TSP_USEC; 1049 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1050 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1051 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1052 "3+: sec + ns (max. precision))"); 1053 1054 /* 1055 * Get a current timestamp. 1056 */ 1057 void 1058 vfs_timestamp(struct timespec *tsp) 1059 { 1060 struct timeval tv; 1061 1062 switch (timestamp_precision) { 1063 case TSP_SEC: 1064 tsp->tv_sec = time_second; 1065 tsp->tv_nsec = 0; 1066 break; 1067 case TSP_HZ: 1068 getnanotime(tsp); 1069 break; 1070 case TSP_USEC: 1071 microtime(&tv); 1072 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1073 break; 1074 case TSP_NSEC: 1075 default: 1076 nanotime(tsp); 1077 break; 1078 } 1079 } 1080 1081 /* 1082 * Set vnode attributes to VNOVAL 1083 */ 1084 void 1085 vattr_null(struct vattr *vap) 1086 { 1087 1088 vap->va_type = VNON; 1089 vap->va_size = VNOVAL; 1090 vap->va_bytes = VNOVAL; 1091 vap->va_mode = VNOVAL; 1092 vap->va_nlink = VNOVAL; 1093 vap->va_uid = VNOVAL; 1094 vap->va_gid = VNOVAL; 1095 vap->va_fsid = VNOVAL; 1096 vap->va_fileid = VNOVAL; 1097 vap->va_blocksize = VNOVAL; 1098 vap->va_rdev = VNOVAL; 1099 vap->va_atime.tv_sec = VNOVAL; 1100 vap->va_atime.tv_nsec = VNOVAL; 1101 vap->va_mtime.tv_sec = VNOVAL; 1102 vap->va_mtime.tv_nsec = VNOVAL; 1103 vap->va_ctime.tv_sec = VNOVAL; 1104 vap->va_ctime.tv_nsec = VNOVAL; 1105 vap->va_birthtime.tv_sec = VNOVAL; 1106 vap->va_birthtime.tv_nsec = VNOVAL; 1107 vap->va_flags = VNOVAL; 1108 vap->va_gen = VNOVAL; 1109 vap->va_vaflags = 0; 1110 } 1111 1112 /* 1113 * Try to reduce the total number of vnodes. 1114 * 1115 * This routine (and its user) are buggy in at least the following ways: 1116 * - all parameters were picked years ago when RAM sizes were significantly 1117 * smaller 1118 * - it can pick vnodes based on pages used by the vm object, but filesystems 1119 * like ZFS don't use it making the pick broken 1120 * - since ZFS has its own aging policy it gets partially combated by this one 1121 * - a dedicated method should be provided for filesystems to let them decide 1122 * whether the vnode should be recycled 1123 * 1124 * This routine is called when we have too many vnodes. It attempts 1125 * to free <count> vnodes and will potentially free vnodes that still 1126 * have VM backing store (VM backing store is typically the cause 1127 * of a vnode blowout so we want to do this). Therefore, this operation 1128 * is not considered cheap. 
1129 * 1130 * A number of conditions may prevent a vnode from being reclaimed. 1131 * The buffer cache may have references on the vnode, a directory 1132 * vnode may still have references due to the namei cache representing 1133 * underlying files, or the vnode may be in active use. It is not 1134 * desirable to reuse such vnodes. These conditions may cause the 1135 * number of vnodes to reach some minimum value regardless of what 1136 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 1137 * 1138 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1139 * entries if this argument is true 1140 * @param trigger Only reclaim vnodes with fewer than this many resident 1141 * pages. 1142 * @param target How many vnodes to reclaim. 1143 * @return The number of vnodes that were reclaimed. 1144 */ 1145 static int 1146 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1147 { 1148 struct vnode *vp, *mvp; 1149 struct mount *mp; 1150 struct vm_object *object; 1151 u_long done; 1152 bool retried; 1153 1154 mtx_assert(&vnode_list_mtx, MA_OWNED); 1155 1156 retried = false; 1157 done = 0; 1158 1159 mvp = vnode_list_reclaim_marker; 1160 restart: 1161 vp = mvp; 1162 while (done < target) { 1163 vp = TAILQ_NEXT(vp, v_vnodelist); 1164 if (__predict_false(vp == NULL)) 1165 break; 1166 1167 if (__predict_false(vp->v_type == VMARKER)) 1168 continue; 1169 1170 /* 1171 * If it's been deconstructed already, it's still 1172 * referenced, or it exceeds the trigger, skip it. 1173 * Also skip free vnodes. We are trying to make space 1174 * to expand the free list, not reduce it. 1175 */ 1176 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1177 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1178 goto next_iter; 1179 1180 if (vp->v_type == VBAD || vp->v_type == VNON) 1181 goto next_iter; 1182 1183 object = atomic_load_ptr(&vp->v_object); 1184 if (object == NULL || object->resident_page_count > trigger) { 1185 goto next_iter; 1186 } 1187 1188 /* 1189 * Handle races against vnode allocation. Filesystems lock the 1190 * vnode some time after it gets returned from getnewvnode, 1191 * despite type and hold count being manipulated earlier. 1192 * Resorting to checking v_mount restores guarantees present 1193 * before the global list was reworked to contain all vnodes.
1194 */ 1195 if (!VI_TRYLOCK(vp)) 1196 goto next_iter; 1197 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1198 VI_UNLOCK(vp); 1199 goto next_iter; 1200 } 1201 if (vp->v_mount == NULL) { 1202 VI_UNLOCK(vp); 1203 goto next_iter; 1204 } 1205 vholdl(vp); 1206 VI_UNLOCK(vp); 1207 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1208 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1209 mtx_unlock(&vnode_list_mtx); 1210 1211 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1212 vdrop_recycle(vp); 1213 goto next_iter_unlocked; 1214 } 1215 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1216 vdrop_recycle(vp); 1217 vn_finished_write(mp); 1218 goto next_iter_unlocked; 1219 } 1220 1221 VI_LOCK(vp); 1222 if (vp->v_usecount > 0 || 1223 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1224 (vp->v_object != NULL && vp->v_object->handle == vp && 1225 vp->v_object->resident_page_count > trigger)) { 1226 VOP_UNLOCK(vp); 1227 vdropl_recycle(vp); 1228 vn_finished_write(mp); 1229 goto next_iter_unlocked; 1230 } 1231 counter_u64_add(recycles_count, 1); 1232 vgonel(vp); 1233 VOP_UNLOCK(vp); 1234 vdropl_recycle(vp); 1235 vn_finished_write(mp); 1236 done++; 1237 next_iter_unlocked: 1238 if (should_yield()) 1239 kern_yield(PRI_USER); 1240 mtx_lock(&vnode_list_mtx); 1241 goto restart; 1242 next_iter: 1243 MPASS(vp->v_type != VMARKER); 1244 if (!should_yield()) 1245 continue; 1246 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1247 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1248 mtx_unlock(&vnode_list_mtx); 1249 kern_yield(PRI_USER); 1250 mtx_lock(&vnode_list_mtx); 1251 goto restart; 1252 } 1253 if (done == 0 && !retried) { 1254 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1255 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1256 retried = true; 1257 goto restart; 1258 } 1259 return (done); 1260 } 1261 1262 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1263 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1264 0, 1265 "limit on vnode free requests per call to the vnlru_free routine"); 1266 1267 /* 1268 * Attempt to reduce the free list by the requested amount. 1269 */ 1270 static int 1271 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1272 { 1273 struct vnode *vp; 1274 struct mount *mp; 1275 int ocount; 1276 1277 mtx_assert(&vnode_list_mtx, MA_OWNED); 1278 if (count > max_vnlru_free) 1279 count = max_vnlru_free; 1280 ocount = count; 1281 vp = mvp; 1282 for (;;) { 1283 if (count == 0) { 1284 break; 1285 } 1286 vp = TAILQ_NEXT(vp, v_vnodelist); 1287 if (__predict_false(vp == NULL)) { 1288 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1289 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1290 break; 1291 } 1292 if (__predict_false(vp->v_type == VMARKER)) 1293 continue; 1294 if (vp->v_holdcnt > 0) 1295 continue; 1296 /* 1297 * Don't recycle if our vnode is from different type 1298 * of mount point. Note that mp is type-safe, the 1299 * check does not reach unmapped address even if 1300 * vnode is reclaimed. 
1301 */ 1302 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1303 mp->mnt_op != mnt_op) { 1304 continue; 1305 } 1306 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1307 continue; 1308 } 1309 if (!vhold_recycle_free(vp)) 1310 continue; 1311 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1312 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1313 mtx_unlock(&vnode_list_mtx); 1314 /* 1315 * FIXME: ignores the return value, meaning it may be nothing 1316 * got recycled but it claims otherwise to the caller. 1317 * 1318 * Originally the value started being ignored in 2005 with 1319 * 114a1006a8204aa156e1f9ad6476cdff89cada7f . 1320 * 1321 * Respecting the value can run into significant stalls if most 1322 * vnodes belong to one file system and it has writes 1323 * suspended. In presence of many threads and millions of 1324 * vnodes they keep contending on the vnode_list_mtx lock only 1325 * to find vnodes they can't recycle. 1326 * 1327 * The solution would be to pre-check if the vnode is likely to 1328 * be recycle-able, but it needs to happen with the 1329 * vnode_list_mtx lock held. This runs into a problem where 1330 * VOP_GETWRITEMOUNT (currently needed to find out about if 1331 * writes are frozen) can take locks which LOR against it. 1332 * 1333 * Check nullfs for one example (null_getwritemount). 1334 */ 1335 vtryrecycle(vp); 1336 count--; 1337 mtx_lock(&vnode_list_mtx); 1338 vp = mvp; 1339 } 1340 return (ocount - count); 1341 } 1342 1343 static int 1344 vnlru_free_locked(int count) 1345 { 1346 1347 mtx_assert(&vnode_list_mtx, MA_OWNED); 1348 return (vnlru_free_impl(count, NULL, vnode_list_free_marker)); 1349 } 1350 1351 void 1352 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1353 { 1354 1355 MPASS(mnt_op != NULL); 1356 MPASS(mvp != NULL); 1357 VNPASS(mvp->v_type == VMARKER, mvp); 1358 mtx_lock(&vnode_list_mtx); 1359 vnlru_free_impl(count, mnt_op, mvp); 1360 mtx_unlock(&vnode_list_mtx); 1361 } 1362 1363 struct vnode * 1364 vnlru_alloc_marker(void) 1365 { 1366 struct vnode *mvp; 1367 1368 mvp = vn_alloc_marker(NULL); 1369 mtx_lock(&vnode_list_mtx); 1370 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1371 mtx_unlock(&vnode_list_mtx); 1372 return (mvp); 1373 } 1374 1375 void 1376 vnlru_free_marker(struct vnode *mvp) 1377 { 1378 mtx_lock(&vnode_list_mtx); 1379 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1380 mtx_unlock(&vnode_list_mtx); 1381 vn_free_marker(mvp); 1382 } 1383 1384 static void 1385 vnlru_recalc(void) 1386 { 1387 1388 mtx_assert(&vnode_list_mtx, MA_OWNED); 1389 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1390 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1391 vlowat = vhiwat / 2; 1392 } 1393 1394 /* 1395 * Attempt to recycle vnodes in a context that is always safe to block. 1396 * Calling vlrurecycle() from the bowels of filesystem code has some 1397 * interesting deadlock problems. 1398 */ 1399 static struct proc *vnlruproc; 1400 static int vnlruproc_sig; 1401 1402 /* 1403 * The main freevnodes counter is only updated when threads requeue their vnode 1404 * batches. CPUs are conditionally walked to compute a more accurate total. 1405 * 1406 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1407 * at any given moment can still exceed slop, but it should not be by significant 1408 * margin in practice. 
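 *
 * In other words, vnlru_read_freevnodes() below trusts the global counter as
 * long as it has moved by less than VNLRU_FREEVNODES_SLOP since the last
 * full read; once it has drifted by that much or more, every CPU is walked
 * and the per-CPU deltas maintained by vfs_freevnodes_inc()/_dec() are
 * folded back in.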
1409 */ 1410 #define VNLRU_FREEVNODES_SLOP 128 1411 1412 static __inline void 1413 vfs_freevnodes_inc(void) 1414 { 1415 struct vdbatch *vd; 1416 1417 critical_enter(); 1418 vd = DPCPU_PTR(vd); 1419 vd->freevnodes++; 1420 critical_exit(); 1421 } 1422 1423 static __inline void 1424 vfs_freevnodes_dec(void) 1425 { 1426 struct vdbatch *vd; 1427 1428 critical_enter(); 1429 vd = DPCPU_PTR(vd); 1430 vd->freevnodes--; 1431 critical_exit(); 1432 } 1433 1434 static u_long 1435 vnlru_read_freevnodes(void) 1436 { 1437 struct vdbatch *vd; 1438 long slop; 1439 int cpu; 1440 1441 mtx_assert(&vnode_list_mtx, MA_OWNED); 1442 if (freevnodes > freevnodes_old) 1443 slop = freevnodes - freevnodes_old; 1444 else 1445 slop = freevnodes_old - freevnodes; 1446 if (slop < VNLRU_FREEVNODES_SLOP) 1447 return (freevnodes >= 0 ? freevnodes : 0); 1448 freevnodes_old = freevnodes; 1449 CPU_FOREACH(cpu) { 1450 vd = DPCPU_ID_PTR((cpu), vd); 1451 freevnodes_old += vd->freevnodes; 1452 } 1453 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1454 } 1455 1456 static bool 1457 vnlru_under(u_long rnumvnodes, u_long limit) 1458 { 1459 u_long rfreevnodes, space; 1460 1461 if (__predict_false(rnumvnodes > desiredvnodes)) 1462 return (true); 1463 1464 space = desiredvnodes - rnumvnodes; 1465 if (space < limit) { 1466 rfreevnodes = vnlru_read_freevnodes(); 1467 if (rfreevnodes > wantfreevnodes) 1468 space += rfreevnodes - wantfreevnodes; 1469 } 1470 return (space < limit); 1471 } 1472 1473 static bool 1474 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1475 { 1476 long rfreevnodes, space; 1477 1478 if (__predict_false(rnumvnodes > desiredvnodes)) 1479 return (true); 1480 1481 space = desiredvnodes - rnumvnodes; 1482 if (space < limit) { 1483 rfreevnodes = atomic_load_long(&freevnodes); 1484 if (rfreevnodes > wantfreevnodes) 1485 space += rfreevnodes - wantfreevnodes; 1486 } 1487 return (space < limit); 1488 } 1489 1490 static void 1491 vnlru_kick(void) 1492 { 1493 1494 mtx_assert(&vnode_list_mtx, MA_OWNED); 1495 if (vnlruproc_sig == 0) { 1496 vnlruproc_sig = 1; 1497 wakeup(vnlruproc); 1498 } 1499 } 1500 1501 static void 1502 vnlru_proc(void) 1503 { 1504 u_long rnumvnodes, rfreevnodes, target; 1505 unsigned long onumvnodes; 1506 int done, force, trigger, usevnodes; 1507 bool reclaim_nc_src, want_reread; 1508 1509 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1510 SHUTDOWN_PRI_FIRST); 1511 1512 force = 0; 1513 want_reread = false; 1514 for (;;) { 1515 kproc_suspend_check(vnlruproc); 1516 mtx_lock(&vnode_list_mtx); 1517 rnumvnodes = atomic_load_long(&numvnodes); 1518 1519 if (want_reread) { 1520 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1521 want_reread = false; 1522 } 1523 1524 /* 1525 * If numvnodes is too large (due to desiredvnodes being 1526 * adjusted using its sysctl, or emergency growth), first 1527 * try to reduce it by discarding from the free list. 1528 */ 1529 if (rnumvnodes > desiredvnodes) { 1530 vnlru_free_locked(rnumvnodes - desiredvnodes); 1531 rnumvnodes = atomic_load_long(&numvnodes); 1532 } 1533 /* 1534 * Sleep if the vnode cache is in a good state. This is 1535 * when it is not over-full and has space for about a 4% 1536 * or 9% expansion (by growing its size or inexcessively 1537 * reducing its free list). Otherwise, try to reclaim 1538 * space for a 10% expansion. 
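 *
 * The "force" level below escalates when no progress is made: at 0 and 1
 * only vnodes with at most vsmalltrigger resident pages are considered, at 2
 * the much larger computed trigger is used instead, and at 3 directories
 * with outgoing namecache entries become eligible as well (reclaim_nc_src).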
1539 */ 1540 if (vstir && force == 0) { 1541 force = 1; 1542 vstir = 0; 1543 } 1544 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1545 vnlruproc_sig = 0; 1546 wakeup(&vnlruproc_sig); 1547 msleep(vnlruproc, &vnode_list_mtx, 1548 PVFS|PDROP, "vlruwt", hz); 1549 continue; 1550 } 1551 rfreevnodes = vnlru_read_freevnodes(); 1552 1553 onumvnodes = rnumvnodes; 1554 /* 1555 * Calculate parameters for recycling. These are the same 1556 * throughout the loop to give some semblance of fairness. 1557 * The trigger point is to avoid recycling vnodes with lots 1558 * of resident pages. We aren't trying to free memory; we 1559 * are trying to recycle or at least free vnodes. 1560 */ 1561 if (rnumvnodes <= desiredvnodes) 1562 usevnodes = rnumvnodes - rfreevnodes; 1563 else 1564 usevnodes = rnumvnodes; 1565 if (usevnodes <= 0) 1566 usevnodes = 1; 1567 /* 1568 * The trigger value is chosen to give a conservatively 1569 * large value to ensure that it alone doesn't prevent 1570 * making progress. The value can easily be so large that 1571 * it is effectively infinite in some congested and 1572 * misconfigured cases, and this is necessary. Normally 1573 * it is about 8 to 100 (pages), which is quite large. 1574 */ 1575 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1576 if (force < 2) 1577 trigger = vsmalltrigger; 1578 reclaim_nc_src = force >= 3; 1579 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1580 target = target / 10 + 1; 1581 done = vlrureclaim(reclaim_nc_src, trigger, target); 1582 mtx_unlock(&vnode_list_mtx); 1583 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1584 uma_reclaim(UMA_RECLAIM_DRAIN); 1585 if (done == 0) { 1586 if (force == 0 || force == 1) { 1587 force = 2; 1588 continue; 1589 } 1590 if (force == 2) { 1591 force = 3; 1592 continue; 1593 } 1594 want_reread = true; 1595 force = 0; 1596 vnlru_nowhere++; 1597 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1598 } else { 1599 want_reread = true; 1600 kern_yield(PRI_USER); 1601 } 1602 } 1603 } 1604 1605 static struct kproc_desc vnlru_kp = { 1606 "vnlru", 1607 vnlru_proc, 1608 &vnlruproc 1609 }; 1610 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1611 &vnlru_kp); 1612 1613 /* 1614 * Routines having to do with the management of the vnode table. 1615 */ 1616 1617 /* 1618 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1619 * before we actually vgone(). This function must be called with the vnode 1620 * held to prevent the vnode from being returned to the free list midway 1621 * through vgone(). 1622 */ 1623 static int 1624 vtryrecycle(struct vnode *vp) 1625 { 1626 struct mount *vnmp; 1627 1628 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1629 VNASSERT(vp->v_holdcnt, vp, 1630 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1631 /* 1632 * This vnode may be found and locked via some other list; if so we 1633 * can't recycle it yet. 1634 */ 1635 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1636 CTR2(KTR_VFS, 1637 "%s: impossible to recycle, vp %p lock is already held", 1638 __func__, vp); 1639 vdrop_recycle(vp); 1640 return (EWOULDBLOCK); 1641 } 1642 /* 1643 * Don't recycle if its filesystem is being suspended.
1644 */ 1645 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1646 VOP_UNLOCK(vp); 1647 CTR2(KTR_VFS, 1648 "%s: impossible to recycle, cannot start the write for %p", 1649 __func__, vp); 1650 vdrop_recycle(vp); 1651 return (EBUSY); 1652 } 1653 /* 1654 * If we got this far, we need to acquire the interlock and see if 1655 * anyone picked up this vnode from another list. If not, we will 1656 * mark it with DOOMED via vgonel() so that anyone who does find it 1657 * will skip over it. 1658 */ 1659 VI_LOCK(vp); 1660 if (vp->v_usecount) { 1661 VOP_UNLOCK(vp); 1662 vdropl_recycle(vp); 1663 vn_finished_write(vnmp); 1664 CTR2(KTR_VFS, 1665 "%s: impossible to recycle, %p is already referenced", 1666 __func__, vp); 1667 return (EBUSY); 1668 } 1669 if (!VN_IS_DOOMED(vp)) { 1670 counter_u64_add(recycles_free_count, 1); 1671 vgonel(vp); 1672 } 1673 VOP_UNLOCK(vp); 1674 vdropl_recycle(vp); 1675 vn_finished_write(vnmp); 1676 return (0); 1677 } 1678 1679 /* 1680 * Allocate a new vnode. 1681 * 1682 * The operation never returns an error. Returning an error was disabled 1683 * in r145385 (dated 2005) with the following comment: 1684 * 1685 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1686 * 1687 * Given the age of this commit (almost 15 years at the time of writing this 1688 * comment) restoring the ability to fail requires a significant audit of 1689 * all codepaths. 1690 * 1691 * The routine can try to free a vnode or stall for up to 1 second waiting for 1692 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1693 */ 1694 static u_long vn_alloc_cyclecount; 1695 1696 static struct vnode * __noinline 1697 vn_alloc_hard(struct mount *mp) 1698 { 1699 u_long rnumvnodes, rfreevnodes; 1700 1701 mtx_lock(&vnode_list_mtx); 1702 rnumvnodes = atomic_load_long(&numvnodes); 1703 if (rnumvnodes + 1 < desiredvnodes) { 1704 vn_alloc_cyclecount = 0; 1705 goto alloc; 1706 } 1707 rfreevnodes = vnlru_read_freevnodes(); 1708 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1709 vn_alloc_cyclecount = 0; 1710 vstir = 1; 1711 } 1712 /* 1713 * Grow the vnode cache if it will not be above its target max 1714 * after growing. Otherwise, if the free list is nonempty, try 1715 * to reclaim 1 item from it before growing the cache (possibly 1716 * above its target max if the reclamation failed or is delayed). 1717 * Otherwise, wait for some space. In all cases, schedule 1718 * vnlru_proc() if we are getting short of space. The watermarks 1719 * should be chosen so that we never wait or even reclaim from 1720 * the free list to below its target minimum. 1721 */ 1722 if (vnlru_free_locked(1) > 0) 1723 goto alloc; 1724 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1725 /* 1726 * Wait for space for a new vnode. 
1727 */ 1728 vnlru_kick(); 1729 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1730 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1731 vnlru_read_freevnodes() > 1) 1732 vnlru_free_locked(1); 1733 } 1734 alloc: 1735 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1736 if (vnlru_under(rnumvnodes, vlowat)) 1737 vnlru_kick(); 1738 mtx_unlock(&vnode_list_mtx); 1739 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1740 } 1741 1742 static struct vnode * 1743 vn_alloc(struct mount *mp) 1744 { 1745 u_long rnumvnodes; 1746 1747 if (__predict_false(vn_alloc_cyclecount != 0)) 1748 return (vn_alloc_hard(mp)); 1749 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1750 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1751 atomic_subtract_long(&numvnodes, 1); 1752 return (vn_alloc_hard(mp)); 1753 } 1754 1755 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1756 } 1757 1758 static void 1759 vn_free(struct vnode *vp) 1760 { 1761 1762 atomic_subtract_long(&numvnodes, 1); 1763 uma_zfree_smr(vnode_zone, vp); 1764 } 1765 1766 /* 1767 * Return the next vnode from the free list. 1768 */ 1769 int 1770 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1771 struct vnode **vpp) 1772 { 1773 struct vnode *vp; 1774 struct thread *td; 1775 struct lock_object *lo; 1776 1777 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1778 1779 KASSERT(vops->registered, 1780 ("%s: not registered vector op %p\n", __func__, vops)); 1781 1782 td = curthread; 1783 if (td->td_vp_reserved != NULL) { 1784 vp = td->td_vp_reserved; 1785 td->td_vp_reserved = NULL; 1786 } else { 1787 vp = vn_alloc(mp); 1788 } 1789 counter_u64_add(vnodes_created, 1); 1790 /* 1791 * Locks are given the generic name "vnode" when created. 1792 * Follow the historic practice of using the filesystem 1793 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1794 * 1795 * Locks live in a witness group keyed on their name. Thus, 1796 * when a lock is renamed, it must also move from the witness 1797 * group of its old name to the witness group of its new name. 1798 * 1799 * The change only needs to be made when the vnode moves 1800 * from one filesystem type to another. We ensure that each 1801 * filesystem use a single static name pointer for its tag so 1802 * that we can compare pointers rather than doing a strcmp(). 1803 */ 1804 lo = &vp->v_vnlock->lock_object; 1805 #ifdef WITNESS 1806 if (lo->lo_name != tag) { 1807 #endif 1808 lo->lo_name = tag; 1809 #ifdef WITNESS 1810 WITNESS_DESTROY(lo); 1811 WITNESS_INIT(lo, tag); 1812 } 1813 #endif 1814 /* 1815 * By default, don't allow shared locks unless filesystems opt-in. 1816 */ 1817 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1818 /* 1819 * Finalize various vnode identity bits. 
1820 */ 1821 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1822 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1823 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1824 vp->v_type = VNON; 1825 vp->v_op = vops; 1826 vp->v_irflag = 0; 1827 v_init_counters(vp); 1828 vn_seqc_init(vp); 1829 vp->v_bufobj.bo_ops = &buf_ops_bio; 1830 #ifdef DIAGNOSTIC 1831 if (mp == NULL && vops != &dead_vnodeops) 1832 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1833 #endif 1834 #ifdef MAC 1835 mac_vnode_init(vp); 1836 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1837 mac_vnode_associate_singlelabel(mp, vp); 1838 #endif 1839 if (mp != NULL) { 1840 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1841 } 1842 1843 /* 1844 * For the filesystems which do not use vfs_hash_insert(), 1845 * still initialize v_hash to have vfs_hash_index() useful. 1846 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1847 * its own hashing. 1848 */ 1849 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1850 1851 *vpp = vp; 1852 return (0); 1853 } 1854 1855 void 1856 getnewvnode_reserve(void) 1857 { 1858 struct thread *td; 1859 1860 td = curthread; 1861 MPASS(td->td_vp_reserved == NULL); 1862 td->td_vp_reserved = vn_alloc(NULL); 1863 } 1864 1865 void 1866 getnewvnode_drop_reserve(void) 1867 { 1868 struct thread *td; 1869 1870 td = curthread; 1871 if (td->td_vp_reserved != NULL) { 1872 vn_free(td->td_vp_reserved); 1873 td->td_vp_reserved = NULL; 1874 } 1875 } 1876 1877 static void __noinline 1878 freevnode(struct vnode *vp) 1879 { 1880 struct bufobj *bo; 1881 1882 /* 1883 * The vnode has been marked for destruction, so free it. 1884 * 1885 * The vnode will be returned to the zone where it will 1886 * normally remain until it is needed for another vnode. We 1887 * need to cleanup (or verify that the cleanup has already 1888 * been done) any residual data left from its current use 1889 * so as not to contaminate the freshly allocated vnode. 1890 */ 1891 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1892 /* 1893 * Paired with vgone. 1894 */ 1895 vn_seqc_write_end_free(vp); 1896 1897 bo = &vp->v_bufobj; 1898 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1899 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1900 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1901 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1902 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1903 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1904 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1905 ("clean blk trie not empty")); 1906 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1907 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1908 ("dirty blk trie not empty")); 1909 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1910 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1911 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1912 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1913 ("Dangling rangelock waiters")); 1914 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1915 ("Leaked inactivation")); 1916 VI_UNLOCK(vp); 1917 #ifdef MAC 1918 mac_vnode_destroy(vp); 1919 #endif 1920 if (vp->v_pollinfo != NULL) { 1921 /* 1922 * Use LK_NOWAIT to shut up witness about the lock. We may get 1923 * here while having another vnode locked when trying to 1924 * satisfy a lookup and needing to recycle. 
1925 */ 1926 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 1927 destroy_vpollinfo(vp->v_pollinfo); 1928 VOP_UNLOCK(vp); 1929 vp->v_pollinfo = NULL; 1930 } 1931 vp->v_mountedhere = NULL; 1932 vp->v_unpcb = NULL; 1933 vp->v_rdev = NULL; 1934 vp->v_fifoinfo = NULL; 1935 vp->v_iflag = 0; 1936 vp->v_vflag = 0; 1937 bo->bo_flag = 0; 1938 vn_free(vp); 1939 } 1940 1941 /* 1942 * Delete from old mount point vnode list, if on one. 1943 */ 1944 static void 1945 delmntque(struct vnode *vp) 1946 { 1947 struct mount *mp; 1948 1949 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1950 1951 mp = vp->v_mount; 1952 MNT_ILOCK(mp); 1953 VI_LOCK(vp); 1954 vp->v_mount = NULL; 1955 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1956 ("bad mount point vnode list size")); 1957 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1958 mp->mnt_nvnodelistsize--; 1959 MNT_REL(mp); 1960 MNT_IUNLOCK(mp); 1961 /* 1962 * The caller expects the interlock to be still held. 1963 */ 1964 ASSERT_VI_LOCKED(vp, __func__); 1965 } 1966 1967 static int 1968 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 1969 { 1970 1971 KASSERT(vp->v_mount == NULL, 1972 ("insmntque: vnode already on per mount vnode list")); 1973 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1974 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 1975 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1976 } else { 1977 KASSERT(!dtr, 1978 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 1979 __func__)); 1980 } 1981 1982 /* 1983 * We acquire the vnode interlock early to ensure that the 1984 * vnode cannot be recycled by another process releasing a 1985 * holdcnt on it before we get it on both the vnode list 1986 * and the active vnode list. The mount mutex protects only 1987 * manipulation of the vnode list and the vnode freelist 1988 * mutex protects only manipulation of the active vnode list. 1989 * Hence the need to hold the vnode interlock throughout. 1990 */ 1991 MNT_ILOCK(mp); 1992 VI_LOCK(vp); 1993 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1994 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1995 mp->mnt_nvnodelistsize == 0)) && 1996 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1997 VI_UNLOCK(vp); 1998 MNT_IUNLOCK(mp); 1999 if (dtr) { 2000 vp->v_data = NULL; 2001 vp->v_op = &dead_vnodeops; 2002 vgone(vp); 2003 vput(vp); 2004 } 2005 return (EBUSY); 2006 } 2007 vp->v_mount = mp; 2008 MNT_REF(mp); 2009 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2010 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2011 ("neg mount point vnode list size")); 2012 mp->mnt_nvnodelistsize++; 2013 VI_UNLOCK(vp); 2014 MNT_IUNLOCK(mp); 2015 return (0); 2016 } 2017 2018 /* 2019 * Insert into list of vnodes for the new mount point, if available. 2020 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2021 * leaves handling of the vnode to the caller. 2022 */ 2023 int 2024 insmntque(struct vnode *vp, struct mount *mp) 2025 { 2026 return (insmntque1_int(vp, mp, true)); 2027 } 2028 2029 int 2030 insmntque1(struct vnode *vp, struct mount *mp) 2031 { 2032 return (insmntque1_int(vp, mp, false)); 2033 } 2034 2035 /* 2036 * Flush out and invalidate all buffers associated with a bufobj 2037 * Called with the underlying object locked. 
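 *
 * The flags are shared with vinvalbuf().  Roughly: V_SAVE synchronously
 * writes out dirty buffers first and fails with EBUSY if new dirty data
 * or output appears afterwards, V_CLEANONLY leaves the dirty list
 * alone, and V_VMIO skips the pass over the backing VM object.  A
 * caller that wants all data preserved typically does, for instance,
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * as vgonel() does below before reclaiming a vnode.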
2038 */ 2039 int 2040 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2041 { 2042 int error; 2043 2044 BO_LOCK(bo); 2045 if (flags & V_SAVE) { 2046 error = bufobj_wwait(bo, slpflag, slptimeo); 2047 if (error) { 2048 BO_UNLOCK(bo); 2049 return (error); 2050 } 2051 if (bo->bo_dirty.bv_cnt > 0) { 2052 BO_UNLOCK(bo); 2053 do { 2054 error = BO_SYNC(bo, MNT_WAIT); 2055 } while (error == ERELOOKUP); 2056 if (error != 0) 2057 return (error); 2058 BO_LOCK(bo); 2059 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2060 BO_UNLOCK(bo); 2061 return (EBUSY); 2062 } 2063 } 2064 } 2065 /* 2066 * If you alter this loop please notice that interlock is dropped and 2067 * reacquired in flushbuflist. Special care is needed to ensure that 2068 * no race conditions occur from this. 2069 */ 2070 do { 2071 error = flushbuflist(&bo->bo_clean, 2072 flags, bo, slpflag, slptimeo); 2073 if (error == 0 && !(flags & V_CLEANONLY)) 2074 error = flushbuflist(&bo->bo_dirty, 2075 flags, bo, slpflag, slptimeo); 2076 if (error != 0 && error != EAGAIN) { 2077 BO_UNLOCK(bo); 2078 return (error); 2079 } 2080 } while (error != 0); 2081 2082 /* 2083 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2084 * have write I/O in-progress but if there is a VM object then the 2085 * VM object can also have read-I/O in-progress. 2086 */ 2087 do { 2088 bufobj_wwait(bo, 0, 0); 2089 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2090 BO_UNLOCK(bo); 2091 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2092 BO_LOCK(bo); 2093 } 2094 } while (bo->bo_numoutput > 0); 2095 BO_UNLOCK(bo); 2096 2097 /* 2098 * Destroy the copy in the VM cache, too. 2099 */ 2100 if (bo->bo_object != NULL && 2101 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2102 VM_OBJECT_WLOCK(bo->bo_object); 2103 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2104 OBJPR_CLEANONLY : 0); 2105 VM_OBJECT_WUNLOCK(bo->bo_object); 2106 } 2107 2108 #ifdef INVARIANTS 2109 BO_LOCK(bo); 2110 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2111 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2112 bo->bo_clean.bv_cnt > 0)) 2113 panic("vinvalbuf: flush failed"); 2114 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2115 bo->bo_dirty.bv_cnt > 0) 2116 panic("vinvalbuf: flush dirty failed"); 2117 BO_UNLOCK(bo); 2118 #endif 2119 return (0); 2120 } 2121 2122 /* 2123 * Flush out and invalidate all buffers associated with a vnode. 2124 * Called with the underlying object locked. 2125 */ 2126 int 2127 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2128 { 2129 2130 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2131 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2132 if (vp->v_object != NULL && vp->v_object->handle != vp) 2133 return (0); 2134 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2135 } 2136 2137 /* 2138 * Flush out buffers on the specified list. 2139 * 2140 */ 2141 static int 2142 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2143 int slptimeo) 2144 { 2145 struct buf *bp, *nbp; 2146 int retval, error; 2147 daddr_t lblkno; 2148 b_xflags_t xflags; 2149 2150 ASSERT_BO_WLOCKED(bo); 2151 2152 retval = 0; 2153 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2154 /* 2155 * If we are flushing both V_NORMAL and V_ALT buffers then 2156 * do not skip any buffers. If we are flushing only V_NORMAL 2157 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2158 * flushing only V_ALT buffers then skip buffers not marked 2159 * as BX_ALTDATA. 2160 */ 2161 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2162 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2163 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2164 continue; 2165 } 2166 if (nbp != NULL) { 2167 lblkno = nbp->b_lblkno; 2168 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2169 } 2170 retval = EAGAIN; 2171 error = BUF_TIMELOCK(bp, 2172 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2173 "flushbuf", slpflag, slptimeo); 2174 if (error) { 2175 BO_LOCK(bo); 2176 return (error != ENOLCK ? error : EAGAIN); 2177 } 2178 KASSERT(bp->b_bufobj == bo, 2179 ("bp %p wrong b_bufobj %p should be %p", 2180 bp, bp->b_bufobj, bo)); 2181 /* 2182 * XXX Since there are no node locks for NFS, I 2183 * believe there is a slight chance that a delayed 2184 * write will occur while sleeping just above, so 2185 * check for it. 2186 */ 2187 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2188 (flags & V_SAVE)) { 2189 bremfree(bp); 2190 bp->b_flags |= B_ASYNC; 2191 bwrite(bp); 2192 BO_LOCK(bo); 2193 return (EAGAIN); /* XXX: why not loop ? */ 2194 } 2195 bremfree(bp); 2196 bp->b_flags |= (B_INVAL | B_RELBUF); 2197 bp->b_flags &= ~B_ASYNC; 2198 brelse(bp); 2199 BO_LOCK(bo); 2200 if (nbp == NULL) 2201 break; 2202 nbp = gbincore(bo, lblkno); 2203 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2204 != xflags) 2205 break; /* nbp invalid */ 2206 } 2207 return (retval); 2208 } 2209 2210 int 2211 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2212 { 2213 struct buf *bp; 2214 int error; 2215 daddr_t lblkno; 2216 2217 ASSERT_BO_LOCKED(bo); 2218 2219 for (lblkno = startn;;) { 2220 again: 2221 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2222 if (bp == NULL || bp->b_lblkno >= endn || 2223 bp->b_lblkno < startn) 2224 break; 2225 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2226 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2227 if (error != 0) { 2228 BO_RLOCK(bo); 2229 if (error == ENOLCK) 2230 goto again; 2231 return (error); 2232 } 2233 KASSERT(bp->b_bufobj == bo, 2234 ("bp %p wrong b_bufobj %p should be %p", 2235 bp, bp->b_bufobj, bo)); 2236 lblkno = bp->b_lblkno + 1; 2237 if ((bp->b_flags & B_MANAGED) == 0) 2238 bremfree(bp); 2239 bp->b_flags |= B_RELBUF; 2240 /* 2241 * In the VMIO case, use the B_NOREUSE flag to hint that the 2242 * pages backing each buffer in the range are unlikely to be 2243 * reused. Dirty buffers will have the hint applied once 2244 * they've been written. 2245 */ 2246 if ((bp->b_flags & B_VMIO) != 0) 2247 bp->b_flags |= B_NOREUSE; 2248 brelse(bp); 2249 BO_RLOCK(bo); 2250 } 2251 return (0); 2252 } 2253 2254 /* 2255 * Truncate a file's buffer and pages to a specified length. This 2256 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2257 * sync activity. 2258 */ 2259 int 2260 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2261 { 2262 struct buf *bp, *nbp; 2263 struct bufobj *bo; 2264 daddr_t startlbn; 2265 2266 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2267 vp, blksize, (uintmax_t)length); 2268 2269 /* 2270 * Round up to the *next* lbn. 
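 *
 * For example, with blksize 16384 and length 20000 this gives
 * startlbn = howmany(20000, 16384) = 2: block 1 still holds valid
 * data (bytes 16384-19999) and is kept, while invalidation starts
 * at block 2.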
2271 */ 2272 startlbn = howmany(length, blksize); 2273 2274 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2275 2276 bo = &vp->v_bufobj; 2277 restart_unlocked: 2278 BO_LOCK(bo); 2279 2280 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2281 ; 2282 2283 if (length > 0) { 2284 restartsync: 2285 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2286 if (bp->b_lblkno > 0) 2287 continue; 2288 /* 2289 * Since we hold the vnode lock this should only 2290 * fail if we're racing with the buf daemon. 2291 */ 2292 if (BUF_LOCK(bp, 2293 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2294 BO_LOCKPTR(bo)) == ENOLCK) 2295 goto restart_unlocked; 2296 2297 VNASSERT((bp->b_flags & B_DELWRI), vp, 2298 ("buf(%p) on dirty queue without DELWRI", bp)); 2299 2300 bremfree(bp); 2301 bawrite(bp); 2302 BO_LOCK(bo); 2303 goto restartsync; 2304 } 2305 } 2306 2307 bufobj_wwait(bo, 0, 0); 2308 BO_UNLOCK(bo); 2309 vnode_pager_setsize(vp, length); 2310 2311 return (0); 2312 } 2313 2314 /* 2315 * Invalidate the cached pages of a file's buffer within the range of block 2316 * numbers [startlbn, endlbn). 2317 */ 2318 void 2319 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2320 int blksize) 2321 { 2322 struct bufobj *bo; 2323 off_t start, end; 2324 2325 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2326 2327 start = blksize * startlbn; 2328 end = blksize * endlbn; 2329 2330 bo = &vp->v_bufobj; 2331 BO_LOCK(bo); 2332 MPASS(blksize == bo->bo_bsize); 2333 2334 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2335 ; 2336 2337 BO_UNLOCK(bo); 2338 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2339 } 2340 2341 static int 2342 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2343 daddr_t startlbn, daddr_t endlbn) 2344 { 2345 struct buf *bp, *nbp; 2346 bool anyfreed; 2347 2348 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2349 ASSERT_BO_LOCKED(bo); 2350 2351 do { 2352 anyfreed = false; 2353 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2354 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2355 continue; 2356 if (BUF_LOCK(bp, 2357 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2358 BO_LOCKPTR(bo)) == ENOLCK) { 2359 BO_LOCK(bo); 2360 return (EAGAIN); 2361 } 2362 2363 bremfree(bp); 2364 bp->b_flags |= B_INVAL | B_RELBUF; 2365 bp->b_flags &= ~B_ASYNC; 2366 brelse(bp); 2367 anyfreed = true; 2368 2369 BO_LOCK(bo); 2370 if (nbp != NULL && 2371 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2372 nbp->b_vp != vp || 2373 (nbp->b_flags & B_DELWRI) != 0)) 2374 return (EAGAIN); 2375 } 2376 2377 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2378 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2379 continue; 2380 if (BUF_LOCK(bp, 2381 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2382 BO_LOCKPTR(bo)) == ENOLCK) { 2383 BO_LOCK(bo); 2384 return (EAGAIN); 2385 } 2386 bremfree(bp); 2387 bp->b_flags |= B_INVAL | B_RELBUF; 2388 bp->b_flags &= ~B_ASYNC; 2389 brelse(bp); 2390 anyfreed = true; 2391 2392 BO_LOCK(bo); 2393 if (nbp != NULL && 2394 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2395 (nbp->b_vp != vp) || 2396 (nbp->b_flags & B_DELWRI) == 0)) 2397 return (EAGAIN); 2398 } 2399 } while (anyfreed); 2400 return (0); 2401 } 2402 2403 static void 2404 buf_vlist_remove(struct buf *bp) 2405 { 2406 struct bufv *bv; 2407 b_xflags_t flags; 2408 2409 flags = bp->b_xflags; 2410 2411 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2412 ASSERT_BO_WLOCKED(bp->b_bufobj); 2413 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2414 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2415 ("%s: buffer %p has invalid queue state", __func__, bp)); 2416 2417 if ((flags & BX_VNDIRTY) != 0) 2418 bv = &bp->b_bufobj->bo_dirty; 2419 else 2420 bv = &bp->b_bufobj->bo_clean; 2421 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2422 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2423 bv->bv_cnt--; 2424 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2425 } 2426 2427 /* 2428 * Add the buffer to the sorted clean or dirty block list. 2429 * 2430 * NOTE: xflags is passed as a constant, optimizing this inline function! 2431 */ 2432 static void 2433 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2434 { 2435 struct bufv *bv; 2436 struct buf *n; 2437 int error; 2438 2439 ASSERT_BO_WLOCKED(bo); 2440 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2441 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2442 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2443 ("dead bo %p", bo)); 2444 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2445 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2446 bp->b_xflags |= xflags; 2447 if (xflags & BX_VNDIRTY) 2448 bv = &bo->bo_dirty; 2449 else 2450 bv = &bo->bo_clean; 2451 2452 /* 2453 * Keep the list ordered. Optimize empty list insertion. Assume 2454 * we tend to grow at the tail so lookup_le should usually be cheaper 2455 * than _ge. 2456 */ 2457 if (bv->bv_cnt == 0 || 2458 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2459 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2460 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2461 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2462 else 2463 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2464 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2465 if (error) 2466 panic("buf_vlist_add: Preallocated nodes insufficient."); 2467 bv->bv_cnt++; 2468 } 2469 2470 /* 2471 * Look up a buffer using the buffer tries. 2472 */ 2473 struct buf * 2474 gbincore(struct bufobj *bo, daddr_t lblkno) 2475 { 2476 struct buf *bp; 2477 2478 ASSERT_BO_LOCKED(bo); 2479 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2480 if (bp != NULL) 2481 return (bp); 2482 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2483 } 2484 2485 /* 2486 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2487 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2488 * stability of the result. Like other lockless lookups, the found buf may 2489 * already be invalid by the time this function returns. 2490 */ 2491 struct buf * 2492 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2493 { 2494 struct buf *bp; 2495 2496 ASSERT_BO_UNLOCKED(bo); 2497 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2498 if (bp != NULL) 2499 return (bp); 2500 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2501 } 2502 2503 /* 2504 * Associate a buffer with a vnode. 2505 */ 2506 void 2507 bgetvp(struct vnode *vp, struct buf *bp) 2508 { 2509 struct bufobj *bo; 2510 2511 bo = &vp->v_bufobj; 2512 ASSERT_BO_WLOCKED(bo); 2513 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2514 2515 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2516 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2517 ("bgetvp: bp already attached! %p", bp)); 2518 2519 vhold(vp); 2520 bp->b_vp = vp; 2521 bp->b_bufobj = bo; 2522 /* 2523 * Insert onto list for new vnode. 
2524 */ 2525 buf_vlist_add(bp, bo, BX_VNCLEAN); 2526 } 2527 2528 /* 2529 * Disassociate a buffer from a vnode. 2530 */ 2531 void 2532 brelvp(struct buf *bp) 2533 { 2534 struct bufobj *bo; 2535 struct vnode *vp; 2536 2537 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2538 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2539 2540 /* 2541 * Delete from old vnode list, if on one. 2542 */ 2543 vp = bp->b_vp; /* XXX */ 2544 bo = bp->b_bufobj; 2545 BO_LOCK(bo); 2546 buf_vlist_remove(bp); 2547 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2548 bo->bo_flag &= ~BO_ONWORKLST; 2549 mtx_lock(&sync_mtx); 2550 LIST_REMOVE(bo, bo_synclist); 2551 syncer_worklist_len--; 2552 mtx_unlock(&sync_mtx); 2553 } 2554 bp->b_vp = NULL; 2555 bp->b_bufobj = NULL; 2556 BO_UNLOCK(bo); 2557 vdrop(vp); 2558 } 2559 2560 /* 2561 * Add an item to the syncer work queue. 2562 */ 2563 static void 2564 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2565 { 2566 int slot; 2567 2568 ASSERT_BO_WLOCKED(bo); 2569 2570 mtx_lock(&sync_mtx); 2571 if (bo->bo_flag & BO_ONWORKLST) 2572 LIST_REMOVE(bo, bo_synclist); 2573 else { 2574 bo->bo_flag |= BO_ONWORKLST; 2575 syncer_worklist_len++; 2576 } 2577 2578 if (delay > syncer_maxdelay - 2) 2579 delay = syncer_maxdelay - 2; 2580 slot = (syncer_delayno + delay) & syncer_mask; 2581 2582 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2583 mtx_unlock(&sync_mtx); 2584 } 2585 2586 static int 2587 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2588 { 2589 int error, len; 2590 2591 mtx_lock(&sync_mtx); 2592 len = syncer_worklist_len - sync_vnode_count; 2593 mtx_unlock(&sync_mtx); 2594 error = SYSCTL_OUT(req, &len, sizeof(len)); 2595 return (error); 2596 } 2597 2598 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2599 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2600 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2601 2602 static struct proc *updateproc; 2603 static void sched_sync(void); 2604 static struct kproc_desc up_kp = { 2605 "syncer", 2606 sched_sync, 2607 &updateproc 2608 }; 2609 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2610 2611 static int 2612 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2613 { 2614 struct vnode *vp; 2615 struct mount *mp; 2616 2617 *bo = LIST_FIRST(slp); 2618 if (*bo == NULL) 2619 return (0); 2620 vp = bo2vnode(*bo); 2621 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2622 return (1); 2623 /* 2624 * We use vhold in case the vnode does not 2625 * successfully sync. vhold prevents the vnode from 2626 * going away when we unlock the sync_mtx so that 2627 * we can acquire the vnode interlock. 2628 */ 2629 vholdl(vp); 2630 mtx_unlock(&sync_mtx); 2631 VI_UNLOCK(vp); 2632 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2633 vdrop(vp); 2634 mtx_lock(&sync_mtx); 2635 return (*bo == LIST_FIRST(slp)); 2636 } 2637 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2638 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2639 VOP_UNLOCK(vp); 2640 vn_finished_write(mp); 2641 BO_LOCK(*bo); 2642 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2643 /* 2644 * Put us back on the worklist. The worklist 2645 * routine will remove us from our current 2646 * position and then add us back in at a later 2647 * position. 2648 */ 2649 vn_syncer_add_to_worklist(*bo, syncdelay); 2650 } 2651 BO_UNLOCK(*bo); 2652 vdrop(vp); 2653 mtx_lock(&sync_mtx); 2654 return (0); 2655 } 2656 2657 static int first_printf = 1; 2658 2659 /* 2660 * System filesystem synchronizer daemon. 
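 *
 * The work is organized as a wheel: syncer_workitem_pending[] is an
 * array of worklist slots and syncer_delayno advances through it about
 * once per second.  vn_syncer_add_to_worklist() places each dirty
 * bufobj "delay" slots ahead of the current position, so one full turn
 * of the wheel bounds how long a dirty vnode sits before being synced.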
2661 */
2662 static void
2663 sched_sync(void)
2664 {
2665 struct synclist *next, *slp;
2666 struct bufobj *bo;
2667 long starttime;
2668 struct thread *td = curthread;
2669 int last_work_seen;
2670 int net_worklist_len;
2671 int syncer_final_iter;
2672 int error;
2673
2674 last_work_seen = 0;
2675 syncer_final_iter = 0;
2676 syncer_state = SYNCER_RUNNING;
2677 starttime = time_uptime;
2678 td->td_pflags |= TDP_NORUNNINGBUF;
2679
2680 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2681 SHUTDOWN_PRI_LAST);
2682
2683 mtx_lock(&sync_mtx);
2684 for (;;) {
2685 if (syncer_state == SYNCER_FINAL_DELAY &&
2686 syncer_final_iter == 0) {
2687 mtx_unlock(&sync_mtx);
2688 kproc_suspend_check(td->td_proc);
2689 mtx_lock(&sync_mtx);
2690 }
2691 net_worklist_len = syncer_worklist_len - sync_vnode_count;
2692 if (syncer_state != SYNCER_RUNNING &&
2693 starttime != time_uptime) {
2694 if (first_printf) {
2695 printf("\nSyncing disks, vnodes remaining... ");
2696 first_printf = 0;
2697 }
2698 printf("%d ", net_worklist_len);
2699 }
2700 starttime = time_uptime;
2701
2702 /*
2703 * Push files whose dirty time has expired. Be careful
2704 * of interrupt race on slp queue.
2705 *
2706 * Skip over empty worklist slots when shutting down.
2707 */
2708 do {
2709 slp = &syncer_workitem_pending[syncer_delayno];
2710 syncer_delayno += 1;
2711 if (syncer_delayno == syncer_maxdelay)
2712 syncer_delayno = 0;
2713 next = &syncer_workitem_pending[syncer_delayno];
2714 /*
2715 * If the worklist has wrapped since
2716 * it was emptied of all but syncer vnodes,
2717 * switch to the FINAL_DELAY state and run
2718 * for one more second.
2719 */
2720 if (syncer_state == SYNCER_SHUTTING_DOWN &&
2721 net_worklist_len == 0 &&
2722 last_work_seen == syncer_delayno) {
2723 syncer_state = SYNCER_FINAL_DELAY;
2724 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2725 }
2726 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2727 syncer_worklist_len > 0);
2728
2729 /*
2730 * Keep track of the last time there was anything
2731 * on the worklist other than syncer vnodes.
2732 * Return to the SHUTTING_DOWN state if any
2733 * new work appears.
2734 */
2735 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2736 last_work_seen = syncer_delayno;
2737 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2738 syncer_state = SYNCER_SHUTTING_DOWN;
2739 while (!LIST_EMPTY(slp)) {
2740 error = sync_vnode(slp, &bo, td);
2741 if (error == 1) {
2742 LIST_REMOVE(bo, bo_synclist);
2743 LIST_INSERT_HEAD(next, bo, bo_synclist);
2744 continue;
2745 }
2746
2747 if (first_printf == 0) {
2748 /*
2749 * Drop the sync mutex, because some watchdog
2750 * drivers need to sleep while patting the watchdog.
2751 */
2752 mtx_unlock(&sync_mtx);
2753 wdog_kern_pat(WD_LASTVAL);
2754 mtx_lock(&sync_mtx);
2755 }
2756 }
2757 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2758 syncer_final_iter--;
2759 /*
2760 * The variable rushjob allows the kernel to speed up the
2761 * processing of the filesystem syncer process. A rushjob
2762 * value of N tells the filesystem syncer to process the next
2763 * N seconds worth of work on its queue ASAP. Currently rushjob
2764 * is used by the soft update code to speed up the filesystem
2765 * syncer process when the incore state is getting so far
2766 * ahead of the disk that the kernel memory pool is being
2767 * threatened with exhaustion.
2768 */
2769 if (rushjob > 0) {
2770 rushjob -= 1;
2771 continue;
2772 }
2773 /*
2774 * Just sleep for a short period of time between
2775 * iterations when shutting down to allow some I/O
2776 * to happen.
2777 *
2778 * If it has taken us less than a second to process the
2779 * current work, then wait. Otherwise start right over
2780 * again. We can still lose time if any single round
2781 * takes more than two seconds, but it does not really
2782 * matter as we are just trying to generally pace the
2783 * filesystem activity.
2784 */
2785 if (syncer_state != SYNCER_RUNNING ||
2786 time_uptime == starttime) {
2787 thread_lock(td);
2788 sched_prio(td, PPAUSE);
2789 thread_unlock(td);
2790 }
2791 if (syncer_state != SYNCER_RUNNING)
2792 cv_timedwait(&sync_wakeup, &sync_mtx,
2793 hz / SYNCER_SHUTDOWN_SPEEDUP);
2794 else if (time_uptime == starttime)
2795 cv_timedwait(&sync_wakeup, &sync_mtx, hz);
2796 }
2797 }
2798
2799 /*
2800 * Request the syncer daemon to speed up its work.
2801 * We never push it to speed up more than half of its
2802 * normal turn time; otherwise it could take over the cpu.
2803 */
2804 int
2805 speedup_syncer(void)
2806 {
2807 int ret = 0;
2808
2809 mtx_lock(&sync_mtx);
2810 if (rushjob < syncdelay / 2) {
2811 rushjob += 1;
2812 stat_rush_requests += 1;
2813 ret = 1;
2814 }
2815 mtx_unlock(&sync_mtx);
2816 cv_broadcast(&sync_wakeup);
2817 return (ret);
2818 }
2819
2820 /*
2821 * Tell the syncer to speed up its work and run through its work
2822 * list several times, then tell it to shut down.
2823 */
2824 static void
2825 syncer_shutdown(void *arg, int howto)
2826 {
2827
2828 if (howto & RB_NOSYNC)
2829 return;
2830 mtx_lock(&sync_mtx);
2831 syncer_state = SYNCER_SHUTTING_DOWN;
2832 rushjob = 0;
2833 mtx_unlock(&sync_mtx);
2834 cv_broadcast(&sync_wakeup);
2835 kproc_shutdown(arg, howto);
2836 }
2837
2838 void
2839 syncer_suspend(void)
2840 {
2841
2842 syncer_shutdown(updateproc, 0);
2843 }
2844
2845 void
2846 syncer_resume(void)
2847 {
2848
2849 mtx_lock(&sync_mtx);
2850 first_printf = 1;
2851 syncer_state = SYNCER_RUNNING;
2852 mtx_unlock(&sync_mtx);
2853 cv_broadcast(&sync_wakeup);
2854 kproc_resume(updateproc);
2855 }
2856
2857 /*
2858 * Move the buffer between the clean and dirty lists of its vnode.
2859 */
2860 void
2861 reassignbuf(struct buf *bp)
2862 {
2863 struct vnode *vp;
2864 struct bufobj *bo;
2865 int delay;
2866 #ifdef INVARIANTS
2867 struct bufv *bv;
2868 #endif
2869
2870 vp = bp->b_vp;
2871 bo = bp->b_bufobj;
2872
2873 KASSERT((bp->b_flags & B_PAGING) == 0,
2874 ("%s: cannot reassign paging buffer %p", __func__, bp));
2875
2876 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2877 bp, bp->b_vp, bp->b_flags);
2878
2879 BO_LOCK(bo);
2880 buf_vlist_remove(bp);
2881
2882 /*
2883 * If dirty, put on list of dirty buffers; otherwise insert onto list
2884 * of clean buffers.
2885 */
2886 if (bp->b_flags & B_DELWRI) {
2887 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
2888 switch (vp->v_type) {
2889 case VDIR:
2890 delay = dirdelay;
2891 break;
2892 case VCHR:
2893 delay = metadelay;
2894 break;
2895 default:
2896 delay = filedelay;
2897 }
2898 vn_syncer_add_to_worklist(bo, delay);
2899 }
2900 buf_vlist_add(bp, bo, BX_VNDIRTY);
2901 } else {
2902 buf_vlist_add(bp, bo, BX_VNCLEAN);
2903
2904 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2905 mtx_lock(&sync_mtx);
2906 LIST_REMOVE(bo, bo_synclist);
2907 syncer_worklist_len--;
2908 mtx_unlock(&sync_mtx);
2909 bo->bo_flag &= ~BO_ONWORKLST;
2910 }
2911 }
2912 #ifdef INVARIANTS
2913 bv = &bo->bo_clean;
2914 bp = TAILQ_FIRST(&bv->bv_hd);
2915 KASSERT(bp == NULL || bp->b_bufobj == bo,
2916 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2917 bp = TAILQ_LAST(&bv->bv_hd, buflists);
2918 KASSERT(bp == NULL || bp->b_bufobj == bo,
2919 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2920 bv = &bo->bo_dirty;
2921 bp = TAILQ_FIRST(&bv->bv_hd);
2922 KASSERT(bp == NULL || bp->b_bufobj == bo,
2923 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2924 bp = TAILQ_LAST(&bv->bv_hd, buflists);
2925 KASSERT(bp == NULL || bp->b_bufobj == bo,
2926 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2927 #endif
2928 BO_UNLOCK(bo);
2929 }
2930
2931 static void
2932 v_init_counters(struct vnode *vp)
2933 {
2934
2935 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
2936 vp, ("%s called for an initialized vnode", __FUNCTION__));
2937 ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
2938
2939 refcount_init(&vp->v_holdcnt, 1);
2940 refcount_init(&vp->v_usecount, 1);
2941 }
2942
2943 /*
2944 * Grab a particular vnode from the free list, increment its
2945 * reference count and lock it. VIRF_DOOMED is set if the vnode
2946 * is being destroyed. Only callers who specify LK_RETRY will
2947 * see doomed vnodes. If inactive processing was delayed in
2948 * vput, try to do it here.
2949 *
2950 * usecount is manipulated using atomics without holding any locks.
2951 *
2952 * holdcnt can be manipulated using atomics without holding any locks,
2953 * except when transitioning 1<->0, in which case the interlock is held.
2954 *
2955 * Consumers which don't guarantee liveness of the vnode can use SMR to
2956 * try to get a reference. Note this operation can fail since the vnode
2957 * may be about to be freed by the time they get to it.
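 *
 * A lockless consumer is expected to look roughly like the following
 * sketch (the lookup itself is whatever SMR-protected structure the
 * caller traverses, e.g. the name cache):
 *
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<retry the lookup or fall back to a locked one>;
 *	error = vget_finish(vp, lkflags, vs);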
2958 */
2959 enum vgetstate
2960 vget_prep_smr(struct vnode *vp)
2961 {
2962 enum vgetstate vs;
2963
2964 VFS_SMR_ASSERT_ENTERED();
2965
2966 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2967 vs = VGET_USECOUNT;
2968 } else {
2969 if (vhold_smr(vp))
2970 vs = VGET_HOLDCNT;
2971 else
2972 vs = VGET_NONE;
2973 }
2974 return (vs);
2975 }
2976
2977 enum vgetstate
2978 vget_prep(struct vnode *vp)
2979 {
2980 enum vgetstate vs;
2981
2982 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2983 vs = VGET_USECOUNT;
2984 } else {
2985 vhold(vp);
2986 vs = VGET_HOLDCNT;
2987 }
2988 return (vs);
2989 }
2990
2991 void
2992 vget_abort(struct vnode *vp, enum vgetstate vs)
2993 {
2994
2995 switch (vs) {
2996 case VGET_USECOUNT:
2997 vrele(vp);
2998 break;
2999 case VGET_HOLDCNT:
3000 vdrop(vp);
3001 break;
3002 default:
3003 __assert_unreachable();
3004 }
3005 }
3006
3007 int
3008 vget(struct vnode *vp, int flags)
3009 {
3010 enum vgetstate vs;
3011
3012 vs = vget_prep(vp);
3013 return (vget_finish(vp, flags, vs));
3014 }
3015
3016 int
3017 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3018 {
3019 int error;
3020
3021 if ((flags & LK_INTERLOCK) != 0)
3022 ASSERT_VI_LOCKED(vp, __func__);
3023 else
3024 ASSERT_VI_UNLOCKED(vp, __func__);
3025 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3026 VNPASS(vp->v_holdcnt > 0, vp);
3027 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3028
3029 error = vn_lock(vp, flags);
3030 if (__predict_false(error != 0)) {
3031 vget_abort(vp, vs);
3032 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
3033 vp);
3034 return (error);
3035 }
3036
3037 vget_finish_ref(vp, vs);
3038 return (0);
3039 }
3040
3041 void
3042 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
3043 {
3044 int old;
3045
3046 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3047 VNPASS(vp->v_holdcnt > 0, vp);
3048 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3049
3050 if (vs == VGET_USECOUNT)
3051 return;
3052
3053 /*
3054 * We hold the vnode. If the usecount is 0 it will be utilized to keep
3055 * the vnode around. Otherwise someone else lent their hold count and
3056 * we have to drop ours.
3057 */
3058 old = atomic_fetchadd_int(&vp->v_usecount, 1);
3059 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
3060 if (old != 0) {
3061 #ifdef INVARIANTS
3062 old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
3063 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
3064 #else
3065 refcount_release(&vp->v_holdcnt);
3066 #endif
3067 }
3068 }
3069
3070 void
3071 vref(struct vnode *vp)
3072 {
3073 enum vgetstate vs;
3074
3075 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3076 vs = vget_prep(vp);
3077 vget_finish_ref(vp, vs);
3078 }
3079
3080 void
3081 vrefact(struct vnode *vp)
3082 {
3083
3084 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3085 #ifdef INVARIANTS
3086 int old = atomic_fetchadd_int(&vp->v_usecount, 1);
3087 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
3088 #else
3089 refcount_acquire(&vp->v_usecount);
3090 #endif
3091 }
3092
3093 void
3094 vlazy(struct vnode *vp)
3095 {
3096 struct mount *mp;
3097
3098 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3099
3100 if ((vp->v_mflag & VMP_LAZYLIST) != 0)
3101 return;
3102 /*
3103 * We may get here for inactive routines after the vnode got doomed.
3104 */ 3105 if (VN_IS_DOOMED(vp)) 3106 return; 3107 mp = vp->v_mount; 3108 mtx_lock(&mp->mnt_listmtx); 3109 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3110 vp->v_mflag |= VMP_LAZYLIST; 3111 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3112 mp->mnt_lazyvnodelistsize++; 3113 } 3114 mtx_unlock(&mp->mnt_listmtx); 3115 } 3116 3117 static void 3118 vunlazy(struct vnode *vp) 3119 { 3120 struct mount *mp; 3121 3122 ASSERT_VI_LOCKED(vp, __func__); 3123 VNPASS(!VN_IS_DOOMED(vp), vp); 3124 3125 mp = vp->v_mount; 3126 mtx_lock(&mp->mnt_listmtx); 3127 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3128 /* 3129 * Don't remove the vnode from the lazy list if another thread 3130 * has increased the hold count. It may have re-enqueued the 3131 * vnode to the lazy list and is now responsible for its 3132 * removal. 3133 */ 3134 if (vp->v_holdcnt == 0) { 3135 vp->v_mflag &= ~VMP_LAZYLIST; 3136 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3137 mp->mnt_lazyvnodelistsize--; 3138 } 3139 mtx_unlock(&mp->mnt_listmtx); 3140 } 3141 3142 /* 3143 * This routine is only meant to be called from vgonel prior to dooming 3144 * the vnode. 3145 */ 3146 static void 3147 vunlazy_gone(struct vnode *vp) 3148 { 3149 struct mount *mp; 3150 3151 ASSERT_VOP_ELOCKED(vp, __func__); 3152 ASSERT_VI_LOCKED(vp, __func__); 3153 VNPASS(!VN_IS_DOOMED(vp), vp); 3154 3155 if (vp->v_mflag & VMP_LAZYLIST) { 3156 mp = vp->v_mount; 3157 mtx_lock(&mp->mnt_listmtx); 3158 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3159 vp->v_mflag &= ~VMP_LAZYLIST; 3160 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3161 mp->mnt_lazyvnodelistsize--; 3162 mtx_unlock(&mp->mnt_listmtx); 3163 } 3164 } 3165 3166 static void 3167 vdefer_inactive(struct vnode *vp) 3168 { 3169 3170 ASSERT_VI_LOCKED(vp, __func__); 3171 VNASSERT(vp->v_holdcnt > 0, vp, 3172 ("%s: vnode without hold count", __func__)); 3173 if (VN_IS_DOOMED(vp)) { 3174 vdropl(vp); 3175 return; 3176 } 3177 if (vp->v_iflag & VI_DEFINACT) { 3178 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3179 vdropl(vp); 3180 return; 3181 } 3182 if (vp->v_usecount > 0) { 3183 vp->v_iflag &= ~VI_OWEINACT; 3184 vdropl(vp); 3185 return; 3186 } 3187 vlazy(vp); 3188 vp->v_iflag |= VI_DEFINACT; 3189 VI_UNLOCK(vp); 3190 counter_u64_add(deferred_inact, 1); 3191 } 3192 3193 static void 3194 vdefer_inactive_unlocked(struct vnode *vp) 3195 { 3196 3197 VI_LOCK(vp); 3198 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3199 vdropl(vp); 3200 return; 3201 } 3202 vdefer_inactive(vp); 3203 } 3204 3205 enum vput_op { VRELE, VPUT, VUNREF }; 3206 3207 /* 3208 * Handle ->v_usecount transitioning to 0. 3209 * 3210 * By releasing the last usecount we take ownership of the hold count which 3211 * provides liveness of the vnode, meaning we have to vdrop. 3212 * 3213 * For all vnodes we may need to perform inactive processing. It requires an 3214 * exclusive lock on the vnode, while it is legal to call here with only a 3215 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3216 * inactive processing gets deferred to the syncer. 3217 * 3218 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3219 * on the lock being held all the way until VOP_INACTIVE. This in particular 3220 * happens with UFS which adds half-constructed vnodes to the hash, where they 3221 * can be found by other code. 
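 *
 * If the vnode lock cannot be obtained in the manner the chosen variant
 * expects, the inactive processing is not dropped: the vnode is handed
 * to vdefer_inactive() with VI_OWEINACT set and is picked up later,
 * e.g. by the syncer or when the vnode gets doomed.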
3222 */ 3223 static void 3224 vput_final(struct vnode *vp, enum vput_op func) 3225 { 3226 int error; 3227 bool want_unlock; 3228 3229 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3230 VNPASS(vp->v_holdcnt > 0, vp); 3231 3232 VI_LOCK(vp); 3233 3234 /* 3235 * By the time we got here someone else might have transitioned 3236 * the count back to > 0. 3237 */ 3238 if (vp->v_usecount > 0) 3239 goto out; 3240 3241 /* 3242 * If the vnode is doomed vgone already performed inactive processing 3243 * (if needed). 3244 */ 3245 if (VN_IS_DOOMED(vp)) 3246 goto out; 3247 3248 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3249 goto out; 3250 3251 if (vp->v_iflag & VI_DOINGINACT) 3252 goto out; 3253 3254 /* 3255 * Locking operations here will drop the interlock and possibly the 3256 * vnode lock, opening a window where the vnode can get doomed all the 3257 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3258 * perform inactive. 3259 */ 3260 vp->v_iflag |= VI_OWEINACT; 3261 want_unlock = false; 3262 error = 0; 3263 switch (func) { 3264 case VRELE: 3265 switch (VOP_ISLOCKED(vp)) { 3266 case LK_EXCLUSIVE: 3267 break; 3268 case LK_EXCLOTHER: 3269 case 0: 3270 want_unlock = true; 3271 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3272 VI_LOCK(vp); 3273 break; 3274 default: 3275 /* 3276 * The lock has at least one sharer, but we have no way 3277 * to conclude whether this is us. Play it safe and 3278 * defer processing. 3279 */ 3280 error = EAGAIN; 3281 break; 3282 } 3283 break; 3284 case VPUT: 3285 want_unlock = true; 3286 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3287 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3288 LK_NOWAIT); 3289 VI_LOCK(vp); 3290 } 3291 break; 3292 case VUNREF: 3293 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3294 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3295 VI_LOCK(vp); 3296 } 3297 break; 3298 } 3299 if (error == 0) { 3300 if (func == VUNREF) { 3301 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3302 ("recursive vunref")); 3303 vp->v_vflag |= VV_UNREF; 3304 } 3305 for (;;) { 3306 error = vinactive(vp); 3307 if (want_unlock) 3308 VOP_UNLOCK(vp); 3309 if (error != ERELOOKUP || !want_unlock) 3310 break; 3311 VOP_LOCK(vp, LK_EXCLUSIVE); 3312 } 3313 if (func == VUNREF) 3314 vp->v_vflag &= ~VV_UNREF; 3315 vdropl(vp); 3316 } else { 3317 vdefer_inactive(vp); 3318 } 3319 return; 3320 out: 3321 if (func == VPUT) 3322 VOP_UNLOCK(vp); 3323 vdropl(vp); 3324 } 3325 3326 /* 3327 * Decrement ->v_usecount for a vnode. 3328 * 3329 * Releasing the last use count requires additional processing, see vput_final 3330 * above for details. 3331 * 3332 * Comment above each variant denotes lock state on entry and exit. 
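 *
 * A common pairing with the vget*() family looks like this sketch:
 *
 *	error = vget(vp, LK_EXCLUSIVE);
 *	if (error == 0) {
 *		... use the vnode ...
 *		vput(vp);
 *	}
 *
 * where vput() both releases the use count and unlocks the vnode,
 * while vrele() is used when the caller does not hold the vnode lock.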
3333 */ 3334 3335 /* 3336 * in: any 3337 * out: same as passed in 3338 */ 3339 void 3340 vrele(struct vnode *vp) 3341 { 3342 3343 ASSERT_VI_UNLOCKED(vp, __func__); 3344 if (!refcount_release(&vp->v_usecount)) 3345 return; 3346 vput_final(vp, VRELE); 3347 } 3348 3349 /* 3350 * in: locked 3351 * out: unlocked 3352 */ 3353 void 3354 vput(struct vnode *vp) 3355 { 3356 3357 ASSERT_VOP_LOCKED(vp, __func__); 3358 ASSERT_VI_UNLOCKED(vp, __func__); 3359 if (!refcount_release(&vp->v_usecount)) { 3360 VOP_UNLOCK(vp); 3361 return; 3362 } 3363 vput_final(vp, VPUT); 3364 } 3365 3366 /* 3367 * in: locked 3368 * out: locked 3369 */ 3370 void 3371 vunref(struct vnode *vp) 3372 { 3373 3374 ASSERT_VOP_LOCKED(vp, __func__); 3375 ASSERT_VI_UNLOCKED(vp, __func__); 3376 if (!refcount_release(&vp->v_usecount)) 3377 return; 3378 vput_final(vp, VUNREF); 3379 } 3380 3381 void 3382 vhold(struct vnode *vp) 3383 { 3384 int old; 3385 3386 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3387 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3388 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3389 ("%s: wrong hold count %d", __func__, old)); 3390 if (old == 0) 3391 vfs_freevnodes_dec(); 3392 } 3393 3394 void 3395 vholdnz(struct vnode *vp) 3396 { 3397 3398 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3399 #ifdef INVARIANTS 3400 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3401 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3402 ("%s: wrong hold count %d", __func__, old)); 3403 #else 3404 atomic_add_int(&vp->v_holdcnt, 1); 3405 #endif 3406 } 3407 3408 /* 3409 * Grab a hold count unless the vnode is freed. 3410 * 3411 * Only use this routine if vfs smr is the only protection you have against 3412 * freeing the vnode. 3413 * 3414 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3415 * is not set. After the flag is set the vnode becomes immutable to anyone but 3416 * the thread which managed to set the flag. 3417 * 3418 * It may be tempting to replace the loop with: 3419 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3420 * if (count & VHOLD_NO_SMR) { 3421 * backpedal and error out; 3422 * } 3423 * 3424 * However, while this is more performant, it hinders debugging by eliminating 3425 * the previously mentioned invariant. 3426 */ 3427 bool 3428 vhold_smr(struct vnode *vp) 3429 { 3430 int count; 3431 3432 VFS_SMR_ASSERT_ENTERED(); 3433 3434 count = atomic_load_int(&vp->v_holdcnt); 3435 for (;;) { 3436 if (count & VHOLD_NO_SMR) { 3437 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3438 ("non-zero hold count with flags %d\n", count)); 3439 return (false); 3440 } 3441 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3442 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3443 if (count == 0) 3444 vfs_freevnodes_dec(); 3445 return (true); 3446 } 3447 } 3448 } 3449 3450 /* 3451 * Hold a free vnode for recycling. 3452 * 3453 * Note: vnode_init references this comment. 3454 * 3455 * Attempts to recycle only need the global vnode list lock and have no use for 3456 * SMR. 3457 * 3458 * However, vnodes get inserted into the global list before they get fully 3459 * initialized and stay there until UMA decides to free the memory. This in 3460 * particular means the target can be found before it becomes usable and after 3461 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3462 * VHOLD_NO_SMR. 3463 * 3464 * Note: the vnode may gain more references after we transition the count 0->1. 
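 *
 * Callers therefore re-check v_usecount under the interlock before
 * actually recycling (see vtryrecycle()) and back off with
 * vdropl_recycle() if the vnode was picked up in the meantime.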
3465 */
3466 static bool
3467 vhold_recycle_free(struct vnode *vp)
3468 {
3469 int count;
3470
3471 mtx_assert(&vnode_list_mtx, MA_OWNED);
3472
3473 count = atomic_load_int(&vp->v_holdcnt);
3474 for (;;) {
3475 if (count & VHOLD_NO_SMR) {
3476 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3477 ("non-zero hold count with flags %d\n", count));
3478 return (false);
3479 }
3480 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3481 if (count > 0) {
3482 return (false);
3483 }
3484 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3485 vfs_freevnodes_dec();
3486 return (true);
3487 }
3488 }
3489 }
3490
3491 static void __noinline
3492 vdbatch_process(struct vdbatch *vd)
3493 {
3494 struct vnode *vp;
3495 int i;
3496
3497 mtx_assert(&vd->lock, MA_OWNED);
3498 MPASS(curthread->td_pinned > 0);
3499 MPASS(vd->index == VDBATCH_SIZE);
3500
3501 mtx_lock(&vnode_list_mtx);
3502 critical_enter();
3503 freevnodes += vd->freevnodes;
3504 for (i = 0; i < VDBATCH_SIZE; i++) {
3505 vp = vd->tab[i];
3506 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3507 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3508 MPASS(vp->v_dbatchcpu != NOCPU);
3509 vp->v_dbatchcpu = NOCPU;
3510 }
3511 mtx_unlock(&vnode_list_mtx);
3512 vd->freevnodes = 0;
3513 bzero(vd->tab, sizeof(vd->tab));
3514 vd->index = 0;
3515 critical_exit();
3516 }
3517
3518 static void
3519 vdbatch_enqueue(struct vnode *vp)
3520 {
3521 struct vdbatch *vd;
3522
3523 ASSERT_VI_LOCKED(vp, __func__);
3524 VNASSERT(!VN_IS_DOOMED(vp), vp,
3525 ("%s: deferring requeue of a doomed vnode", __func__));
3526
3527 if (vp->v_dbatchcpu != NOCPU) {
3528 VI_UNLOCK(vp);
3529 return;
3530 }
3531
3532 sched_pin();
3533 vd = DPCPU_PTR(vd);
3534 mtx_lock(&vd->lock);
3535 MPASS(vd->index < VDBATCH_SIZE);
3536 MPASS(vd->tab[vd->index] == NULL);
3537 /*
3538 * A hack: we depend on being pinned so that we know what to put in
3539 * ->v_dbatchcpu.
3540 */
3541 vp->v_dbatchcpu = curcpu;
3542 vd->tab[vd->index] = vp;
3543 vd->index++;
3544 VI_UNLOCK(vp);
3545 if (vd->index == VDBATCH_SIZE)
3546 vdbatch_process(vd);
3547 mtx_unlock(&vd->lock);
3548 sched_unpin();
3549 }
3550
3551 /*
3552 * This routine must only be called for vnodes which are about to be
3553 * deallocated. Supporting dequeue for arbitrary vnodes would require
3554 * validating that the locked batch matches.
3555 */
3556 static void
3557 vdbatch_dequeue(struct vnode *vp)
3558 {
3559 struct vdbatch *vd;
3560 int i;
3561 short cpu;
3562
3563 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp,
3564 ("%s: called for a used vnode\n", __func__));
3565
3566 cpu = vp->v_dbatchcpu;
3567 if (cpu == NOCPU)
3568 return;
3569
3570 vd = DPCPU_ID_PTR(cpu, vd);
3571 mtx_lock(&vd->lock);
3572 for (i = 0; i < vd->index; i++) {
3573 if (vd->tab[i] != vp)
3574 continue;
3575 vp->v_dbatchcpu = NOCPU;
3576 vd->index--;
3577 vd->tab[i] = vd->tab[vd->index];
3578 vd->tab[vd->index] = NULL;
3579 break;
3580 }
3581 mtx_unlock(&vd->lock);
3582 /*
3583 * Either we dequeued the vnode above or the target CPU beat us to it.
3584 */
3585 MPASS(vp->v_dbatchcpu == NOCPU);
3586 }
3587
3588 /*
3589 * Drop the hold count of the vnode. If this is the last reference to
3590 * the vnode we place it on the free list unless it has been vgone'd
3591 * (marked VIRF_DOOMED) in which case we will free it.
3592 *
3593 * Because the vnode vm object keeps a hold reference on the vnode if
3594 * there is at least one resident non-cached page, the vnode cannot
3595 * leave the active list without the page cleanup done.
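 *
 * The usual pattern is for a consumer to take a hold so the vnode
 * cannot be recycled while its locks are dropped, and to release it
 * once done, e.g. (sketch, modelled on sync_vnode() above):
 *
 *	vholdl(vp);
 *	VI_UNLOCK(vp);
 *	... work with no vnode locks held ...
 *	vdrop(vp);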
3596 */
3597 static void __noinline
3598 vdropl_final(struct vnode *vp)
3599 {
3600
3601 ASSERT_VI_LOCKED(vp, __func__);
3602 VNPASS(VN_IS_DOOMED(vp), vp);
3603 /*
3604 * Set the VHOLD_NO_SMR flag.
3605 *
3606 * We may be racing against vhold_smr. If they win we can just pretend
3607 * we never got this far; they will vdrop later.
3608 */
3609 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3610 vfs_freevnodes_inc();
3611 VI_UNLOCK(vp);
3612 /*
3613 * We lost the aforementioned race. Any subsequent access is
3614 * invalid as they might have managed to vdropl on their own.
3615 */
3616 return;
3617 }
3618 /*
3619 * Don't bump freevnodes as this one is going away.
3620 */
3621 freevnode(vp);
3622 }
3623
3624 void
3625 vdrop(struct vnode *vp)
3626 {
3627
3628 ASSERT_VI_UNLOCKED(vp, __func__);
3629 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3630 if (refcount_release_if_not_last(&vp->v_holdcnt))
3631 return;
3632 VI_LOCK(vp);
3633 vdropl(vp);
3634 }
3635
3636 static void __always_inline
3637 vdropl_impl(struct vnode *vp, bool enqueue)
3638 {
3639
3640 ASSERT_VI_LOCKED(vp, __func__);
3641 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3642 if (!refcount_release(&vp->v_holdcnt)) {
3643 VI_UNLOCK(vp);
3644 return;
3645 }
3646 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
3647 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
3648 if (VN_IS_DOOMED(vp)) {
3649 vdropl_final(vp);
3650 return;
3651 }
3652
3653 vfs_freevnodes_inc();
3654 if (vp->v_mflag & VMP_LAZYLIST) {
3655 vunlazy(vp);
3656 }
3657
3658 if (!enqueue) {
3659 VI_UNLOCK(vp);
3660 return;
3661 }
3662
3663 /*
3664 * Also unlocks the interlock. We can't assert on it as we
3665 * released our hold and by now the vnode might have been
3666 * freed.
3667 */
3668 vdbatch_enqueue(vp);
3669 }
3670
3671 void
3672 vdropl(struct vnode *vp)
3673 {
3674
3675 vdropl_impl(vp, true);
3676 }
3677
3678 /*
3679 * vdrop a vnode when recycling
3680 *
3681 * This is a special case routine only to be used when recycling; it differs
3682 * from regular vdrop by not requeueing the vnode on the LRU.
3683 *
3684 * Consider a case where vtryrecycle continuously fails with all vnodes (due to
3685 * e.g., frozen writes on the filesystem), filling the batch and causing it to
3686 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a
3687 * loop which can last for as long as writes are frozen.
3688 */
3689 static void
3690 vdropl_recycle(struct vnode *vp)
3691 {
3692
3693 vdropl_impl(vp, false);
3694 }
3695
3696 static void
3697 vdrop_recycle(struct vnode *vp)
3698 {
3699
3700 VI_LOCK(vp);
3701 vdropl_recycle(vp);
3702 }
3703
3704 /*
3705 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
3706 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
3707 */
3708 static int
3709 vinactivef(struct vnode *vp)
3710 {
3711 struct vm_object *obj;
3712 int error;
3713
3714 ASSERT_VOP_ELOCKED(vp, "vinactive");
3715 ASSERT_VI_LOCKED(vp, "vinactive");
3716 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
3717 ("vinactive: recursed on VI_DOINGINACT"));
3718 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3719 vp->v_iflag |= VI_DOINGINACT;
3720 vp->v_iflag &= ~VI_OWEINACT;
3721 VI_UNLOCK(vp);
3722 /*
3723 * Before moving off the active list, we must be sure that any
3724 * modified pages are converted into the vnode's dirty
3725 * buffers, since these will no longer be checked once the
3726 * vnode is on the inactive list.
3727 *
3728 * The write-out of the dirty pages is asynchronous.
At the
3729 * point that VOP_INACTIVE() is called, there could still be
3730 * pending I/O and dirty pages in the object.
3731 */
3732 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
3733 vm_object_mightbedirty(obj)) {
3734 VM_OBJECT_WLOCK(obj);
3735 vm_object_page_clean(obj, 0, 0, 0);
3736 VM_OBJECT_WUNLOCK(obj);
3737 }
3738 error = VOP_INACTIVE(vp);
3739 VI_LOCK(vp);
3740 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
3741 ("vinactive: lost VI_DOINGINACT"));
3742 vp->v_iflag &= ~VI_DOINGINACT;
3743 return (error);
3744 }
3745
3746 int
3747 vinactive(struct vnode *vp)
3748 {
3749
3750 ASSERT_VOP_ELOCKED(vp, "vinactive");
3751 ASSERT_VI_LOCKED(vp, "vinactive");
3752 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3753
3754 if ((vp->v_iflag & VI_OWEINACT) == 0)
3755 return (0);
3756 if (vp->v_iflag & VI_DOINGINACT)
3757 return (0);
3758 if (vp->v_usecount > 0) {
3759 vp->v_iflag &= ~VI_OWEINACT;
3760 return (0);
3761 }
3762 return (vinactivef(vp));
3763 }
3764
3765 /*
3766 * Remove any vnodes in the vnode table belonging to mount point mp.
3767 *
3768 * If FORCECLOSE is not specified, there should not be any active ones;
3769 * return an error if any are found (nb: this is a user error, not a
3770 * system error). If FORCECLOSE is specified, detach any active vnodes
3771 * that are found.
3772 *
3773 * If WRITECLOSE is set, only flush out regular file vnodes open for
3774 * writing.
3775 *
3776 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
3777 *
3778 * `rootrefs' specifies the base reference count for the root vnode
3779 * of this filesystem. The root vnode is considered busy if its
3780 * v_usecount exceeds this value. On a successful return, vflush()
3781 * will call vrele() on the root vnode exactly rootrefs times.
3782 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
3783 * be zero.
3784 */
3785 #ifdef DIAGNOSTIC
3786 static int busyprt = 0; /* print out busy vnodes */
3787 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
3788 #endif
3789
3790 int
3791 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
3792 {
3793 struct vnode *vp, *mvp, *rootvp = NULL;
3794 struct vattr vattr;
3795 int busy = 0, error;
3796
3797 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
3798 rootrefs, flags);
3799 if (rootrefs > 0) {
3800 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
3801 ("vflush: bad args"));
3802 /*
3803 * Get the filesystem root vnode. We can vput() it
3804 * immediately, since with rootrefs > 0, it won't go away.
3805 */
3806 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
3807 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
3808 __func__, error);
3809 return (error);
3810 }
3811 vput(rootvp);
3812 }
3813 loop:
3814 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
3815 vholdl(vp);
3816 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
3817 if (error) {
3818 vdrop(vp);
3819 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3820 goto loop;
3821 }
3822 /*
3823 * Skip over vnodes marked VV_SYSTEM.
3824 */
3825 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
3826 VOP_UNLOCK(vp);
3827 vdrop(vp);
3828 continue;
3829 }
3830 /*
3831 * If WRITECLOSE is set, flush out unlinked but still open
3832 * files (even if open only for reading) and regular file
3833 * vnodes open for writing.
3834 */ 3835 if (flags & WRITECLOSE) { 3836 if (vp->v_object != NULL) { 3837 VM_OBJECT_WLOCK(vp->v_object); 3838 vm_object_page_clean(vp->v_object, 0, 0, 0); 3839 VM_OBJECT_WUNLOCK(vp->v_object); 3840 } 3841 do { 3842 error = VOP_FSYNC(vp, MNT_WAIT, td); 3843 } while (error == ERELOOKUP); 3844 if (error != 0) { 3845 VOP_UNLOCK(vp); 3846 vdrop(vp); 3847 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3848 return (error); 3849 } 3850 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3851 VI_LOCK(vp); 3852 3853 if ((vp->v_type == VNON || 3854 (error == 0 && vattr.va_nlink > 0)) && 3855 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3856 VOP_UNLOCK(vp); 3857 vdropl(vp); 3858 continue; 3859 } 3860 } else 3861 VI_LOCK(vp); 3862 /* 3863 * With v_usecount == 0, all we need to do is clear out the 3864 * vnode data structures and we are done. 3865 * 3866 * If FORCECLOSE is set, forcibly close the vnode. 3867 */ 3868 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3869 vgonel(vp); 3870 } else { 3871 busy++; 3872 #ifdef DIAGNOSTIC 3873 if (busyprt) 3874 vn_printf(vp, "vflush: busy vnode "); 3875 #endif 3876 } 3877 VOP_UNLOCK(vp); 3878 vdropl(vp); 3879 } 3880 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3881 /* 3882 * If just the root vnode is busy, and if its refcount 3883 * is equal to `rootrefs', then go ahead and kill it. 3884 */ 3885 VI_LOCK(rootvp); 3886 KASSERT(busy > 0, ("vflush: not busy")); 3887 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3888 ("vflush: usecount %d < rootrefs %d", 3889 rootvp->v_usecount, rootrefs)); 3890 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3891 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3892 vgone(rootvp); 3893 VOP_UNLOCK(rootvp); 3894 busy = 0; 3895 } else 3896 VI_UNLOCK(rootvp); 3897 } 3898 if (busy) { 3899 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3900 busy); 3901 return (EBUSY); 3902 } 3903 for (; rootrefs > 0; rootrefs--) 3904 vrele(rootvp); 3905 return (0); 3906 } 3907 3908 /* 3909 * Recycle an unused vnode to the front of the free list. 3910 */ 3911 int 3912 vrecycle(struct vnode *vp) 3913 { 3914 int recycled; 3915 3916 VI_LOCK(vp); 3917 recycled = vrecyclel(vp); 3918 VI_UNLOCK(vp); 3919 return (recycled); 3920 } 3921 3922 /* 3923 * vrecycle, with the vp interlock held. 3924 */ 3925 int 3926 vrecyclel(struct vnode *vp) 3927 { 3928 int recycled; 3929 3930 ASSERT_VOP_ELOCKED(vp, __func__); 3931 ASSERT_VI_LOCKED(vp, __func__); 3932 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3933 recycled = 0; 3934 if (vp->v_usecount == 0) { 3935 recycled = 1; 3936 vgonel(vp); 3937 } 3938 return (recycled); 3939 } 3940 3941 /* 3942 * Eliminate all activity associated with a vnode 3943 * in preparation for reuse. 3944 */ 3945 void 3946 vgone(struct vnode *vp) 3947 { 3948 VI_LOCK(vp); 3949 vgonel(vp); 3950 VI_UNLOCK(vp); 3951 } 3952 3953 /* 3954 * Notify upper mounts about reclaimed or unlinked vnode. 
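 *
 * This is how stacked filesystems such as nullfs learn that a lower
 * vnode they are reusing is being reclaimed or had its last link
 * removed; the registered uppers are called back through
 * VFS_RECLAIM_LOWERVP() and VFS_UNLINK_LOWERVP() below.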
3955 */ 3956 void 3957 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 3958 { 3959 struct mount *mp; 3960 struct mount_upper_node *ump; 3961 3962 mp = atomic_load_ptr(&vp->v_mount); 3963 if (mp == NULL) 3964 return; 3965 if (TAILQ_EMPTY(&mp->mnt_notify)) 3966 return; 3967 3968 MNT_ILOCK(mp); 3969 mp->mnt_upper_pending++; 3970 KASSERT(mp->mnt_upper_pending > 0, 3971 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 3972 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 3973 MNT_IUNLOCK(mp); 3974 switch (event) { 3975 case VFS_NOTIFY_UPPER_RECLAIM: 3976 VFS_RECLAIM_LOWERVP(ump->mp, vp); 3977 break; 3978 case VFS_NOTIFY_UPPER_UNLINK: 3979 VFS_UNLINK_LOWERVP(ump->mp, vp); 3980 break; 3981 } 3982 MNT_ILOCK(mp); 3983 } 3984 mp->mnt_upper_pending--; 3985 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 3986 mp->mnt_upper_pending == 0) { 3987 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 3988 wakeup(&mp->mnt_uppers); 3989 } 3990 MNT_IUNLOCK(mp); 3991 } 3992 3993 /* 3994 * vgone, with the vp interlock held. 3995 */ 3996 static void 3997 vgonel(struct vnode *vp) 3998 { 3999 struct thread *td; 4000 struct mount *mp; 4001 vm_object_t object; 4002 bool active, doinginact, oweinact; 4003 4004 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4005 ASSERT_VI_LOCKED(vp, "vgonel"); 4006 VNASSERT(vp->v_holdcnt, vp, 4007 ("vgonel: vp %p has no reference.", vp)); 4008 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4009 td = curthread; 4010 4011 /* 4012 * Don't vgonel if we're already doomed. 4013 */ 4014 if (VN_IS_DOOMED(vp)) 4015 return; 4016 /* 4017 * Paired with freevnode. 4018 */ 4019 vn_seqc_write_begin_locked(vp); 4020 vunlazy_gone(vp); 4021 vn_irflag_set_locked(vp, VIRF_DOOMED); 4022 4023 /* 4024 * Check to see if the vnode is in use. If so, we have to 4025 * call VOP_CLOSE() and VOP_INACTIVE(). 4026 * 4027 * It could be that VOP_INACTIVE() requested reclamation, in 4028 * which case we should avoid recursion, so check 4029 * VI_DOINGINACT. This is not precise but good enough. 4030 */ 4031 active = vp->v_usecount > 0; 4032 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4033 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4034 4035 /* 4036 * If we need to do inactive VI_OWEINACT will be set. 4037 */ 4038 if (vp->v_iflag & VI_DEFINACT) { 4039 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4040 vp->v_iflag &= ~VI_DEFINACT; 4041 vdropl(vp); 4042 } else { 4043 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4044 VI_UNLOCK(vp); 4045 } 4046 cache_purge_vgone(vp); 4047 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4048 4049 /* 4050 * If purging an active vnode, it must be closed and 4051 * deactivated before being reclaimed. 4052 */ 4053 if (active) 4054 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4055 if (!doinginact) { 4056 do { 4057 if (oweinact || active) { 4058 VI_LOCK(vp); 4059 vinactivef(vp); 4060 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4061 VI_UNLOCK(vp); 4062 } 4063 } while (oweinact); 4064 } 4065 if (vp->v_type == VSOCK) 4066 vfs_unp_reclaim(vp); 4067 4068 /* 4069 * Clean out any buffers associated with the vnode. 4070 * If the flush fails, just toss the buffers. 
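 * The first vinvalbuf() call below uses V_SAVE to try to write the dirty
 * buffers out; if that fails, the loop retries without V_SAVE and simply
 * discards whatever is left.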
4071 */ 4072 mp = NULL; 4073 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4074 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4075 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4076 while (vinvalbuf(vp, 0, 0, 0) != 0) 4077 ; 4078 } 4079 4080 BO_LOCK(&vp->v_bufobj); 4081 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4082 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4083 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4084 vp->v_bufobj.bo_clean.bv_cnt == 0, 4085 ("vp %p bufobj not invalidated", vp)); 4086 4087 /* 4088 * For VMIO bufobj, BO_DEAD is set later, or in 4089 * vm_object_terminate() after the object's page queue is 4090 * flushed. 4091 */ 4092 object = vp->v_bufobj.bo_object; 4093 if (object == NULL) 4094 vp->v_bufobj.bo_flag |= BO_DEAD; 4095 BO_UNLOCK(&vp->v_bufobj); 4096 4097 /* 4098 * Handle the VM part. Tmpfs handles v_object on its own (the 4099 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4100 * should not touch the object borrowed from the lower vnode 4101 * (the handle check). 4102 */ 4103 if (object != NULL && object->type == OBJT_VNODE && 4104 object->handle == vp) 4105 vnode_destroy_vobject(vp); 4106 4107 /* 4108 * Reclaim the vnode. 4109 */ 4110 if (VOP_RECLAIM(vp)) 4111 panic("vgone: cannot reclaim"); 4112 if (mp != NULL) 4113 vn_finished_secondary_write(mp); 4114 VNASSERT(vp->v_object == NULL, vp, 4115 ("vop_reclaim left v_object vp=%p", vp)); 4116 /* 4117 * Clear the advisory locks and wake up waiting threads. 4118 */ 4119 if (vp->v_lockf != NULL) { 4120 (void)VOP_ADVLOCKPURGE(vp); 4121 vp->v_lockf = NULL; 4122 } 4123 /* 4124 * Delete from old mount point vnode list. 4125 */ 4126 if (vp->v_mount == NULL) { 4127 VI_LOCK(vp); 4128 } else { 4129 delmntque(vp); 4130 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4131 } 4132 /* 4133 * Done with purge, reset to the standard lock and invalidate 4134 * the vnode. 4135 */ 4136 vp->v_vnlock = &vp->v_lock; 4137 vp->v_op = &dead_vnodeops; 4138 vp->v_type = VBAD; 4139 } 4140 4141 /* 4142 * Print out a description of a vnode. 4143 */ 4144 static const char * const typename[] = 4145 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4146 "VMARKER"}; 4147 4148 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4149 "new hold count flag not added to vn_printf"); 4150 4151 void 4152 vn_printf(struct vnode *vp, const char *fmt, ...) 
4153 { 4154 va_list ap; 4155 char buf[256], buf2[16]; 4156 u_long flags; 4157 u_int holdcnt; 4158 short irflag; 4159 4160 va_start(ap, fmt); 4161 vprintf(fmt, ap); 4162 va_end(ap); 4163 printf("%p: ", (void *)vp); 4164 printf("type %s\n", typename[vp->v_type]); 4165 holdcnt = atomic_load_int(&vp->v_holdcnt); 4166 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4167 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4168 vp->v_seqc_users); 4169 switch (vp->v_type) { 4170 case VDIR: 4171 printf(" mountedhere %p\n", vp->v_mountedhere); 4172 break; 4173 case VCHR: 4174 printf(" rdev %p\n", vp->v_rdev); 4175 break; 4176 case VSOCK: 4177 printf(" socket %p\n", vp->v_unpcb); 4178 break; 4179 case VFIFO: 4180 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4181 break; 4182 default: 4183 printf("\n"); 4184 break; 4185 } 4186 buf[0] = '\0'; 4187 buf[1] = '\0'; 4188 if (holdcnt & VHOLD_NO_SMR) 4189 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4190 printf(" hold count flags (%s)\n", buf + 1); 4191 4192 buf[0] = '\0'; 4193 buf[1] = '\0'; 4194 irflag = vn_irflag_read(vp); 4195 if (irflag & VIRF_DOOMED) 4196 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4197 if (irflag & VIRF_PGREAD) 4198 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4199 if (irflag & VIRF_MOUNTPOINT) 4200 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4201 if (irflag & VIRF_TEXT_REF) 4202 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4203 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4204 if (flags != 0) { 4205 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4206 strlcat(buf, buf2, sizeof(buf)); 4207 } 4208 if (vp->v_vflag & VV_ROOT) 4209 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4210 if (vp->v_vflag & VV_ISTTY) 4211 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4212 if (vp->v_vflag & VV_NOSYNC) 4213 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4214 if (vp->v_vflag & VV_ETERNALDEV) 4215 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4216 if (vp->v_vflag & VV_CACHEDLABEL) 4217 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4218 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4219 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4220 if (vp->v_vflag & VV_COPYONWRITE) 4221 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4222 if (vp->v_vflag & VV_SYSTEM) 4223 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4224 if (vp->v_vflag & VV_PROCDEP) 4225 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4226 if (vp->v_vflag & VV_DELETED) 4227 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4228 if (vp->v_vflag & VV_MD) 4229 strlcat(buf, "|VV_MD", sizeof(buf)); 4230 if (vp->v_vflag & VV_FORCEINSMQ) 4231 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4232 if (vp->v_vflag & VV_READLINK) 4233 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4234 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4235 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4236 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4237 if (flags != 0) { 4238 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4239 strlcat(buf, buf2, sizeof(buf)); 4240 } 4241 if (vp->v_iflag & VI_MOUNT) 4242 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4243 if (vp->v_iflag & VI_DOINGINACT) 4244 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4245 if (vp->v_iflag & VI_OWEINACT) 4246 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4247 if (vp->v_iflag & VI_DEFINACT) 4248 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4249 if (vp->v_iflag & VI_FOPENING) 4250 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4251 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4252 VI_OWEINACT | 
VI_DEFINACT | VI_FOPENING); 4253 if (flags != 0) { 4254 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4255 strlcat(buf, buf2, sizeof(buf)); 4256 } 4257 if (vp->v_mflag & VMP_LAZYLIST) 4258 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4259 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4260 if (flags != 0) { 4261 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4262 strlcat(buf, buf2, sizeof(buf)); 4263 } 4264 printf(" flags (%s)", buf + 1); 4265 if (mtx_owned(VI_MTX(vp))) 4266 printf(" VI_LOCKed"); 4267 printf("\n"); 4268 if (vp->v_object != NULL) 4269 printf(" v_object %p ref %d pages %d " 4270 "cleanbuf %d dirtybuf %d\n", 4271 vp->v_object, vp->v_object->ref_count, 4272 vp->v_object->resident_page_count, 4273 vp->v_bufobj.bo_clean.bv_cnt, 4274 vp->v_bufobj.bo_dirty.bv_cnt); 4275 printf(" "); 4276 lockmgr_printinfo(vp->v_vnlock); 4277 if (vp->v_data != NULL) 4278 VOP_PRINT(vp); 4279 } 4280 4281 #ifdef DDB 4282 /* 4283 * List all of the locked vnodes in the system. 4284 * Called when debugging the kernel. 4285 */ 4286 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4287 { 4288 struct mount *mp; 4289 struct vnode *vp; 4290 4291 /* 4292 * Note: because this is DDB, we can't obey the locking semantics 4293 * for these structures, which means we could catch an inconsistent 4294 * state and dereference a nasty pointer. Not much to be done 4295 * about that. 4296 */ 4297 db_printf("Locked vnodes\n"); 4298 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4299 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4300 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4301 vn_printf(vp, "vnode "); 4302 } 4303 } 4304 } 4305 4306 /* 4307 * Show details about the given vnode. 4308 */ 4309 DB_SHOW_COMMAND(vnode, db_show_vnode) 4310 { 4311 struct vnode *vp; 4312 4313 if (!have_addr) 4314 return; 4315 vp = (struct vnode *)addr; 4316 vn_printf(vp, "vnode "); 4317 } 4318 4319 /* 4320 * Show details about the given mount point. 4321 */ 4322 DB_SHOW_COMMAND(mount, db_show_mount) 4323 { 4324 struct mount *mp; 4325 struct vfsopt *opt; 4326 struct statfs *sp; 4327 struct vnode *vp; 4328 char buf[512]; 4329 uint64_t mflags; 4330 u_int flags; 4331 4332 if (!have_addr) { 4333 /* No address given, print short info about all mount points. 
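 * Example (illustrative), from the DDB prompt:
 *	show mount		- one line per mounted filesystem
 *	show mount <addr>	- full details for the given struct mount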
*/ 4334 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4335 db_printf("%p %s on %s (%s)\n", mp, 4336 mp->mnt_stat.f_mntfromname, 4337 mp->mnt_stat.f_mntonname, 4338 mp->mnt_stat.f_fstypename); 4339 if (db_pager_quit) 4340 break; 4341 } 4342 db_printf("\nMore info: show mount <addr>\n"); 4343 return; 4344 } 4345 4346 mp = (struct mount *)addr; 4347 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4348 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4349 4350 buf[0] = '\0'; 4351 mflags = mp->mnt_flag; 4352 #define MNT_FLAG(flag) do { \ 4353 if (mflags & (flag)) { \ 4354 if (buf[0] != '\0') \ 4355 strlcat(buf, ", ", sizeof(buf)); \ 4356 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4357 mflags &= ~(flag); \ 4358 } \ 4359 } while (0) 4360 MNT_FLAG(MNT_RDONLY); 4361 MNT_FLAG(MNT_SYNCHRONOUS); 4362 MNT_FLAG(MNT_NOEXEC); 4363 MNT_FLAG(MNT_NOSUID); 4364 MNT_FLAG(MNT_NFS4ACLS); 4365 MNT_FLAG(MNT_UNION); 4366 MNT_FLAG(MNT_ASYNC); 4367 MNT_FLAG(MNT_SUIDDIR); 4368 MNT_FLAG(MNT_SOFTDEP); 4369 MNT_FLAG(MNT_NOSYMFOLLOW); 4370 MNT_FLAG(MNT_GJOURNAL); 4371 MNT_FLAG(MNT_MULTILABEL); 4372 MNT_FLAG(MNT_ACLS); 4373 MNT_FLAG(MNT_NOATIME); 4374 MNT_FLAG(MNT_NOCLUSTERR); 4375 MNT_FLAG(MNT_NOCLUSTERW); 4376 MNT_FLAG(MNT_SUJ); 4377 MNT_FLAG(MNT_EXRDONLY); 4378 MNT_FLAG(MNT_EXPORTED); 4379 MNT_FLAG(MNT_DEFEXPORTED); 4380 MNT_FLAG(MNT_EXPORTANON); 4381 MNT_FLAG(MNT_EXKERB); 4382 MNT_FLAG(MNT_EXPUBLIC); 4383 MNT_FLAG(MNT_LOCAL); 4384 MNT_FLAG(MNT_QUOTA); 4385 MNT_FLAG(MNT_ROOTFS); 4386 MNT_FLAG(MNT_USER); 4387 MNT_FLAG(MNT_IGNORE); 4388 MNT_FLAG(MNT_UPDATE); 4389 MNT_FLAG(MNT_DELEXPORT); 4390 MNT_FLAG(MNT_RELOAD); 4391 MNT_FLAG(MNT_FORCE); 4392 MNT_FLAG(MNT_SNAPSHOT); 4393 MNT_FLAG(MNT_BYFSID); 4394 #undef MNT_FLAG 4395 if (mflags != 0) { 4396 if (buf[0] != '\0') 4397 strlcat(buf, ", ", sizeof(buf)); 4398 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4399 "0x%016jx", mflags); 4400 } 4401 db_printf(" mnt_flag = %s\n", buf); 4402 4403 buf[0] = '\0'; 4404 flags = mp->mnt_kern_flag; 4405 #define MNT_KERN_FLAG(flag) do { \ 4406 if (flags & (flag)) { \ 4407 if (buf[0] != '\0') \ 4408 strlcat(buf, ", ", sizeof(buf)); \ 4409 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4410 flags &= ~(flag); \ 4411 } \ 4412 } while (0) 4413 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4414 MNT_KERN_FLAG(MNTK_ASYNC); 4415 MNT_KERN_FLAG(MNTK_SOFTDEP); 4416 MNT_KERN_FLAG(MNTK_NOMSYNC); 4417 MNT_KERN_FLAG(MNTK_DRAINING); 4418 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4419 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4420 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4421 MNT_KERN_FLAG(MNTK_NO_IOPF); 4422 MNT_KERN_FLAG(MNTK_RECURSE); 4423 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4424 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4425 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4426 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4427 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4428 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4429 MNT_KERN_FLAG(MNTK_NOASYNC); 4430 MNT_KERN_FLAG(MNTK_UNMOUNT); 4431 MNT_KERN_FLAG(MNTK_MWAIT); 4432 MNT_KERN_FLAG(MNTK_SUSPEND); 4433 MNT_KERN_FLAG(MNTK_SUSPEND2); 4434 MNT_KERN_FLAG(MNTK_SUSPENDED); 4435 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4436 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4437 #undef MNT_KERN_FLAG 4438 if (flags != 0) { 4439 if (buf[0] != '\0') 4440 strlcat(buf, ", ", sizeof(buf)); 4441 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4442 "0x%08x", flags); 4443 } 4444 db_printf(" mnt_kern_flag = %s\n", buf); 4445 4446 db_printf(" mnt_opt = "); 4447 opt = TAILQ_FIRST(mp->mnt_opt); 4448 if (opt != NULL) { 4449 db_printf("%s", opt->name); 4450 opt = TAILQ_NEXT(opt, link); 4451 while (opt != 
NULL) { 4452 db_printf(", %s", opt->name); 4453 opt = TAILQ_NEXT(opt, link); 4454 } 4455 } 4456 db_printf("\n"); 4457 4458 sp = &mp->mnt_stat; 4459 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4460 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4461 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4462 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4463 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4464 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4465 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4466 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4467 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4468 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4469 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4470 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4471 4472 db_printf(" mnt_cred = { uid=%u ruid=%u", 4473 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4474 if (jailed(mp->mnt_cred)) 4475 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4476 db_printf(" }\n"); 4477 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4478 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4479 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4480 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4481 db_printf(" mnt_lazyvnodelistsize = %d\n", 4482 mp->mnt_lazyvnodelistsize); 4483 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4484 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4485 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4486 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4487 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4488 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4489 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4490 db_printf(" mnt_secondary_accwrites = %d\n", 4491 mp->mnt_secondary_accwrites); 4492 db_printf(" mnt_gjprovider = %s\n", 4493 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4494 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4495 4496 db_printf("\n\nList of active vnodes\n"); 4497 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4498 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4499 vn_printf(vp, "vnode "); 4500 if (db_pager_quit) 4501 break; 4502 } 4503 } 4504 db_printf("\n\nList of inactive vnodes\n"); 4505 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4506 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4507 vn_printf(vp, "vnode "); 4508 if (db_pager_quit) 4509 break; 4510 } 4511 } 4512 } 4513 #endif /* DDB */ 4514 4515 /* 4516 * Fill in a struct xvfsconf based on a struct vfsconf. 4517 */ 4518 static int 4519 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4520 { 4521 struct xvfsconf xvfsp; 4522 4523 bzero(&xvfsp, sizeof(xvfsp)); 4524 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4525 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4526 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4527 xvfsp.vfc_flags = vfsp->vfc_flags; 4528 /* 4529 * These are unused in userland, we keep them 4530 * to not break binary compatibility. 
4531 */ 4532 xvfsp.vfc_vfsops = NULL; 4533 xvfsp.vfc_next = NULL; 4534 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4535 } 4536 4537 #ifdef COMPAT_FREEBSD32 4538 struct xvfsconf32 { 4539 uint32_t vfc_vfsops; 4540 char vfc_name[MFSNAMELEN]; 4541 int32_t vfc_typenum; 4542 int32_t vfc_refcount; 4543 int32_t vfc_flags; 4544 uint32_t vfc_next; 4545 }; 4546 4547 static int 4548 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4549 { 4550 struct xvfsconf32 xvfsp; 4551 4552 bzero(&xvfsp, sizeof(xvfsp)); 4553 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4554 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4555 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4556 xvfsp.vfc_flags = vfsp->vfc_flags; 4557 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4558 } 4559 #endif 4560 4561 /* 4562 * Top level filesystem related information gathering. 4563 */ 4564 static int 4565 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4566 { 4567 struct vfsconf *vfsp; 4568 int error; 4569 4570 error = 0; 4571 vfsconf_slock(); 4572 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4573 #ifdef COMPAT_FREEBSD32 4574 if (req->flags & SCTL_MASK32) 4575 error = vfsconf2x32(req, vfsp); 4576 else 4577 #endif 4578 error = vfsconf2x(req, vfsp); 4579 if (error) 4580 break; 4581 } 4582 vfsconf_sunlock(); 4583 return (error); 4584 } 4585 4586 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4587 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4588 "S,xvfsconf", "List of all configured filesystems"); 4589 4590 #ifndef BURN_BRIDGES 4591 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4592 4593 static int 4594 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4595 { 4596 int *name = (int *)arg1 - 1; /* XXX */ 4597 u_int namelen = arg2 + 1; /* XXX */ 4598 struct vfsconf *vfsp; 4599 4600 log(LOG_WARNING, "userland calling deprecated sysctl, " 4601 "please rebuild world\n"); 4602 4603 #if 1 || defined(COMPAT_PRELITE2) 4604 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
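 * A request with a single name component is taken to be the old
 * VFS_VFSCONF style and is handed to sysctl_ovfs_conf() below.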
*/ 4605 if (namelen == 1) 4606 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4607 #endif 4608 4609 switch (name[1]) { 4610 case VFS_MAXTYPENUM: 4611 if (namelen != 2) 4612 return (ENOTDIR); 4613 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4614 case VFS_CONF: 4615 if (namelen != 3) 4616 return (ENOTDIR); /* overloaded */ 4617 vfsconf_slock(); 4618 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4619 if (vfsp->vfc_typenum == name[2]) 4620 break; 4621 } 4622 vfsconf_sunlock(); 4623 if (vfsp == NULL) 4624 return (EOPNOTSUPP); 4625 #ifdef COMPAT_FREEBSD32 4626 if (req->flags & SCTL_MASK32) 4627 return (vfsconf2x32(req, vfsp)); 4628 else 4629 #endif 4630 return (vfsconf2x(req, vfsp)); 4631 } 4632 return (EOPNOTSUPP); 4633 } 4634 4635 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4636 CTLFLAG_MPSAFE, vfs_sysctl, 4637 "Generic filesystem"); 4638 4639 #if 1 || defined(COMPAT_PRELITE2) 4640 4641 static int 4642 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4643 { 4644 int error; 4645 struct vfsconf *vfsp; 4646 struct ovfsconf ovfs; 4647 4648 vfsconf_slock(); 4649 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4650 bzero(&ovfs, sizeof(ovfs)); 4651 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4652 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4653 ovfs.vfc_index = vfsp->vfc_typenum; 4654 ovfs.vfc_refcount = vfsp->vfc_refcount; 4655 ovfs.vfc_flags = vfsp->vfc_flags; 4656 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4657 if (error != 0) { 4658 vfsconf_sunlock(); 4659 return (error); 4660 } 4661 } 4662 vfsconf_sunlock(); 4663 return (0); 4664 } 4665 4666 #endif /* 1 || COMPAT_PRELITE2 */ 4667 #endif /* !BURN_BRIDGES */ 4668 4669 #define KINFO_VNODESLOP 10 4670 #ifdef notyet 4671 /* 4672 * Dump vnode list (via sysctl). 4673 */ 4674 /* ARGSUSED */ 4675 static int 4676 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4677 { 4678 struct xvnode *xvn; 4679 struct mount *mp; 4680 struct vnode *vp; 4681 int error, len, n; 4682 4683 /* 4684 * Stale numvnodes access is not fatal here. 4685 */ 4686 req->lock = 0; 4687 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4688 if (!req->oldptr) 4689 /* Make an estimate */ 4690 return (SYSCTL_OUT(req, 0, len)); 4691 4692 error = sysctl_wire_old_buffer(req, 0); 4693 if (error != 0) 4694 return (error); 4695 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4696 n = 0; 4697 mtx_lock(&mountlist_mtx); 4698 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4699 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4700 continue; 4701 MNT_ILOCK(mp); 4702 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4703 if (n == len) 4704 break; 4705 vref(vp); 4706 xvn[n].xv_size = sizeof *xvn; 4707 xvn[n].xv_vnode = vp; 4708 xvn[n].xv_id = 0; /* XXX compat */ 4709 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4710 XV_COPY(usecount); 4711 XV_COPY(writecount); 4712 XV_COPY(holdcnt); 4713 XV_COPY(mount); 4714 XV_COPY(numoutput); 4715 XV_COPY(type); 4716 #undef XV_COPY 4717 xvn[n].xv_flag = vp->v_vflag; 4718 4719 switch (vp->v_type) { 4720 case VREG: 4721 case VDIR: 4722 case VLNK: 4723 break; 4724 case VBLK: 4725 case VCHR: 4726 if (vp->v_rdev == NULL) { 4727 vrele(vp); 4728 continue; 4729 } 4730 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4731 break; 4732 case VSOCK: 4733 xvn[n].xv_socket = vp->v_socket; 4734 break; 4735 case VFIFO: 4736 xvn[n].xv_fifo = vp->v_fifoinfo; 4737 break; 4738 case VNON: 4739 case VBAD: 4740 default: 4741 /* shouldn't happen? 
*/ 4742 vrele(vp); 4743 continue; 4744 } 4745 vrele(vp); 4746 ++n; 4747 } 4748 MNT_IUNLOCK(mp); 4749 mtx_lock(&mountlist_mtx); 4750 vfs_unbusy(mp); 4751 if (n == len) 4752 break; 4753 } 4754 mtx_unlock(&mountlist_mtx); 4755 4756 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4757 free(xvn, M_TEMP); 4758 return (error); 4759 } 4760 4761 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4762 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4763 ""); 4764 #endif 4765 4766 static void 4767 unmount_or_warn(struct mount *mp) 4768 { 4769 int error; 4770 4771 error = dounmount(mp, MNT_FORCE, curthread); 4772 if (error != 0) { 4773 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4774 if (error == EBUSY) 4775 printf("BUSY)\n"); 4776 else 4777 printf("%d)\n", error); 4778 } 4779 } 4780 4781 /* 4782 * Unmount all filesystems. The list is traversed in reverse order 4783 * of mounting to avoid dependencies. 4784 */ 4785 void 4786 vfs_unmountall(void) 4787 { 4788 struct mount *mp, *tmp; 4789 4790 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4791 4792 /* 4793 * Since this only runs when rebooting, it is not interlocked. 4794 */ 4795 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4796 vfs_ref(mp); 4797 4798 /* 4799 * Forcibly unmounting "/dev" before "/" would prevent clean 4800 * unmount of the latter. 4801 */ 4802 if (mp == rootdevmp) 4803 continue; 4804 4805 unmount_or_warn(mp); 4806 } 4807 4808 if (rootdevmp != NULL) 4809 unmount_or_warn(rootdevmp); 4810 } 4811 4812 static void 4813 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4814 { 4815 4816 ASSERT_VI_LOCKED(vp, __func__); 4817 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4818 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4819 vdropl(vp); 4820 return; 4821 } 4822 if (vn_lock(vp, lkflags) == 0) { 4823 VI_LOCK(vp); 4824 vinactive(vp); 4825 VOP_UNLOCK(vp); 4826 vdropl(vp); 4827 return; 4828 } 4829 vdefer_inactive_unlocked(vp); 4830 } 4831 4832 static int 4833 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4834 { 4835 4836 return (vp->v_iflag & VI_DEFINACT); 4837 } 4838 4839 static void __noinline 4840 vfs_periodic_inactive(struct mount *mp, int flags) 4841 { 4842 struct vnode *vp, *mvp; 4843 int lkflags; 4844 4845 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4846 if (flags != MNT_WAIT) 4847 lkflags |= LK_NOWAIT; 4848 4849 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4850 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4851 VI_UNLOCK(vp); 4852 continue; 4853 } 4854 vp->v_iflag &= ~VI_DEFINACT; 4855 vfs_deferred_inactive(vp, lkflags); 4856 } 4857 } 4858 4859 static inline bool 4860 vfs_want_msync(struct vnode *vp) 4861 { 4862 struct vm_object *obj; 4863 4864 /* 4865 * This test may be performed without any locks held. 4866 * We rely on vm_object's type stability. 
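 * The answer is only a hint: vfs_periodic_msync_inactive() re-checks the
 * vnode after locking it with vget(), so a stale reading here merely
 * costs an extra pass or postpones the msync to a later period.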
4867 */ 4868 if (vp->v_vflag & VV_NOSYNC) 4869 return (false); 4870 obj = vp->v_object; 4871 return (obj != NULL && vm_object_mightbedirty(obj)); 4872 } 4873 4874 static int 4875 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4876 { 4877 4878 if (vp->v_vflag & VV_NOSYNC) 4879 return (false); 4880 if (vp->v_iflag & VI_DEFINACT) 4881 return (true); 4882 return (vfs_want_msync(vp)); 4883 } 4884 4885 static void __noinline 4886 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4887 { 4888 struct vnode *vp, *mvp; 4889 struct vm_object *obj; 4890 int lkflags, objflags; 4891 bool seen_defer; 4892 4893 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4894 if (flags != MNT_WAIT) { 4895 lkflags |= LK_NOWAIT; 4896 objflags = OBJPC_NOSYNC; 4897 } else { 4898 objflags = OBJPC_SYNC; 4899 } 4900 4901 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4902 seen_defer = false; 4903 if (vp->v_iflag & VI_DEFINACT) { 4904 vp->v_iflag &= ~VI_DEFINACT; 4905 seen_defer = true; 4906 } 4907 if (!vfs_want_msync(vp)) { 4908 if (seen_defer) 4909 vfs_deferred_inactive(vp, lkflags); 4910 else 4911 VI_UNLOCK(vp); 4912 continue; 4913 } 4914 if (vget(vp, lkflags) == 0) { 4915 obj = vp->v_object; 4916 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4917 VM_OBJECT_WLOCK(obj); 4918 vm_object_page_clean(obj, 0, 0, objflags); 4919 VM_OBJECT_WUNLOCK(obj); 4920 } 4921 vput(vp); 4922 if (seen_defer) 4923 vdrop(vp); 4924 } else { 4925 if (seen_defer) 4926 vdefer_inactive_unlocked(vp); 4927 } 4928 } 4929 } 4930 4931 void 4932 vfs_periodic(struct mount *mp, int flags) 4933 { 4934 4935 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4936 4937 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4938 vfs_periodic_inactive(mp, flags); 4939 else 4940 vfs_periodic_msync_inactive(mp, flags); 4941 } 4942 4943 static void 4944 destroy_vpollinfo_free(struct vpollinfo *vi) 4945 { 4946 4947 knlist_destroy(&vi->vpi_selinfo.si_note); 4948 mtx_destroy(&vi->vpi_lock); 4949 free(vi, M_VNODEPOLL); 4950 } 4951 4952 static void 4953 destroy_vpollinfo(struct vpollinfo *vi) 4954 { 4955 4956 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4957 seldrain(&vi->vpi_selinfo); 4958 destroy_vpollinfo_free(vi); 4959 } 4960 4961 /* 4962 * Initialize per-vnode helper structure to hold poll-related state. 4963 */ 4964 void 4965 v_addpollinfo(struct vnode *vp) 4966 { 4967 struct vpollinfo *vi; 4968 4969 if (vp->v_pollinfo != NULL) 4970 return; 4971 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4972 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4973 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4974 vfs_knlunlock, vfs_knl_assert_lock); 4975 VI_LOCK(vp); 4976 if (vp->v_pollinfo != NULL) { 4977 VI_UNLOCK(vp); 4978 destroy_vpollinfo_free(vi); 4979 return; 4980 } 4981 vp->v_pollinfo = vi; 4982 VI_UNLOCK(vp); 4983 } 4984 4985 /* 4986 * Record a process's interest in events which might happen to 4987 * a vnode. Because poll uses the historic select-style interface 4988 * internally, this routine serves as both the ``check for any 4989 * pending events'' and the ``record my interest in future events'' 4990 * functions. (These are done together, while the lock is held, 4991 * to avoid race conditions.) 
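 *
 * A VOP_POLL() implementation with nothing pending would typically just
 * do (illustrative sketch):
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));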
4992 */
4993 int
4994 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4995 {
4996
4997 v_addpollinfo(vp);
4998 mtx_lock(&vp->v_pollinfo->vpi_lock);
4999 if (vp->v_pollinfo->vpi_revents & events) {
5000 /*
5001 * This leaves events we are not interested
5002 * in available for the other process which
5003 * presumably had requested them
5004 * (otherwise they would never have been
5005 * recorded).
5006 */
5007 events &= vp->v_pollinfo->vpi_revents;
5008 vp->v_pollinfo->vpi_revents &= ~events;
5009
5010 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5011 return (events);
5012 }
5013 vp->v_pollinfo->vpi_events |= events;
5014 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5015 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5016 return (0);
5017 }
5018
5019 /*
5020 * Routine to create and manage a filesystem syncer vnode.
5021 */
5022 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5023 static int sync_fsync(struct vop_fsync_args *);
5024 static int sync_inactive(struct vop_inactive_args *);
5025 static int sync_reclaim(struct vop_reclaim_args *);
5026
5027 static struct vop_vector sync_vnodeops = {
5028 .vop_bypass = VOP_EOPNOTSUPP,
5029 .vop_close = sync_close, /* close */
5030 .vop_fsync = sync_fsync, /* fsync */
5031 .vop_inactive = sync_inactive, /* inactive */
5032 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */
5033 .vop_reclaim = sync_reclaim, /* reclaim */
5034 .vop_lock1 = vop_stdlock, /* lock */
5035 .vop_unlock = vop_stdunlock, /* unlock */
5036 .vop_islocked = vop_stdislocked, /* islocked */
5037 };
5038 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5039
5040 /*
5041 * Create a new filesystem syncer vnode for the specified mount point.
5042 */
5043 void
5044 vfs_allocate_syncvnode(struct mount *mp)
5045 {
5046 struct vnode *vp;
5047 struct bufobj *bo;
5048 static long start, incr, next;
5049 int error;
5050
5051 /* Allocate a new vnode */
5052 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5053 if (error != 0)
5054 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5055 vp->v_type = VNON;
5056 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5057 vp->v_vflag |= VV_FORCEINSMQ;
5058 error = insmntque1(vp, mp);
5059 if (error != 0)
5060 panic("vfs_allocate_syncvnode: insmntque() failed");
5061 vp->v_vflag &= ~VV_FORCEINSMQ;
5062 VOP_UNLOCK(vp);
5063 /*
5064 * Place the vnode onto the syncer worklist. We attempt to
5065 * scatter them about on the list so that they will go off
5066 * at evenly distributed times even if all the filesystems
5067 * are mounted at once.
5068 */
5069 next += incr;
5070 if (next == 0 || next > syncer_maxdelay) {
5071 start /= 2;
5072 incr /= 2;
5073 if (start == 0) {
5074 start = syncer_maxdelay / 2;
5075 incr = syncer_maxdelay;
5076 }
5077 next = start;
5078 }
5079 bo = &vp->v_bufobj;
5080 BO_LOCK(bo);
5081 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5082 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
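 * Once sync_mtx is taken below, the new vnode becomes mp->mnt_syncer only
 * if no other thread installed one first; a losing vnode is torn down
 * again with vgone()/vput().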
*/
5083 mtx_lock(&sync_mtx);
5084 sync_vnode_count++;
5085 if (mp->mnt_syncer == NULL) {
5086 mp->mnt_syncer = vp;
5087 vp = NULL;
5088 }
5089 mtx_unlock(&sync_mtx);
5090 BO_UNLOCK(bo);
5091 if (vp != NULL) {
5092 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5093 vgone(vp);
5094 vput(vp);
5095 }
5096 }
5097
5098 void
5099 vfs_deallocate_syncvnode(struct mount *mp)
5100 {
5101 struct vnode *vp;
5102
5103 mtx_lock(&sync_mtx);
5104 vp = mp->mnt_syncer;
5105 if (vp != NULL)
5106 mp->mnt_syncer = NULL;
5107 mtx_unlock(&sync_mtx);
5108 if (vp != NULL)
5109 vrele(vp);
5110 }
5111
5112 /*
5113 * Do a lazy sync of the filesystem.
5114 */
5115 static int
5116 sync_fsync(struct vop_fsync_args *ap)
5117 {
5118 struct vnode *syncvp = ap->a_vp;
5119 struct mount *mp = syncvp->v_mount;
5120 int error, save;
5121 struct bufobj *bo;
5122
5123 /*
5124 * We only need to do something if this is a lazy evaluation.
5125 */
5126 if (ap->a_waitfor != MNT_LAZY)
5127 return (0);
5128
5129 /*
5130 * Move ourselves to the back of the sync list.
5131 */
5132 bo = &syncvp->v_bufobj;
5133 BO_LOCK(bo);
5134 vn_syncer_add_to_worklist(bo, syncdelay);
5135 BO_UNLOCK(bo);
5136
5137 /*
5138 * Walk the list of vnodes pushing all that are dirty and
5139 * not already on the sync list.
5140 */
5141 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5142 return (0);
5143 VOP_UNLOCK(syncvp);
5144 save = curthread_pflags_set(TDP_SYNCIO);
5145 /*
5146 * The filesystem at hand may be idle with free vnodes stored in the
5147 * batch. Return them instead of letting them stay there indefinitely.
5148 */
5149 vfs_periodic(mp, MNT_NOWAIT);
5150 error = VFS_SYNC(mp, MNT_LAZY);
5151 curthread_pflags_restore(save);
5152 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5153 vfs_unbusy(mp);
5154 return (error);
5155 }
5156
5157 /*
5158 * The syncer vnode is no longer referenced.
5159 */
5160 static int
5161 sync_inactive(struct vop_inactive_args *ap)
5162 {
5163
5164 vgone(ap->a_vp);
5165 return (0);
5166 }
5167
5168 /*
5169 * The syncer vnode is no longer needed and is being decommissioned.
5170 *
5171 * Modifications to the worklist must be protected by sync_mtx.
5172 */ 5173 static int 5174 sync_reclaim(struct vop_reclaim_args *ap) 5175 { 5176 struct vnode *vp = ap->a_vp; 5177 struct bufobj *bo; 5178 5179 bo = &vp->v_bufobj; 5180 BO_LOCK(bo); 5181 mtx_lock(&sync_mtx); 5182 if (vp->v_mount->mnt_syncer == vp) 5183 vp->v_mount->mnt_syncer = NULL; 5184 if (bo->bo_flag & BO_ONWORKLST) { 5185 LIST_REMOVE(bo, bo_synclist); 5186 syncer_worklist_len--; 5187 sync_vnode_count--; 5188 bo->bo_flag &= ~BO_ONWORKLST; 5189 } 5190 mtx_unlock(&sync_mtx); 5191 BO_UNLOCK(bo); 5192 5193 return (0); 5194 } 5195 5196 int 5197 vn_need_pageq_flush(struct vnode *vp) 5198 { 5199 struct vm_object *obj; 5200 5201 obj = vp->v_object; 5202 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5203 vm_object_mightbedirty(obj)); 5204 } 5205 5206 /* 5207 * Check if vnode represents a disk device 5208 */ 5209 bool 5210 vn_isdisk_error(struct vnode *vp, int *errp) 5211 { 5212 int error; 5213 5214 if (vp->v_type != VCHR) { 5215 error = ENOTBLK; 5216 goto out; 5217 } 5218 error = 0; 5219 dev_lock(); 5220 if (vp->v_rdev == NULL) 5221 error = ENXIO; 5222 else if (vp->v_rdev->si_devsw == NULL) 5223 error = ENXIO; 5224 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5225 error = ENOTBLK; 5226 dev_unlock(); 5227 out: 5228 *errp = error; 5229 return (error == 0); 5230 } 5231 5232 bool 5233 vn_isdisk(struct vnode *vp) 5234 { 5235 int error; 5236 5237 return (vn_isdisk_error(vp, &error)); 5238 } 5239 5240 /* 5241 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5242 * the comment above cache_fplookup for details. 5243 */ 5244 int 5245 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5246 { 5247 int error; 5248 5249 VFS_SMR_ASSERT_ENTERED(); 5250 5251 /* Check the owner. */ 5252 if (cred->cr_uid == file_uid) { 5253 if (file_mode & S_IXUSR) 5254 return (0); 5255 goto out_error; 5256 } 5257 5258 /* Otherwise, check the groups (first match) */ 5259 if (groupmember(file_gid, cred)) { 5260 if (file_mode & S_IXGRP) 5261 return (0); 5262 goto out_error; 5263 } 5264 5265 /* Otherwise, check everyone else. */ 5266 if (file_mode & S_IXOTH) 5267 return (0); 5268 out_error: 5269 /* 5270 * Permission check failed, but it is possible denial will get overwritten 5271 * (e.g., when root is traversing through a 700 directory owned by someone 5272 * else). 5273 * 5274 * vaccess() calls priv_check_cred which in turn can descent into MAC 5275 * modules overriding this result. It's quite unclear what semantics 5276 * are allowed for them to operate, thus for safety we don't call them 5277 * from within the SMR section. This also means if any such modules 5278 * are present, we have to let the regular lookup decide. 5279 */ 5280 error = priv_check_cred_vfs_lookup_nomac(cred); 5281 switch (error) { 5282 case 0: 5283 return (0); 5284 case EAGAIN: 5285 /* 5286 * MAC modules present. 5287 */ 5288 return (EAGAIN); 5289 case EPERM: 5290 return (EACCES); 5291 default: 5292 return (error); 5293 } 5294 } 5295 5296 /* 5297 * Common filesystem object access control check routine. Accepts a 5298 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5299 * Returns 0 on success, or an errno on failure. 
5300 */ 5301 int 5302 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5303 accmode_t accmode, struct ucred *cred) 5304 { 5305 accmode_t dac_granted; 5306 accmode_t priv_granted; 5307 5308 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5309 ("invalid bit in accmode")); 5310 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5311 ("VAPPEND without VWRITE")); 5312 5313 /* 5314 * Look for a normal, non-privileged way to access the file/directory 5315 * as requested. If it exists, go with that. 5316 */ 5317 5318 dac_granted = 0; 5319 5320 /* Check the owner. */ 5321 if (cred->cr_uid == file_uid) { 5322 dac_granted |= VADMIN; 5323 if (file_mode & S_IXUSR) 5324 dac_granted |= VEXEC; 5325 if (file_mode & S_IRUSR) 5326 dac_granted |= VREAD; 5327 if (file_mode & S_IWUSR) 5328 dac_granted |= (VWRITE | VAPPEND); 5329 5330 if ((accmode & dac_granted) == accmode) 5331 return (0); 5332 5333 goto privcheck; 5334 } 5335 5336 /* Otherwise, check the groups (first match) */ 5337 if (groupmember(file_gid, cred)) { 5338 if (file_mode & S_IXGRP) 5339 dac_granted |= VEXEC; 5340 if (file_mode & S_IRGRP) 5341 dac_granted |= VREAD; 5342 if (file_mode & S_IWGRP) 5343 dac_granted |= (VWRITE | VAPPEND); 5344 5345 if ((accmode & dac_granted) == accmode) 5346 return (0); 5347 5348 goto privcheck; 5349 } 5350 5351 /* Otherwise, check everyone else. */ 5352 if (file_mode & S_IXOTH) 5353 dac_granted |= VEXEC; 5354 if (file_mode & S_IROTH) 5355 dac_granted |= VREAD; 5356 if (file_mode & S_IWOTH) 5357 dac_granted |= (VWRITE | VAPPEND); 5358 if ((accmode & dac_granted) == accmode) 5359 return (0); 5360 5361 privcheck: 5362 /* 5363 * Build a privilege mask to determine if the set of privileges 5364 * satisfies the requirements when combined with the granted mask 5365 * from above. For each privilege, if the privilege is required, 5366 * bitwise or the request type onto the priv_granted mask. 5367 */ 5368 priv_granted = 0; 5369 5370 if (type == VDIR) { 5371 /* 5372 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5373 * requests, instead of PRIV_VFS_EXEC. 5374 */ 5375 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5376 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5377 priv_granted |= VEXEC; 5378 } else { 5379 /* 5380 * Ensure that at least one execute bit is on. Otherwise, 5381 * a privileged user will always succeed, and we don't want 5382 * this to happen unless the file really is executable. 5383 */ 5384 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5385 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5386 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5387 priv_granted |= VEXEC; 5388 } 5389 5390 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5391 !priv_check_cred(cred, PRIV_VFS_READ)) 5392 priv_granted |= VREAD; 5393 5394 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5395 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5396 priv_granted |= (VWRITE | VAPPEND); 5397 5398 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5399 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5400 priv_granted |= VADMIN; 5401 5402 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5403 return (0); 5404 } 5405 5406 return ((accmode & VADMIN) ? EPERM : EACCES); 5407 } 5408 5409 /* 5410 * Credential check based on process requesting service, and per-attribute 5411 * permissions. 
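 *
 * The switch below maps the namespaces: EXTATTR_NAMESPACE_SYSTEM requires
 * the PRIV_VFS_EXTATTR_SYSTEM privilege, EXTATTR_NAMESPACE_USER falls back
 * to a plain VOP_ACCESS() check, and anything else is refused with EPERM.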
5412 */ 5413 int 5414 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5415 struct thread *td, accmode_t accmode) 5416 { 5417 5418 /* 5419 * Kernel-invoked always succeeds. 5420 */ 5421 if (cred == NOCRED) 5422 return (0); 5423 5424 /* 5425 * Do not allow privileged processes in jail to directly manipulate 5426 * system attributes. 5427 */ 5428 switch (attrnamespace) { 5429 case EXTATTR_NAMESPACE_SYSTEM: 5430 /* Potentially should be: return (EPERM); */ 5431 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5432 case EXTATTR_NAMESPACE_USER: 5433 return (VOP_ACCESS(vp, accmode, cred, td)); 5434 default: 5435 return (EPERM); 5436 } 5437 } 5438 5439 #ifdef DEBUG_VFS_LOCKS 5440 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5441 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5442 "Drop into debugger on lock violation"); 5443 5444 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5445 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5446 0, "Check for interlock across VOPs"); 5447 5448 int vfs_badlock_print = 1; /* Print lock violations. */ 5449 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5450 0, "Print lock violations"); 5451 5452 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5453 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5454 0, "Print vnode details on lock violations"); 5455 5456 #ifdef KDB 5457 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5458 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5459 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5460 #endif 5461 5462 static void 5463 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5464 { 5465 5466 #ifdef KDB 5467 if (vfs_badlock_backtrace) 5468 kdb_backtrace(); 5469 #endif 5470 if (vfs_badlock_vnode) 5471 vn_printf(vp, "vnode "); 5472 if (vfs_badlock_print) 5473 printf("%s: %p %s\n", str, (void *)vp, msg); 5474 if (vfs_badlock_ddb) 5475 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5476 } 5477 5478 void 5479 assert_vi_locked(struct vnode *vp, const char *str) 5480 { 5481 5482 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5483 vfs_badlock("interlock is not locked but should be", str, vp); 5484 } 5485 5486 void 5487 assert_vi_unlocked(struct vnode *vp, const char *str) 5488 { 5489 5490 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5491 vfs_badlock("interlock is locked but should not be", str, vp); 5492 } 5493 5494 void 5495 assert_vop_locked(struct vnode *vp, const char *str) 5496 { 5497 int locked; 5498 5499 if (KERNEL_PANICKED() || vp == NULL) 5500 return; 5501 5502 locked = VOP_ISLOCKED(vp); 5503 if (locked == 0 || locked == LK_EXCLOTHER) 5504 vfs_badlock("is not locked but should be", str, vp); 5505 } 5506 5507 void 5508 assert_vop_unlocked(struct vnode *vp, const char *str) 5509 { 5510 if (KERNEL_PANICKED() || vp == NULL) 5511 return; 5512 5513 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5514 vfs_badlock("is locked but should not be", str, vp); 5515 } 5516 5517 void 5518 assert_vop_elocked(struct vnode *vp, const char *str) 5519 { 5520 if (KERNEL_PANICKED() || vp == NULL) 5521 return; 5522 5523 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5524 vfs_badlock("is not exclusive locked but should be", str, vp); 5525 } 5526 #endif /* DEBUG_VFS_LOCKS */ 5527 5528 void 5529 vop_rename_fail(struct vop_rename_args *ap) 5530 { 5531 5532 if (ap->a_tvp != 
NULL) 5533 vput(ap->a_tvp); 5534 if (ap->a_tdvp == ap->a_tvp) 5535 vrele(ap->a_tdvp); 5536 else 5537 vput(ap->a_tdvp); 5538 vrele(ap->a_fdvp); 5539 vrele(ap->a_fvp); 5540 } 5541 5542 void 5543 vop_rename_pre(void *ap) 5544 { 5545 struct vop_rename_args *a = ap; 5546 5547 #ifdef DEBUG_VFS_LOCKS 5548 if (a->a_tvp) 5549 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5550 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5551 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5552 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5553 5554 /* Check the source (from). */ 5555 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5556 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5557 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5558 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5559 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5560 5561 /* Check the target. */ 5562 if (a->a_tvp) 5563 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5564 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5565 #endif 5566 /* 5567 * It may be tempting to add vn_seqc_write_begin/end calls here and 5568 * in vop_rename_post but that's not going to work out since some 5569 * filesystems relookup vnodes mid-rename. This is probably a bug. 5570 * 5571 * For now filesystems are expected to do the relevant calls after they 5572 * decide what vnodes to operate on. 5573 */ 5574 if (a->a_tdvp != a->a_fdvp) 5575 vhold(a->a_fdvp); 5576 if (a->a_tvp != a->a_fvp) 5577 vhold(a->a_fvp); 5578 vhold(a->a_tdvp); 5579 if (a->a_tvp) 5580 vhold(a->a_tvp); 5581 } 5582 5583 #ifdef DEBUG_VFS_LOCKS 5584 void 5585 vop_fplookup_vexec_debugpre(void *ap __unused) 5586 { 5587 5588 VFS_SMR_ASSERT_ENTERED(); 5589 } 5590 5591 void 5592 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5593 { 5594 5595 VFS_SMR_ASSERT_ENTERED(); 5596 } 5597 5598 void 5599 vop_fplookup_symlink_debugpre(void *ap __unused) 5600 { 5601 5602 VFS_SMR_ASSERT_ENTERED(); 5603 } 5604 5605 void 5606 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5607 { 5608 5609 VFS_SMR_ASSERT_ENTERED(); 5610 } 5611 5612 static void 5613 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5614 { 5615 if (vp->v_type == VCHR) 5616 ; 5617 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5618 ASSERT_VOP_LOCKED(vp, name); 5619 else 5620 ASSERT_VOP_ELOCKED(vp, name); 5621 } 5622 5623 void 5624 vop_fsync_debugpre(void *a) 5625 { 5626 struct vop_fsync_args *ap; 5627 5628 ap = a; 5629 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5630 } 5631 5632 void 5633 vop_fsync_debugpost(void *a, int rc __unused) 5634 { 5635 struct vop_fsync_args *ap; 5636 5637 ap = a; 5638 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5639 } 5640 5641 void 5642 vop_fdatasync_debugpre(void *a) 5643 { 5644 struct vop_fdatasync_args *ap; 5645 5646 ap = a; 5647 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5648 } 5649 5650 void 5651 vop_fdatasync_debugpost(void *a, int rc __unused) 5652 { 5653 struct vop_fdatasync_args *ap; 5654 5655 ap = a; 5656 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5657 } 5658 5659 void 5660 vop_strategy_debugpre(void *ap) 5661 { 5662 struct vop_strategy_args *a; 5663 struct buf *bp; 5664 5665 a = ap; 5666 bp = a->a_bp; 5667 5668 /* 5669 * Cluster ops lock their component buffers but not the IO container. 
5670 */ 5671 if ((bp->b_flags & B_CLUSTER) != 0) 5672 return; 5673 5674 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5675 if (vfs_badlock_print) 5676 printf( 5677 "VOP_STRATEGY: bp is not locked but should be\n"); 5678 if (vfs_badlock_ddb) 5679 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5680 } 5681 } 5682 5683 void 5684 vop_lock_debugpre(void *ap) 5685 { 5686 struct vop_lock1_args *a = ap; 5687 5688 if ((a->a_flags & LK_INTERLOCK) == 0) 5689 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5690 else 5691 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5692 } 5693 5694 void 5695 vop_lock_debugpost(void *ap, int rc) 5696 { 5697 struct vop_lock1_args *a = ap; 5698 5699 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5700 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5701 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5702 } 5703 5704 void 5705 vop_unlock_debugpre(void *ap) 5706 { 5707 struct vop_unlock_args *a = ap; 5708 5709 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5710 } 5711 5712 void 5713 vop_need_inactive_debugpre(void *ap) 5714 { 5715 struct vop_need_inactive_args *a = ap; 5716 5717 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5718 } 5719 5720 void 5721 vop_need_inactive_debugpost(void *ap, int rc) 5722 { 5723 struct vop_need_inactive_args *a = ap; 5724 5725 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5726 } 5727 #endif 5728 5729 void 5730 vop_create_pre(void *ap) 5731 { 5732 struct vop_create_args *a; 5733 struct vnode *dvp; 5734 5735 a = ap; 5736 dvp = a->a_dvp; 5737 vn_seqc_write_begin(dvp); 5738 } 5739 5740 void 5741 vop_create_post(void *ap, int rc) 5742 { 5743 struct vop_create_args *a; 5744 struct vnode *dvp; 5745 5746 a = ap; 5747 dvp = a->a_dvp; 5748 vn_seqc_write_end(dvp); 5749 if (!rc) 5750 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5751 } 5752 5753 void 5754 vop_whiteout_pre(void *ap) 5755 { 5756 struct vop_whiteout_args *a; 5757 struct vnode *dvp; 5758 5759 a = ap; 5760 dvp = a->a_dvp; 5761 vn_seqc_write_begin(dvp); 5762 } 5763 5764 void 5765 vop_whiteout_post(void *ap, int rc) 5766 { 5767 struct vop_whiteout_args *a; 5768 struct vnode *dvp; 5769 5770 a = ap; 5771 dvp = a->a_dvp; 5772 vn_seqc_write_end(dvp); 5773 } 5774 5775 void 5776 vop_deleteextattr_pre(void *ap) 5777 { 5778 struct vop_deleteextattr_args *a; 5779 struct vnode *vp; 5780 5781 a = ap; 5782 vp = a->a_vp; 5783 vn_seqc_write_begin(vp); 5784 } 5785 5786 void 5787 vop_deleteextattr_post(void *ap, int rc) 5788 { 5789 struct vop_deleteextattr_args *a; 5790 struct vnode *vp; 5791 5792 a = ap; 5793 vp = a->a_vp; 5794 vn_seqc_write_end(vp); 5795 if (!rc) 5796 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5797 } 5798 5799 void 5800 vop_link_pre(void *ap) 5801 { 5802 struct vop_link_args *a; 5803 struct vnode *vp, *tdvp; 5804 5805 a = ap; 5806 vp = a->a_vp; 5807 tdvp = a->a_tdvp; 5808 vn_seqc_write_begin(vp); 5809 vn_seqc_write_begin(tdvp); 5810 } 5811 5812 void 5813 vop_link_post(void *ap, int rc) 5814 { 5815 struct vop_link_args *a; 5816 struct vnode *vp, *tdvp; 5817 5818 a = ap; 5819 vp = a->a_vp; 5820 tdvp = a->a_tdvp; 5821 vn_seqc_write_end(vp); 5822 vn_seqc_write_end(tdvp); 5823 if (!rc) { 5824 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5825 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5826 } 5827 } 5828 5829 void 5830 vop_mkdir_pre(void *ap) 5831 { 5832 struct vop_mkdir_args *a; 5833 struct vnode *dvp; 5834 5835 a = ap; 5836 dvp = a->a_dvp; 5837 vn_seqc_write_begin(dvp); 5838 } 5839 5840 void 5841 vop_mkdir_post(void *ap, int rc) 5842 { 5843 struct vop_mkdir_args *a; 5844 struct vnode *dvp; 5845 5846 a = ap; 5847 dvp = a->a_dvp; 5848 vn_seqc_write_end(dvp); 
5849 if (!rc) 5850 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5851 } 5852 5853 #ifdef DEBUG_VFS_LOCKS 5854 void 5855 vop_mkdir_debugpost(void *ap, int rc) 5856 { 5857 struct vop_mkdir_args *a; 5858 5859 a = ap; 5860 if (!rc) 5861 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5862 } 5863 #endif 5864 5865 void 5866 vop_mknod_pre(void *ap) 5867 { 5868 struct vop_mknod_args *a; 5869 struct vnode *dvp; 5870 5871 a = ap; 5872 dvp = a->a_dvp; 5873 vn_seqc_write_begin(dvp); 5874 } 5875 5876 void 5877 vop_mknod_post(void *ap, int rc) 5878 { 5879 struct vop_mknod_args *a; 5880 struct vnode *dvp; 5881 5882 a = ap; 5883 dvp = a->a_dvp; 5884 vn_seqc_write_end(dvp); 5885 if (!rc) 5886 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5887 } 5888 5889 void 5890 vop_reclaim_post(void *ap, int rc) 5891 { 5892 struct vop_reclaim_args *a; 5893 struct vnode *vp; 5894 5895 a = ap; 5896 vp = a->a_vp; 5897 ASSERT_VOP_IN_SEQC(vp); 5898 if (!rc) 5899 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5900 } 5901 5902 void 5903 vop_remove_pre(void *ap) 5904 { 5905 struct vop_remove_args *a; 5906 struct vnode *dvp, *vp; 5907 5908 a = ap; 5909 dvp = a->a_dvp; 5910 vp = a->a_vp; 5911 vn_seqc_write_begin(dvp); 5912 vn_seqc_write_begin(vp); 5913 } 5914 5915 void 5916 vop_remove_post(void *ap, int rc) 5917 { 5918 struct vop_remove_args *a; 5919 struct vnode *dvp, *vp; 5920 5921 a = ap; 5922 dvp = a->a_dvp; 5923 vp = a->a_vp; 5924 vn_seqc_write_end(dvp); 5925 vn_seqc_write_end(vp); 5926 if (!rc) { 5927 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5928 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5929 } 5930 } 5931 5932 void 5933 vop_rename_post(void *ap, int rc) 5934 { 5935 struct vop_rename_args *a = ap; 5936 long hint; 5937 5938 if (!rc) { 5939 hint = NOTE_WRITE; 5940 if (a->a_fdvp == a->a_tdvp) { 5941 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5942 hint |= NOTE_LINK; 5943 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5944 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5945 } else { 5946 hint |= NOTE_EXTEND; 5947 if (a->a_fvp->v_type == VDIR) 5948 hint |= NOTE_LINK; 5949 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5950 5951 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5952 a->a_tvp->v_type == VDIR) 5953 hint &= ~NOTE_LINK; 5954 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5955 } 5956 5957 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5958 if (a->a_tvp) 5959 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5960 } 5961 if (a->a_tdvp != a->a_fdvp) 5962 vdrop(a->a_fdvp); 5963 if (a->a_tvp != a->a_fvp) 5964 vdrop(a->a_fvp); 5965 vdrop(a->a_tdvp); 5966 if (a->a_tvp) 5967 vdrop(a->a_tvp); 5968 } 5969 5970 void 5971 vop_rmdir_pre(void *ap) 5972 { 5973 struct vop_rmdir_args *a; 5974 struct vnode *dvp, *vp; 5975 5976 a = ap; 5977 dvp = a->a_dvp; 5978 vp = a->a_vp; 5979 vn_seqc_write_begin(dvp); 5980 vn_seqc_write_begin(vp); 5981 } 5982 5983 void 5984 vop_rmdir_post(void *ap, int rc) 5985 { 5986 struct vop_rmdir_args *a; 5987 struct vnode *dvp, *vp; 5988 5989 a = ap; 5990 dvp = a->a_dvp; 5991 vp = a->a_vp; 5992 vn_seqc_write_end(dvp); 5993 vn_seqc_write_end(vp); 5994 if (!rc) { 5995 vp->v_vflag |= VV_UNLINKED; 5996 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5997 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5998 } 5999 } 6000 6001 void 6002 vop_setattr_pre(void *ap) 6003 { 6004 struct vop_setattr_args *a; 6005 struct vnode *vp; 6006 6007 a = ap; 6008 vp = a->a_vp; 6009 vn_seqc_write_begin(vp); 6010 } 6011 6012 void 6013 vop_setattr_post(void *ap, int rc) 6014 { 6015 struct vop_setattr_args *a; 6016 struct vnode *vp; 6017 6018 a = ap; 6019 vp = a->a_vp; 6020 vn_seqc_write_end(vp); 6021 if (!rc) 6022 
VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6023 } 6024 6025 void 6026 vop_setacl_pre(void *ap) 6027 { 6028 struct vop_setacl_args *a; 6029 struct vnode *vp; 6030 6031 a = ap; 6032 vp = a->a_vp; 6033 vn_seqc_write_begin(vp); 6034 } 6035 6036 void 6037 vop_setacl_post(void *ap, int rc __unused) 6038 { 6039 struct vop_setacl_args *a; 6040 struct vnode *vp; 6041 6042 a = ap; 6043 vp = a->a_vp; 6044 vn_seqc_write_end(vp); 6045 } 6046 6047 void 6048 vop_setextattr_pre(void *ap) 6049 { 6050 struct vop_setextattr_args *a; 6051 struct vnode *vp; 6052 6053 a = ap; 6054 vp = a->a_vp; 6055 vn_seqc_write_begin(vp); 6056 } 6057 6058 void 6059 vop_setextattr_post(void *ap, int rc) 6060 { 6061 struct vop_setextattr_args *a; 6062 struct vnode *vp; 6063 6064 a = ap; 6065 vp = a->a_vp; 6066 vn_seqc_write_end(vp); 6067 if (!rc) 6068 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6069 } 6070 6071 void 6072 vop_symlink_pre(void *ap) 6073 { 6074 struct vop_symlink_args *a; 6075 struct vnode *dvp; 6076 6077 a = ap; 6078 dvp = a->a_dvp; 6079 vn_seqc_write_begin(dvp); 6080 } 6081 6082 void 6083 vop_symlink_post(void *ap, int rc) 6084 { 6085 struct vop_symlink_args *a; 6086 struct vnode *dvp; 6087 6088 a = ap; 6089 dvp = a->a_dvp; 6090 vn_seqc_write_end(dvp); 6091 if (!rc) 6092 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6093 } 6094 6095 void 6096 vop_open_post(void *ap, int rc) 6097 { 6098 struct vop_open_args *a = ap; 6099 6100 if (!rc) 6101 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6102 } 6103 6104 void 6105 vop_close_post(void *ap, int rc) 6106 { 6107 struct vop_close_args *a = ap; 6108 6109 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6110 !VN_IS_DOOMED(a->a_vp))) { 6111 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6112 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6113 } 6114 } 6115 6116 void 6117 vop_read_post(void *ap, int rc) 6118 { 6119 struct vop_read_args *a = ap; 6120 6121 if (!rc) 6122 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6123 } 6124 6125 void 6126 vop_read_pgcache_post(void *ap, int rc) 6127 { 6128 struct vop_read_pgcache_args *a = ap; 6129 6130 if (!rc) 6131 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6132 } 6133 6134 void 6135 vop_readdir_post(void *ap, int rc) 6136 { 6137 struct vop_readdir_args *a = ap; 6138 6139 if (!rc) 6140 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6141 } 6142 6143 static struct knlist fs_knlist; 6144 6145 static void 6146 vfs_event_init(void *arg) 6147 { 6148 knlist_init_mtx(&fs_knlist, NULL); 6149 } 6150 /* XXX - correct order? 
*/ 6151 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6152 6153 void 6154 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6155 { 6156 6157 KNOTE_UNLOCKED(&fs_knlist, event); 6158 } 6159 6160 static int filt_fsattach(struct knote *kn); 6161 static void filt_fsdetach(struct knote *kn); 6162 static int filt_fsevent(struct knote *kn, long hint); 6163 6164 struct filterops fs_filtops = { 6165 .f_isfd = 0, 6166 .f_attach = filt_fsattach, 6167 .f_detach = filt_fsdetach, 6168 .f_event = filt_fsevent 6169 }; 6170 6171 static int 6172 filt_fsattach(struct knote *kn) 6173 { 6174 6175 kn->kn_flags |= EV_CLEAR; 6176 knlist_add(&fs_knlist, kn, 0); 6177 return (0); 6178 } 6179 6180 static void 6181 filt_fsdetach(struct knote *kn) 6182 { 6183 6184 knlist_remove(&fs_knlist, kn, 0); 6185 } 6186 6187 static int 6188 filt_fsevent(struct knote *kn, long hint) 6189 { 6190 6191 kn->kn_fflags |= kn->kn_sfflags & hint; 6192 6193 return (kn->kn_fflags != 0); 6194 } 6195 6196 static int 6197 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6198 { 6199 struct vfsidctl vc; 6200 int error; 6201 struct mount *mp; 6202 6203 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6204 if (error) 6205 return (error); 6206 if (vc.vc_vers != VFS_CTL_VERS1) 6207 return (EINVAL); 6208 mp = vfs_getvfs(&vc.vc_fsid); 6209 if (mp == NULL) 6210 return (ENOENT); 6211 /* ensure that a specific sysctl goes to the right filesystem. */ 6212 if (strcmp(vc.vc_fstypename, "*") != 0 && 6213 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6214 vfs_rel(mp); 6215 return (EINVAL); 6216 } 6217 VCTLTOREQ(&vc, req); 6218 error = VFS_SYSCTL(mp, vc.vc_op, req); 6219 vfs_rel(mp); 6220 return (error); 6221 } 6222 6223 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6224 NULL, 0, sysctl_vfs_ctl, "", 6225 "Sysctl by fsid"); 6226 6227 /* 6228 * Function to initialize a va_filerev field sensibly. 6229 * XXX: Wouldn't a random number make a lot more sense ?? 
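 *
 * The returned value packs the seconds of the system uptime into the
 * upper 32 bits and the top half of the fractional part into the lower
 * 32 bits, so successive calls within a single boot yield a
 * non-decreasing value.  As an illustrative example, bt.sec == 10 and
 * bt.frac == 1ULL << 63 (half a second) would produce 0x0000000a80000000.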
6230 */ 6231 u_quad_t 6232 init_va_filerev(void) 6233 { 6234 struct bintime bt; 6235 6236 getbinuptime(&bt); 6237 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6238 } 6239 6240 static int filt_vfsread(struct knote *kn, long hint); 6241 static int filt_vfswrite(struct knote *kn, long hint); 6242 static int filt_vfsvnode(struct knote *kn, long hint); 6243 static void filt_vfsdetach(struct knote *kn); 6244 static struct filterops vfsread_filtops = { 6245 .f_isfd = 1, 6246 .f_detach = filt_vfsdetach, 6247 .f_event = filt_vfsread 6248 }; 6249 static struct filterops vfswrite_filtops = { 6250 .f_isfd = 1, 6251 .f_detach = filt_vfsdetach, 6252 .f_event = filt_vfswrite 6253 }; 6254 static struct filterops vfsvnode_filtops = { 6255 .f_isfd = 1, 6256 .f_detach = filt_vfsdetach, 6257 .f_event = filt_vfsvnode 6258 }; 6259 6260 static void 6261 vfs_knllock(void *arg) 6262 { 6263 struct vnode *vp = arg; 6264 6265 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6266 } 6267 6268 static void 6269 vfs_knlunlock(void *arg) 6270 { 6271 struct vnode *vp = arg; 6272 6273 VOP_UNLOCK(vp); 6274 } 6275 6276 static void 6277 vfs_knl_assert_lock(void *arg, int what) 6278 { 6279 #ifdef DEBUG_VFS_LOCKS 6280 struct vnode *vp = arg; 6281 6282 if (what == LA_LOCKED) 6283 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6284 else 6285 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6286 #endif 6287 } 6288 6289 int 6290 vfs_kqfilter(struct vop_kqfilter_args *ap) 6291 { 6292 struct vnode *vp = ap->a_vp; 6293 struct knote *kn = ap->a_kn; 6294 struct knlist *knl; 6295 6296 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6297 kn->kn_filter != EVFILT_WRITE), 6298 ("READ/WRITE filter on a FIFO leaked through")); 6299 switch (kn->kn_filter) { 6300 case EVFILT_READ: 6301 kn->kn_fop = &vfsread_filtops; 6302 break; 6303 case EVFILT_WRITE: 6304 kn->kn_fop = &vfswrite_filtops; 6305 break; 6306 case EVFILT_VNODE: 6307 kn->kn_fop = &vfsvnode_filtops; 6308 break; 6309 default: 6310 return (EINVAL); 6311 } 6312 6313 kn->kn_hook = (caddr_t)vp; 6314 6315 v_addpollinfo(vp); 6316 if (vp->v_pollinfo == NULL) 6317 return (ENOMEM); 6318 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6319 vhold(vp); 6320 knlist_add(knl, kn, 0); 6321 6322 return (0); 6323 } 6324 6325 /* 6326 * Detach knote from vnode 6327 */ 6328 static void 6329 filt_vfsdetach(struct knote *kn) 6330 { 6331 struct vnode *vp = (struct vnode *)kn->kn_hook; 6332 6333 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6334 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6335 vdrop(vp); 6336 } 6337 6338 /*ARGSUSED*/ 6339 static int 6340 filt_vfsread(struct knote *kn, long hint) 6341 { 6342 struct vnode *vp = (struct vnode *)kn->kn_hook; 6343 struct vattr va; 6344 int res; 6345 6346 /* 6347 * filesystem is gone, so set the EOF flag and schedule 6348 * the knote for deletion. 
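 * A NOTE_REVOKE hint, or a zero hint delivered once the vnode has
 * degenerated to VBAD, indicates revocation.  Otherwise the filter
 * reports in kn_data how many bytes remain between the file offset
 * and the end of the file.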
6349 */ 6350 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6351 VI_LOCK(vp); 6352 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6353 VI_UNLOCK(vp); 6354 return (1); 6355 } 6356 6357 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6358 return (0); 6359 6360 VI_LOCK(vp); 6361 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6362 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6363 VI_UNLOCK(vp); 6364 return (res); 6365 } 6366 6367 /*ARGSUSED*/ 6368 static int 6369 filt_vfswrite(struct knote *kn, long hint) 6370 { 6371 struct vnode *vp = (struct vnode *)kn->kn_hook; 6372 6373 VI_LOCK(vp); 6374 6375 /* 6376 * filesystem is gone, so set the EOF flag and schedule 6377 * the knote for deletion. 6378 */ 6379 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6380 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6381 6382 kn->kn_data = 0; 6383 VI_UNLOCK(vp); 6384 return (1); 6385 } 6386 6387 static int 6388 filt_vfsvnode(struct knote *kn, long hint) 6389 { 6390 struct vnode *vp = (struct vnode *)kn->kn_hook; 6391 int res; 6392 6393 VI_LOCK(vp); 6394 if (kn->kn_sfflags & hint) 6395 kn->kn_fflags |= hint; 6396 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6397 kn->kn_flags |= EV_EOF; 6398 VI_UNLOCK(vp); 6399 return (1); 6400 } 6401 res = (kn->kn_fflags != 0); 6402 VI_UNLOCK(vp); 6403 return (res); 6404 } 6405 6406 /* 6407 * Returns whether the directory is empty or not. 6408 * If it is empty, the return value is 0; otherwise 6409 * the return value is an error value (which may 6410 * be ENOTEMPTY). 6411 */ 6412 int 6413 vfs_emptydir(struct vnode *vp) 6414 { 6415 struct uio uio; 6416 struct iovec iov; 6417 struct dirent *dirent, *dp, *endp; 6418 int error, eof; 6419 6420 error = 0; 6421 eof = 0; 6422 6423 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6424 VNASSERT(vp->v_type == VDIR, vp, ("vp is not a directory")); 6425 6426 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6427 iov.iov_base = dirent; 6428 iov.iov_len = sizeof(struct dirent); 6429 6430 uio.uio_iov = &iov; 6431 uio.uio_iovcnt = 1; 6432 uio.uio_offset = 0; 6433 uio.uio_resid = sizeof(struct dirent); 6434 uio.uio_segflg = UIO_SYSSPACE; 6435 uio.uio_rw = UIO_READ; 6436 uio.uio_td = curthread; 6437 6438 while (eof == 0 && error == 0) { 6439 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6440 NULL, NULL); 6441 if (error != 0) 6442 break; 6443 endp = (void *)((uint8_t *)dirent + 6444 sizeof(struct dirent) - uio.uio_resid); 6445 for (dp = dirent; dp < endp; 6446 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6447 if (dp->d_type == DT_WHT) 6448 continue; 6449 if (dp->d_namlen == 0) 6450 continue; 6451 if (dp->d_type != DT_DIR && 6452 dp->d_type != DT_UNKNOWN) { 6453 error = ENOTEMPTY; 6454 break; 6455 } 6456 if (dp->d_namlen > 2) { 6457 error = ENOTEMPTY; 6458 break; 6459 } 6460 if (dp->d_namlen == 1 && 6461 dp->d_name[0] != '.') { 6462 error = ENOTEMPTY; 6463 break; 6464 } 6465 if (dp->d_namlen == 2 && 6466 dp->d_name[1] != '.') { 6467 error = ENOTEMPTY; 6468 break; 6469 } 6470 uio.uio_resid = sizeof(struct dirent); 6471 } 6472 } 6473 free(dirent, M_TEMP); 6474 return (error); 6475 } 6476 6477 int 6478 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6479 { 6480 int error; 6481 6482 if (dp->d_reclen > ap->a_uio->uio_resid) 6483 return (ENAMETOOLONG); 6484 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6485 if (error) { 6486 if (ap->a_ncookies != NULL) { 6487 if (ap->a_cookies != NULL) 6488 free(ap->a_cookies, M_TEMP); 6489 ap->a_cookies = NULL; 6490 
*ap->a_ncookies = 0; 6491 } 6492 return (error); 6493 } 6494 if (ap->a_ncookies == NULL) 6495 return (0); 6496 6497 KASSERT(ap->a_cookies, 6498 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6499 6500 *ap->a_cookies = realloc(*ap->a_cookies, 6501 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6502 (*ap->a_cookies)[*ap->a_ncookies] = off; 6503 *ap->a_ncookies += 1; 6504 return (0); 6505 } 6506 6507 /* 6508 * The purpose of this routine is to remove granularity from accmode_t, 6509 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6510 * VADMIN and VAPPEND. 6511 * 6512 * If it returns 0, the caller is supposed to continue with the usual 6513 * access checks using 'accmode' as modified by this routine. If it 6514 * returns nonzero value, the caller is supposed to return that value 6515 * as errno. 6516 * 6517 * Note that after this routine runs, accmode may be zero. 6518 */ 6519 int 6520 vfs_unixify_accmode(accmode_t *accmode) 6521 { 6522 /* 6523 * There is no way to specify explicit "deny" rule using 6524 * file mode or POSIX.1e ACLs. 6525 */ 6526 if (*accmode & VEXPLICIT_DENY) { 6527 *accmode = 0; 6528 return (0); 6529 } 6530 6531 /* 6532 * None of these can be translated into usual access bits. 6533 * Also, the common case for NFSv4 ACLs is to not contain 6534 * either of these bits. Caller should check for VWRITE 6535 * on the containing directory instead. 6536 */ 6537 if (*accmode & (VDELETE_CHILD | VDELETE)) 6538 return (EPERM); 6539 6540 if (*accmode & VADMIN_PERMS) { 6541 *accmode &= ~VADMIN_PERMS; 6542 *accmode |= VADMIN; 6543 } 6544 6545 /* 6546 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6547 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6548 */ 6549 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6550 6551 return (0); 6552 } 6553 6554 /* 6555 * Clear out a doomed vnode (if any) and replace it with a new one as long 6556 * as the fs is not being unmounted. Return the root vnode to the caller. 6557 */ 6558 static int __noinline 6559 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6560 { 6561 struct vnode *vp; 6562 int error; 6563 6564 restart: 6565 if (mp->mnt_rootvnode != NULL) { 6566 MNT_ILOCK(mp); 6567 vp = mp->mnt_rootvnode; 6568 if (vp != NULL) { 6569 if (!VN_IS_DOOMED(vp)) { 6570 vrefact(vp); 6571 MNT_IUNLOCK(mp); 6572 error = vn_lock(vp, flags); 6573 if (error == 0) { 6574 *vpp = vp; 6575 return (0); 6576 } 6577 vrele(vp); 6578 goto restart; 6579 } 6580 /* 6581 * Clear the old one. 
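 * The cache's reference is dropped only after vfs_op_barrier_wait()
 * below, once lockless readers in vfs_cache_root() have either taken
 * their own reference or observed the NULL pointer.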
6582 */ 6583 mp->mnt_rootvnode = NULL; 6584 } 6585 MNT_IUNLOCK(mp); 6586 if (vp != NULL) { 6587 vfs_op_barrier_wait(mp); 6588 vrele(vp); 6589 } 6590 } 6591 error = VFS_CACHEDROOT(mp, flags, vpp); 6592 if (error != 0) 6593 return (error); 6594 if (mp->mnt_vfs_ops == 0) { 6595 MNT_ILOCK(mp); 6596 if (mp->mnt_vfs_ops != 0) { 6597 MNT_IUNLOCK(mp); 6598 return (0); 6599 } 6600 if (mp->mnt_rootvnode == NULL) { 6601 vrefact(*vpp); 6602 mp->mnt_rootvnode = *vpp; 6603 } else { 6604 if (mp->mnt_rootvnode != *vpp) { 6605 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6606 panic("%s: mismatch between vnode returned " 6607 " by VFS_CACHEDROOT and the one cached " 6608 " (%p != %p)", 6609 __func__, *vpp, mp->mnt_rootvnode); 6610 } 6611 } 6612 } 6613 MNT_IUNLOCK(mp); 6614 } 6615 return (0); 6616 } 6617 6618 int 6619 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6620 { 6621 struct mount_pcpu *mpcpu; 6622 struct vnode *vp; 6623 int error; 6624 6625 if (!vfs_op_thread_enter(mp, mpcpu)) 6626 return (vfs_cache_root_fallback(mp, flags, vpp)); 6627 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6628 if (vp == NULL || VN_IS_DOOMED(vp)) { 6629 vfs_op_thread_exit(mp, mpcpu); 6630 return (vfs_cache_root_fallback(mp, flags, vpp)); 6631 } 6632 vrefact(vp); 6633 vfs_op_thread_exit(mp, mpcpu); 6634 error = vn_lock(vp, flags); 6635 if (error != 0) { 6636 vrele(vp); 6637 return (vfs_cache_root_fallback(mp, flags, vpp)); 6638 } 6639 *vpp = vp; 6640 return (0); 6641 } 6642 6643 struct vnode * 6644 vfs_cache_root_clear(struct mount *mp) 6645 { 6646 struct vnode *vp; 6647 6648 /* 6649 * ops > 0 guarantees there is nobody who can see this vnode 6650 */ 6651 MPASS(mp->mnt_vfs_ops > 0); 6652 vp = mp->mnt_rootvnode; 6653 if (vp != NULL) 6654 vn_seqc_write_begin(vp); 6655 mp->mnt_rootvnode = NULL; 6656 return (vp); 6657 } 6658 6659 void 6660 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6661 { 6662 6663 MPASS(mp->mnt_vfs_ops > 0); 6664 vrefact(vp); 6665 mp->mnt_rootvnode = vp; 6666 } 6667 6668 /* 6669 * These are helper functions for filesystems to traverse all 6670 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6671 * 6672 * This interface replaces MNT_VNODE_FOREACH. 6673 */ 6674 6675 struct vnode * 6676 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6677 { 6678 struct vnode *vp; 6679 6680 if (should_yield()) 6681 kern_yield(PRI_USER); 6682 MNT_ILOCK(mp); 6683 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6684 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6685 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6686 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6687 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6688 continue; 6689 VI_LOCK(vp); 6690 if (VN_IS_DOOMED(vp)) { 6691 VI_UNLOCK(vp); 6692 continue; 6693 } 6694 break; 6695 } 6696 if (vp == NULL) { 6697 __mnt_vnode_markerfree_all(mvp, mp); 6698 /* MNT_IUNLOCK(mp); -- done in above function */ 6699 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6700 return (NULL); 6701 } 6702 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6703 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6704 MNT_IUNLOCK(mp); 6705 return (vp); 6706 } 6707 6708 struct vnode * 6709 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6710 { 6711 struct vnode *vp; 6712 6713 *mvp = vn_alloc_marker(mp); 6714 MNT_ILOCK(mp); 6715 MNT_REF(mp); 6716 6717 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6718 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
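 * A stale negative answer is harmless; the flag is re-checked under
 * the vnode interlock immediately below.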
*/ 6719 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6720 continue; 6721 VI_LOCK(vp); 6722 if (VN_IS_DOOMED(vp)) { 6723 VI_UNLOCK(vp); 6724 continue; 6725 } 6726 break; 6727 } 6728 if (vp == NULL) { 6729 MNT_REL(mp); 6730 MNT_IUNLOCK(mp); 6731 vn_free_marker(*mvp); 6732 *mvp = NULL; 6733 return (NULL); 6734 } 6735 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6736 MNT_IUNLOCK(mp); 6737 return (vp); 6738 } 6739 6740 void 6741 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6742 { 6743 6744 if (*mvp == NULL) { 6745 MNT_IUNLOCK(mp); 6746 return; 6747 } 6748 6749 mtx_assert(MNT_MTX(mp), MA_OWNED); 6750 6751 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6752 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6753 MNT_REL(mp); 6754 MNT_IUNLOCK(mp); 6755 vn_free_marker(*mvp); 6756 *mvp = NULL; 6757 } 6758 6759 /* 6760 * These are helper functions for filesystems to traverse their 6761 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6762 */ 6763 static void 6764 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6765 { 6766 6767 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6768 6769 MNT_ILOCK(mp); 6770 MNT_REL(mp); 6771 MNT_IUNLOCK(mp); 6772 vn_free_marker(*mvp); 6773 *mvp = NULL; 6774 } 6775 6776 /* 6777 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6778 * conventional lock order during mnt_vnode_next_lazy iteration. 6779 * 6780 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6781 * The list lock is dropped and reacquired. On success, both locks are held. 6782 * On failure, the mount vnode list lock is held but the vnode interlock is 6783 * not, and the procedure may have yielded. 6784 */ 6785 static bool 6786 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6787 struct vnode *vp) 6788 { 6789 6790 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6791 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6792 ("%s: bad marker", __func__)); 6793 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6794 ("%s: inappropriate vnode", __func__)); 6795 ASSERT_VI_UNLOCKED(vp, __func__); 6796 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6797 6798 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6799 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6800 6801 /* 6802 * Note we may be racing against vdrop which transitioned the hold 6803 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6804 * if we are the only user after we get the interlock we will just 6805 * vdrop. 6806 */ 6807 vhold(vp); 6808 mtx_unlock(&mp->mnt_listmtx); 6809 VI_LOCK(vp); 6810 if (VN_IS_DOOMED(vp)) { 6811 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6812 goto out_lost; 6813 } 6814 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6815 /* 6816 * There is nothing to do if we are the last user. 
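 * In that case refcount_release_if_not_last() declines to drop the
 * hold count and we bail to out_lost, where vdropl() takes care of
 * releasing the final reference.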
6817 */ 6818 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6819 goto out_lost; 6820 mtx_lock(&mp->mnt_listmtx); 6821 return (true); 6822 out_lost: 6823 vdropl(vp); 6824 maybe_yield(); 6825 mtx_lock(&mp->mnt_listmtx); 6826 return (false); 6827 } 6828 6829 static struct vnode * 6830 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6831 void *cbarg) 6832 { 6833 struct vnode *vp; 6834 6835 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6836 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6837 restart: 6838 vp = TAILQ_NEXT(*mvp, v_lazylist); 6839 while (vp != NULL) { 6840 if (vp->v_type == VMARKER) { 6841 vp = TAILQ_NEXT(vp, v_lazylist); 6842 continue; 6843 } 6844 /* 6845 * See if we want to process the vnode. Note we may encounter a 6846 * long string of vnodes we don't care about and hog the list 6847 * as a result. Check for it and requeue the marker. 6848 */ 6849 VNPASS(!VN_IS_DOOMED(vp), vp); 6850 if (!cb(vp, cbarg)) { 6851 if (!should_yield()) { 6852 vp = TAILQ_NEXT(vp, v_lazylist); 6853 continue; 6854 } 6855 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6856 v_lazylist); 6857 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6858 v_lazylist); 6859 mtx_unlock(&mp->mnt_listmtx); 6860 kern_yield(PRI_USER); 6861 mtx_lock(&mp->mnt_listmtx); 6862 goto restart; 6863 } 6864 /* 6865 * Try-lock because this is the wrong lock order. 6866 */ 6867 if (!VI_TRYLOCK(vp) && 6868 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6869 goto restart; 6870 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6871 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6872 ("alien vnode on the lazy list %p %p", vp, mp)); 6873 VNPASS(vp->v_mount == mp, vp); 6874 VNPASS(!VN_IS_DOOMED(vp), vp); 6875 break; 6876 } 6877 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6878 6879 /* Check if we are done */ 6880 if (vp == NULL) { 6881 mtx_unlock(&mp->mnt_listmtx); 6882 mnt_vnode_markerfree_lazy(mvp, mp); 6883 return (NULL); 6884 } 6885 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6886 mtx_unlock(&mp->mnt_listmtx); 6887 ASSERT_VI_LOCKED(vp, "lazy iter"); 6888 return (vp); 6889 } 6890 6891 struct vnode * 6892 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6893 void *cbarg) 6894 { 6895 6896 if (should_yield()) 6897 kern_yield(PRI_USER); 6898 mtx_lock(&mp->mnt_listmtx); 6899 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6900 } 6901 6902 struct vnode * 6903 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6904 void *cbarg) 6905 { 6906 struct vnode *vp; 6907 6908 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6909 return (NULL); 6910 6911 *mvp = vn_alloc_marker(mp); 6912 MNT_ILOCK(mp); 6913 MNT_REF(mp); 6914 MNT_IUNLOCK(mp); 6915 6916 mtx_lock(&mp->mnt_listmtx); 6917 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6918 if (vp == NULL) { 6919 mtx_unlock(&mp->mnt_listmtx); 6920 mnt_vnode_markerfree_lazy(mvp, mp); 6921 return (NULL); 6922 } 6923 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6924 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6925 } 6926 6927 void 6928 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6929 { 6930 6931 if (*mvp == NULL) 6932 return; 6933 6934 mtx_lock(&mp->mnt_listmtx); 6935 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6936 mtx_unlock(&mp->mnt_listmtx); 6937 mnt_vnode_markerfree_lazy(mvp, mp); 6938 } 6939 6940 int 6941 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 6942 { 6943 6944 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 6945 
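		/*
		 * The caller asked to skip the VOP_ACCESS() check below for
		 * this lookup; the flag is one-shot, so consume it and
		 * report success.
		 */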
cnp->cn_flags &= ~NOEXECCHECK; 6946 return (0); 6947 } 6948 6949 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 6950 } 6951 6952 /* 6953 * Do not use this variant unless you have means other than the hold count 6954 * to prevent the vnode from getting freed. 6955 */ 6956 void 6957 vn_seqc_write_begin_locked(struct vnode *vp) 6958 { 6959 6960 ASSERT_VI_LOCKED(vp, __func__); 6961 VNPASS(vp->v_holdcnt > 0, vp); 6962 VNPASS(vp->v_seqc_users >= 0, vp); 6963 vp->v_seqc_users++; 6964 if (vp->v_seqc_users == 1) 6965 seqc_sleepable_write_begin(&vp->v_seqc); 6966 } 6967 6968 void 6969 vn_seqc_write_begin(struct vnode *vp) 6970 { 6971 6972 VI_LOCK(vp); 6973 vn_seqc_write_begin_locked(vp); 6974 VI_UNLOCK(vp); 6975 } 6976 6977 void 6978 vn_seqc_write_end_locked(struct vnode *vp) 6979 { 6980 6981 ASSERT_VI_LOCKED(vp, __func__); 6982 VNPASS(vp->v_seqc_users > 0, vp); 6983 vp->v_seqc_users--; 6984 if (vp->v_seqc_users == 0) 6985 seqc_sleepable_write_end(&vp->v_seqc); 6986 } 6987 6988 void 6989 vn_seqc_write_end(struct vnode *vp) 6990 { 6991 6992 VI_LOCK(vp); 6993 vn_seqc_write_end_locked(vp); 6994 VI_UNLOCK(vp); 6995 } 6996 6997 /* 6998 * Special case handling for allocating and freeing vnodes. 6999 * 7000 * The counter remains unchanged on free so that a doomed vnode will 7001 * keep testing as in modify as long as it is accessible with SMR. 7002 */ 7003 static void 7004 vn_seqc_init(struct vnode *vp) 7005 { 7006 7007 vp->v_seqc = 0; 7008 vp->v_seqc_users = 0; 7009 } 7010 7011 static void 7012 vn_seqc_write_end_free(struct vnode *vp) 7013 { 7014 7015 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7016 VNPASS(vp->v_seqc_users == 1, vp); 7017 } 7018 7019 void 7020 vn_irflag_set_locked(struct vnode *vp, short toset) 7021 { 7022 short flags; 7023 7024 ASSERT_VI_LOCKED(vp, __func__); 7025 flags = vn_irflag_read(vp); 7026 VNASSERT((flags & toset) == 0, vp, 7027 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7028 __func__, flags, toset)); 7029 atomic_store_short(&vp->v_irflag, flags | toset); 7030 } 7031 7032 void 7033 vn_irflag_set(struct vnode *vp, short toset) 7034 { 7035 7036 VI_LOCK(vp); 7037 vn_irflag_set_locked(vp, toset); 7038 VI_UNLOCK(vp); 7039 } 7040 7041 void 7042 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7043 { 7044 short flags; 7045 7046 ASSERT_VI_LOCKED(vp, __func__); 7047 flags = vn_irflag_read(vp); 7048 atomic_store_short(&vp->v_irflag, flags | toset); 7049 } 7050 7051 void 7052 vn_irflag_set_cond(struct vnode *vp, short toset) 7053 { 7054 7055 VI_LOCK(vp); 7056 vn_irflag_set_cond_locked(vp, toset); 7057 VI_UNLOCK(vp); 7058 } 7059 7060 void 7061 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7062 { 7063 short flags; 7064 7065 ASSERT_VI_LOCKED(vp, __func__); 7066 flags = vn_irflag_read(vp); 7067 VNASSERT((flags & tounset) == tounset, vp, 7068 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7069 __func__, flags, tounset)); 7070 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7071 } 7072 7073 void 7074 vn_irflag_unset(struct vnode *vp, short tounset) 7075 { 7076 7077 VI_LOCK(vp); 7078 vn_irflag_unset_locked(vp, tounset); 7079 VI_UNLOCK(vp); 7080 } 7081
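/*
 * Illustrative sketch only (not compiled as part of this file): a typical
 * way for a filesystem to walk its vnodes using the iterators above.  The
 * MNT_VNODE_FOREACH_ALL() macro from sys/mount.h wraps
 * __mnt_vnode_first_all() and __mnt_vnode_next_all(); every vnode is
 * handed back with its interlock held, and MNT_VNODE_FOREACH_ALL_ABORT()
 * must be used to free the marker when leaving the loop early.  The sync
 * of regular files shown here is just an example workload.
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT) != 0)
 *			continue;
 *		(void)VOP_FSYNC(vp, MNT_WAIT, curthread);
 *		vput(vp);
 *	}
 */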