1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/asan.h> 52 #include <sys/bio.h> 53 #include <sys/buf.h> 54 #include <sys/capsicum.h> 55 #include <sys/condvar.h> 56 #include <sys/conf.h> 57 #include <sys/counter.h> 58 #include <sys/dirent.h> 59 #include <sys/event.h> 60 #include <sys/eventhandler.h> 61 #include <sys/extattr.h> 62 #include <sys/file.h> 63 #include <sys/fcntl.h> 64 #include <sys/jail.h> 65 #include <sys/kdb.h> 66 #include <sys/kernel.h> 67 #include <sys/kthread.h> 68 #include <sys/ktr.h> 69 #include <sys/lockf.h> 70 #include <sys/malloc.h> 71 #include <sys/mount.h> 72 #include <sys/namei.h> 73 #include <sys/pctrie.h> 74 #include <sys/priv.h> 75 #include <sys/reboot.h> 76 #include <sys/refcount.h> 77 #include <sys/rwlock.h> 78 #include <sys/sched.h> 79 #include <sys/sleepqueue.h> 80 #include <sys/smr.h> 81 #include <sys/smp.h> 82 #include <sys/stat.h> 83 #include <sys/sysctl.h> 84 #include <sys/syslog.h> 85 #include <sys/vmmeter.h> 86 #include <sys/vnode.h> 87 #include <sys/watchdog.h> 88 89 #include <machine/stdarg.h> 90 91 #include <security/mac/mac_framework.h> 92 93 #include <vm/vm.h> 94 #include <vm/vm_object.h> 95 #include <vm/vm_extern.h> 96 #include <vm/pmap.h> 97 #include <vm/vm_map.h> 98 #include <vm/vm_page.h> 99 #include <vm/vm_kern.h> 100 #include <vm/uma.h> 101 102 #ifdef DDB 103 #include <ddb/ddb.h> 104 #endif 105 106 static void delmntque(struct vnode *vp); 107 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 108 int slpflag, int slptimeo); 109 static void syncer_shutdown(void *arg, int howto); 110 static int vtryrecycle(struct vnode *vp); 111 static void v_init_counters(struct vnode *); 112 static void vn_seqc_init(struct vnode *); 113 static void vn_seqc_write_end_free(struct vnode *vp); 114 static void vgonel(struct vnode *); 115 static bool vhold_recycle_free(struct vnode *); 116 static void vdropl_recycle(struct vnode *vp); 117 static void vdrop_recycle(struct vnode *vp); 118 static void vfs_knllock(void *arg); 119 static void vfs_knlunlock(void *arg); 120 static void vfs_knl_assert_lock(void *arg, int what); 121 static void destroy_vpollinfo(struct vpollinfo *vi); 122 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 123 daddr_t startlbn, daddr_t endlbn); 124 static void vnlru_recalc(void); 125 126 /* 127 * Number of vnodes in existence. Increased whenever getnewvnode() 128 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 129 */ 130 static u_long __exclusive_cache_line numvnodes; 131 132 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 133 "Number of vnodes in existence"); 134 135 static counter_u64_t vnodes_created; 136 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 137 "Number of vnodes created by getnewvnode"); 138 139 /* 140 * Conversion tables for conversion from vnode types to inode formats 141 * and back. 142 */ 143 enum vtype iftovt_tab[16] = { 144 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 145 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 146 }; 147 int vttoif_tab[10] = { 148 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 149 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 150 }; 151 152 /* 153 * List of allocates vnodes in the system. 154 */ 155 static TAILQ_HEAD(freelst, vnode) vnode_list; 156 static struct vnode *vnode_list_free_marker; 157 static struct vnode *vnode_list_reclaim_marker; 158 159 /* 160 * "Free" vnode target. Free vnodes are rarely completely free, but are 161 * just ones that are cheap to recycle. Usually they are for files which 162 * have been stat'd but not read; these usually have inode and namecache 163 * data attached to them. This target is the preferred minimum size of a 164 * sub-cache consisting mostly of such files. The system balances the size 165 * of this sub-cache with its complement to try to prevent either from 166 * thrashing while the other is relatively inactive. The targets express 167 * a preference for the best balance. 168 * 169 * "Above" this target there are 2 further targets (watermarks) related 170 * to recyling of free vnodes. In the best-operating case, the cache is 171 * exactly full, the free list has size between vlowat and vhiwat above the 172 * free target, and recycling from it and normal use maintains this state. 173 * Sometimes the free list is below vlowat or even empty, but this state 174 * is even better for immediate use provided the cache is not full. 175 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 176 * ones) to reach one of these states. The watermarks are currently hard- 177 * coded as 4% and 9% of the available space higher. These and the default 178 * of 25% for wantfreevnodes are too large if the memory size is large. 179 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 180 * whenever vnlru_proc() becomes active. 181 */ 182 static long wantfreevnodes; 183 static long __exclusive_cache_line freevnodes; 184 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 185 &freevnodes, 0, "Number of \"free\" vnodes"); 186 static long freevnodes_old; 187 188 static counter_u64_t recycles_count; 189 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 190 "Number of vnodes recycled to meet vnode cache targets"); 191 192 static counter_u64_t recycles_free_count; 193 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 194 "Number of free vnodes recycled to meet vnode cache targets"); 195 196 static counter_u64_t deferred_inact; 197 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact, 198 "Number of times inactive processing was deferred"); 199 200 /* To keep more than one thread at a time from running vfs_getnewfsid */ 201 static struct mtx mntid_mtx; 202 203 /* 204 * Lock for any access to the following: 205 * vnode_list 206 * numvnodes 207 * freevnodes 208 */ 209 static struct mtx __exclusive_cache_line vnode_list_mtx; 210 211 /* Publicly exported FS */ 212 struct nfs_public nfs_pub; 213 214 static uma_zone_t buf_trie_zone; 215 static smr_t buf_trie_smr; 216 217 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 218 static uma_zone_t vnode_zone; 219 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 220 221 __read_frequently smr_t vfs_smr; 222 223 /* 224 * The workitem queue. 225 * 226 * It is useful to delay writes of file data and filesystem metadata 227 * for tens of seconds so that quickly created and deleted files need 228 * not waste disk bandwidth being created and removed. To realize this, 229 * we append vnodes to a "workitem" queue. When running with a soft 230 * updates implementation, most pending metadata dependencies should 231 * not wait for more than a few seconds. Thus, mounted on block devices 232 * are delayed only about a half the time that file data is delayed. 233 * Similarly, directory updates are more critical, so are only delayed 234 * about a third the time that file data is delayed. Thus, there are 235 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 236 * one each second (driven off the filesystem syncer process). The 237 * syncer_delayno variable indicates the next queue that is to be processed. 238 * Items that need to be processed soon are placed in this queue: 239 * 240 * syncer_workitem_pending[syncer_delayno] 241 * 242 * A delay of fifteen seconds is done by placing the request fifteen 243 * entries later in the queue: 244 * 245 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 246 * 247 */ 248 static int syncer_delayno; 249 static long syncer_mask; 250 LIST_HEAD(synclist, bufobj); 251 static struct synclist *syncer_workitem_pending; 252 /* 253 * The sync_mtx protects: 254 * bo->bo_synclist 255 * sync_vnode_count 256 * syncer_delayno 257 * syncer_state 258 * syncer_workitem_pending 259 * syncer_worklist_len 260 * rushjob 261 */ 262 static struct mtx sync_mtx; 263 static struct cv sync_wakeup; 264 265 #define SYNCER_MAXDELAY 32 266 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 267 static int syncdelay = 30; /* max time to delay syncing data */ 268 static int filedelay = 30; /* time to delay syncing files */ 269 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 270 "Time to delay syncing files (in seconds)"); 271 static int dirdelay = 29; /* time to delay syncing directories */ 272 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 273 "Time to delay syncing directories (in seconds)"); 274 static int metadelay = 28; /* time to delay syncing metadata */ 275 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 276 "Time to delay syncing metadata (in seconds)"); 277 static int rushjob; /* number of slots to run ASAP */ 278 static int stat_rush_requests; /* number of times I/O speeded up */ 279 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 280 "Number of times I/O speeded up (rush requests)"); 281 282 #define VDBATCH_SIZE 8 283 struct vdbatch { 284 u_int index; 285 long freevnodes; 286 struct mtx lock; 287 struct vnode *tab[VDBATCH_SIZE]; 288 }; 289 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 290 291 static void vdbatch_dequeue(struct vnode *vp); 292 293 /* 294 * When shutting down the syncer, run it at four times normal speed. 295 */ 296 #define SYNCER_SHUTDOWN_SPEEDUP 4 297 static int sync_vnode_count; 298 static int syncer_worklist_len; 299 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 300 syncer_state; 301 302 /* Target for maximum number of vnodes. */ 303 u_long desiredvnodes; 304 static u_long gapvnodes; /* gap between wanted and desired */ 305 static u_long vhiwat; /* enough extras after expansion */ 306 static u_long vlowat; /* minimal extras before expansion */ 307 static u_long vstir; /* nonzero to stir non-free vnodes */ 308 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 309 310 static u_long vnlru_read_freevnodes(void); 311 312 /* 313 * Note that no attempt is made to sanitize these parameters. 314 */ 315 static int 316 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 317 { 318 u_long val; 319 int error; 320 321 val = desiredvnodes; 322 error = sysctl_handle_long(oidp, &val, 0, req); 323 if (error != 0 || req->newptr == NULL) 324 return (error); 325 326 if (val == desiredvnodes) 327 return (0); 328 mtx_lock(&vnode_list_mtx); 329 desiredvnodes = val; 330 wantfreevnodes = desiredvnodes / 4; 331 vnlru_recalc(); 332 mtx_unlock(&vnode_list_mtx); 333 /* 334 * XXX There is no protection against multiple threads changing 335 * desiredvnodes at the same time. Locking above only helps vnlru and 336 * getnewvnode. 337 */ 338 vfs_hash_changesize(desiredvnodes); 339 cache_changesize(desiredvnodes); 340 return (0); 341 } 342 343 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 344 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 345 "LU", "Target for maximum number of vnodes"); 346 347 static int 348 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 349 { 350 u_long val; 351 int error; 352 353 val = wantfreevnodes; 354 error = sysctl_handle_long(oidp, &val, 0, req); 355 if (error != 0 || req->newptr == NULL) 356 return (error); 357 358 if (val == wantfreevnodes) 359 return (0); 360 mtx_lock(&vnode_list_mtx); 361 wantfreevnodes = val; 362 vnlru_recalc(); 363 mtx_unlock(&vnode_list_mtx); 364 return (0); 365 } 366 367 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 368 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 369 "LU", "Target for minimum number of \"free\" vnodes"); 370 371 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 372 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 373 static int vnlru_nowhere; 374 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 375 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 376 377 static int 378 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 379 { 380 struct vnode *vp; 381 struct nameidata nd; 382 char *buf; 383 unsigned long ndflags; 384 int error; 385 386 if (req->newptr == NULL) 387 return (EINVAL); 388 if (req->newlen >= PATH_MAX) 389 return (E2BIG); 390 391 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 392 error = SYSCTL_IN(req, buf, req->newlen); 393 if (error != 0) 394 goto out; 395 396 buf[req->newlen] = '\0'; 397 398 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME; 399 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf); 400 if ((error = namei(&nd)) != 0) 401 goto out; 402 vp = nd.ni_vp; 403 404 if (VN_IS_DOOMED(vp)) { 405 /* 406 * This vnode is being recycled. Return != 0 to let the caller 407 * know that the sysctl had no effect. Return EAGAIN because a 408 * subsequent call will likely succeed (since namei will create 409 * a new vnode if necessary) 410 */ 411 error = EAGAIN; 412 goto putvnode; 413 } 414 415 counter_u64_add(recycles_count, 1); 416 vgone(vp); 417 putvnode: 418 NDFREE(&nd, 0); 419 out: 420 free(buf, M_TEMP); 421 return (error); 422 } 423 424 static int 425 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 426 { 427 struct thread *td = curthread; 428 struct vnode *vp; 429 struct file *fp; 430 int error; 431 int fd; 432 433 if (req->newptr == NULL) 434 return (EBADF); 435 436 error = sysctl_handle_int(oidp, &fd, 0, req); 437 if (error != 0) 438 return (error); 439 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 440 if (error != 0) 441 return (error); 442 vp = fp->f_vnode; 443 444 error = vn_lock(vp, LK_EXCLUSIVE); 445 if (error != 0) 446 goto drop; 447 448 counter_u64_add(recycles_count, 1); 449 vgone(vp); 450 VOP_UNLOCK(vp); 451 drop: 452 fdrop(fp, td); 453 return (error); 454 } 455 456 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 457 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 458 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 459 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 460 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 461 sysctl_ftry_reclaim_vnode, "I", 462 "Try to reclaim a vnode by its file descriptor"); 463 464 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 465 #define vnsz2log 8 466 #ifndef DEBUG_LOCKS 467 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log && 468 sizeof(struct vnode) < 1UL << (vnsz2log + 1), 469 "vnsz2log needs to be updated"); 470 #endif 471 472 /* 473 * Support for the bufobj clean & dirty pctrie. 474 */ 475 static void * 476 buf_trie_alloc(struct pctrie *ptree) 477 { 478 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 479 } 480 481 static void 482 buf_trie_free(struct pctrie *ptree, void *node) 483 { 484 uma_zfree_smr(buf_trie_zone, node); 485 } 486 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 487 buf_trie_smr); 488 489 /* 490 * Initialize the vnode management data structures. 491 * 492 * Reevaluate the following cap on the number of vnodes after the physical 493 * memory size exceeds 512GB. In the limit, as the physical memory size 494 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 495 */ 496 #ifndef MAXVNODES_MAX 497 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 498 #endif 499 500 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 501 502 static struct vnode * 503 vn_alloc_marker(struct mount *mp) 504 { 505 struct vnode *vp; 506 507 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 508 vp->v_type = VMARKER; 509 vp->v_mount = mp; 510 511 return (vp); 512 } 513 514 static void 515 vn_free_marker(struct vnode *vp) 516 { 517 518 MPASS(vp->v_type == VMARKER); 519 free(vp, M_VNODE_MARKER); 520 } 521 522 #ifdef KASAN 523 static int 524 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 525 { 526 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 527 return (0); 528 } 529 530 static void 531 vnode_dtor(void *mem, int size, void *arg __unused) 532 { 533 size_t end1, end2, off1, off2; 534 535 _Static_assert(offsetof(struct vnode, v_vnodelist) < 536 offsetof(struct vnode, v_dbatchcpu), 537 "KASAN marks require updating"); 538 539 off1 = offsetof(struct vnode, v_vnodelist); 540 off2 = offsetof(struct vnode, v_dbatchcpu); 541 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 542 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 543 544 /* 545 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 546 * after the vnode has been freed. Try to get some KASAN coverage by 547 * marking everything except those two fields as invalid. Because 548 * KASAN's tracking is not byte-granular, any preceding fields sharing 549 * the same 8-byte aligned word must also be marked valid. 550 */ 551 552 /* Handle the area from the start until v_vnodelist... */ 553 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 554 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 555 556 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 557 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 558 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 559 if (off2 > off1) 560 kasan_mark((void *)((char *)mem + off1), off2 - off1, 561 off2 - off1, KASAN_UMA_FREED); 562 563 /* ... and finally the area from v_dbatchcpu to the end. */ 564 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 565 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 566 KASAN_UMA_FREED); 567 } 568 #endif /* KASAN */ 569 570 /* 571 * Initialize a vnode as it first enters the zone. 572 */ 573 static int 574 vnode_init(void *mem, int size, int flags) 575 { 576 struct vnode *vp; 577 578 vp = mem; 579 bzero(vp, size); 580 /* 581 * Setup locks. 582 */ 583 vp->v_vnlock = &vp->v_lock; 584 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 585 /* 586 * By default, don't allow shared locks unless filesystems opt-in. 587 */ 588 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 589 LK_NOSHARE | LK_IS_VNODE); 590 /* 591 * Initialize bufobj. 592 */ 593 bufobj_init(&vp->v_bufobj, vp); 594 /* 595 * Initialize namecache. 596 */ 597 cache_vnode_init(vp); 598 /* 599 * Initialize rangelocks. 600 */ 601 rangelock_init(&vp->v_rl); 602 603 vp->v_dbatchcpu = NOCPU; 604 605 /* 606 * Check vhold_recycle_free for an explanation. 607 */ 608 vp->v_holdcnt = VHOLD_NO_SMR; 609 vp->v_type = VNON; 610 mtx_lock(&vnode_list_mtx); 611 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 612 mtx_unlock(&vnode_list_mtx); 613 return (0); 614 } 615 616 /* 617 * Free a vnode when it is cleared from the zone. 618 */ 619 static void 620 vnode_fini(void *mem, int size) 621 { 622 struct vnode *vp; 623 struct bufobj *bo; 624 625 vp = mem; 626 vdbatch_dequeue(vp); 627 mtx_lock(&vnode_list_mtx); 628 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 629 mtx_unlock(&vnode_list_mtx); 630 rangelock_destroy(&vp->v_rl); 631 lockdestroy(vp->v_vnlock); 632 mtx_destroy(&vp->v_interlock); 633 bo = &vp->v_bufobj; 634 rw_destroy(BO_LOCKPTR(bo)); 635 636 kasan_mark(mem, size, size, 0); 637 } 638 639 /* 640 * Provide the size of NFS nclnode and NFS fh for calculation of the 641 * vnode memory consumption. The size is specified directly to 642 * eliminate dependency on NFS-private header. 643 * 644 * Other filesystems may use bigger or smaller (like UFS and ZFS) 645 * private inode data, but the NFS-based estimation is ample enough. 646 * Still, we care about differences in the size between 64- and 32-bit 647 * platforms. 648 * 649 * Namecache structure size is heuristically 650 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 651 */ 652 #ifdef _LP64 653 #define NFS_NCLNODE_SZ (528 + 64) 654 #define NC_SZ 148 655 #else 656 #define NFS_NCLNODE_SZ (360 + 32) 657 #define NC_SZ 92 658 #endif 659 660 static void 661 vntblinit(void *dummy __unused) 662 { 663 struct vdbatch *vd; 664 uma_ctor ctor; 665 uma_dtor dtor; 666 int cpu, physvnodes, virtvnodes; 667 668 /* 669 * Desiredvnodes is a function of the physical memory size and the 670 * kernel's heap size. Generally speaking, it scales with the 671 * physical memory size. The ratio of desiredvnodes to the physical 672 * memory size is 1:16 until desiredvnodes exceeds 98,304. 673 * Thereafter, the 674 * marginal ratio of desiredvnodes to the physical memory size is 675 * 1:64. However, desiredvnodes is limited by the kernel's heap 676 * size. The memory required by desiredvnodes vnodes and vm objects 677 * must not exceed 1/10th of the kernel's heap size. 678 */ 679 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 680 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 681 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 682 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 683 desiredvnodes = min(physvnodes, virtvnodes); 684 if (desiredvnodes > MAXVNODES_MAX) { 685 if (bootverbose) 686 printf("Reducing kern.maxvnodes %lu -> %lu\n", 687 desiredvnodes, MAXVNODES_MAX); 688 desiredvnodes = MAXVNODES_MAX; 689 } 690 wantfreevnodes = desiredvnodes / 4; 691 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 692 TAILQ_INIT(&vnode_list); 693 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 694 /* 695 * The lock is taken to appease WITNESS. 696 */ 697 mtx_lock(&vnode_list_mtx); 698 vnlru_recalc(); 699 mtx_unlock(&vnode_list_mtx); 700 vnode_list_free_marker = vn_alloc_marker(NULL); 701 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 702 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 703 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 704 705 #ifdef KASAN 706 ctor = vnode_ctor; 707 dtor = vnode_dtor; 708 #else 709 ctor = NULL; 710 dtor = NULL; 711 #endif 712 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 713 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 714 uma_zone_set_smr(vnode_zone, vfs_smr); 715 716 /* 717 * Preallocate enough nodes to support one-per buf so that 718 * we can not fail an insert. reassignbuf() callers can not 719 * tolerate the insertion failure. 720 */ 721 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 722 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 723 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 724 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 725 uma_prealloc(buf_trie_zone, nbuf); 726 727 vnodes_created = counter_u64_alloc(M_WAITOK); 728 recycles_count = counter_u64_alloc(M_WAITOK); 729 recycles_free_count = counter_u64_alloc(M_WAITOK); 730 deferred_inact = counter_u64_alloc(M_WAITOK); 731 732 /* 733 * Initialize the filesystem syncer. 734 */ 735 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 736 &syncer_mask); 737 syncer_maxdelay = syncer_mask + 1; 738 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 739 cv_init(&sync_wakeup, "syncer"); 740 741 CPU_FOREACH(cpu) { 742 vd = DPCPU_ID_PTR((cpu), vd); 743 bzero(vd, sizeof(*vd)); 744 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 745 } 746 } 747 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 748 749 /* 750 * Mark a mount point as busy. Used to synchronize access and to delay 751 * unmounting. Eventually, mountlist_mtx is not released on failure. 752 * 753 * vfs_busy() is a custom lock, it can block the caller. 754 * vfs_busy() only sleeps if the unmount is active on the mount point. 755 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 756 * vnode belonging to mp. 757 * 758 * Lookup uses vfs_busy() to traverse mount points. 759 * root fs var fs 760 * / vnode lock A / vnode lock (/var) D 761 * /var vnode lock B /log vnode lock(/var/log) E 762 * vfs_busy lock C vfs_busy lock F 763 * 764 * Within each file system, the lock order is C->A->B and F->D->E. 765 * 766 * When traversing across mounts, the system follows that lock order: 767 * 768 * C->A->B 769 * | 770 * +->F->D->E 771 * 772 * The lookup() process for namei("/var") illustrates the process: 773 * VOP_LOOKUP() obtains B while A is held 774 * vfs_busy() obtains a shared lock on F while A and B are held 775 * vput() releases lock on B 776 * vput() releases lock on A 777 * VFS_ROOT() obtains lock on D while shared lock on F is held 778 * vfs_unbusy() releases shared lock on F 779 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 780 * Attempt to lock A (instead of vp_crossmp) while D is held would 781 * violate the global order, causing deadlocks. 782 * 783 * dounmount() locks B while F is drained. 784 */ 785 int 786 vfs_busy(struct mount *mp, int flags) 787 { 788 struct mount_pcpu *mpcpu; 789 790 MPASS((flags & ~MBF_MASK) == 0); 791 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 792 793 if (vfs_op_thread_enter(mp, mpcpu)) { 794 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 795 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 796 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 797 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 798 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 799 vfs_op_thread_exit(mp, mpcpu); 800 if (flags & MBF_MNTLSTLOCK) 801 mtx_unlock(&mountlist_mtx); 802 return (0); 803 } 804 805 MNT_ILOCK(mp); 806 vfs_assert_mount_counters(mp); 807 MNT_REF(mp); 808 /* 809 * If mount point is currently being unmounted, sleep until the 810 * mount point fate is decided. If thread doing the unmounting fails, 811 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 812 * that this mount point has survived the unmount attempt and vfs_busy 813 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 814 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 815 * about to be really destroyed. vfs_busy needs to release its 816 * reference on the mount point in this case and return with ENOENT, 817 * telling the caller that mount mount it tried to busy is no longer 818 * valid. 819 */ 820 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 821 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), 822 ("%s: non-empty upper mount list with pending unmount", 823 __func__)); 824 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 825 MNT_REL(mp); 826 MNT_IUNLOCK(mp); 827 CTR1(KTR_VFS, "%s: failed busying before sleeping", 828 __func__); 829 return (ENOENT); 830 } 831 if (flags & MBF_MNTLSTLOCK) 832 mtx_unlock(&mountlist_mtx); 833 mp->mnt_kern_flag |= MNTK_MWAIT; 834 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 835 if (flags & MBF_MNTLSTLOCK) 836 mtx_lock(&mountlist_mtx); 837 MNT_ILOCK(mp); 838 } 839 if (flags & MBF_MNTLSTLOCK) 840 mtx_unlock(&mountlist_mtx); 841 mp->mnt_lockref++; 842 MNT_IUNLOCK(mp); 843 return (0); 844 } 845 846 /* 847 * Free a busy filesystem. 848 */ 849 void 850 vfs_unbusy(struct mount *mp) 851 { 852 struct mount_pcpu *mpcpu; 853 int c; 854 855 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 856 857 if (vfs_op_thread_enter(mp, mpcpu)) { 858 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 859 vfs_mp_count_sub_pcpu(mpcpu, lockref, 1); 860 vfs_mp_count_sub_pcpu(mpcpu, ref, 1); 861 vfs_op_thread_exit(mp, mpcpu); 862 return; 863 } 864 865 MNT_ILOCK(mp); 866 vfs_assert_mount_counters(mp); 867 MNT_REL(mp); 868 c = --mp->mnt_lockref; 869 if (mp->mnt_vfs_ops == 0) { 870 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 871 MNT_IUNLOCK(mp); 872 return; 873 } 874 if (c < 0) 875 vfs_dump_mount_counters(mp); 876 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 877 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 878 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 879 mp->mnt_kern_flag &= ~MNTK_DRAINING; 880 wakeup(&mp->mnt_lockref); 881 } 882 MNT_IUNLOCK(mp); 883 } 884 885 /* 886 * Lookup a mount point by filesystem identifier. 887 */ 888 struct mount * 889 vfs_getvfs(fsid_t *fsid) 890 { 891 struct mount *mp; 892 893 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 894 mtx_lock(&mountlist_mtx); 895 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 896 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 897 vfs_ref(mp); 898 mtx_unlock(&mountlist_mtx); 899 return (mp); 900 } 901 } 902 mtx_unlock(&mountlist_mtx); 903 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 904 return ((struct mount *) 0); 905 } 906 907 /* 908 * Lookup a mount point by filesystem identifier, busying it before 909 * returning. 910 * 911 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 912 * cache for popular filesystem identifiers. The cache is lockess, using 913 * the fact that struct mount's are never freed. In worst case we may 914 * get pointer to unmounted or even different filesystem, so we have to 915 * check what we got, and go slow way if so. 916 */ 917 struct mount * 918 vfs_busyfs(fsid_t *fsid) 919 { 920 #define FSID_CACHE_SIZE 256 921 typedef struct mount * volatile vmp_t; 922 static vmp_t cache[FSID_CACHE_SIZE]; 923 struct mount *mp; 924 int error; 925 uint32_t hash; 926 927 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 928 hash = fsid->val[0] ^ fsid->val[1]; 929 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 930 mp = cache[hash]; 931 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 932 goto slow; 933 if (vfs_busy(mp, 0) != 0) { 934 cache[hash] = NULL; 935 goto slow; 936 } 937 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 938 return (mp); 939 else 940 vfs_unbusy(mp); 941 942 slow: 943 mtx_lock(&mountlist_mtx); 944 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 945 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 946 error = vfs_busy(mp, MBF_MNTLSTLOCK); 947 if (error) { 948 cache[hash] = NULL; 949 mtx_unlock(&mountlist_mtx); 950 return (NULL); 951 } 952 cache[hash] = mp; 953 return (mp); 954 } 955 } 956 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 957 mtx_unlock(&mountlist_mtx); 958 return ((struct mount *) 0); 959 } 960 961 /* 962 * Check if a user can access privileged mount options. 963 */ 964 int 965 vfs_suser(struct mount *mp, struct thread *td) 966 { 967 int error; 968 969 if (jailed(td->td_ucred)) { 970 /* 971 * If the jail of the calling thread lacks permission for 972 * this type of file system, deny immediately. 973 */ 974 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 975 return (EPERM); 976 977 /* 978 * If the file system was mounted outside the jail of the 979 * calling thread, deny immediately. 980 */ 981 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 982 return (EPERM); 983 } 984 985 /* 986 * If file system supports delegated administration, we don't check 987 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 988 * by the file system itself. 989 * If this is not the user that did original mount, we check for 990 * the PRIV_VFS_MOUNT_OWNER privilege. 991 */ 992 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 993 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 994 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 995 return (error); 996 } 997 return (0); 998 } 999 1000 /* 1001 * Get a new unique fsid. Try to make its val[0] unique, since this value 1002 * will be used to create fake device numbers for stat(). Also try (but 1003 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1004 * support 16-bit device numbers. We end up with unique val[0]'s for the 1005 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1006 * 1007 * Keep in mind that several mounts may be running in parallel. Starting 1008 * the search one past where the previous search terminated is both a 1009 * micro-optimization and a defense against returning the same fsid to 1010 * different mounts. 1011 */ 1012 void 1013 vfs_getnewfsid(struct mount *mp) 1014 { 1015 static uint16_t mntid_base; 1016 struct mount *nmp; 1017 fsid_t tfsid; 1018 int mtype; 1019 1020 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1021 mtx_lock(&mntid_mtx); 1022 mtype = mp->mnt_vfc->vfc_typenum; 1023 tfsid.val[1] = mtype; 1024 mtype = (mtype & 0xFF) << 24; 1025 for (;;) { 1026 tfsid.val[0] = makedev(255, 1027 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1028 mntid_base++; 1029 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1030 break; 1031 vfs_rel(nmp); 1032 } 1033 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1034 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1035 mtx_unlock(&mntid_mtx); 1036 } 1037 1038 /* 1039 * Knob to control the precision of file timestamps: 1040 * 1041 * 0 = seconds only; nanoseconds zeroed. 1042 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1043 * 2 = seconds and nanoseconds, truncated to microseconds. 1044 * >=3 = seconds and nanoseconds, maximum precision. 1045 */ 1046 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1047 1048 static int timestamp_precision = TSP_USEC; 1049 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1050 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1051 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1052 "3+: sec + ns (max. precision))"); 1053 1054 /* 1055 * Get a current timestamp. 1056 */ 1057 void 1058 vfs_timestamp(struct timespec *tsp) 1059 { 1060 struct timeval tv; 1061 1062 switch (timestamp_precision) { 1063 case TSP_SEC: 1064 tsp->tv_sec = time_second; 1065 tsp->tv_nsec = 0; 1066 break; 1067 case TSP_HZ: 1068 getnanotime(tsp); 1069 break; 1070 case TSP_USEC: 1071 microtime(&tv); 1072 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1073 break; 1074 case TSP_NSEC: 1075 default: 1076 nanotime(tsp); 1077 break; 1078 } 1079 } 1080 1081 /* 1082 * Set vnode attributes to VNOVAL 1083 */ 1084 void 1085 vattr_null(struct vattr *vap) 1086 { 1087 1088 vap->va_type = VNON; 1089 vap->va_size = VNOVAL; 1090 vap->va_bytes = VNOVAL; 1091 vap->va_mode = VNOVAL; 1092 vap->va_nlink = VNOVAL; 1093 vap->va_uid = VNOVAL; 1094 vap->va_gid = VNOVAL; 1095 vap->va_fsid = VNOVAL; 1096 vap->va_fileid = VNOVAL; 1097 vap->va_blocksize = VNOVAL; 1098 vap->va_rdev = VNOVAL; 1099 vap->va_atime.tv_sec = VNOVAL; 1100 vap->va_atime.tv_nsec = VNOVAL; 1101 vap->va_mtime.tv_sec = VNOVAL; 1102 vap->va_mtime.tv_nsec = VNOVAL; 1103 vap->va_ctime.tv_sec = VNOVAL; 1104 vap->va_ctime.tv_nsec = VNOVAL; 1105 vap->va_birthtime.tv_sec = VNOVAL; 1106 vap->va_birthtime.tv_nsec = VNOVAL; 1107 vap->va_flags = VNOVAL; 1108 vap->va_gen = VNOVAL; 1109 vap->va_vaflags = 0; 1110 } 1111 1112 /* 1113 * Try to reduce the total number of vnodes. 1114 * 1115 * This routine (and its user) are buggy in at least the following ways: 1116 * - all parameters were picked years ago when RAM sizes were significantly 1117 * smaller 1118 * - it can pick vnodes based on pages used by the vm object, but filesystems 1119 * like ZFS don't use it making the pick broken 1120 * - since ZFS has its own aging policy it gets partially combated by this one 1121 * - a dedicated method should be provided for filesystems to let them decide 1122 * whether the vnode should be recycled 1123 * 1124 * This routine is called when we have too many vnodes. It attempts 1125 * to free <count> vnodes and will potentially free vnodes that still 1126 * have VM backing store (VM backing store is typically the cause 1127 * of a vnode blowout so we want to do this). Therefore, this operation 1128 * is not considered cheap. 1129 * 1130 * A number of conditions may prevent a vnode from being reclaimed. 1131 * the buffer cache may have references on the vnode, a directory 1132 * vnode may still have references due to the namei cache representing 1133 * underlying files, or the vnode may be in active use. It is not 1134 * desirable to reuse such vnodes. These conditions may cause the 1135 * number of vnodes to reach some minimum value regardless of what 1136 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 1137 * 1138 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1139 * entries if this argument is strue 1140 * @param trigger Only reclaim vnodes with fewer than this many resident 1141 * pages. 1142 * @param target How many vnodes to reclaim. 1143 * @return The number of vnodes that were reclaimed. 1144 */ 1145 static int 1146 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1147 { 1148 struct vnode *vp, *mvp; 1149 struct mount *mp; 1150 struct vm_object *object; 1151 u_long done; 1152 bool retried; 1153 1154 mtx_assert(&vnode_list_mtx, MA_OWNED); 1155 1156 retried = false; 1157 done = 0; 1158 1159 mvp = vnode_list_reclaim_marker; 1160 restart: 1161 vp = mvp; 1162 while (done < target) { 1163 vp = TAILQ_NEXT(vp, v_vnodelist); 1164 if (__predict_false(vp == NULL)) 1165 break; 1166 1167 if (__predict_false(vp->v_type == VMARKER)) 1168 continue; 1169 1170 /* 1171 * If it's been deconstructed already, it's still 1172 * referenced, or it exceeds the trigger, skip it. 1173 * Also skip free vnodes. We are trying to make space 1174 * to expand the free list, not reduce it. 1175 */ 1176 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1177 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1178 goto next_iter; 1179 1180 if (vp->v_type == VBAD || vp->v_type == VNON) 1181 goto next_iter; 1182 1183 object = atomic_load_ptr(&vp->v_object); 1184 if (object == NULL || object->resident_page_count > trigger) { 1185 goto next_iter; 1186 } 1187 1188 /* 1189 * Handle races against vnode allocation. Filesystems lock the 1190 * vnode some time after it gets returned from getnewvnode, 1191 * despite type and hold count being manipulated earlier. 1192 * Resorting to checking v_mount restores guarantees present 1193 * before the global list was reworked to contain all vnodes. 1194 */ 1195 if (!VI_TRYLOCK(vp)) 1196 goto next_iter; 1197 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1198 VI_UNLOCK(vp); 1199 goto next_iter; 1200 } 1201 if (vp->v_mount == NULL) { 1202 VI_UNLOCK(vp); 1203 goto next_iter; 1204 } 1205 vholdl(vp); 1206 VI_UNLOCK(vp); 1207 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1208 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1209 mtx_unlock(&vnode_list_mtx); 1210 1211 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1212 vdrop_recycle(vp); 1213 goto next_iter_unlocked; 1214 } 1215 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1216 vdrop_recycle(vp); 1217 vn_finished_write(mp); 1218 goto next_iter_unlocked; 1219 } 1220 1221 VI_LOCK(vp); 1222 if (vp->v_usecount > 0 || 1223 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1224 (vp->v_object != NULL && vp->v_object->handle == vp && 1225 vp->v_object->resident_page_count > trigger)) { 1226 VOP_UNLOCK(vp); 1227 vdropl_recycle(vp); 1228 vn_finished_write(mp); 1229 goto next_iter_unlocked; 1230 } 1231 counter_u64_add(recycles_count, 1); 1232 vgonel(vp); 1233 VOP_UNLOCK(vp); 1234 vdropl_recycle(vp); 1235 vn_finished_write(mp); 1236 done++; 1237 next_iter_unlocked: 1238 if (should_yield()) 1239 kern_yield(PRI_USER); 1240 mtx_lock(&vnode_list_mtx); 1241 goto restart; 1242 next_iter: 1243 MPASS(vp->v_type != VMARKER); 1244 if (!should_yield()) 1245 continue; 1246 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1247 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1248 mtx_unlock(&vnode_list_mtx); 1249 kern_yield(PRI_USER); 1250 mtx_lock(&vnode_list_mtx); 1251 goto restart; 1252 } 1253 if (done == 0 && !retried) { 1254 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1255 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1256 retried = true; 1257 goto restart; 1258 } 1259 return (done); 1260 } 1261 1262 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1263 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1264 0, 1265 "limit on vnode free requests per call to the vnlru_free routine"); 1266 1267 /* 1268 * Attempt to reduce the free list by the requested amount. 1269 */ 1270 static int 1271 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1272 { 1273 struct vnode *vp; 1274 struct mount *mp; 1275 int ocount; 1276 1277 mtx_assert(&vnode_list_mtx, MA_OWNED); 1278 if (count > max_vnlru_free) 1279 count = max_vnlru_free; 1280 ocount = count; 1281 vp = mvp; 1282 for (;;) { 1283 if (count == 0) { 1284 break; 1285 } 1286 vp = TAILQ_NEXT(vp, v_vnodelist); 1287 if (__predict_false(vp == NULL)) { 1288 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1289 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1290 break; 1291 } 1292 if (__predict_false(vp->v_type == VMARKER)) 1293 continue; 1294 if (vp->v_holdcnt > 0) 1295 continue; 1296 /* 1297 * Don't recycle if our vnode is from different type 1298 * of mount point. Note that mp is type-safe, the 1299 * check does not reach unmapped address even if 1300 * vnode is reclaimed. 1301 */ 1302 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1303 mp->mnt_op != mnt_op) { 1304 continue; 1305 } 1306 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1307 continue; 1308 } 1309 if (!vhold_recycle_free(vp)) 1310 continue; 1311 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1312 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1313 mtx_unlock(&vnode_list_mtx); 1314 /* 1315 * FIXME: ignores the return value, meaning it may be nothing 1316 * got recycled but it claims otherwise to the caller. 1317 * 1318 * Originally the value started being ignored in 2005 with 1319 * 114a1006a8204aa156e1f9ad6476cdff89cada7f . 1320 * 1321 * Respecting the value can run into significant stalls if most 1322 * vnodes belong to one file system and it has writes 1323 * suspended. In presence of many threads and millions of 1324 * vnodes they keep contending on the vnode_list_mtx lock only 1325 * to find vnodes they can't recycle. 1326 * 1327 * The solution would be to pre-check if the vnode is likely to 1328 * be recycle-able, but it needs to happen with the 1329 * vnode_list_mtx lock held. This runs into a problem where 1330 * VOP_GETWRITEMOUNT (currently needed to find out about if 1331 * writes are frozen) can take locks which LOR against it. 1332 * 1333 * Check nullfs for one example (null_getwritemount). 1334 */ 1335 vtryrecycle(vp); 1336 count--; 1337 mtx_lock(&vnode_list_mtx); 1338 vp = mvp; 1339 } 1340 return (ocount - count); 1341 } 1342 1343 static int 1344 vnlru_free_locked(int count) 1345 { 1346 1347 mtx_assert(&vnode_list_mtx, MA_OWNED); 1348 return (vnlru_free_impl(count, NULL, vnode_list_free_marker)); 1349 } 1350 1351 void 1352 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1353 { 1354 1355 MPASS(mnt_op != NULL); 1356 MPASS(mvp != NULL); 1357 VNPASS(mvp->v_type == VMARKER, mvp); 1358 mtx_lock(&vnode_list_mtx); 1359 vnlru_free_impl(count, mnt_op, mvp); 1360 mtx_unlock(&vnode_list_mtx); 1361 } 1362 1363 struct vnode * 1364 vnlru_alloc_marker(void) 1365 { 1366 struct vnode *mvp; 1367 1368 mvp = vn_alloc_marker(NULL); 1369 mtx_lock(&vnode_list_mtx); 1370 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1371 mtx_unlock(&vnode_list_mtx); 1372 return (mvp); 1373 } 1374 1375 void 1376 vnlru_free_marker(struct vnode *mvp) 1377 { 1378 mtx_lock(&vnode_list_mtx); 1379 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1380 mtx_unlock(&vnode_list_mtx); 1381 vn_free_marker(mvp); 1382 } 1383 1384 static void 1385 vnlru_recalc(void) 1386 { 1387 1388 mtx_assert(&vnode_list_mtx, MA_OWNED); 1389 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1390 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1391 vlowat = vhiwat / 2; 1392 } 1393 1394 /* 1395 * Attempt to recycle vnodes in a context that is always safe to block. 1396 * Calling vlrurecycle() from the bowels of filesystem code has some 1397 * interesting deadlock problems. 1398 */ 1399 static struct proc *vnlruproc; 1400 static int vnlruproc_sig; 1401 1402 /* 1403 * The main freevnodes counter is only updated when threads requeue their vnode 1404 * batches. CPUs are conditionally walked to compute a more accurate total. 1405 * 1406 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1407 * at any given moment can still exceed slop, but it should not be by significant 1408 * margin in practice. 1409 */ 1410 #define VNLRU_FREEVNODES_SLOP 128 1411 1412 static __inline void 1413 vfs_freevnodes_inc(void) 1414 { 1415 struct vdbatch *vd; 1416 1417 critical_enter(); 1418 vd = DPCPU_PTR(vd); 1419 vd->freevnodes++; 1420 critical_exit(); 1421 } 1422 1423 static __inline void 1424 vfs_freevnodes_dec(void) 1425 { 1426 struct vdbatch *vd; 1427 1428 critical_enter(); 1429 vd = DPCPU_PTR(vd); 1430 vd->freevnodes--; 1431 critical_exit(); 1432 } 1433 1434 static u_long 1435 vnlru_read_freevnodes(void) 1436 { 1437 struct vdbatch *vd; 1438 long slop; 1439 int cpu; 1440 1441 mtx_assert(&vnode_list_mtx, MA_OWNED); 1442 if (freevnodes > freevnodes_old) 1443 slop = freevnodes - freevnodes_old; 1444 else 1445 slop = freevnodes_old - freevnodes; 1446 if (slop < VNLRU_FREEVNODES_SLOP) 1447 return (freevnodes >= 0 ? freevnodes : 0); 1448 freevnodes_old = freevnodes; 1449 CPU_FOREACH(cpu) { 1450 vd = DPCPU_ID_PTR((cpu), vd); 1451 freevnodes_old += vd->freevnodes; 1452 } 1453 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1454 } 1455 1456 static bool 1457 vnlru_under(u_long rnumvnodes, u_long limit) 1458 { 1459 u_long rfreevnodes, space; 1460 1461 if (__predict_false(rnumvnodes > desiredvnodes)) 1462 return (true); 1463 1464 space = desiredvnodes - rnumvnodes; 1465 if (space < limit) { 1466 rfreevnodes = vnlru_read_freevnodes(); 1467 if (rfreevnodes > wantfreevnodes) 1468 space += rfreevnodes - wantfreevnodes; 1469 } 1470 return (space < limit); 1471 } 1472 1473 static bool 1474 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1475 { 1476 long rfreevnodes, space; 1477 1478 if (__predict_false(rnumvnodes > desiredvnodes)) 1479 return (true); 1480 1481 space = desiredvnodes - rnumvnodes; 1482 if (space < limit) { 1483 rfreevnodes = atomic_load_long(&freevnodes); 1484 if (rfreevnodes > wantfreevnodes) 1485 space += rfreevnodes - wantfreevnodes; 1486 } 1487 return (space < limit); 1488 } 1489 1490 static void 1491 vnlru_kick(void) 1492 { 1493 1494 mtx_assert(&vnode_list_mtx, MA_OWNED); 1495 if (vnlruproc_sig == 0) { 1496 vnlruproc_sig = 1; 1497 wakeup(vnlruproc); 1498 } 1499 } 1500 1501 static void 1502 vnlru_proc(void) 1503 { 1504 u_long rnumvnodes, rfreevnodes, target; 1505 unsigned long onumvnodes; 1506 int done, force, trigger, usevnodes; 1507 bool reclaim_nc_src, want_reread; 1508 1509 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1510 SHUTDOWN_PRI_FIRST); 1511 1512 force = 0; 1513 want_reread = false; 1514 for (;;) { 1515 kproc_suspend_check(vnlruproc); 1516 mtx_lock(&vnode_list_mtx); 1517 rnumvnodes = atomic_load_long(&numvnodes); 1518 1519 if (want_reread) { 1520 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1521 want_reread = false; 1522 } 1523 1524 /* 1525 * If numvnodes is too large (due to desiredvnodes being 1526 * adjusted using its sysctl, or emergency growth), first 1527 * try to reduce it by discarding from the free list. 1528 */ 1529 if (rnumvnodes > desiredvnodes) { 1530 vnlru_free_locked(rnumvnodes - desiredvnodes); 1531 rnumvnodes = atomic_load_long(&numvnodes); 1532 } 1533 /* 1534 * Sleep if the vnode cache is in a good state. This is 1535 * when it is not over-full and has space for about a 4% 1536 * or 9% expansion (by growing its size or inexcessively 1537 * reducing its free list). Otherwise, try to reclaim 1538 * space for a 10% expansion. 1539 */ 1540 if (vstir && force == 0) { 1541 force = 1; 1542 vstir = 0; 1543 } 1544 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1545 vnlruproc_sig = 0; 1546 wakeup(&vnlruproc_sig); 1547 msleep(vnlruproc, &vnode_list_mtx, 1548 PVFS|PDROP, "vlruwt", hz); 1549 continue; 1550 } 1551 rfreevnodes = vnlru_read_freevnodes(); 1552 1553 onumvnodes = rnumvnodes; 1554 /* 1555 * Calculate parameters for recycling. These are the same 1556 * throughout the loop to give some semblance of fairness. 1557 * The trigger point is to avoid recycling vnodes with lots 1558 * of resident pages. We aren't trying to free memory; we 1559 * are trying to recycle or at least free vnodes. 1560 */ 1561 if (rnumvnodes <= desiredvnodes) 1562 usevnodes = rnumvnodes - rfreevnodes; 1563 else 1564 usevnodes = rnumvnodes; 1565 if (usevnodes <= 0) 1566 usevnodes = 1; 1567 /* 1568 * The trigger value is chosen to give a conservatively 1569 * large value to ensure that it alone doesn't prevent 1570 * making progress. The value can easily be so large that 1571 * it is effectively infinite in some congested and 1572 * misconfigured cases, and this is necessary. Normally 1573 * it is about 8 to 100 (pages), which is quite large. 1574 */ 1575 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1576 if (force < 2) 1577 trigger = vsmalltrigger; 1578 reclaim_nc_src = force >= 3; 1579 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1580 target = target / 10 + 1; 1581 done = vlrureclaim(reclaim_nc_src, trigger, target); 1582 mtx_unlock(&vnode_list_mtx); 1583 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1584 uma_reclaim(UMA_RECLAIM_DRAIN); 1585 if (done == 0) { 1586 if (force == 0 || force == 1) { 1587 force = 2; 1588 continue; 1589 } 1590 if (force == 2) { 1591 force = 3; 1592 continue; 1593 } 1594 want_reread = true; 1595 force = 0; 1596 vnlru_nowhere++; 1597 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1598 } else { 1599 want_reread = true; 1600 kern_yield(PRI_USER); 1601 } 1602 } 1603 } 1604 1605 static struct kproc_desc vnlru_kp = { 1606 "vnlru", 1607 vnlru_proc, 1608 &vnlruproc 1609 }; 1610 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1611 &vnlru_kp); 1612 1613 /* 1614 * Routines having to do with the management of the vnode table. 1615 */ 1616 1617 /* 1618 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1619 * before we actually vgone(). This function must be called with the vnode 1620 * held to prevent the vnode from being returned to the free list midway 1621 * through vgone(). 1622 */ 1623 static int 1624 vtryrecycle(struct vnode *vp) 1625 { 1626 struct mount *vnmp; 1627 1628 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1629 VNASSERT(vp->v_holdcnt, vp, 1630 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1631 /* 1632 * This vnode may found and locked via some other list, if so we 1633 * can't recycle it yet. 1634 */ 1635 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1636 CTR2(KTR_VFS, 1637 "%s: impossible to recycle, vp %p lock is already held", 1638 __func__, vp); 1639 vdrop_recycle(vp); 1640 return (EWOULDBLOCK); 1641 } 1642 /* 1643 * Don't recycle if its filesystem is being suspended. 1644 */ 1645 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1646 VOP_UNLOCK(vp); 1647 CTR2(KTR_VFS, 1648 "%s: impossible to recycle, cannot start the write for %p", 1649 __func__, vp); 1650 vdrop_recycle(vp); 1651 return (EBUSY); 1652 } 1653 /* 1654 * If we got this far, we need to acquire the interlock and see if 1655 * anyone picked up this vnode from another list. If not, we will 1656 * mark it with DOOMED via vgonel() so that anyone who does find it 1657 * will skip over it. 1658 */ 1659 VI_LOCK(vp); 1660 if (vp->v_usecount) { 1661 VOP_UNLOCK(vp); 1662 vdropl_recycle(vp); 1663 vn_finished_write(vnmp); 1664 CTR2(KTR_VFS, 1665 "%s: impossible to recycle, %p is already referenced", 1666 __func__, vp); 1667 return (EBUSY); 1668 } 1669 if (!VN_IS_DOOMED(vp)) { 1670 counter_u64_add(recycles_free_count, 1); 1671 vgonel(vp); 1672 } 1673 VOP_UNLOCK(vp); 1674 vdropl_recycle(vp); 1675 vn_finished_write(vnmp); 1676 return (0); 1677 } 1678 1679 /* 1680 * Allocate a new vnode. 1681 * 1682 * The operation never returns an error. Returning an error was disabled 1683 * in r145385 (dated 2005) with the following comment: 1684 * 1685 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1686 * 1687 * Given the age of this commit (almost 15 years at the time of writing this 1688 * comment) restoring the ability to fail requires a significant audit of 1689 * all codepaths. 1690 * 1691 * The routine can try to free a vnode or stall for up to 1 second waiting for 1692 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1693 */ 1694 static u_long vn_alloc_cyclecount; 1695 1696 static struct vnode * __noinline 1697 vn_alloc_hard(struct mount *mp) 1698 { 1699 u_long rnumvnodes, rfreevnodes; 1700 1701 mtx_lock(&vnode_list_mtx); 1702 rnumvnodes = atomic_load_long(&numvnodes); 1703 if (rnumvnodes + 1 < desiredvnodes) { 1704 vn_alloc_cyclecount = 0; 1705 goto alloc; 1706 } 1707 rfreevnodes = vnlru_read_freevnodes(); 1708 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1709 vn_alloc_cyclecount = 0; 1710 vstir = 1; 1711 } 1712 /* 1713 * Grow the vnode cache if it will not be above its target max 1714 * after growing. Otherwise, if the free list is nonempty, try 1715 * to reclaim 1 item from it before growing the cache (possibly 1716 * above its target max if the reclamation failed or is delayed). 1717 * Otherwise, wait for some space. In all cases, schedule 1718 * vnlru_proc() if we are getting short of space. The watermarks 1719 * should be chosen so that we never wait or even reclaim from 1720 * the free list to below its target minimum. 1721 */ 1722 if (vnlru_free_locked(1) > 0) 1723 goto alloc; 1724 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1725 /* 1726 * Wait for space for a new vnode. 1727 */ 1728 vnlru_kick(); 1729 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1730 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1731 vnlru_read_freevnodes() > 1) 1732 vnlru_free_locked(1); 1733 } 1734 alloc: 1735 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1736 if (vnlru_under(rnumvnodes, vlowat)) 1737 vnlru_kick(); 1738 mtx_unlock(&vnode_list_mtx); 1739 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1740 } 1741 1742 static struct vnode * 1743 vn_alloc(struct mount *mp) 1744 { 1745 u_long rnumvnodes; 1746 1747 if (__predict_false(vn_alloc_cyclecount != 0)) 1748 return (vn_alloc_hard(mp)); 1749 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1750 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1751 atomic_subtract_long(&numvnodes, 1); 1752 return (vn_alloc_hard(mp)); 1753 } 1754 1755 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1756 } 1757 1758 static void 1759 vn_free(struct vnode *vp) 1760 { 1761 1762 atomic_subtract_long(&numvnodes, 1); 1763 uma_zfree_smr(vnode_zone, vp); 1764 } 1765 1766 /* 1767 * Return the next vnode from the free list. 1768 */ 1769 int 1770 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1771 struct vnode **vpp) 1772 { 1773 struct vnode *vp; 1774 struct thread *td; 1775 struct lock_object *lo; 1776 1777 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1778 1779 KASSERT(vops->registered, 1780 ("%s: not registered vector op %p\n", __func__, vops)); 1781 1782 td = curthread; 1783 if (td->td_vp_reserved != NULL) { 1784 vp = td->td_vp_reserved; 1785 td->td_vp_reserved = NULL; 1786 } else { 1787 vp = vn_alloc(mp); 1788 } 1789 counter_u64_add(vnodes_created, 1); 1790 /* 1791 * Locks are given the generic name "vnode" when created. 1792 * Follow the historic practice of using the filesystem 1793 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1794 * 1795 * Locks live in a witness group keyed on their name. Thus, 1796 * when a lock is renamed, it must also move from the witness 1797 * group of its old name to the witness group of its new name. 1798 * 1799 * The change only needs to be made when the vnode moves 1800 * from one filesystem type to another. We ensure that each 1801 * filesystem use a single static name pointer for its tag so 1802 * that we can compare pointers rather than doing a strcmp(). 1803 */ 1804 lo = &vp->v_vnlock->lock_object; 1805 #ifdef WITNESS 1806 if (lo->lo_name != tag) { 1807 #endif 1808 lo->lo_name = tag; 1809 #ifdef WITNESS 1810 WITNESS_DESTROY(lo); 1811 WITNESS_INIT(lo, tag); 1812 } 1813 #endif 1814 /* 1815 * By default, don't allow shared locks unless filesystems opt-in. 1816 */ 1817 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1818 /* 1819 * Finalize various vnode identity bits. 1820 */ 1821 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1822 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1823 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1824 vp->v_type = VNON; 1825 vp->v_op = vops; 1826 vp->v_irflag = 0; 1827 v_init_counters(vp); 1828 vn_seqc_init(vp); 1829 vp->v_bufobj.bo_ops = &buf_ops_bio; 1830 #ifdef DIAGNOSTIC 1831 if (mp == NULL && vops != &dead_vnodeops) 1832 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1833 #endif 1834 #ifdef MAC 1835 mac_vnode_init(vp); 1836 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1837 mac_vnode_associate_singlelabel(mp, vp); 1838 #endif 1839 if (mp != NULL) { 1840 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1841 } 1842 1843 /* 1844 * For the filesystems which do not use vfs_hash_insert(), 1845 * still initialize v_hash to have vfs_hash_index() useful. 1846 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1847 * its own hashing. 1848 */ 1849 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1850 1851 *vpp = vp; 1852 return (0); 1853 } 1854 1855 void 1856 getnewvnode_reserve(void) 1857 { 1858 struct thread *td; 1859 1860 td = curthread; 1861 MPASS(td->td_vp_reserved == NULL); 1862 td->td_vp_reserved = vn_alloc(NULL); 1863 } 1864 1865 void 1866 getnewvnode_drop_reserve(void) 1867 { 1868 struct thread *td; 1869 1870 td = curthread; 1871 if (td->td_vp_reserved != NULL) { 1872 vn_free(td->td_vp_reserved); 1873 td->td_vp_reserved = NULL; 1874 } 1875 } 1876 1877 static void __noinline 1878 freevnode(struct vnode *vp) 1879 { 1880 struct bufobj *bo; 1881 1882 /* 1883 * The vnode has been marked for destruction, so free it. 1884 * 1885 * The vnode will be returned to the zone where it will 1886 * normally remain until it is needed for another vnode. We 1887 * need to cleanup (or verify that the cleanup has already 1888 * been done) any residual data left from its current use 1889 * so as not to contaminate the freshly allocated vnode. 1890 */ 1891 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1892 /* 1893 * Paired with vgone. 1894 */ 1895 vn_seqc_write_end_free(vp); 1896 1897 bo = &vp->v_bufobj; 1898 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1899 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1900 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1901 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1902 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1903 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1904 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1905 ("clean blk trie not empty")); 1906 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1907 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1908 ("dirty blk trie not empty")); 1909 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1910 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1911 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1912 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1913 ("Dangling rangelock waiters")); 1914 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1915 ("Leaked inactivation")); 1916 VI_UNLOCK(vp); 1917 #ifdef MAC 1918 mac_vnode_destroy(vp); 1919 #endif 1920 if (vp->v_pollinfo != NULL) { 1921 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1922 destroy_vpollinfo(vp->v_pollinfo); 1923 VOP_UNLOCK(vp); 1924 vp->v_pollinfo = NULL; 1925 } 1926 vp->v_mountedhere = NULL; 1927 vp->v_unpcb = NULL; 1928 vp->v_rdev = NULL; 1929 vp->v_fifoinfo = NULL; 1930 vp->v_iflag = 0; 1931 vp->v_vflag = 0; 1932 bo->bo_flag = 0; 1933 vn_free(vp); 1934 } 1935 1936 /* 1937 * Delete from old mount point vnode list, if on one. 1938 */ 1939 static void 1940 delmntque(struct vnode *vp) 1941 { 1942 struct mount *mp; 1943 1944 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1945 1946 mp = vp->v_mount; 1947 if (mp == NULL) 1948 return; 1949 MNT_ILOCK(mp); 1950 VI_LOCK(vp); 1951 vp->v_mount = NULL; 1952 VI_UNLOCK(vp); 1953 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1954 ("bad mount point vnode list size")); 1955 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1956 mp->mnt_nvnodelistsize--; 1957 MNT_REL(mp); 1958 MNT_IUNLOCK(mp); 1959 } 1960 1961 static int 1962 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 1963 { 1964 1965 KASSERT(vp->v_mount == NULL, 1966 ("insmntque: vnode already on per mount vnode list")); 1967 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1968 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 1969 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1970 } else { 1971 KASSERT(!dtr, 1972 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 1973 __func__)); 1974 } 1975 1976 /* 1977 * We acquire the vnode interlock early to ensure that the 1978 * vnode cannot be recycled by another process releasing a 1979 * holdcnt on it before we get it on both the vnode list 1980 * and the active vnode list. The mount mutex protects only 1981 * manipulation of the vnode list and the vnode freelist 1982 * mutex protects only manipulation of the active vnode list. 1983 * Hence the need to hold the vnode interlock throughout. 1984 */ 1985 MNT_ILOCK(mp); 1986 VI_LOCK(vp); 1987 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1988 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1989 mp->mnt_nvnodelistsize == 0)) && 1990 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1991 VI_UNLOCK(vp); 1992 MNT_IUNLOCK(mp); 1993 if (dtr) { 1994 vp->v_data = NULL; 1995 vp->v_op = &dead_vnodeops; 1996 vgone(vp); 1997 vput(vp); 1998 } 1999 return (EBUSY); 2000 } 2001 vp->v_mount = mp; 2002 MNT_REF(mp); 2003 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2004 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2005 ("neg mount point vnode list size")); 2006 mp->mnt_nvnodelistsize++; 2007 VI_UNLOCK(vp); 2008 MNT_IUNLOCK(mp); 2009 return (0); 2010 } 2011 2012 /* 2013 * Insert into list of vnodes for the new mount point, if available. 2014 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2015 * leaves handling of the vnode to the caller. 2016 */ 2017 int 2018 insmntque(struct vnode *vp, struct mount *mp) 2019 { 2020 return (insmntque1_int(vp, mp, true)); 2021 } 2022 2023 int 2024 insmntque1(struct vnode *vp, struct mount *mp) 2025 { 2026 return (insmntque1_int(vp, mp, false)); 2027 } 2028 2029 /* 2030 * Flush out and invalidate all buffers associated with a bufobj 2031 * Called with the underlying object locked. 2032 */ 2033 int 2034 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2035 { 2036 int error; 2037 2038 BO_LOCK(bo); 2039 if (flags & V_SAVE) { 2040 error = bufobj_wwait(bo, slpflag, slptimeo); 2041 if (error) { 2042 BO_UNLOCK(bo); 2043 return (error); 2044 } 2045 if (bo->bo_dirty.bv_cnt > 0) { 2046 BO_UNLOCK(bo); 2047 do { 2048 error = BO_SYNC(bo, MNT_WAIT); 2049 } while (error == ERELOOKUP); 2050 if (error != 0) 2051 return (error); 2052 BO_LOCK(bo); 2053 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2054 BO_UNLOCK(bo); 2055 return (EBUSY); 2056 } 2057 } 2058 } 2059 /* 2060 * If you alter this loop please notice that interlock is dropped and 2061 * reacquired in flushbuflist. Special care is needed to ensure that 2062 * no race conditions occur from this. 2063 */ 2064 do { 2065 error = flushbuflist(&bo->bo_clean, 2066 flags, bo, slpflag, slptimeo); 2067 if (error == 0 && !(flags & V_CLEANONLY)) 2068 error = flushbuflist(&bo->bo_dirty, 2069 flags, bo, slpflag, slptimeo); 2070 if (error != 0 && error != EAGAIN) { 2071 BO_UNLOCK(bo); 2072 return (error); 2073 } 2074 } while (error != 0); 2075 2076 /* 2077 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2078 * have write I/O in-progress but if there is a VM object then the 2079 * VM object can also have read-I/O in-progress. 2080 */ 2081 do { 2082 bufobj_wwait(bo, 0, 0); 2083 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2084 BO_UNLOCK(bo); 2085 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2086 BO_LOCK(bo); 2087 } 2088 } while (bo->bo_numoutput > 0); 2089 BO_UNLOCK(bo); 2090 2091 /* 2092 * Destroy the copy in the VM cache, too. 2093 */ 2094 if (bo->bo_object != NULL && 2095 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2096 VM_OBJECT_WLOCK(bo->bo_object); 2097 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2098 OBJPR_CLEANONLY : 0); 2099 VM_OBJECT_WUNLOCK(bo->bo_object); 2100 } 2101 2102 #ifdef INVARIANTS 2103 BO_LOCK(bo); 2104 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2105 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2106 bo->bo_clean.bv_cnt > 0)) 2107 panic("vinvalbuf: flush failed"); 2108 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2109 bo->bo_dirty.bv_cnt > 0) 2110 panic("vinvalbuf: flush dirty failed"); 2111 BO_UNLOCK(bo); 2112 #endif 2113 return (0); 2114 } 2115 2116 /* 2117 * Flush out and invalidate all buffers associated with a vnode. 2118 * Called with the underlying object locked. 2119 */ 2120 int 2121 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2122 { 2123 2124 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2125 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2126 if (vp->v_object != NULL && vp->v_object->handle != vp) 2127 return (0); 2128 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2129 } 2130 2131 /* 2132 * Flush out buffers on the specified list. 2133 * 2134 */ 2135 static int 2136 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2137 int slptimeo) 2138 { 2139 struct buf *bp, *nbp; 2140 int retval, error; 2141 daddr_t lblkno; 2142 b_xflags_t xflags; 2143 2144 ASSERT_BO_WLOCKED(bo); 2145 2146 retval = 0; 2147 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2148 /* 2149 * If we are flushing both V_NORMAL and V_ALT buffers then 2150 * do not skip any buffers. If we are flushing only V_NORMAL 2151 * buffers then skip buffers marked as BX_ALTDATA. If we are 2152 * flushing only V_ALT buffers then skip buffers not marked 2153 * as BX_ALTDATA. 2154 */ 2155 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2156 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2157 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2158 continue; 2159 } 2160 if (nbp != NULL) { 2161 lblkno = nbp->b_lblkno; 2162 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2163 } 2164 retval = EAGAIN; 2165 error = BUF_TIMELOCK(bp, 2166 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2167 "flushbuf", slpflag, slptimeo); 2168 if (error) { 2169 BO_LOCK(bo); 2170 return (error != ENOLCK ? error : EAGAIN); 2171 } 2172 KASSERT(bp->b_bufobj == bo, 2173 ("bp %p wrong b_bufobj %p should be %p", 2174 bp, bp->b_bufobj, bo)); 2175 /* 2176 * XXX Since there are no node locks for NFS, I 2177 * believe there is a slight chance that a delayed 2178 * write will occur while sleeping just above, so 2179 * check for it. 2180 */ 2181 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2182 (flags & V_SAVE)) { 2183 bremfree(bp); 2184 bp->b_flags |= B_ASYNC; 2185 bwrite(bp); 2186 BO_LOCK(bo); 2187 return (EAGAIN); /* XXX: why not loop ? */ 2188 } 2189 bremfree(bp); 2190 bp->b_flags |= (B_INVAL | B_RELBUF); 2191 bp->b_flags &= ~B_ASYNC; 2192 brelse(bp); 2193 BO_LOCK(bo); 2194 if (nbp == NULL) 2195 break; 2196 nbp = gbincore(bo, lblkno); 2197 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2198 != xflags) 2199 break; /* nbp invalid */ 2200 } 2201 return (retval); 2202 } 2203 2204 int 2205 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2206 { 2207 struct buf *bp; 2208 int error; 2209 daddr_t lblkno; 2210 2211 ASSERT_BO_LOCKED(bo); 2212 2213 for (lblkno = startn;;) { 2214 again: 2215 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2216 if (bp == NULL || bp->b_lblkno >= endn || 2217 bp->b_lblkno < startn) 2218 break; 2219 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2220 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2221 if (error != 0) { 2222 BO_RLOCK(bo); 2223 if (error == ENOLCK) 2224 goto again; 2225 return (error); 2226 } 2227 KASSERT(bp->b_bufobj == bo, 2228 ("bp %p wrong b_bufobj %p should be %p", 2229 bp, bp->b_bufobj, bo)); 2230 lblkno = bp->b_lblkno + 1; 2231 if ((bp->b_flags & B_MANAGED) == 0) 2232 bremfree(bp); 2233 bp->b_flags |= B_RELBUF; 2234 /* 2235 * In the VMIO case, use the B_NOREUSE flag to hint that the 2236 * pages backing each buffer in the range are unlikely to be 2237 * reused. Dirty buffers will have the hint applied once 2238 * they've been written. 2239 */ 2240 if ((bp->b_flags & B_VMIO) != 0) 2241 bp->b_flags |= B_NOREUSE; 2242 brelse(bp); 2243 BO_RLOCK(bo); 2244 } 2245 return (0); 2246 } 2247 2248 /* 2249 * Truncate a file's buffer and pages to a specified length. This 2250 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2251 * sync activity. 2252 */ 2253 int 2254 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2255 { 2256 struct buf *bp, *nbp; 2257 struct bufobj *bo; 2258 daddr_t startlbn; 2259 2260 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2261 vp, blksize, (uintmax_t)length); 2262 2263 /* 2264 * Round up to the *next* lbn. 2265 */ 2266 startlbn = howmany(length, blksize); 2267 2268 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2269 2270 bo = &vp->v_bufobj; 2271 restart_unlocked: 2272 BO_LOCK(bo); 2273 2274 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2275 ; 2276 2277 if (length > 0) { 2278 restartsync: 2279 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2280 if (bp->b_lblkno > 0) 2281 continue; 2282 /* 2283 * Since we hold the vnode lock this should only 2284 * fail if we're racing with the buf daemon. 2285 */ 2286 if (BUF_LOCK(bp, 2287 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2288 BO_LOCKPTR(bo)) == ENOLCK) 2289 goto restart_unlocked; 2290 2291 VNASSERT((bp->b_flags & B_DELWRI), vp, 2292 ("buf(%p) on dirty queue without DELWRI", bp)); 2293 2294 bremfree(bp); 2295 bawrite(bp); 2296 BO_LOCK(bo); 2297 goto restartsync; 2298 } 2299 } 2300 2301 bufobj_wwait(bo, 0, 0); 2302 BO_UNLOCK(bo); 2303 vnode_pager_setsize(vp, length); 2304 2305 return (0); 2306 } 2307 2308 /* 2309 * Invalidate the cached pages of a file's buffer within the range of block 2310 * numbers [startlbn, endlbn). 2311 */ 2312 void 2313 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2314 int blksize) 2315 { 2316 struct bufobj *bo; 2317 off_t start, end; 2318 2319 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2320 2321 start = blksize * startlbn; 2322 end = blksize * endlbn; 2323 2324 bo = &vp->v_bufobj; 2325 BO_LOCK(bo); 2326 MPASS(blksize == bo->bo_bsize); 2327 2328 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2329 ; 2330 2331 BO_UNLOCK(bo); 2332 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2333 } 2334 2335 static int 2336 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2337 daddr_t startlbn, daddr_t endlbn) 2338 { 2339 struct buf *bp, *nbp; 2340 bool anyfreed; 2341 2342 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2343 ASSERT_BO_LOCKED(bo); 2344 2345 do { 2346 anyfreed = false; 2347 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2348 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2349 continue; 2350 if (BUF_LOCK(bp, 2351 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2352 BO_LOCKPTR(bo)) == ENOLCK) { 2353 BO_LOCK(bo); 2354 return (EAGAIN); 2355 } 2356 2357 bremfree(bp); 2358 bp->b_flags |= B_INVAL | B_RELBUF; 2359 bp->b_flags &= ~B_ASYNC; 2360 brelse(bp); 2361 anyfreed = true; 2362 2363 BO_LOCK(bo); 2364 if (nbp != NULL && 2365 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2366 nbp->b_vp != vp || 2367 (nbp->b_flags & B_DELWRI) != 0)) 2368 return (EAGAIN); 2369 } 2370 2371 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2372 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2373 continue; 2374 if (BUF_LOCK(bp, 2375 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2376 BO_LOCKPTR(bo)) == ENOLCK) { 2377 BO_LOCK(bo); 2378 return (EAGAIN); 2379 } 2380 bremfree(bp); 2381 bp->b_flags |= B_INVAL | B_RELBUF; 2382 bp->b_flags &= ~B_ASYNC; 2383 brelse(bp); 2384 anyfreed = true; 2385 2386 BO_LOCK(bo); 2387 if (nbp != NULL && 2388 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2389 (nbp->b_vp != vp) || 2390 (nbp->b_flags & B_DELWRI) == 0)) 2391 return (EAGAIN); 2392 } 2393 } while (anyfreed); 2394 return (0); 2395 } 2396 2397 static void 2398 buf_vlist_remove(struct buf *bp) 2399 { 2400 struct bufv *bv; 2401 b_xflags_t flags; 2402 2403 flags = bp->b_xflags; 2404 2405 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2406 ASSERT_BO_WLOCKED(bp->b_bufobj); 2407 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2408 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2409 ("%s: buffer %p has invalid queue state", __func__, bp)); 2410 2411 if ((flags & BX_VNDIRTY) != 0) 2412 bv = &bp->b_bufobj->bo_dirty; 2413 else 2414 bv = &bp->b_bufobj->bo_clean; 2415 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2416 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2417 bv->bv_cnt--; 2418 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2419 } 2420 2421 /* 2422 * Add the buffer to the sorted clean or dirty block list. 2423 * 2424 * NOTE: xflags is passed as a constant, optimizing this inline function! 2425 */ 2426 static void 2427 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2428 { 2429 struct bufv *bv; 2430 struct buf *n; 2431 int error; 2432 2433 ASSERT_BO_WLOCKED(bo); 2434 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2435 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2436 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2437 ("dead bo %p", bo)); 2438 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2439 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2440 bp->b_xflags |= xflags; 2441 if (xflags & BX_VNDIRTY) 2442 bv = &bo->bo_dirty; 2443 else 2444 bv = &bo->bo_clean; 2445 2446 /* 2447 * Keep the list ordered. Optimize empty list insertion. Assume 2448 * we tend to grow at the tail so lookup_le should usually be cheaper 2449 * than _ge. 2450 */ 2451 if (bv->bv_cnt == 0 || 2452 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2453 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2454 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2455 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2456 else 2457 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2458 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2459 if (error) 2460 panic("buf_vlist_add: Preallocated nodes insufficient."); 2461 bv->bv_cnt++; 2462 } 2463 2464 /* 2465 * Look up a buffer using the buffer tries. 2466 */ 2467 struct buf * 2468 gbincore(struct bufobj *bo, daddr_t lblkno) 2469 { 2470 struct buf *bp; 2471 2472 ASSERT_BO_LOCKED(bo); 2473 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2474 if (bp != NULL) 2475 return (bp); 2476 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2477 } 2478 2479 /* 2480 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2481 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2482 * stability of the result. Like other lockless lookups, the found buf may 2483 * already be invalid by the time this function returns. 2484 */ 2485 struct buf * 2486 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2487 { 2488 struct buf *bp; 2489 2490 ASSERT_BO_UNLOCKED(bo); 2491 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2492 if (bp != NULL) 2493 return (bp); 2494 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2495 } 2496 2497 /* 2498 * Associate a buffer with a vnode. 2499 */ 2500 void 2501 bgetvp(struct vnode *vp, struct buf *bp) 2502 { 2503 struct bufobj *bo; 2504 2505 bo = &vp->v_bufobj; 2506 ASSERT_BO_WLOCKED(bo); 2507 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2508 2509 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2510 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2511 ("bgetvp: bp already attached! %p", bp)); 2512 2513 vhold(vp); 2514 bp->b_vp = vp; 2515 bp->b_bufobj = bo; 2516 /* 2517 * Insert onto list for new vnode. 2518 */ 2519 buf_vlist_add(bp, bo, BX_VNCLEAN); 2520 } 2521 2522 /* 2523 * Disassociate a buffer from a vnode. 2524 */ 2525 void 2526 brelvp(struct buf *bp) 2527 { 2528 struct bufobj *bo; 2529 struct vnode *vp; 2530 2531 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2532 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2533 2534 /* 2535 * Delete from old vnode list, if on one. 2536 */ 2537 vp = bp->b_vp; /* XXX */ 2538 bo = bp->b_bufobj; 2539 BO_LOCK(bo); 2540 buf_vlist_remove(bp); 2541 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2542 bo->bo_flag &= ~BO_ONWORKLST; 2543 mtx_lock(&sync_mtx); 2544 LIST_REMOVE(bo, bo_synclist); 2545 syncer_worklist_len--; 2546 mtx_unlock(&sync_mtx); 2547 } 2548 bp->b_vp = NULL; 2549 bp->b_bufobj = NULL; 2550 BO_UNLOCK(bo); 2551 vdrop(vp); 2552 } 2553 2554 /* 2555 * Add an item to the syncer work queue. 2556 */ 2557 static void 2558 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2559 { 2560 int slot; 2561 2562 ASSERT_BO_WLOCKED(bo); 2563 2564 mtx_lock(&sync_mtx); 2565 if (bo->bo_flag & BO_ONWORKLST) 2566 LIST_REMOVE(bo, bo_synclist); 2567 else { 2568 bo->bo_flag |= BO_ONWORKLST; 2569 syncer_worklist_len++; 2570 } 2571 2572 if (delay > syncer_maxdelay - 2) 2573 delay = syncer_maxdelay - 2; 2574 slot = (syncer_delayno + delay) & syncer_mask; 2575 2576 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2577 mtx_unlock(&sync_mtx); 2578 } 2579 2580 static int 2581 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2582 { 2583 int error, len; 2584 2585 mtx_lock(&sync_mtx); 2586 len = syncer_worklist_len - sync_vnode_count; 2587 mtx_unlock(&sync_mtx); 2588 error = SYSCTL_OUT(req, &len, sizeof(len)); 2589 return (error); 2590 } 2591 2592 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2593 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2594 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2595 2596 static struct proc *updateproc; 2597 static void sched_sync(void); 2598 static struct kproc_desc up_kp = { 2599 "syncer", 2600 sched_sync, 2601 &updateproc 2602 }; 2603 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2604 2605 static int 2606 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2607 { 2608 struct vnode *vp; 2609 struct mount *mp; 2610 2611 *bo = LIST_FIRST(slp); 2612 if (*bo == NULL) 2613 return (0); 2614 vp = bo2vnode(*bo); 2615 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2616 return (1); 2617 /* 2618 * We use vhold in case the vnode does not 2619 * successfully sync. vhold prevents the vnode from 2620 * going away when we unlock the sync_mtx so that 2621 * we can acquire the vnode interlock. 2622 */ 2623 vholdl(vp); 2624 mtx_unlock(&sync_mtx); 2625 VI_UNLOCK(vp); 2626 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2627 vdrop(vp); 2628 mtx_lock(&sync_mtx); 2629 return (*bo == LIST_FIRST(slp)); 2630 } 2631 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2632 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2633 VOP_UNLOCK(vp); 2634 vn_finished_write(mp); 2635 BO_LOCK(*bo); 2636 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2637 /* 2638 * Put us back on the worklist. The worklist 2639 * routine will remove us from our current 2640 * position and then add us back in at a later 2641 * position. 2642 */ 2643 vn_syncer_add_to_worklist(*bo, syncdelay); 2644 } 2645 BO_UNLOCK(*bo); 2646 vdrop(vp); 2647 mtx_lock(&sync_mtx); 2648 return (0); 2649 } 2650 2651 static int first_printf = 1; 2652 2653 /* 2654 * System filesystem synchronizer daemon. 2655 */ 2656 static void 2657 sched_sync(void) 2658 { 2659 struct synclist *next, *slp; 2660 struct bufobj *bo; 2661 long starttime; 2662 struct thread *td = curthread; 2663 int last_work_seen; 2664 int net_worklist_len; 2665 int syncer_final_iter; 2666 int error; 2667 2668 last_work_seen = 0; 2669 syncer_final_iter = 0; 2670 syncer_state = SYNCER_RUNNING; 2671 starttime = time_uptime; 2672 td->td_pflags |= TDP_NORUNNINGBUF; 2673 2674 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2675 SHUTDOWN_PRI_LAST); 2676 2677 mtx_lock(&sync_mtx); 2678 for (;;) { 2679 if (syncer_state == SYNCER_FINAL_DELAY && 2680 syncer_final_iter == 0) { 2681 mtx_unlock(&sync_mtx); 2682 kproc_suspend_check(td->td_proc); 2683 mtx_lock(&sync_mtx); 2684 } 2685 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2686 if (syncer_state != SYNCER_RUNNING && 2687 starttime != time_uptime) { 2688 if (first_printf) { 2689 printf("\nSyncing disks, vnodes remaining... "); 2690 first_printf = 0; 2691 } 2692 printf("%d ", net_worklist_len); 2693 } 2694 starttime = time_uptime; 2695 2696 /* 2697 * Push files whose dirty time has expired. Be careful 2698 * of interrupt race on slp queue. 2699 * 2700 * Skip over empty worklist slots when shutting down. 2701 */ 2702 do { 2703 slp = &syncer_workitem_pending[syncer_delayno]; 2704 syncer_delayno += 1; 2705 if (syncer_delayno == syncer_maxdelay) 2706 syncer_delayno = 0; 2707 next = &syncer_workitem_pending[syncer_delayno]; 2708 /* 2709 * If the worklist has wrapped since the 2710 * it was emptied of all but syncer vnodes, 2711 * switch to the FINAL_DELAY state and run 2712 * for one more second. 2713 */ 2714 if (syncer_state == SYNCER_SHUTTING_DOWN && 2715 net_worklist_len == 0 && 2716 last_work_seen == syncer_delayno) { 2717 syncer_state = SYNCER_FINAL_DELAY; 2718 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2719 } 2720 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2721 syncer_worklist_len > 0); 2722 2723 /* 2724 * Keep track of the last time there was anything 2725 * on the worklist other than syncer vnodes. 2726 * Return to the SHUTTING_DOWN state if any 2727 * new work appears. 2728 */ 2729 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2730 last_work_seen = syncer_delayno; 2731 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2732 syncer_state = SYNCER_SHUTTING_DOWN; 2733 while (!LIST_EMPTY(slp)) { 2734 error = sync_vnode(slp, &bo, td); 2735 if (error == 1) { 2736 LIST_REMOVE(bo, bo_synclist); 2737 LIST_INSERT_HEAD(next, bo, bo_synclist); 2738 continue; 2739 } 2740 2741 if (first_printf == 0) { 2742 /* 2743 * Drop the sync mutex, because some watchdog 2744 * drivers need to sleep while patting 2745 */ 2746 mtx_unlock(&sync_mtx); 2747 wdog_kern_pat(WD_LASTVAL); 2748 mtx_lock(&sync_mtx); 2749 } 2750 } 2751 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2752 syncer_final_iter--; 2753 /* 2754 * The variable rushjob allows the kernel to speed up the 2755 * processing of the filesystem syncer process. A rushjob 2756 * value of N tells the filesystem syncer to process the next 2757 * N seconds worth of work on its queue ASAP. Currently rushjob 2758 * is used by the soft update code to speed up the filesystem 2759 * syncer process when the incore state is getting so far 2760 * ahead of the disk that the kernel memory pool is being 2761 * threatened with exhaustion. 2762 */ 2763 if (rushjob > 0) { 2764 rushjob -= 1; 2765 continue; 2766 } 2767 /* 2768 * Just sleep for a short period of time between 2769 * iterations when shutting down to allow some I/O 2770 * to happen. 2771 * 2772 * If it has taken us less than a second to process the 2773 * current work, then wait. Otherwise start right over 2774 * again. We can still lose time if any single round 2775 * takes more than two seconds, but it does not really 2776 * matter as we are just trying to generally pace the 2777 * filesystem activity. 2778 */ 2779 if (syncer_state != SYNCER_RUNNING || 2780 time_uptime == starttime) { 2781 thread_lock(td); 2782 sched_prio(td, PPAUSE); 2783 thread_unlock(td); 2784 } 2785 if (syncer_state != SYNCER_RUNNING) 2786 cv_timedwait(&sync_wakeup, &sync_mtx, 2787 hz / SYNCER_SHUTDOWN_SPEEDUP); 2788 else if (time_uptime == starttime) 2789 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2790 } 2791 } 2792 2793 /* 2794 * Request the syncer daemon to speed up its work. 2795 * We never push it to speed up more than half of its 2796 * normal turn time, otherwise it could take over the cpu. 2797 */ 2798 int 2799 speedup_syncer(void) 2800 { 2801 int ret = 0; 2802 2803 mtx_lock(&sync_mtx); 2804 if (rushjob < syncdelay / 2) { 2805 rushjob += 1; 2806 stat_rush_requests += 1; 2807 ret = 1; 2808 } 2809 mtx_unlock(&sync_mtx); 2810 cv_broadcast(&sync_wakeup); 2811 return (ret); 2812 } 2813 2814 /* 2815 * Tell the syncer to speed up its work and run though its work 2816 * list several times, then tell it to shut down. 2817 */ 2818 static void 2819 syncer_shutdown(void *arg, int howto) 2820 { 2821 2822 if (howto & RB_NOSYNC) 2823 return; 2824 mtx_lock(&sync_mtx); 2825 syncer_state = SYNCER_SHUTTING_DOWN; 2826 rushjob = 0; 2827 mtx_unlock(&sync_mtx); 2828 cv_broadcast(&sync_wakeup); 2829 kproc_shutdown(arg, howto); 2830 } 2831 2832 void 2833 syncer_suspend(void) 2834 { 2835 2836 syncer_shutdown(updateproc, 0); 2837 } 2838 2839 void 2840 syncer_resume(void) 2841 { 2842 2843 mtx_lock(&sync_mtx); 2844 first_printf = 1; 2845 syncer_state = SYNCER_RUNNING; 2846 mtx_unlock(&sync_mtx); 2847 cv_broadcast(&sync_wakeup); 2848 kproc_resume(updateproc); 2849 } 2850 2851 /* 2852 * Move the buffer between the clean and dirty lists of its vnode. 2853 */ 2854 void 2855 reassignbuf(struct buf *bp) 2856 { 2857 struct vnode *vp; 2858 struct bufobj *bo; 2859 int delay; 2860 #ifdef INVARIANTS 2861 struct bufv *bv; 2862 #endif 2863 2864 vp = bp->b_vp; 2865 bo = bp->b_bufobj; 2866 2867 KASSERT((bp->b_flags & B_PAGING) == 0, 2868 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2869 2870 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2871 bp, bp->b_vp, bp->b_flags); 2872 2873 BO_LOCK(bo); 2874 buf_vlist_remove(bp); 2875 2876 /* 2877 * If dirty, put on list of dirty buffers; otherwise insert onto list 2878 * of clean buffers. 2879 */ 2880 if (bp->b_flags & B_DELWRI) { 2881 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2882 switch (vp->v_type) { 2883 case VDIR: 2884 delay = dirdelay; 2885 break; 2886 case VCHR: 2887 delay = metadelay; 2888 break; 2889 default: 2890 delay = filedelay; 2891 } 2892 vn_syncer_add_to_worklist(bo, delay); 2893 } 2894 buf_vlist_add(bp, bo, BX_VNDIRTY); 2895 } else { 2896 buf_vlist_add(bp, bo, BX_VNCLEAN); 2897 2898 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2899 mtx_lock(&sync_mtx); 2900 LIST_REMOVE(bo, bo_synclist); 2901 syncer_worklist_len--; 2902 mtx_unlock(&sync_mtx); 2903 bo->bo_flag &= ~BO_ONWORKLST; 2904 } 2905 } 2906 #ifdef INVARIANTS 2907 bv = &bo->bo_clean; 2908 bp = TAILQ_FIRST(&bv->bv_hd); 2909 KASSERT(bp == NULL || bp->b_bufobj == bo, 2910 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2911 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2912 KASSERT(bp == NULL || bp->b_bufobj == bo, 2913 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2914 bv = &bo->bo_dirty; 2915 bp = TAILQ_FIRST(&bv->bv_hd); 2916 KASSERT(bp == NULL || bp->b_bufobj == bo, 2917 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2918 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2919 KASSERT(bp == NULL || bp->b_bufobj == bo, 2920 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2921 #endif 2922 BO_UNLOCK(bo); 2923 } 2924 2925 static void 2926 v_init_counters(struct vnode *vp) 2927 { 2928 2929 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2930 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2931 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2932 2933 refcount_init(&vp->v_holdcnt, 1); 2934 refcount_init(&vp->v_usecount, 1); 2935 } 2936 2937 /* 2938 * Grab a particular vnode from the free list, increment its 2939 * reference count and lock it. VIRF_DOOMED is set if the vnode 2940 * is being destroyed. Only callers who specify LK_RETRY will 2941 * see doomed vnodes. If inactive processing was delayed in 2942 * vput try to do it here. 2943 * 2944 * usecount is manipulated using atomics without holding any locks. 2945 * 2946 * holdcnt can be manipulated using atomics without holding any locks, 2947 * except when transitioning 1<->0, in which case the interlock is held. 2948 * 2949 * Consumers which don't guarantee liveness of the vnode can use SMR to 2950 * try to get a reference. Note this operation can fail since the vnode 2951 * may be awaiting getting freed by the time they get to it. 2952 */ 2953 enum vgetstate 2954 vget_prep_smr(struct vnode *vp) 2955 { 2956 enum vgetstate vs; 2957 2958 VFS_SMR_ASSERT_ENTERED(); 2959 2960 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2961 vs = VGET_USECOUNT; 2962 } else { 2963 if (vhold_smr(vp)) 2964 vs = VGET_HOLDCNT; 2965 else 2966 vs = VGET_NONE; 2967 } 2968 return (vs); 2969 } 2970 2971 enum vgetstate 2972 vget_prep(struct vnode *vp) 2973 { 2974 enum vgetstate vs; 2975 2976 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2977 vs = VGET_USECOUNT; 2978 } else { 2979 vhold(vp); 2980 vs = VGET_HOLDCNT; 2981 } 2982 return (vs); 2983 } 2984 2985 void 2986 vget_abort(struct vnode *vp, enum vgetstate vs) 2987 { 2988 2989 switch (vs) { 2990 case VGET_USECOUNT: 2991 vrele(vp); 2992 break; 2993 case VGET_HOLDCNT: 2994 vdrop(vp); 2995 break; 2996 default: 2997 __assert_unreachable(); 2998 } 2999 } 3000 3001 int 3002 vget(struct vnode *vp, int flags) 3003 { 3004 enum vgetstate vs; 3005 3006 vs = vget_prep(vp); 3007 return (vget_finish(vp, flags, vs)); 3008 } 3009 3010 int 3011 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3012 { 3013 int error; 3014 3015 if ((flags & LK_INTERLOCK) != 0) 3016 ASSERT_VI_LOCKED(vp, __func__); 3017 else 3018 ASSERT_VI_UNLOCKED(vp, __func__); 3019 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3020 VNPASS(vp->v_holdcnt > 0, vp); 3021 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3022 3023 error = vn_lock(vp, flags); 3024 if (__predict_false(error != 0)) { 3025 vget_abort(vp, vs); 3026 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3027 vp); 3028 return (error); 3029 } 3030 3031 vget_finish_ref(vp, vs); 3032 return (0); 3033 } 3034 3035 void 3036 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3037 { 3038 int old; 3039 3040 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3041 VNPASS(vp->v_holdcnt > 0, vp); 3042 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3043 3044 if (vs == VGET_USECOUNT) 3045 return; 3046 3047 /* 3048 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3049 * the vnode around. Otherwise someone else lended their hold count and 3050 * we have to drop ours. 3051 */ 3052 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3053 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3054 if (old != 0) { 3055 #ifdef INVARIANTS 3056 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3057 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3058 #else 3059 refcount_release(&vp->v_holdcnt); 3060 #endif 3061 } 3062 } 3063 3064 void 3065 vref(struct vnode *vp) 3066 { 3067 enum vgetstate vs; 3068 3069 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3070 vs = vget_prep(vp); 3071 vget_finish_ref(vp, vs); 3072 } 3073 3074 void 3075 vrefact(struct vnode *vp) 3076 { 3077 3078 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3079 #ifdef INVARIANTS 3080 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3081 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3082 #else 3083 refcount_acquire(&vp->v_usecount); 3084 #endif 3085 } 3086 3087 void 3088 vlazy(struct vnode *vp) 3089 { 3090 struct mount *mp; 3091 3092 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3093 3094 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3095 return; 3096 /* 3097 * We may get here for inactive routines after the vnode got doomed. 3098 */ 3099 if (VN_IS_DOOMED(vp)) 3100 return; 3101 mp = vp->v_mount; 3102 mtx_lock(&mp->mnt_listmtx); 3103 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3104 vp->v_mflag |= VMP_LAZYLIST; 3105 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3106 mp->mnt_lazyvnodelistsize++; 3107 } 3108 mtx_unlock(&mp->mnt_listmtx); 3109 } 3110 3111 static void 3112 vunlazy(struct vnode *vp) 3113 { 3114 struct mount *mp; 3115 3116 ASSERT_VI_LOCKED(vp, __func__); 3117 VNPASS(!VN_IS_DOOMED(vp), vp); 3118 3119 mp = vp->v_mount; 3120 mtx_lock(&mp->mnt_listmtx); 3121 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3122 /* 3123 * Don't remove the vnode from the lazy list if another thread 3124 * has increased the hold count. It may have re-enqueued the 3125 * vnode to the lazy list and is now responsible for its 3126 * removal. 3127 */ 3128 if (vp->v_holdcnt == 0) { 3129 vp->v_mflag &= ~VMP_LAZYLIST; 3130 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3131 mp->mnt_lazyvnodelistsize--; 3132 } 3133 mtx_unlock(&mp->mnt_listmtx); 3134 } 3135 3136 /* 3137 * This routine is only meant to be called from vgonel prior to dooming 3138 * the vnode. 3139 */ 3140 static void 3141 vunlazy_gone(struct vnode *vp) 3142 { 3143 struct mount *mp; 3144 3145 ASSERT_VOP_ELOCKED(vp, __func__); 3146 ASSERT_VI_LOCKED(vp, __func__); 3147 VNPASS(!VN_IS_DOOMED(vp), vp); 3148 3149 if (vp->v_mflag & VMP_LAZYLIST) { 3150 mp = vp->v_mount; 3151 mtx_lock(&mp->mnt_listmtx); 3152 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3153 vp->v_mflag &= ~VMP_LAZYLIST; 3154 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3155 mp->mnt_lazyvnodelistsize--; 3156 mtx_unlock(&mp->mnt_listmtx); 3157 } 3158 } 3159 3160 static void 3161 vdefer_inactive(struct vnode *vp) 3162 { 3163 3164 ASSERT_VI_LOCKED(vp, __func__); 3165 VNASSERT(vp->v_holdcnt > 0, vp, 3166 ("%s: vnode without hold count", __func__)); 3167 if (VN_IS_DOOMED(vp)) { 3168 vdropl(vp); 3169 return; 3170 } 3171 if (vp->v_iflag & VI_DEFINACT) { 3172 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3173 vdropl(vp); 3174 return; 3175 } 3176 if (vp->v_usecount > 0) { 3177 vp->v_iflag &= ~VI_OWEINACT; 3178 vdropl(vp); 3179 return; 3180 } 3181 vlazy(vp); 3182 vp->v_iflag |= VI_DEFINACT; 3183 VI_UNLOCK(vp); 3184 counter_u64_add(deferred_inact, 1); 3185 } 3186 3187 static void 3188 vdefer_inactive_unlocked(struct vnode *vp) 3189 { 3190 3191 VI_LOCK(vp); 3192 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3193 vdropl(vp); 3194 return; 3195 } 3196 vdefer_inactive(vp); 3197 } 3198 3199 enum vput_op { VRELE, VPUT, VUNREF }; 3200 3201 /* 3202 * Handle ->v_usecount transitioning to 0. 3203 * 3204 * By releasing the last usecount we take ownership of the hold count which 3205 * provides liveness of the vnode, meaning we have to vdrop. 3206 * 3207 * For all vnodes we may need to perform inactive processing. It requires an 3208 * exclusive lock on the vnode, while it is legal to call here with only a 3209 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3210 * inactive processing gets deferred to the syncer. 3211 * 3212 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3213 * on the lock being held all the way until VOP_INACTIVE. This in particular 3214 * happens with UFS which adds half-constructed vnodes to the hash, where they 3215 * can be found by other code. 3216 */ 3217 static void 3218 vput_final(struct vnode *vp, enum vput_op func) 3219 { 3220 int error; 3221 bool want_unlock; 3222 3223 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3224 VNPASS(vp->v_holdcnt > 0, vp); 3225 3226 VI_LOCK(vp); 3227 3228 /* 3229 * By the time we got here someone else might have transitioned 3230 * the count back to > 0. 3231 */ 3232 if (vp->v_usecount > 0) 3233 goto out; 3234 3235 /* 3236 * If the vnode is doomed vgone already performed inactive processing 3237 * (if needed). 3238 */ 3239 if (VN_IS_DOOMED(vp)) 3240 goto out; 3241 3242 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3243 goto out; 3244 3245 if (vp->v_iflag & VI_DOINGINACT) 3246 goto out; 3247 3248 /* 3249 * Locking operations here will drop the interlock and possibly the 3250 * vnode lock, opening a window where the vnode can get doomed all the 3251 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3252 * perform inactive. 3253 */ 3254 vp->v_iflag |= VI_OWEINACT; 3255 want_unlock = false; 3256 error = 0; 3257 switch (func) { 3258 case VRELE: 3259 switch (VOP_ISLOCKED(vp)) { 3260 case LK_EXCLUSIVE: 3261 break; 3262 case LK_EXCLOTHER: 3263 case 0: 3264 want_unlock = true; 3265 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3266 VI_LOCK(vp); 3267 break; 3268 default: 3269 /* 3270 * The lock has at least one sharer, but we have no way 3271 * to conclude whether this is us. Play it safe and 3272 * defer processing. 3273 */ 3274 error = EAGAIN; 3275 break; 3276 } 3277 break; 3278 case VPUT: 3279 want_unlock = true; 3280 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3281 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3282 LK_NOWAIT); 3283 VI_LOCK(vp); 3284 } 3285 break; 3286 case VUNREF: 3287 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3288 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3289 VI_LOCK(vp); 3290 } 3291 break; 3292 } 3293 if (error == 0) { 3294 if (func == VUNREF) { 3295 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3296 ("recursive vunref")); 3297 vp->v_vflag |= VV_UNREF; 3298 } 3299 for (;;) { 3300 error = vinactive(vp); 3301 if (want_unlock) 3302 VOP_UNLOCK(vp); 3303 if (error != ERELOOKUP || !want_unlock) 3304 break; 3305 VOP_LOCK(vp, LK_EXCLUSIVE); 3306 } 3307 if (func == VUNREF) 3308 vp->v_vflag &= ~VV_UNREF; 3309 vdropl(vp); 3310 } else { 3311 vdefer_inactive(vp); 3312 } 3313 return; 3314 out: 3315 if (func == VPUT) 3316 VOP_UNLOCK(vp); 3317 vdropl(vp); 3318 } 3319 3320 /* 3321 * Decrement ->v_usecount for a vnode. 3322 * 3323 * Releasing the last use count requires additional processing, see vput_final 3324 * above for details. 3325 * 3326 * Comment above each variant denotes lock state on entry and exit. 3327 */ 3328 3329 /* 3330 * in: any 3331 * out: same as passed in 3332 */ 3333 void 3334 vrele(struct vnode *vp) 3335 { 3336 3337 ASSERT_VI_UNLOCKED(vp, __func__); 3338 if (!refcount_release(&vp->v_usecount)) 3339 return; 3340 vput_final(vp, VRELE); 3341 } 3342 3343 /* 3344 * in: locked 3345 * out: unlocked 3346 */ 3347 void 3348 vput(struct vnode *vp) 3349 { 3350 3351 ASSERT_VOP_LOCKED(vp, __func__); 3352 ASSERT_VI_UNLOCKED(vp, __func__); 3353 if (!refcount_release(&vp->v_usecount)) { 3354 VOP_UNLOCK(vp); 3355 return; 3356 } 3357 vput_final(vp, VPUT); 3358 } 3359 3360 /* 3361 * in: locked 3362 * out: locked 3363 */ 3364 void 3365 vunref(struct vnode *vp) 3366 { 3367 3368 ASSERT_VOP_LOCKED(vp, __func__); 3369 ASSERT_VI_UNLOCKED(vp, __func__); 3370 if (!refcount_release(&vp->v_usecount)) 3371 return; 3372 vput_final(vp, VUNREF); 3373 } 3374 3375 void 3376 vhold(struct vnode *vp) 3377 { 3378 int old; 3379 3380 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3381 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3382 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3383 ("%s: wrong hold count %d", __func__, old)); 3384 if (old == 0) 3385 vfs_freevnodes_dec(); 3386 } 3387 3388 void 3389 vholdnz(struct vnode *vp) 3390 { 3391 3392 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3393 #ifdef INVARIANTS 3394 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3395 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3396 ("%s: wrong hold count %d", __func__, old)); 3397 #else 3398 atomic_add_int(&vp->v_holdcnt, 1); 3399 #endif 3400 } 3401 3402 /* 3403 * Grab a hold count unless the vnode is freed. 3404 * 3405 * Only use this routine if vfs smr is the only protection you have against 3406 * freeing the vnode. 3407 * 3408 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3409 * is not set. After the flag is set the vnode becomes immutable to anyone but 3410 * the thread which managed to set the flag. 3411 * 3412 * It may be tempting to replace the loop with: 3413 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3414 * if (count & VHOLD_NO_SMR) { 3415 * backpedal and error out; 3416 * } 3417 * 3418 * However, while this is more performant, it hinders debugging by eliminating 3419 * the previously mentioned invariant. 3420 */ 3421 bool 3422 vhold_smr(struct vnode *vp) 3423 { 3424 int count; 3425 3426 VFS_SMR_ASSERT_ENTERED(); 3427 3428 count = atomic_load_int(&vp->v_holdcnt); 3429 for (;;) { 3430 if (count & VHOLD_NO_SMR) { 3431 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3432 ("non-zero hold count with flags %d\n", count)); 3433 return (false); 3434 } 3435 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3436 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3437 if (count == 0) 3438 vfs_freevnodes_dec(); 3439 return (true); 3440 } 3441 } 3442 } 3443 3444 /* 3445 * Hold a free vnode for recycling. 3446 * 3447 * Note: vnode_init references this comment. 3448 * 3449 * Attempts to recycle only need the global vnode list lock and have no use for 3450 * SMR. 3451 * 3452 * However, vnodes get inserted into the global list before they get fully 3453 * initialized and stay there until UMA decides to free the memory. This in 3454 * particular means the target can be found before it becomes usable and after 3455 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3456 * VHOLD_NO_SMR. 3457 * 3458 * Note: the vnode may gain more references after we transition the count 0->1. 3459 */ 3460 static bool 3461 vhold_recycle_free(struct vnode *vp) 3462 { 3463 int count; 3464 3465 mtx_assert(&vnode_list_mtx, MA_OWNED); 3466 3467 count = atomic_load_int(&vp->v_holdcnt); 3468 for (;;) { 3469 if (count & VHOLD_NO_SMR) { 3470 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3471 ("non-zero hold count with flags %d\n", count)); 3472 return (false); 3473 } 3474 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3475 if (count > 0) { 3476 return (false); 3477 } 3478 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3479 vfs_freevnodes_dec(); 3480 return (true); 3481 } 3482 } 3483 } 3484 3485 static void __noinline 3486 vdbatch_process(struct vdbatch *vd) 3487 { 3488 struct vnode *vp; 3489 int i; 3490 3491 mtx_assert(&vd->lock, MA_OWNED); 3492 MPASS(curthread->td_pinned > 0); 3493 MPASS(vd->index == VDBATCH_SIZE); 3494 3495 mtx_lock(&vnode_list_mtx); 3496 critical_enter(); 3497 freevnodes += vd->freevnodes; 3498 for (i = 0; i < VDBATCH_SIZE; i++) { 3499 vp = vd->tab[i]; 3500 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3501 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3502 MPASS(vp->v_dbatchcpu != NOCPU); 3503 vp->v_dbatchcpu = NOCPU; 3504 } 3505 mtx_unlock(&vnode_list_mtx); 3506 vd->freevnodes = 0; 3507 bzero(vd->tab, sizeof(vd->tab)); 3508 vd->index = 0; 3509 critical_exit(); 3510 } 3511 3512 static void 3513 vdbatch_enqueue(struct vnode *vp) 3514 { 3515 struct vdbatch *vd; 3516 3517 ASSERT_VI_LOCKED(vp, __func__); 3518 VNASSERT(!VN_IS_DOOMED(vp), vp, 3519 ("%s: deferring requeue of a doomed vnode", __func__)); 3520 3521 if (vp->v_dbatchcpu != NOCPU) { 3522 VI_UNLOCK(vp); 3523 return; 3524 } 3525 3526 sched_pin(); 3527 vd = DPCPU_PTR(vd); 3528 mtx_lock(&vd->lock); 3529 MPASS(vd->index < VDBATCH_SIZE); 3530 MPASS(vd->tab[vd->index] == NULL); 3531 /* 3532 * A hack: we depend on being pinned so that we know what to put in 3533 * ->v_dbatchcpu. 3534 */ 3535 vp->v_dbatchcpu = curcpu; 3536 vd->tab[vd->index] = vp; 3537 vd->index++; 3538 VI_UNLOCK(vp); 3539 if (vd->index == VDBATCH_SIZE) 3540 vdbatch_process(vd); 3541 mtx_unlock(&vd->lock); 3542 sched_unpin(); 3543 } 3544 3545 /* 3546 * This routine must only be called for vnodes which are about to be 3547 * deallocated. Supporting dequeue for arbitrary vndoes would require 3548 * validating that the locked batch matches. 3549 */ 3550 static void 3551 vdbatch_dequeue(struct vnode *vp) 3552 { 3553 struct vdbatch *vd; 3554 int i; 3555 short cpu; 3556 3557 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3558 ("%s: called for a used vnode\n", __func__)); 3559 3560 cpu = vp->v_dbatchcpu; 3561 if (cpu == NOCPU) 3562 return; 3563 3564 vd = DPCPU_ID_PTR(cpu, vd); 3565 mtx_lock(&vd->lock); 3566 for (i = 0; i < vd->index; i++) { 3567 if (vd->tab[i] != vp) 3568 continue; 3569 vp->v_dbatchcpu = NOCPU; 3570 vd->index--; 3571 vd->tab[i] = vd->tab[vd->index]; 3572 vd->tab[vd->index] = NULL; 3573 break; 3574 } 3575 mtx_unlock(&vd->lock); 3576 /* 3577 * Either we dequeued the vnode above or the target CPU beat us to it. 3578 */ 3579 MPASS(vp->v_dbatchcpu == NOCPU); 3580 } 3581 3582 /* 3583 * Drop the hold count of the vnode. If this is the last reference to 3584 * the vnode we place it on the free list unless it has been vgone'd 3585 * (marked VIRF_DOOMED) in which case we will free it. 3586 * 3587 * Because the vnode vm object keeps a hold reference on the vnode if 3588 * there is at least one resident non-cached page, the vnode cannot 3589 * leave the active list without the page cleanup done. 3590 */ 3591 static void __noinline 3592 vdropl_final(struct vnode *vp) 3593 { 3594 3595 ASSERT_VI_LOCKED(vp, __func__); 3596 VNPASS(VN_IS_DOOMED(vp), vp); 3597 /* 3598 * Set the VHOLD_NO_SMR flag. 3599 * 3600 * We may be racing against vhold_smr. If they win we can just pretend 3601 * we never got this far, they will vdrop later. 3602 */ 3603 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3604 vfs_freevnodes_inc(); 3605 VI_UNLOCK(vp); 3606 /* 3607 * We lost the aforementioned race. Any subsequent access is 3608 * invalid as they might have managed to vdropl on their own. 3609 */ 3610 return; 3611 } 3612 /* 3613 * Don't bump freevnodes as this one is going away. 3614 */ 3615 freevnode(vp); 3616 } 3617 3618 void 3619 vdrop(struct vnode *vp) 3620 { 3621 3622 ASSERT_VI_UNLOCKED(vp, __func__); 3623 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3624 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3625 return; 3626 VI_LOCK(vp); 3627 vdropl(vp); 3628 } 3629 3630 static void __always_inline 3631 vdropl_impl(struct vnode *vp, bool enqueue) 3632 { 3633 3634 ASSERT_VI_LOCKED(vp, __func__); 3635 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3636 if (!refcount_release(&vp->v_holdcnt)) { 3637 VI_UNLOCK(vp); 3638 return; 3639 } 3640 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3641 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3642 if (VN_IS_DOOMED(vp)) { 3643 vdropl_final(vp); 3644 return; 3645 } 3646 3647 vfs_freevnodes_inc(); 3648 if (vp->v_mflag & VMP_LAZYLIST) { 3649 vunlazy(vp); 3650 } 3651 3652 if (!enqueue) { 3653 VI_UNLOCK(vp); 3654 return; 3655 } 3656 3657 /* 3658 * Also unlocks the interlock. We can't assert on it as we 3659 * released our hold and by now the vnode might have been 3660 * freed. 3661 */ 3662 vdbatch_enqueue(vp); 3663 } 3664 3665 void 3666 vdropl(struct vnode *vp) 3667 { 3668 3669 vdropl_impl(vp, true); 3670 } 3671 3672 /* 3673 * vdrop a vnode when recycling 3674 * 3675 * This is a special case routine only to be used when recycling, differs from 3676 * regular vdrop by not requeieing the vnode on LRU. 3677 * 3678 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3679 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3680 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3681 * loop which can last for as long as writes are frozen. 3682 */ 3683 static void 3684 vdropl_recycle(struct vnode *vp) 3685 { 3686 3687 vdropl_impl(vp, false); 3688 } 3689 3690 static void 3691 vdrop_recycle(struct vnode *vp) 3692 { 3693 3694 VI_LOCK(vp); 3695 vdropl_recycle(vp); 3696 } 3697 3698 /* 3699 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3700 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3701 */ 3702 static int 3703 vinactivef(struct vnode *vp) 3704 { 3705 struct vm_object *obj; 3706 int error; 3707 3708 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3709 ASSERT_VI_LOCKED(vp, "vinactive"); 3710 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3711 ("vinactive: recursed on VI_DOINGINACT")); 3712 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3713 vp->v_iflag |= VI_DOINGINACT; 3714 vp->v_iflag &= ~VI_OWEINACT; 3715 VI_UNLOCK(vp); 3716 /* 3717 * Before moving off the active list, we must be sure that any 3718 * modified pages are converted into the vnode's dirty 3719 * buffers, since these will no longer be checked once the 3720 * vnode is on the inactive list. 3721 * 3722 * The write-out of the dirty pages is asynchronous. At the 3723 * point that VOP_INACTIVE() is called, there could still be 3724 * pending I/O and dirty pages in the object. 3725 */ 3726 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3727 vm_object_mightbedirty(obj)) { 3728 VM_OBJECT_WLOCK(obj); 3729 vm_object_page_clean(obj, 0, 0, 0); 3730 VM_OBJECT_WUNLOCK(obj); 3731 } 3732 error = VOP_INACTIVE(vp); 3733 VI_LOCK(vp); 3734 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3735 ("vinactive: lost VI_DOINGINACT")); 3736 vp->v_iflag &= ~VI_DOINGINACT; 3737 return (error); 3738 } 3739 3740 int 3741 vinactive(struct vnode *vp) 3742 { 3743 3744 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3745 ASSERT_VI_LOCKED(vp, "vinactive"); 3746 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3747 3748 if ((vp->v_iflag & VI_OWEINACT) == 0) 3749 return (0); 3750 if (vp->v_iflag & VI_DOINGINACT) 3751 return (0); 3752 if (vp->v_usecount > 0) { 3753 vp->v_iflag &= ~VI_OWEINACT; 3754 return (0); 3755 } 3756 return (vinactivef(vp)); 3757 } 3758 3759 /* 3760 * Remove any vnodes in the vnode table belonging to mount point mp. 3761 * 3762 * If FORCECLOSE is not specified, there should not be any active ones, 3763 * return error if any are found (nb: this is a user error, not a 3764 * system error). If FORCECLOSE is specified, detach any active vnodes 3765 * that are found. 3766 * 3767 * If WRITECLOSE is set, only flush out regular file vnodes open for 3768 * writing. 3769 * 3770 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3771 * 3772 * `rootrefs' specifies the base reference count for the root vnode 3773 * of this filesystem. The root vnode is considered busy if its 3774 * v_usecount exceeds this value. On a successful return, vflush(, td) 3775 * will call vrele() on the root vnode exactly rootrefs times. 3776 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3777 * be zero. 3778 */ 3779 #ifdef DIAGNOSTIC 3780 static int busyprt = 0; /* print out busy vnodes */ 3781 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3782 #endif 3783 3784 int 3785 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3786 { 3787 struct vnode *vp, *mvp, *rootvp = NULL; 3788 struct vattr vattr; 3789 int busy = 0, error; 3790 3791 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3792 rootrefs, flags); 3793 if (rootrefs > 0) { 3794 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3795 ("vflush: bad args")); 3796 /* 3797 * Get the filesystem root vnode. We can vput() it 3798 * immediately, since with rootrefs > 0, it won't go away. 3799 */ 3800 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3801 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3802 __func__, error); 3803 return (error); 3804 } 3805 vput(rootvp); 3806 } 3807 loop: 3808 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3809 vholdl(vp); 3810 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3811 if (error) { 3812 vdrop(vp); 3813 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3814 goto loop; 3815 } 3816 /* 3817 * Skip over a vnodes marked VV_SYSTEM. 3818 */ 3819 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3820 VOP_UNLOCK(vp); 3821 vdrop(vp); 3822 continue; 3823 } 3824 /* 3825 * If WRITECLOSE is set, flush out unlinked but still open 3826 * files (even if open only for reading) and regular file 3827 * vnodes open for writing. 3828 */ 3829 if (flags & WRITECLOSE) { 3830 if (vp->v_object != NULL) { 3831 VM_OBJECT_WLOCK(vp->v_object); 3832 vm_object_page_clean(vp->v_object, 0, 0, 0); 3833 VM_OBJECT_WUNLOCK(vp->v_object); 3834 } 3835 do { 3836 error = VOP_FSYNC(vp, MNT_WAIT, td); 3837 } while (error == ERELOOKUP); 3838 if (error != 0) { 3839 VOP_UNLOCK(vp); 3840 vdrop(vp); 3841 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3842 return (error); 3843 } 3844 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3845 VI_LOCK(vp); 3846 3847 if ((vp->v_type == VNON || 3848 (error == 0 && vattr.va_nlink > 0)) && 3849 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3850 VOP_UNLOCK(vp); 3851 vdropl(vp); 3852 continue; 3853 } 3854 } else 3855 VI_LOCK(vp); 3856 /* 3857 * With v_usecount == 0, all we need to do is clear out the 3858 * vnode data structures and we are done. 3859 * 3860 * If FORCECLOSE is set, forcibly close the vnode. 3861 */ 3862 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3863 vgonel(vp); 3864 } else { 3865 busy++; 3866 #ifdef DIAGNOSTIC 3867 if (busyprt) 3868 vn_printf(vp, "vflush: busy vnode "); 3869 #endif 3870 } 3871 VOP_UNLOCK(vp); 3872 vdropl(vp); 3873 } 3874 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3875 /* 3876 * If just the root vnode is busy, and if its refcount 3877 * is equal to `rootrefs', then go ahead and kill it. 3878 */ 3879 VI_LOCK(rootvp); 3880 KASSERT(busy > 0, ("vflush: not busy")); 3881 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3882 ("vflush: usecount %d < rootrefs %d", 3883 rootvp->v_usecount, rootrefs)); 3884 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3885 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3886 vgone(rootvp); 3887 VOP_UNLOCK(rootvp); 3888 busy = 0; 3889 } else 3890 VI_UNLOCK(rootvp); 3891 } 3892 if (busy) { 3893 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3894 busy); 3895 return (EBUSY); 3896 } 3897 for (; rootrefs > 0; rootrefs--) 3898 vrele(rootvp); 3899 return (0); 3900 } 3901 3902 /* 3903 * Recycle an unused vnode to the front of the free list. 3904 */ 3905 int 3906 vrecycle(struct vnode *vp) 3907 { 3908 int recycled; 3909 3910 VI_LOCK(vp); 3911 recycled = vrecyclel(vp); 3912 VI_UNLOCK(vp); 3913 return (recycled); 3914 } 3915 3916 /* 3917 * vrecycle, with the vp interlock held. 3918 */ 3919 int 3920 vrecyclel(struct vnode *vp) 3921 { 3922 int recycled; 3923 3924 ASSERT_VOP_ELOCKED(vp, __func__); 3925 ASSERT_VI_LOCKED(vp, __func__); 3926 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3927 recycled = 0; 3928 if (vp->v_usecount == 0) { 3929 recycled = 1; 3930 vgonel(vp); 3931 } 3932 return (recycled); 3933 } 3934 3935 /* 3936 * Eliminate all activity associated with a vnode 3937 * in preparation for reuse. 3938 */ 3939 void 3940 vgone(struct vnode *vp) 3941 { 3942 VI_LOCK(vp); 3943 vgonel(vp); 3944 VI_UNLOCK(vp); 3945 } 3946 3947 /* 3948 * Notify upper mounts about reclaimed or unlinked vnode. 3949 */ 3950 void 3951 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 3952 { 3953 struct mount *mp; 3954 struct mount_upper_node *ump; 3955 3956 mp = atomic_load_ptr(&vp->v_mount); 3957 if (mp == NULL) 3958 return; 3959 if (TAILQ_EMPTY(&mp->mnt_notify)) 3960 return; 3961 3962 MNT_ILOCK(mp); 3963 mp->mnt_upper_pending++; 3964 KASSERT(mp->mnt_upper_pending > 0, 3965 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 3966 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 3967 MNT_IUNLOCK(mp); 3968 switch (event) { 3969 case VFS_NOTIFY_UPPER_RECLAIM: 3970 VFS_RECLAIM_LOWERVP(ump->mp, vp); 3971 break; 3972 case VFS_NOTIFY_UPPER_UNLINK: 3973 VFS_UNLINK_LOWERVP(ump->mp, vp); 3974 break; 3975 } 3976 MNT_ILOCK(mp); 3977 } 3978 mp->mnt_upper_pending--; 3979 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 3980 mp->mnt_upper_pending == 0) { 3981 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 3982 wakeup(&mp->mnt_uppers); 3983 } 3984 MNT_IUNLOCK(mp); 3985 } 3986 3987 /* 3988 * vgone, with the vp interlock held. 3989 */ 3990 static void 3991 vgonel(struct vnode *vp) 3992 { 3993 struct thread *td; 3994 struct mount *mp; 3995 vm_object_t object; 3996 bool active, doinginact, oweinact; 3997 3998 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3999 ASSERT_VI_LOCKED(vp, "vgonel"); 4000 VNASSERT(vp->v_holdcnt, vp, 4001 ("vgonel: vp %p has no reference.", vp)); 4002 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4003 td = curthread; 4004 4005 /* 4006 * Don't vgonel if we're already doomed. 4007 */ 4008 if (VN_IS_DOOMED(vp)) 4009 return; 4010 /* 4011 * Paired with freevnode. 4012 */ 4013 vn_seqc_write_begin_locked(vp); 4014 vunlazy_gone(vp); 4015 vn_irflag_set_locked(vp, VIRF_DOOMED); 4016 4017 /* 4018 * Check to see if the vnode is in use. If so, we have to 4019 * call VOP_CLOSE() and VOP_INACTIVE(). 4020 * 4021 * It could be that VOP_INACTIVE() requested reclamation, in 4022 * which case we should avoid recursion, so check 4023 * VI_DOINGINACT. This is not precise but good enough. 4024 */ 4025 active = vp->v_usecount > 0; 4026 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4027 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4028 4029 /* 4030 * If we need to do inactive VI_OWEINACT will be set. 4031 */ 4032 if (vp->v_iflag & VI_DEFINACT) { 4033 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4034 vp->v_iflag &= ~VI_DEFINACT; 4035 vdropl(vp); 4036 } else { 4037 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4038 VI_UNLOCK(vp); 4039 } 4040 cache_purge_vgone(vp); 4041 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4042 4043 /* 4044 * If purging an active vnode, it must be closed and 4045 * deactivated before being reclaimed. 4046 */ 4047 if (active) 4048 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4049 if (!doinginact) { 4050 do { 4051 if (oweinact || active) { 4052 VI_LOCK(vp); 4053 vinactivef(vp); 4054 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4055 VI_UNLOCK(vp); 4056 } 4057 } while (oweinact); 4058 } 4059 if (vp->v_type == VSOCK) 4060 vfs_unp_reclaim(vp); 4061 4062 /* 4063 * Clean out any buffers associated with the vnode. 4064 * If the flush fails, just toss the buffers. 4065 */ 4066 mp = NULL; 4067 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4068 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4069 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4070 while (vinvalbuf(vp, 0, 0, 0) != 0) 4071 ; 4072 } 4073 4074 BO_LOCK(&vp->v_bufobj); 4075 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4076 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4077 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4078 vp->v_bufobj.bo_clean.bv_cnt == 0, 4079 ("vp %p bufobj not invalidated", vp)); 4080 4081 /* 4082 * For VMIO bufobj, BO_DEAD is set later, or in 4083 * vm_object_terminate() after the object's page queue is 4084 * flushed. 4085 */ 4086 object = vp->v_bufobj.bo_object; 4087 if (object == NULL) 4088 vp->v_bufobj.bo_flag |= BO_DEAD; 4089 BO_UNLOCK(&vp->v_bufobj); 4090 4091 /* 4092 * Handle the VM part. Tmpfs handles v_object on its own (the 4093 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4094 * should not touch the object borrowed from the lower vnode 4095 * (the handle check). 4096 */ 4097 if (object != NULL && object->type == OBJT_VNODE && 4098 object->handle == vp) 4099 vnode_destroy_vobject(vp); 4100 4101 /* 4102 * Reclaim the vnode. 4103 */ 4104 if (VOP_RECLAIM(vp)) 4105 panic("vgone: cannot reclaim"); 4106 if (mp != NULL) 4107 vn_finished_secondary_write(mp); 4108 VNASSERT(vp->v_object == NULL, vp, 4109 ("vop_reclaim left v_object vp=%p", vp)); 4110 /* 4111 * Clear the advisory locks and wake up waiting threads. 4112 */ 4113 (void)VOP_ADVLOCKPURGE(vp); 4114 vp->v_lockf = NULL; 4115 /* 4116 * Delete from old mount point vnode list. 4117 */ 4118 delmntque(vp); 4119 /* 4120 * Done with purge, reset to the standard lock and invalidate 4121 * the vnode. 4122 */ 4123 VI_LOCK(vp); 4124 vp->v_vnlock = &vp->v_lock; 4125 vp->v_op = &dead_vnodeops; 4126 vp->v_type = VBAD; 4127 } 4128 4129 /* 4130 * Print out a description of a vnode. 4131 */ 4132 static const char * const typename[] = 4133 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4134 "VMARKER"}; 4135 4136 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4137 "new hold count flag not added to vn_printf"); 4138 4139 void 4140 vn_printf(struct vnode *vp, const char *fmt, ...) 4141 { 4142 va_list ap; 4143 char buf[256], buf2[16]; 4144 u_long flags; 4145 u_int holdcnt; 4146 short irflag; 4147 4148 va_start(ap, fmt); 4149 vprintf(fmt, ap); 4150 va_end(ap); 4151 printf("%p: ", (void *)vp); 4152 printf("type %s\n", typename[vp->v_type]); 4153 holdcnt = atomic_load_int(&vp->v_holdcnt); 4154 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4155 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4156 vp->v_seqc_users); 4157 switch (vp->v_type) { 4158 case VDIR: 4159 printf(" mountedhere %p\n", vp->v_mountedhere); 4160 break; 4161 case VCHR: 4162 printf(" rdev %p\n", vp->v_rdev); 4163 break; 4164 case VSOCK: 4165 printf(" socket %p\n", vp->v_unpcb); 4166 break; 4167 case VFIFO: 4168 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4169 break; 4170 default: 4171 printf("\n"); 4172 break; 4173 } 4174 buf[0] = '\0'; 4175 buf[1] = '\0'; 4176 if (holdcnt & VHOLD_NO_SMR) 4177 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4178 printf(" hold count flags (%s)\n", buf + 1); 4179 4180 buf[0] = '\0'; 4181 buf[1] = '\0'; 4182 irflag = vn_irflag_read(vp); 4183 if (irflag & VIRF_DOOMED) 4184 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4185 if (irflag & VIRF_PGREAD) 4186 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4187 if (irflag & VIRF_MOUNTPOINT) 4188 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4189 if (irflag & VIRF_TEXT_REF) 4190 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4191 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4192 if (flags != 0) { 4193 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4194 strlcat(buf, buf2, sizeof(buf)); 4195 } 4196 if (vp->v_vflag & VV_ROOT) 4197 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4198 if (vp->v_vflag & VV_ISTTY) 4199 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4200 if (vp->v_vflag & VV_NOSYNC) 4201 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4202 if (vp->v_vflag & VV_ETERNALDEV) 4203 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4204 if (vp->v_vflag & VV_CACHEDLABEL) 4205 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4206 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4207 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4208 if (vp->v_vflag & VV_COPYONWRITE) 4209 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4210 if (vp->v_vflag & VV_SYSTEM) 4211 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4212 if (vp->v_vflag & VV_PROCDEP) 4213 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4214 if (vp->v_vflag & VV_DELETED) 4215 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4216 if (vp->v_vflag & VV_MD) 4217 strlcat(buf, "|VV_MD", sizeof(buf)); 4218 if (vp->v_vflag & VV_FORCEINSMQ) 4219 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4220 if (vp->v_vflag & VV_READLINK) 4221 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4222 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4223 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4224 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4225 if (flags != 0) { 4226 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4227 strlcat(buf, buf2, sizeof(buf)); 4228 } 4229 if (vp->v_iflag & VI_MOUNT) 4230 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4231 if (vp->v_iflag & VI_DOINGINACT) 4232 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4233 if (vp->v_iflag & VI_OWEINACT) 4234 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4235 if (vp->v_iflag & VI_DEFINACT) 4236 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4237 if (vp->v_iflag & VI_FOPENING) 4238 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4239 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4240 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4241 if (flags != 0) { 4242 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4243 strlcat(buf, buf2, sizeof(buf)); 4244 } 4245 if (vp->v_mflag & VMP_LAZYLIST) 4246 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4247 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4248 if (flags != 0) { 4249 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4250 strlcat(buf, buf2, sizeof(buf)); 4251 } 4252 printf(" flags (%s)", buf + 1); 4253 if (mtx_owned(VI_MTX(vp))) 4254 printf(" VI_LOCKed"); 4255 printf("\n"); 4256 if (vp->v_object != NULL) 4257 printf(" v_object %p ref %d pages %d " 4258 "cleanbuf %d dirtybuf %d\n", 4259 vp->v_object, vp->v_object->ref_count, 4260 vp->v_object->resident_page_count, 4261 vp->v_bufobj.bo_clean.bv_cnt, 4262 vp->v_bufobj.bo_dirty.bv_cnt); 4263 printf(" "); 4264 lockmgr_printinfo(vp->v_vnlock); 4265 if (vp->v_data != NULL) 4266 VOP_PRINT(vp); 4267 } 4268 4269 #ifdef DDB 4270 /* 4271 * List all of the locked vnodes in the system. 4272 * Called when debugging the kernel. 4273 */ 4274 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4275 { 4276 struct mount *mp; 4277 struct vnode *vp; 4278 4279 /* 4280 * Note: because this is DDB, we can't obey the locking semantics 4281 * for these structures, which means we could catch an inconsistent 4282 * state and dereference a nasty pointer. Not much to be done 4283 * about that. 4284 */ 4285 db_printf("Locked vnodes\n"); 4286 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4287 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4288 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4289 vn_printf(vp, "vnode "); 4290 } 4291 } 4292 } 4293 4294 /* 4295 * Show details about the given vnode. 4296 */ 4297 DB_SHOW_COMMAND(vnode, db_show_vnode) 4298 { 4299 struct vnode *vp; 4300 4301 if (!have_addr) 4302 return; 4303 vp = (struct vnode *)addr; 4304 vn_printf(vp, "vnode "); 4305 } 4306 4307 /* 4308 * Show details about the given mount point. 4309 */ 4310 DB_SHOW_COMMAND(mount, db_show_mount) 4311 { 4312 struct mount *mp; 4313 struct vfsopt *opt; 4314 struct statfs *sp; 4315 struct vnode *vp; 4316 char buf[512]; 4317 uint64_t mflags; 4318 u_int flags; 4319 4320 if (!have_addr) { 4321 /* No address given, print short info about all mount points. */ 4322 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4323 db_printf("%p %s on %s (%s)\n", mp, 4324 mp->mnt_stat.f_mntfromname, 4325 mp->mnt_stat.f_mntonname, 4326 mp->mnt_stat.f_fstypename); 4327 if (db_pager_quit) 4328 break; 4329 } 4330 db_printf("\nMore info: show mount <addr>\n"); 4331 return; 4332 } 4333 4334 mp = (struct mount *)addr; 4335 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4336 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4337 4338 buf[0] = '\0'; 4339 mflags = mp->mnt_flag; 4340 #define MNT_FLAG(flag) do { \ 4341 if (mflags & (flag)) { \ 4342 if (buf[0] != '\0') \ 4343 strlcat(buf, ", ", sizeof(buf)); \ 4344 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4345 mflags &= ~(flag); \ 4346 } \ 4347 } while (0) 4348 MNT_FLAG(MNT_RDONLY); 4349 MNT_FLAG(MNT_SYNCHRONOUS); 4350 MNT_FLAG(MNT_NOEXEC); 4351 MNT_FLAG(MNT_NOSUID); 4352 MNT_FLAG(MNT_NFS4ACLS); 4353 MNT_FLAG(MNT_UNION); 4354 MNT_FLAG(MNT_ASYNC); 4355 MNT_FLAG(MNT_SUIDDIR); 4356 MNT_FLAG(MNT_SOFTDEP); 4357 MNT_FLAG(MNT_NOSYMFOLLOW); 4358 MNT_FLAG(MNT_GJOURNAL); 4359 MNT_FLAG(MNT_MULTILABEL); 4360 MNT_FLAG(MNT_ACLS); 4361 MNT_FLAG(MNT_NOATIME); 4362 MNT_FLAG(MNT_NOCLUSTERR); 4363 MNT_FLAG(MNT_NOCLUSTERW); 4364 MNT_FLAG(MNT_SUJ); 4365 MNT_FLAG(MNT_EXRDONLY); 4366 MNT_FLAG(MNT_EXPORTED); 4367 MNT_FLAG(MNT_DEFEXPORTED); 4368 MNT_FLAG(MNT_EXPORTANON); 4369 MNT_FLAG(MNT_EXKERB); 4370 MNT_FLAG(MNT_EXPUBLIC); 4371 MNT_FLAG(MNT_LOCAL); 4372 MNT_FLAG(MNT_QUOTA); 4373 MNT_FLAG(MNT_ROOTFS); 4374 MNT_FLAG(MNT_USER); 4375 MNT_FLAG(MNT_IGNORE); 4376 MNT_FLAG(MNT_UPDATE); 4377 MNT_FLAG(MNT_DELEXPORT); 4378 MNT_FLAG(MNT_RELOAD); 4379 MNT_FLAG(MNT_FORCE); 4380 MNT_FLAG(MNT_SNAPSHOT); 4381 MNT_FLAG(MNT_BYFSID); 4382 #undef MNT_FLAG 4383 if (mflags != 0) { 4384 if (buf[0] != '\0') 4385 strlcat(buf, ", ", sizeof(buf)); 4386 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4387 "0x%016jx", mflags); 4388 } 4389 db_printf(" mnt_flag = %s\n", buf); 4390 4391 buf[0] = '\0'; 4392 flags = mp->mnt_kern_flag; 4393 #define MNT_KERN_FLAG(flag) do { \ 4394 if (flags & (flag)) { \ 4395 if (buf[0] != '\0') \ 4396 strlcat(buf, ", ", sizeof(buf)); \ 4397 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4398 flags &= ~(flag); \ 4399 } \ 4400 } while (0) 4401 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4402 MNT_KERN_FLAG(MNTK_ASYNC); 4403 MNT_KERN_FLAG(MNTK_SOFTDEP); 4404 MNT_KERN_FLAG(MNTK_NOMSYNC); 4405 MNT_KERN_FLAG(MNTK_DRAINING); 4406 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4407 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4408 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4409 MNT_KERN_FLAG(MNTK_NO_IOPF); 4410 MNT_KERN_FLAG(MNTK_RECURSE); 4411 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4412 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4413 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4414 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4415 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4416 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4417 MNT_KERN_FLAG(MNTK_NOASYNC); 4418 MNT_KERN_FLAG(MNTK_UNMOUNT); 4419 MNT_KERN_FLAG(MNTK_MWAIT); 4420 MNT_KERN_FLAG(MNTK_SUSPEND); 4421 MNT_KERN_FLAG(MNTK_SUSPEND2); 4422 MNT_KERN_FLAG(MNTK_SUSPENDED); 4423 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4424 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4425 #undef MNT_KERN_FLAG 4426 if (flags != 0) { 4427 if (buf[0] != '\0') 4428 strlcat(buf, ", ", sizeof(buf)); 4429 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4430 "0x%08x", flags); 4431 } 4432 db_printf(" mnt_kern_flag = %s\n", buf); 4433 4434 db_printf(" mnt_opt = "); 4435 opt = TAILQ_FIRST(mp->mnt_opt); 4436 if (opt != NULL) { 4437 db_printf("%s", opt->name); 4438 opt = TAILQ_NEXT(opt, link); 4439 while (opt != NULL) { 4440 db_printf(", %s", opt->name); 4441 opt = TAILQ_NEXT(opt, link); 4442 } 4443 } 4444 db_printf("\n"); 4445 4446 sp = &mp->mnt_stat; 4447 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4448 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4449 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4450 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4451 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4452 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4453 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4454 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4455 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4456 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4457 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4458 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4459 4460 db_printf(" mnt_cred = { uid=%u ruid=%u", 4461 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4462 if (jailed(mp->mnt_cred)) 4463 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4464 db_printf(" }\n"); 4465 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4466 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4467 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4468 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4469 db_printf(" mnt_lazyvnodelistsize = %d\n", 4470 mp->mnt_lazyvnodelistsize); 4471 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4472 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4473 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4474 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4475 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4476 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4477 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4478 db_printf(" mnt_secondary_accwrites = %d\n", 4479 mp->mnt_secondary_accwrites); 4480 db_printf(" mnt_gjprovider = %s\n", 4481 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4482 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4483 4484 db_printf("\n\nList of active vnodes\n"); 4485 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4486 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4487 vn_printf(vp, "vnode "); 4488 if (db_pager_quit) 4489 break; 4490 } 4491 } 4492 db_printf("\n\nList of inactive vnodes\n"); 4493 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4494 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4495 vn_printf(vp, "vnode "); 4496 if (db_pager_quit) 4497 break; 4498 } 4499 } 4500 } 4501 #endif /* DDB */ 4502 4503 /* 4504 * Fill in a struct xvfsconf based on a struct vfsconf. 4505 */ 4506 static int 4507 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4508 { 4509 struct xvfsconf xvfsp; 4510 4511 bzero(&xvfsp, sizeof(xvfsp)); 4512 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4513 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4514 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4515 xvfsp.vfc_flags = vfsp->vfc_flags; 4516 /* 4517 * These are unused in userland, we keep them 4518 * to not break binary compatibility. 4519 */ 4520 xvfsp.vfc_vfsops = NULL; 4521 xvfsp.vfc_next = NULL; 4522 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4523 } 4524 4525 #ifdef COMPAT_FREEBSD32 4526 struct xvfsconf32 { 4527 uint32_t vfc_vfsops; 4528 char vfc_name[MFSNAMELEN]; 4529 int32_t vfc_typenum; 4530 int32_t vfc_refcount; 4531 int32_t vfc_flags; 4532 uint32_t vfc_next; 4533 }; 4534 4535 static int 4536 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4537 { 4538 struct xvfsconf32 xvfsp; 4539 4540 bzero(&xvfsp, sizeof(xvfsp)); 4541 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4542 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4543 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4544 xvfsp.vfc_flags = vfsp->vfc_flags; 4545 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4546 } 4547 #endif 4548 4549 /* 4550 * Top level filesystem related information gathering. 4551 */ 4552 static int 4553 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4554 { 4555 struct vfsconf *vfsp; 4556 int error; 4557 4558 error = 0; 4559 vfsconf_slock(); 4560 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4561 #ifdef COMPAT_FREEBSD32 4562 if (req->flags & SCTL_MASK32) 4563 error = vfsconf2x32(req, vfsp); 4564 else 4565 #endif 4566 error = vfsconf2x(req, vfsp); 4567 if (error) 4568 break; 4569 } 4570 vfsconf_sunlock(); 4571 return (error); 4572 } 4573 4574 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4575 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4576 "S,xvfsconf", "List of all configured filesystems"); 4577 4578 #ifndef BURN_BRIDGES 4579 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4580 4581 static int 4582 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4583 { 4584 int *name = (int *)arg1 - 1; /* XXX */ 4585 u_int namelen = arg2 + 1; /* XXX */ 4586 struct vfsconf *vfsp; 4587 4588 log(LOG_WARNING, "userland calling deprecated sysctl, " 4589 "please rebuild world\n"); 4590 4591 #if 1 || defined(COMPAT_PRELITE2) 4592 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 4593 if (namelen == 1) 4594 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4595 #endif 4596 4597 switch (name[1]) { 4598 case VFS_MAXTYPENUM: 4599 if (namelen != 2) 4600 return (ENOTDIR); 4601 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4602 case VFS_CONF: 4603 if (namelen != 3) 4604 return (ENOTDIR); /* overloaded */ 4605 vfsconf_slock(); 4606 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4607 if (vfsp->vfc_typenum == name[2]) 4608 break; 4609 } 4610 vfsconf_sunlock(); 4611 if (vfsp == NULL) 4612 return (EOPNOTSUPP); 4613 #ifdef COMPAT_FREEBSD32 4614 if (req->flags & SCTL_MASK32) 4615 return (vfsconf2x32(req, vfsp)); 4616 else 4617 #endif 4618 return (vfsconf2x(req, vfsp)); 4619 } 4620 return (EOPNOTSUPP); 4621 } 4622 4623 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4624 CTLFLAG_MPSAFE, vfs_sysctl, 4625 "Generic filesystem"); 4626 4627 #if 1 || defined(COMPAT_PRELITE2) 4628 4629 static int 4630 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4631 { 4632 int error; 4633 struct vfsconf *vfsp; 4634 struct ovfsconf ovfs; 4635 4636 vfsconf_slock(); 4637 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4638 bzero(&ovfs, sizeof(ovfs)); 4639 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4640 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4641 ovfs.vfc_index = vfsp->vfc_typenum; 4642 ovfs.vfc_refcount = vfsp->vfc_refcount; 4643 ovfs.vfc_flags = vfsp->vfc_flags; 4644 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4645 if (error != 0) { 4646 vfsconf_sunlock(); 4647 return (error); 4648 } 4649 } 4650 vfsconf_sunlock(); 4651 return (0); 4652 } 4653 4654 #endif /* 1 || COMPAT_PRELITE2 */ 4655 #endif /* !BURN_BRIDGES */ 4656 4657 #define KINFO_VNODESLOP 10 4658 #ifdef notyet 4659 /* 4660 * Dump vnode list (via sysctl). 4661 */ 4662 /* ARGSUSED */ 4663 static int 4664 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4665 { 4666 struct xvnode *xvn; 4667 struct mount *mp; 4668 struct vnode *vp; 4669 int error, len, n; 4670 4671 /* 4672 * Stale numvnodes access is not fatal here. 4673 */ 4674 req->lock = 0; 4675 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4676 if (!req->oldptr) 4677 /* Make an estimate */ 4678 return (SYSCTL_OUT(req, 0, len)); 4679 4680 error = sysctl_wire_old_buffer(req, 0); 4681 if (error != 0) 4682 return (error); 4683 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4684 n = 0; 4685 mtx_lock(&mountlist_mtx); 4686 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4687 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4688 continue; 4689 MNT_ILOCK(mp); 4690 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4691 if (n == len) 4692 break; 4693 vref(vp); 4694 xvn[n].xv_size = sizeof *xvn; 4695 xvn[n].xv_vnode = vp; 4696 xvn[n].xv_id = 0; /* XXX compat */ 4697 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4698 XV_COPY(usecount); 4699 XV_COPY(writecount); 4700 XV_COPY(holdcnt); 4701 XV_COPY(mount); 4702 XV_COPY(numoutput); 4703 XV_COPY(type); 4704 #undef XV_COPY 4705 xvn[n].xv_flag = vp->v_vflag; 4706 4707 switch (vp->v_type) { 4708 case VREG: 4709 case VDIR: 4710 case VLNK: 4711 break; 4712 case VBLK: 4713 case VCHR: 4714 if (vp->v_rdev == NULL) { 4715 vrele(vp); 4716 continue; 4717 } 4718 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4719 break; 4720 case VSOCK: 4721 xvn[n].xv_socket = vp->v_socket; 4722 break; 4723 case VFIFO: 4724 xvn[n].xv_fifo = vp->v_fifoinfo; 4725 break; 4726 case VNON: 4727 case VBAD: 4728 default: 4729 /* shouldn't happen? */ 4730 vrele(vp); 4731 continue; 4732 } 4733 vrele(vp); 4734 ++n; 4735 } 4736 MNT_IUNLOCK(mp); 4737 mtx_lock(&mountlist_mtx); 4738 vfs_unbusy(mp); 4739 if (n == len) 4740 break; 4741 } 4742 mtx_unlock(&mountlist_mtx); 4743 4744 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4745 free(xvn, M_TEMP); 4746 return (error); 4747 } 4748 4749 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4750 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4751 ""); 4752 #endif 4753 4754 static void 4755 unmount_or_warn(struct mount *mp) 4756 { 4757 int error; 4758 4759 error = dounmount(mp, MNT_FORCE, curthread); 4760 if (error != 0) { 4761 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4762 if (error == EBUSY) 4763 printf("BUSY)\n"); 4764 else 4765 printf("%d)\n", error); 4766 } 4767 } 4768 4769 /* 4770 * Unmount all filesystems. The list is traversed in reverse order 4771 * of mounting to avoid dependencies. 4772 */ 4773 void 4774 vfs_unmountall(void) 4775 { 4776 struct mount *mp, *tmp; 4777 4778 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4779 4780 /* 4781 * Since this only runs when rebooting, it is not interlocked. 4782 */ 4783 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4784 vfs_ref(mp); 4785 4786 /* 4787 * Forcibly unmounting "/dev" before "/" would prevent clean 4788 * unmount of the latter. 4789 */ 4790 if (mp == rootdevmp) 4791 continue; 4792 4793 unmount_or_warn(mp); 4794 } 4795 4796 if (rootdevmp != NULL) 4797 unmount_or_warn(rootdevmp); 4798 } 4799 4800 static void 4801 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4802 { 4803 4804 ASSERT_VI_LOCKED(vp, __func__); 4805 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4806 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4807 vdropl(vp); 4808 return; 4809 } 4810 if (vn_lock(vp, lkflags) == 0) { 4811 VI_LOCK(vp); 4812 vinactive(vp); 4813 VOP_UNLOCK(vp); 4814 vdropl(vp); 4815 return; 4816 } 4817 vdefer_inactive_unlocked(vp); 4818 } 4819 4820 static int 4821 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4822 { 4823 4824 return (vp->v_iflag & VI_DEFINACT); 4825 } 4826 4827 static void __noinline 4828 vfs_periodic_inactive(struct mount *mp, int flags) 4829 { 4830 struct vnode *vp, *mvp; 4831 int lkflags; 4832 4833 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4834 if (flags != MNT_WAIT) 4835 lkflags |= LK_NOWAIT; 4836 4837 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4838 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4839 VI_UNLOCK(vp); 4840 continue; 4841 } 4842 vp->v_iflag &= ~VI_DEFINACT; 4843 vfs_deferred_inactive(vp, lkflags); 4844 } 4845 } 4846 4847 static inline bool 4848 vfs_want_msync(struct vnode *vp) 4849 { 4850 struct vm_object *obj; 4851 4852 /* 4853 * This test may be performed without any locks held. 4854 * We rely on vm_object's type stability. 4855 */ 4856 if (vp->v_vflag & VV_NOSYNC) 4857 return (false); 4858 obj = vp->v_object; 4859 return (obj != NULL && vm_object_mightbedirty(obj)); 4860 } 4861 4862 static int 4863 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4864 { 4865 4866 if (vp->v_vflag & VV_NOSYNC) 4867 return (false); 4868 if (vp->v_iflag & VI_DEFINACT) 4869 return (true); 4870 return (vfs_want_msync(vp)); 4871 } 4872 4873 static void __noinline 4874 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4875 { 4876 struct vnode *vp, *mvp; 4877 struct vm_object *obj; 4878 int lkflags, objflags; 4879 bool seen_defer; 4880 4881 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4882 if (flags != MNT_WAIT) { 4883 lkflags |= LK_NOWAIT; 4884 objflags = OBJPC_NOSYNC; 4885 } else { 4886 objflags = OBJPC_SYNC; 4887 } 4888 4889 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4890 seen_defer = false; 4891 if (vp->v_iflag & VI_DEFINACT) { 4892 vp->v_iflag &= ~VI_DEFINACT; 4893 seen_defer = true; 4894 } 4895 if (!vfs_want_msync(vp)) { 4896 if (seen_defer) 4897 vfs_deferred_inactive(vp, lkflags); 4898 else 4899 VI_UNLOCK(vp); 4900 continue; 4901 } 4902 if (vget(vp, lkflags) == 0) { 4903 obj = vp->v_object; 4904 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4905 VM_OBJECT_WLOCK(obj); 4906 vm_object_page_clean(obj, 0, 0, objflags); 4907 VM_OBJECT_WUNLOCK(obj); 4908 } 4909 vput(vp); 4910 if (seen_defer) 4911 vdrop(vp); 4912 } else { 4913 if (seen_defer) 4914 vdefer_inactive_unlocked(vp); 4915 } 4916 } 4917 } 4918 4919 void 4920 vfs_periodic(struct mount *mp, int flags) 4921 { 4922 4923 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4924 4925 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4926 vfs_periodic_inactive(mp, flags); 4927 else 4928 vfs_periodic_msync_inactive(mp, flags); 4929 } 4930 4931 static void 4932 destroy_vpollinfo_free(struct vpollinfo *vi) 4933 { 4934 4935 knlist_destroy(&vi->vpi_selinfo.si_note); 4936 mtx_destroy(&vi->vpi_lock); 4937 free(vi, M_VNODEPOLL); 4938 } 4939 4940 static void 4941 destroy_vpollinfo(struct vpollinfo *vi) 4942 { 4943 4944 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4945 seldrain(&vi->vpi_selinfo); 4946 destroy_vpollinfo_free(vi); 4947 } 4948 4949 /* 4950 * Initialize per-vnode helper structure to hold poll-related state. 4951 */ 4952 void 4953 v_addpollinfo(struct vnode *vp) 4954 { 4955 struct vpollinfo *vi; 4956 4957 if (vp->v_pollinfo != NULL) 4958 return; 4959 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4960 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4961 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4962 vfs_knlunlock, vfs_knl_assert_lock); 4963 VI_LOCK(vp); 4964 if (vp->v_pollinfo != NULL) { 4965 VI_UNLOCK(vp); 4966 destroy_vpollinfo_free(vi); 4967 return; 4968 } 4969 vp->v_pollinfo = vi; 4970 VI_UNLOCK(vp); 4971 } 4972 4973 /* 4974 * Record a process's interest in events which might happen to 4975 * a vnode. Because poll uses the historic select-style interface 4976 * internally, this routine serves as both the ``check for any 4977 * pending events'' and the ``record my interest in future events'' 4978 * functions. (These are done together, while the lock is held, 4979 * to avoid race conditions.) 4980 */ 4981 int 4982 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4983 { 4984 4985 v_addpollinfo(vp); 4986 mtx_lock(&vp->v_pollinfo->vpi_lock); 4987 if (vp->v_pollinfo->vpi_revents & events) { 4988 /* 4989 * This leaves events we are not interested 4990 * in available for the other process which 4991 * which presumably had requested them 4992 * (otherwise they would never have been 4993 * recorded). 4994 */ 4995 events &= vp->v_pollinfo->vpi_revents; 4996 vp->v_pollinfo->vpi_revents &= ~events; 4997 4998 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4999 return (events); 5000 } 5001 vp->v_pollinfo->vpi_events |= events; 5002 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5003 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5004 return (0); 5005 } 5006 5007 /* 5008 * Routine to create and manage a filesystem syncer vnode. 5009 */ 5010 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5011 static int sync_fsync(struct vop_fsync_args *); 5012 static int sync_inactive(struct vop_inactive_args *); 5013 static int sync_reclaim(struct vop_reclaim_args *); 5014 5015 static struct vop_vector sync_vnodeops = { 5016 .vop_bypass = VOP_EOPNOTSUPP, 5017 .vop_close = sync_close, /* close */ 5018 .vop_fsync = sync_fsync, /* fsync */ 5019 .vop_inactive = sync_inactive, /* inactive */ 5020 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 5021 .vop_reclaim = sync_reclaim, /* reclaim */ 5022 .vop_lock1 = vop_stdlock, /* lock */ 5023 .vop_unlock = vop_stdunlock, /* unlock */ 5024 .vop_islocked = vop_stdislocked, /* islocked */ 5025 }; 5026 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5027 5028 /* 5029 * Create a new filesystem syncer vnode for the specified mount point. 5030 */ 5031 void 5032 vfs_allocate_syncvnode(struct mount *mp) 5033 { 5034 struct vnode *vp; 5035 struct bufobj *bo; 5036 static long start, incr, next; 5037 int error; 5038 5039 /* Allocate a new vnode */ 5040 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5041 if (error != 0) 5042 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5043 vp->v_type = VNON; 5044 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5045 vp->v_vflag |= VV_FORCEINSMQ; 5046 error = insmntque1(vp, mp); 5047 if (error != 0) 5048 panic("vfs_allocate_syncvnode: insmntque() failed"); 5049 vp->v_vflag &= ~VV_FORCEINSMQ; 5050 VOP_UNLOCK(vp); 5051 /* 5052 * Place the vnode onto the syncer worklist. We attempt to 5053 * scatter them about on the list so that they will go off 5054 * at evenly distributed times even if all the filesystems 5055 * are mounted at once. 5056 */ 5057 next += incr; 5058 if (next == 0 || next > syncer_maxdelay) { 5059 start /= 2; 5060 incr /= 2; 5061 if (start == 0) { 5062 start = syncer_maxdelay / 2; 5063 incr = syncer_maxdelay; 5064 } 5065 next = start; 5066 } 5067 bo = &vp->v_bufobj; 5068 BO_LOCK(bo); 5069 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5070 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5071 mtx_lock(&sync_mtx); 5072 sync_vnode_count++; 5073 if (mp->mnt_syncer == NULL) { 5074 mp->mnt_syncer = vp; 5075 vp = NULL; 5076 } 5077 mtx_unlock(&sync_mtx); 5078 BO_UNLOCK(bo); 5079 if (vp != NULL) { 5080 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5081 vgone(vp); 5082 vput(vp); 5083 } 5084 } 5085 5086 void 5087 vfs_deallocate_syncvnode(struct mount *mp) 5088 { 5089 struct vnode *vp; 5090 5091 mtx_lock(&sync_mtx); 5092 vp = mp->mnt_syncer; 5093 if (vp != NULL) 5094 mp->mnt_syncer = NULL; 5095 mtx_unlock(&sync_mtx); 5096 if (vp != NULL) 5097 vrele(vp); 5098 } 5099 5100 /* 5101 * Do a lazy sync of the filesystem. 5102 */ 5103 static int 5104 sync_fsync(struct vop_fsync_args *ap) 5105 { 5106 struct vnode *syncvp = ap->a_vp; 5107 struct mount *mp = syncvp->v_mount; 5108 int error, save; 5109 struct bufobj *bo; 5110 5111 /* 5112 * We only need to do something if this is a lazy evaluation. 5113 */ 5114 if (ap->a_waitfor != MNT_LAZY) 5115 return (0); 5116 5117 /* 5118 * Move ourselves to the back of the sync list. 5119 */ 5120 bo = &syncvp->v_bufobj; 5121 BO_LOCK(bo); 5122 vn_syncer_add_to_worklist(bo, syncdelay); 5123 BO_UNLOCK(bo); 5124 5125 /* 5126 * Walk the list of vnodes pushing all that are dirty and 5127 * not already on the sync list. 5128 */ 5129 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5130 return (0); 5131 VOP_UNLOCK(syncvp); 5132 save = curthread_pflags_set(TDP_SYNCIO); 5133 /* 5134 * The filesystem at hand may be idle with free vnodes stored in the 5135 * batch. Return them instead of letting them stay there indefinitely. 5136 */ 5137 vfs_periodic(mp, MNT_NOWAIT); 5138 error = VFS_SYNC(mp, MNT_LAZY); 5139 curthread_pflags_restore(save); 5140 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5141 vfs_unbusy(mp); 5142 return (error); 5143 } 5144 5145 /* 5146 * The syncer vnode is no referenced. 5147 */ 5148 static int 5149 sync_inactive(struct vop_inactive_args *ap) 5150 { 5151 5152 vgone(ap->a_vp); 5153 return (0); 5154 } 5155 5156 /* 5157 * The syncer vnode is no longer needed and is being decommissioned. 5158 * 5159 * Modifications to the worklist must be protected by sync_mtx. 5160 */ 5161 static int 5162 sync_reclaim(struct vop_reclaim_args *ap) 5163 { 5164 struct vnode *vp = ap->a_vp; 5165 struct bufobj *bo; 5166 5167 bo = &vp->v_bufobj; 5168 BO_LOCK(bo); 5169 mtx_lock(&sync_mtx); 5170 if (vp->v_mount->mnt_syncer == vp) 5171 vp->v_mount->mnt_syncer = NULL; 5172 if (bo->bo_flag & BO_ONWORKLST) { 5173 LIST_REMOVE(bo, bo_synclist); 5174 syncer_worklist_len--; 5175 sync_vnode_count--; 5176 bo->bo_flag &= ~BO_ONWORKLST; 5177 } 5178 mtx_unlock(&sync_mtx); 5179 BO_UNLOCK(bo); 5180 5181 return (0); 5182 } 5183 5184 int 5185 vn_need_pageq_flush(struct vnode *vp) 5186 { 5187 struct vm_object *obj; 5188 5189 obj = vp->v_object; 5190 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5191 vm_object_mightbedirty(obj)); 5192 } 5193 5194 /* 5195 * Check if vnode represents a disk device 5196 */ 5197 bool 5198 vn_isdisk_error(struct vnode *vp, int *errp) 5199 { 5200 int error; 5201 5202 if (vp->v_type != VCHR) { 5203 error = ENOTBLK; 5204 goto out; 5205 } 5206 error = 0; 5207 dev_lock(); 5208 if (vp->v_rdev == NULL) 5209 error = ENXIO; 5210 else if (vp->v_rdev->si_devsw == NULL) 5211 error = ENXIO; 5212 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5213 error = ENOTBLK; 5214 dev_unlock(); 5215 out: 5216 *errp = error; 5217 return (error == 0); 5218 } 5219 5220 bool 5221 vn_isdisk(struct vnode *vp) 5222 { 5223 int error; 5224 5225 return (vn_isdisk_error(vp, &error)); 5226 } 5227 5228 /* 5229 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5230 * the comment above cache_fplookup for details. 5231 */ 5232 int 5233 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5234 { 5235 int error; 5236 5237 VFS_SMR_ASSERT_ENTERED(); 5238 5239 /* Check the owner. */ 5240 if (cred->cr_uid == file_uid) { 5241 if (file_mode & S_IXUSR) 5242 return (0); 5243 goto out_error; 5244 } 5245 5246 /* Otherwise, check the groups (first match) */ 5247 if (groupmember(file_gid, cred)) { 5248 if (file_mode & S_IXGRP) 5249 return (0); 5250 goto out_error; 5251 } 5252 5253 /* Otherwise, check everyone else. */ 5254 if (file_mode & S_IXOTH) 5255 return (0); 5256 out_error: 5257 /* 5258 * Permission check failed, but it is possible denial will get overwritten 5259 * (e.g., when root is traversing through a 700 directory owned by someone 5260 * else). 5261 * 5262 * vaccess() calls priv_check_cred which in turn can descent into MAC 5263 * modules overriding this result. It's quite unclear what semantics 5264 * are allowed for them to operate, thus for safety we don't call them 5265 * from within the SMR section. This also means if any such modules 5266 * are present, we have to let the regular lookup decide. 5267 */ 5268 error = priv_check_cred_vfs_lookup_nomac(cred); 5269 switch (error) { 5270 case 0: 5271 return (0); 5272 case EAGAIN: 5273 /* 5274 * MAC modules present. 5275 */ 5276 return (EAGAIN); 5277 case EPERM: 5278 return (EACCES); 5279 default: 5280 return (error); 5281 } 5282 } 5283 5284 /* 5285 * Common filesystem object access control check routine. Accepts a 5286 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5287 * Returns 0 on success, or an errno on failure. 5288 */ 5289 int 5290 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5291 accmode_t accmode, struct ucred *cred) 5292 { 5293 accmode_t dac_granted; 5294 accmode_t priv_granted; 5295 5296 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5297 ("invalid bit in accmode")); 5298 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5299 ("VAPPEND without VWRITE")); 5300 5301 /* 5302 * Look for a normal, non-privileged way to access the file/directory 5303 * as requested. If it exists, go with that. 5304 */ 5305 5306 dac_granted = 0; 5307 5308 /* Check the owner. */ 5309 if (cred->cr_uid == file_uid) { 5310 dac_granted |= VADMIN; 5311 if (file_mode & S_IXUSR) 5312 dac_granted |= VEXEC; 5313 if (file_mode & S_IRUSR) 5314 dac_granted |= VREAD; 5315 if (file_mode & S_IWUSR) 5316 dac_granted |= (VWRITE | VAPPEND); 5317 5318 if ((accmode & dac_granted) == accmode) 5319 return (0); 5320 5321 goto privcheck; 5322 } 5323 5324 /* Otherwise, check the groups (first match) */ 5325 if (groupmember(file_gid, cred)) { 5326 if (file_mode & S_IXGRP) 5327 dac_granted |= VEXEC; 5328 if (file_mode & S_IRGRP) 5329 dac_granted |= VREAD; 5330 if (file_mode & S_IWGRP) 5331 dac_granted |= (VWRITE | VAPPEND); 5332 5333 if ((accmode & dac_granted) == accmode) 5334 return (0); 5335 5336 goto privcheck; 5337 } 5338 5339 /* Otherwise, check everyone else. */ 5340 if (file_mode & S_IXOTH) 5341 dac_granted |= VEXEC; 5342 if (file_mode & S_IROTH) 5343 dac_granted |= VREAD; 5344 if (file_mode & S_IWOTH) 5345 dac_granted |= (VWRITE | VAPPEND); 5346 if ((accmode & dac_granted) == accmode) 5347 return (0); 5348 5349 privcheck: 5350 /* 5351 * Build a privilege mask to determine if the set of privileges 5352 * satisfies the requirements when combined with the granted mask 5353 * from above. For each privilege, if the privilege is required, 5354 * bitwise or the request type onto the priv_granted mask. 5355 */ 5356 priv_granted = 0; 5357 5358 if (type == VDIR) { 5359 /* 5360 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5361 * requests, instead of PRIV_VFS_EXEC. 5362 */ 5363 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5364 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5365 priv_granted |= VEXEC; 5366 } else { 5367 /* 5368 * Ensure that at least one execute bit is on. Otherwise, 5369 * a privileged user will always succeed, and we don't want 5370 * this to happen unless the file really is executable. 5371 */ 5372 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5373 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5374 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5375 priv_granted |= VEXEC; 5376 } 5377 5378 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5379 !priv_check_cred(cred, PRIV_VFS_READ)) 5380 priv_granted |= VREAD; 5381 5382 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5383 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5384 priv_granted |= (VWRITE | VAPPEND); 5385 5386 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5387 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5388 priv_granted |= VADMIN; 5389 5390 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5391 return (0); 5392 } 5393 5394 return ((accmode & VADMIN) ? EPERM : EACCES); 5395 } 5396 5397 /* 5398 * Credential check based on process requesting service, and per-attribute 5399 * permissions. 5400 */ 5401 int 5402 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5403 struct thread *td, accmode_t accmode) 5404 { 5405 5406 /* 5407 * Kernel-invoked always succeeds. 5408 */ 5409 if (cred == NOCRED) 5410 return (0); 5411 5412 /* 5413 * Do not allow privileged processes in jail to directly manipulate 5414 * system attributes. 5415 */ 5416 switch (attrnamespace) { 5417 case EXTATTR_NAMESPACE_SYSTEM: 5418 /* Potentially should be: return (EPERM); */ 5419 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5420 case EXTATTR_NAMESPACE_USER: 5421 return (VOP_ACCESS(vp, accmode, cred, td)); 5422 default: 5423 return (EPERM); 5424 } 5425 } 5426 5427 #ifdef DEBUG_VFS_LOCKS 5428 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5429 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5430 "Drop into debugger on lock violation"); 5431 5432 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5433 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5434 0, "Check for interlock across VOPs"); 5435 5436 int vfs_badlock_print = 1; /* Print lock violations. */ 5437 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5438 0, "Print lock violations"); 5439 5440 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5441 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5442 0, "Print vnode details on lock violations"); 5443 5444 #ifdef KDB 5445 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5446 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5447 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5448 #endif 5449 5450 static void 5451 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5452 { 5453 5454 #ifdef KDB 5455 if (vfs_badlock_backtrace) 5456 kdb_backtrace(); 5457 #endif 5458 if (vfs_badlock_vnode) 5459 vn_printf(vp, "vnode "); 5460 if (vfs_badlock_print) 5461 printf("%s: %p %s\n", str, (void *)vp, msg); 5462 if (vfs_badlock_ddb) 5463 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5464 } 5465 5466 void 5467 assert_vi_locked(struct vnode *vp, const char *str) 5468 { 5469 5470 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5471 vfs_badlock("interlock is not locked but should be", str, vp); 5472 } 5473 5474 void 5475 assert_vi_unlocked(struct vnode *vp, const char *str) 5476 { 5477 5478 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5479 vfs_badlock("interlock is locked but should not be", str, vp); 5480 } 5481 5482 void 5483 assert_vop_locked(struct vnode *vp, const char *str) 5484 { 5485 int locked; 5486 5487 if (KERNEL_PANICKED() || vp == NULL) 5488 return; 5489 5490 locked = VOP_ISLOCKED(vp); 5491 if (locked == 0 || locked == LK_EXCLOTHER) 5492 vfs_badlock("is not locked but should be", str, vp); 5493 } 5494 5495 void 5496 assert_vop_unlocked(struct vnode *vp, const char *str) 5497 { 5498 if (KERNEL_PANICKED() || vp == NULL) 5499 return; 5500 5501 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5502 vfs_badlock("is locked but should not be", str, vp); 5503 } 5504 5505 void 5506 assert_vop_elocked(struct vnode *vp, const char *str) 5507 { 5508 if (KERNEL_PANICKED() || vp == NULL) 5509 return; 5510 5511 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5512 vfs_badlock("is not exclusive locked but should be", str, vp); 5513 } 5514 #endif /* DEBUG_VFS_LOCKS */ 5515 5516 void 5517 vop_rename_fail(struct vop_rename_args *ap) 5518 { 5519 5520 if (ap->a_tvp != NULL) 5521 vput(ap->a_tvp); 5522 if (ap->a_tdvp == ap->a_tvp) 5523 vrele(ap->a_tdvp); 5524 else 5525 vput(ap->a_tdvp); 5526 vrele(ap->a_fdvp); 5527 vrele(ap->a_fvp); 5528 } 5529 5530 void 5531 vop_rename_pre(void *ap) 5532 { 5533 struct vop_rename_args *a = ap; 5534 5535 #ifdef DEBUG_VFS_LOCKS 5536 if (a->a_tvp) 5537 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5538 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5539 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5540 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5541 5542 /* Check the source (from). */ 5543 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5544 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5545 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5546 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5547 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5548 5549 /* Check the target. */ 5550 if (a->a_tvp) 5551 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5552 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5553 #endif 5554 /* 5555 * It may be tempting to add vn_seqc_write_begin/end calls here and 5556 * in vop_rename_post but that's not going to work out since some 5557 * filesystems relookup vnodes mid-rename. This is probably a bug. 5558 * 5559 * For now filesystems are expected to do the relevant calls after they 5560 * decide what vnodes to operate on. 5561 */ 5562 if (a->a_tdvp != a->a_fdvp) 5563 vhold(a->a_fdvp); 5564 if (a->a_tvp != a->a_fvp) 5565 vhold(a->a_fvp); 5566 vhold(a->a_tdvp); 5567 if (a->a_tvp) 5568 vhold(a->a_tvp); 5569 } 5570 5571 #ifdef DEBUG_VFS_LOCKS 5572 void 5573 vop_fplookup_vexec_debugpre(void *ap __unused) 5574 { 5575 5576 VFS_SMR_ASSERT_ENTERED(); 5577 } 5578 5579 void 5580 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5581 { 5582 5583 VFS_SMR_ASSERT_ENTERED(); 5584 } 5585 5586 void 5587 vop_fplookup_symlink_debugpre(void *ap __unused) 5588 { 5589 5590 VFS_SMR_ASSERT_ENTERED(); 5591 } 5592 5593 void 5594 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5595 { 5596 5597 VFS_SMR_ASSERT_ENTERED(); 5598 } 5599 5600 static void 5601 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5602 { 5603 if (vp->v_type == VCHR) 5604 ; 5605 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5606 ASSERT_VOP_LOCKED(vp, name); 5607 else 5608 ASSERT_VOP_ELOCKED(vp, name); 5609 } 5610 5611 void 5612 vop_fsync_debugpre(void *a) 5613 { 5614 struct vop_fsync_args *ap; 5615 5616 ap = a; 5617 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5618 } 5619 5620 void 5621 vop_fsync_debugpost(void *a, int rc __unused) 5622 { 5623 struct vop_fsync_args *ap; 5624 5625 ap = a; 5626 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5627 } 5628 5629 void 5630 vop_fdatasync_debugpre(void *a) 5631 { 5632 struct vop_fdatasync_args *ap; 5633 5634 ap = a; 5635 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5636 } 5637 5638 void 5639 vop_fdatasync_debugpost(void *a, int rc __unused) 5640 { 5641 struct vop_fdatasync_args *ap; 5642 5643 ap = a; 5644 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5645 } 5646 5647 void 5648 vop_strategy_debugpre(void *ap) 5649 { 5650 struct vop_strategy_args *a; 5651 struct buf *bp; 5652 5653 a = ap; 5654 bp = a->a_bp; 5655 5656 /* 5657 * Cluster ops lock their component buffers but not the IO container. 5658 */ 5659 if ((bp->b_flags & B_CLUSTER) != 0) 5660 return; 5661 5662 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5663 if (vfs_badlock_print) 5664 printf( 5665 "VOP_STRATEGY: bp is not locked but should be\n"); 5666 if (vfs_badlock_ddb) 5667 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5668 } 5669 } 5670 5671 void 5672 vop_lock_debugpre(void *ap) 5673 { 5674 struct vop_lock1_args *a = ap; 5675 5676 if ((a->a_flags & LK_INTERLOCK) == 0) 5677 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5678 else 5679 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5680 } 5681 5682 void 5683 vop_lock_debugpost(void *ap, int rc) 5684 { 5685 struct vop_lock1_args *a = ap; 5686 5687 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5688 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5689 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5690 } 5691 5692 void 5693 vop_unlock_debugpre(void *ap) 5694 { 5695 struct vop_unlock_args *a = ap; 5696 5697 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5698 } 5699 5700 void 5701 vop_need_inactive_debugpre(void *ap) 5702 { 5703 struct vop_need_inactive_args *a = ap; 5704 5705 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5706 } 5707 5708 void 5709 vop_need_inactive_debugpost(void *ap, int rc) 5710 { 5711 struct vop_need_inactive_args *a = ap; 5712 5713 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5714 } 5715 #endif 5716 5717 void 5718 vop_create_pre(void *ap) 5719 { 5720 struct vop_create_args *a; 5721 struct vnode *dvp; 5722 5723 a = ap; 5724 dvp = a->a_dvp; 5725 vn_seqc_write_begin(dvp); 5726 } 5727 5728 void 5729 vop_create_post(void *ap, int rc) 5730 { 5731 struct vop_create_args *a; 5732 struct vnode *dvp; 5733 5734 a = ap; 5735 dvp = a->a_dvp; 5736 vn_seqc_write_end(dvp); 5737 if (!rc) 5738 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5739 } 5740 5741 void 5742 vop_whiteout_pre(void *ap) 5743 { 5744 struct vop_whiteout_args *a; 5745 struct vnode *dvp; 5746 5747 a = ap; 5748 dvp = a->a_dvp; 5749 vn_seqc_write_begin(dvp); 5750 } 5751 5752 void 5753 vop_whiteout_post(void *ap, int rc) 5754 { 5755 struct vop_whiteout_args *a; 5756 struct vnode *dvp; 5757 5758 a = ap; 5759 dvp = a->a_dvp; 5760 vn_seqc_write_end(dvp); 5761 } 5762 5763 void 5764 vop_deleteextattr_pre(void *ap) 5765 { 5766 struct vop_deleteextattr_args *a; 5767 struct vnode *vp; 5768 5769 a = ap; 5770 vp = a->a_vp; 5771 vn_seqc_write_begin(vp); 5772 } 5773 5774 void 5775 vop_deleteextattr_post(void *ap, int rc) 5776 { 5777 struct vop_deleteextattr_args *a; 5778 struct vnode *vp; 5779 5780 a = ap; 5781 vp = a->a_vp; 5782 vn_seqc_write_end(vp); 5783 if (!rc) 5784 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5785 } 5786 5787 void 5788 vop_link_pre(void *ap) 5789 { 5790 struct vop_link_args *a; 5791 struct vnode *vp, *tdvp; 5792 5793 a = ap; 5794 vp = a->a_vp; 5795 tdvp = a->a_tdvp; 5796 vn_seqc_write_begin(vp); 5797 vn_seqc_write_begin(tdvp); 5798 } 5799 5800 void 5801 vop_link_post(void *ap, int rc) 5802 { 5803 struct vop_link_args *a; 5804 struct vnode *vp, *tdvp; 5805 5806 a = ap; 5807 vp = a->a_vp; 5808 tdvp = a->a_tdvp; 5809 vn_seqc_write_end(vp); 5810 vn_seqc_write_end(tdvp); 5811 if (!rc) { 5812 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5813 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5814 } 5815 } 5816 5817 void 5818 vop_mkdir_pre(void *ap) 5819 { 5820 struct vop_mkdir_args *a; 5821 struct vnode *dvp; 5822 5823 a = ap; 5824 dvp = a->a_dvp; 5825 vn_seqc_write_begin(dvp); 5826 } 5827 5828 void 5829 vop_mkdir_post(void *ap, int rc) 5830 { 5831 struct vop_mkdir_args *a; 5832 struct vnode *dvp; 5833 5834 a = ap; 5835 dvp = a->a_dvp; 5836 vn_seqc_write_end(dvp); 5837 if (!rc) 5838 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5839 } 5840 5841 #ifdef DEBUG_VFS_LOCKS 5842 void 5843 vop_mkdir_debugpost(void *ap, int rc) 5844 { 5845 struct vop_mkdir_args *a; 5846 5847 a = ap; 5848 if (!rc) 5849 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5850 } 5851 #endif 5852 5853 void 5854 vop_mknod_pre(void *ap) 5855 { 5856 struct vop_mknod_args *a; 5857 struct vnode *dvp; 5858 5859 a = ap; 5860 dvp = a->a_dvp; 5861 vn_seqc_write_begin(dvp); 5862 } 5863 5864 void 5865 vop_mknod_post(void *ap, int rc) 5866 { 5867 struct vop_mknod_args *a; 5868 struct vnode *dvp; 5869 5870 a = ap; 5871 dvp = a->a_dvp; 5872 vn_seqc_write_end(dvp); 5873 if (!rc) 5874 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5875 } 5876 5877 void 5878 vop_reclaim_post(void *ap, int rc) 5879 { 5880 struct vop_reclaim_args *a; 5881 struct vnode *vp; 5882 5883 a = ap; 5884 vp = a->a_vp; 5885 ASSERT_VOP_IN_SEQC(vp); 5886 if (!rc) 5887 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5888 } 5889 5890 void 5891 vop_remove_pre(void *ap) 5892 { 5893 struct vop_remove_args *a; 5894 struct vnode *dvp, *vp; 5895 5896 a = ap; 5897 dvp = a->a_dvp; 5898 vp = a->a_vp; 5899 vn_seqc_write_begin(dvp); 5900 vn_seqc_write_begin(vp); 5901 } 5902 5903 void 5904 vop_remove_post(void *ap, int rc) 5905 { 5906 struct vop_remove_args *a; 5907 struct vnode *dvp, *vp; 5908 5909 a = ap; 5910 dvp = a->a_dvp; 5911 vp = a->a_vp; 5912 vn_seqc_write_end(dvp); 5913 vn_seqc_write_end(vp); 5914 if (!rc) { 5915 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5916 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5917 } 5918 } 5919 5920 void 5921 vop_rename_post(void *ap, int rc) 5922 { 5923 struct vop_rename_args *a = ap; 5924 long hint; 5925 5926 if (!rc) { 5927 hint = NOTE_WRITE; 5928 if (a->a_fdvp == a->a_tdvp) { 5929 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5930 hint |= NOTE_LINK; 5931 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5932 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5933 } else { 5934 hint |= NOTE_EXTEND; 5935 if (a->a_fvp->v_type == VDIR) 5936 hint |= NOTE_LINK; 5937 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5938 5939 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5940 a->a_tvp->v_type == VDIR) 5941 hint &= ~NOTE_LINK; 5942 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5943 } 5944 5945 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5946 if (a->a_tvp) 5947 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5948 } 5949 if (a->a_tdvp != a->a_fdvp) 5950 vdrop(a->a_fdvp); 5951 if (a->a_tvp != a->a_fvp) 5952 vdrop(a->a_fvp); 5953 vdrop(a->a_tdvp); 5954 if (a->a_tvp) 5955 vdrop(a->a_tvp); 5956 } 5957 5958 void 5959 vop_rmdir_pre(void *ap) 5960 { 5961 struct vop_rmdir_args *a; 5962 struct vnode *dvp, *vp; 5963 5964 a = ap; 5965 dvp = a->a_dvp; 5966 vp = a->a_vp; 5967 vn_seqc_write_begin(dvp); 5968 vn_seqc_write_begin(vp); 5969 } 5970 5971 void 5972 vop_rmdir_post(void *ap, int rc) 5973 { 5974 struct vop_rmdir_args *a; 5975 struct vnode *dvp, *vp; 5976 5977 a = ap; 5978 dvp = a->a_dvp; 5979 vp = a->a_vp; 5980 vn_seqc_write_end(dvp); 5981 vn_seqc_write_end(vp); 5982 if (!rc) { 5983 vp->v_vflag |= VV_UNLINKED; 5984 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5985 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5986 } 5987 } 5988 5989 void 5990 vop_setattr_pre(void *ap) 5991 { 5992 struct vop_setattr_args *a; 5993 struct vnode *vp; 5994 5995 a = ap; 5996 vp = a->a_vp; 5997 vn_seqc_write_begin(vp); 5998 } 5999 6000 void 6001 vop_setattr_post(void *ap, int rc) 6002 { 6003 struct vop_setattr_args *a; 6004 struct vnode *vp; 6005 6006 a = ap; 6007 vp = a->a_vp; 6008 vn_seqc_write_end(vp); 6009 if (!rc) 6010 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6011 } 6012 6013 void 6014 vop_setacl_pre(void *ap) 6015 { 6016 struct vop_setacl_args *a; 6017 struct vnode *vp; 6018 6019 a = ap; 6020 vp = a->a_vp; 6021 vn_seqc_write_begin(vp); 6022 } 6023 6024 void 6025 vop_setacl_post(void *ap, int rc __unused) 6026 { 6027 struct vop_setacl_args *a; 6028 struct vnode *vp; 6029 6030 a = ap; 6031 vp = a->a_vp; 6032 vn_seqc_write_end(vp); 6033 } 6034 6035 void 6036 vop_setextattr_pre(void *ap) 6037 { 6038 struct vop_setextattr_args *a; 6039 struct vnode *vp; 6040 6041 a = ap; 6042 vp = a->a_vp; 6043 vn_seqc_write_begin(vp); 6044 } 6045 6046 void 6047 vop_setextattr_post(void *ap, int rc) 6048 { 6049 struct vop_setextattr_args *a; 6050 struct vnode *vp; 6051 6052 a = ap; 6053 vp = a->a_vp; 6054 vn_seqc_write_end(vp); 6055 if (!rc) 6056 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6057 } 6058 6059 void 6060 vop_symlink_pre(void *ap) 6061 { 6062 struct vop_symlink_args *a; 6063 struct vnode *dvp; 6064 6065 a = ap; 6066 dvp = a->a_dvp; 6067 vn_seqc_write_begin(dvp); 6068 } 6069 6070 void 6071 vop_symlink_post(void *ap, int rc) 6072 { 6073 struct vop_symlink_args *a; 6074 struct vnode *dvp; 6075 6076 a = ap; 6077 dvp = a->a_dvp; 6078 vn_seqc_write_end(dvp); 6079 if (!rc) 6080 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6081 } 6082 6083 void 6084 vop_open_post(void *ap, int rc) 6085 { 6086 struct vop_open_args *a = ap; 6087 6088 if (!rc) 6089 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6090 } 6091 6092 void 6093 vop_close_post(void *ap, int rc) 6094 { 6095 struct vop_close_args *a = ap; 6096 6097 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6098 !VN_IS_DOOMED(a->a_vp))) { 6099 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6100 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6101 } 6102 } 6103 6104 void 6105 vop_read_post(void *ap, int rc) 6106 { 6107 struct vop_read_args *a = ap; 6108 6109 if (!rc) 6110 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6111 } 6112 6113 void 6114 vop_read_pgcache_post(void *ap, int rc) 6115 { 6116 struct vop_read_pgcache_args *a = ap; 6117 6118 if (!rc) 6119 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6120 } 6121 6122 void 6123 vop_readdir_post(void *ap, int rc) 6124 { 6125 struct vop_readdir_args *a = ap; 6126 6127 if (!rc) 6128 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6129 } 6130 6131 static struct knlist fs_knlist; 6132 6133 static void 6134 vfs_event_init(void *arg) 6135 { 6136 knlist_init_mtx(&fs_knlist, NULL); 6137 } 6138 /* XXX - correct order? */ 6139 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6140 6141 void 6142 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6143 { 6144 6145 KNOTE_UNLOCKED(&fs_knlist, event); 6146 } 6147 6148 static int filt_fsattach(struct knote *kn); 6149 static void filt_fsdetach(struct knote *kn); 6150 static int filt_fsevent(struct knote *kn, long hint); 6151 6152 struct filterops fs_filtops = { 6153 .f_isfd = 0, 6154 .f_attach = filt_fsattach, 6155 .f_detach = filt_fsdetach, 6156 .f_event = filt_fsevent 6157 }; 6158 6159 static int 6160 filt_fsattach(struct knote *kn) 6161 { 6162 6163 kn->kn_flags |= EV_CLEAR; 6164 knlist_add(&fs_knlist, kn, 0); 6165 return (0); 6166 } 6167 6168 static void 6169 filt_fsdetach(struct knote *kn) 6170 { 6171 6172 knlist_remove(&fs_knlist, kn, 0); 6173 } 6174 6175 static int 6176 filt_fsevent(struct knote *kn, long hint) 6177 { 6178 6179 kn->kn_fflags |= kn->kn_sfflags & hint; 6180 6181 return (kn->kn_fflags != 0); 6182 } 6183 6184 static int 6185 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6186 { 6187 struct vfsidctl vc; 6188 int error; 6189 struct mount *mp; 6190 6191 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6192 if (error) 6193 return (error); 6194 if (vc.vc_vers != VFS_CTL_VERS1) 6195 return (EINVAL); 6196 mp = vfs_getvfs(&vc.vc_fsid); 6197 if (mp == NULL) 6198 return (ENOENT); 6199 /* ensure that a specific sysctl goes to the right filesystem. */ 6200 if (strcmp(vc.vc_fstypename, "*") != 0 && 6201 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6202 vfs_rel(mp); 6203 return (EINVAL); 6204 } 6205 VCTLTOREQ(&vc, req); 6206 error = VFS_SYSCTL(mp, vc.vc_op, req); 6207 vfs_rel(mp); 6208 return (error); 6209 } 6210 6211 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6212 NULL, 0, sysctl_vfs_ctl, "", 6213 "Sysctl by fsid"); 6214 6215 /* 6216 * Function to initialize a va_filerev field sensibly. 6217 * XXX: Wouldn't a random number make a lot more sense ?? 6218 */ 6219 u_quad_t 6220 init_va_filerev(void) 6221 { 6222 struct bintime bt; 6223 6224 getbinuptime(&bt); 6225 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6226 } 6227 6228 static int filt_vfsread(struct knote *kn, long hint); 6229 static int filt_vfswrite(struct knote *kn, long hint); 6230 static int filt_vfsvnode(struct knote *kn, long hint); 6231 static void filt_vfsdetach(struct knote *kn); 6232 static struct filterops vfsread_filtops = { 6233 .f_isfd = 1, 6234 .f_detach = filt_vfsdetach, 6235 .f_event = filt_vfsread 6236 }; 6237 static struct filterops vfswrite_filtops = { 6238 .f_isfd = 1, 6239 .f_detach = filt_vfsdetach, 6240 .f_event = filt_vfswrite 6241 }; 6242 static struct filterops vfsvnode_filtops = { 6243 .f_isfd = 1, 6244 .f_detach = filt_vfsdetach, 6245 .f_event = filt_vfsvnode 6246 }; 6247 6248 static void 6249 vfs_knllock(void *arg) 6250 { 6251 struct vnode *vp = arg; 6252 6253 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6254 } 6255 6256 static void 6257 vfs_knlunlock(void *arg) 6258 { 6259 struct vnode *vp = arg; 6260 6261 VOP_UNLOCK(vp); 6262 } 6263 6264 static void 6265 vfs_knl_assert_lock(void *arg, int what) 6266 { 6267 #ifdef DEBUG_VFS_LOCKS 6268 struct vnode *vp = arg; 6269 6270 if (what == LA_LOCKED) 6271 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6272 else 6273 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6274 #endif 6275 } 6276 6277 int 6278 vfs_kqfilter(struct vop_kqfilter_args *ap) 6279 { 6280 struct vnode *vp = ap->a_vp; 6281 struct knote *kn = ap->a_kn; 6282 struct knlist *knl; 6283 6284 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6285 kn->kn_filter != EVFILT_WRITE), 6286 ("READ/WRITE filter on a FIFO leaked through")); 6287 switch (kn->kn_filter) { 6288 case EVFILT_READ: 6289 kn->kn_fop = &vfsread_filtops; 6290 break; 6291 case EVFILT_WRITE: 6292 kn->kn_fop = &vfswrite_filtops; 6293 break; 6294 case EVFILT_VNODE: 6295 kn->kn_fop = &vfsvnode_filtops; 6296 break; 6297 default: 6298 return (EINVAL); 6299 } 6300 6301 kn->kn_hook = (caddr_t)vp; 6302 6303 v_addpollinfo(vp); 6304 if (vp->v_pollinfo == NULL) 6305 return (ENOMEM); 6306 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6307 vhold(vp); 6308 knlist_add(knl, kn, 0); 6309 6310 return (0); 6311 } 6312 6313 /* 6314 * Detach knote from vnode 6315 */ 6316 static void 6317 filt_vfsdetach(struct knote *kn) 6318 { 6319 struct vnode *vp = (struct vnode *)kn->kn_hook; 6320 6321 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6322 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6323 vdrop(vp); 6324 } 6325 6326 /*ARGSUSED*/ 6327 static int 6328 filt_vfsread(struct knote *kn, long hint) 6329 { 6330 struct vnode *vp = (struct vnode *)kn->kn_hook; 6331 struct vattr va; 6332 int res; 6333 6334 /* 6335 * filesystem is gone, so set the EOF flag and schedule 6336 * the knote for deletion. 6337 */ 6338 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6339 VI_LOCK(vp); 6340 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6341 VI_UNLOCK(vp); 6342 return (1); 6343 } 6344 6345 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6346 return (0); 6347 6348 VI_LOCK(vp); 6349 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6350 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6351 VI_UNLOCK(vp); 6352 return (res); 6353 } 6354 6355 /*ARGSUSED*/ 6356 static int 6357 filt_vfswrite(struct knote *kn, long hint) 6358 { 6359 struct vnode *vp = (struct vnode *)kn->kn_hook; 6360 6361 VI_LOCK(vp); 6362 6363 /* 6364 * filesystem is gone, so set the EOF flag and schedule 6365 * the knote for deletion. 6366 */ 6367 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6368 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6369 6370 kn->kn_data = 0; 6371 VI_UNLOCK(vp); 6372 return (1); 6373 } 6374 6375 static int 6376 filt_vfsvnode(struct knote *kn, long hint) 6377 { 6378 struct vnode *vp = (struct vnode *)kn->kn_hook; 6379 int res; 6380 6381 VI_LOCK(vp); 6382 if (kn->kn_sfflags & hint) 6383 kn->kn_fflags |= hint; 6384 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6385 kn->kn_flags |= EV_EOF; 6386 VI_UNLOCK(vp); 6387 return (1); 6388 } 6389 res = (kn->kn_fflags != 0); 6390 VI_UNLOCK(vp); 6391 return (res); 6392 } 6393 6394 /* 6395 * Returns whether the directory is empty or not. 6396 * If it is empty, the return value is 0; otherwise 6397 * the return value is an error value (which may 6398 * be ENOTEMPTY). 6399 */ 6400 int 6401 vfs_emptydir(struct vnode *vp) 6402 { 6403 struct uio uio; 6404 struct iovec iov; 6405 struct dirent *dirent, *dp, *endp; 6406 int error, eof; 6407 6408 error = 0; 6409 eof = 0; 6410 6411 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6412 VNASSERT(vp->v_type == VDIR, vp, ("vp is not a directory")); 6413 6414 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6415 iov.iov_base = dirent; 6416 iov.iov_len = sizeof(struct dirent); 6417 6418 uio.uio_iov = &iov; 6419 uio.uio_iovcnt = 1; 6420 uio.uio_offset = 0; 6421 uio.uio_resid = sizeof(struct dirent); 6422 uio.uio_segflg = UIO_SYSSPACE; 6423 uio.uio_rw = UIO_READ; 6424 uio.uio_td = curthread; 6425 6426 while (eof == 0 && error == 0) { 6427 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6428 NULL, NULL); 6429 if (error != 0) 6430 break; 6431 endp = (void *)((uint8_t *)dirent + 6432 sizeof(struct dirent) - uio.uio_resid); 6433 for (dp = dirent; dp < endp; 6434 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6435 if (dp->d_type == DT_WHT) 6436 continue; 6437 if (dp->d_namlen == 0) 6438 continue; 6439 if (dp->d_type != DT_DIR && 6440 dp->d_type != DT_UNKNOWN) { 6441 error = ENOTEMPTY; 6442 break; 6443 } 6444 if (dp->d_namlen > 2) { 6445 error = ENOTEMPTY; 6446 break; 6447 } 6448 if (dp->d_namlen == 1 && 6449 dp->d_name[0] != '.') { 6450 error = ENOTEMPTY; 6451 break; 6452 } 6453 if (dp->d_namlen == 2 && 6454 dp->d_name[1] != '.') { 6455 error = ENOTEMPTY; 6456 break; 6457 } 6458 uio.uio_resid = sizeof(struct dirent); 6459 } 6460 } 6461 free(dirent, M_TEMP); 6462 return (error); 6463 } 6464 6465 int 6466 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6467 { 6468 int error; 6469 6470 if (dp->d_reclen > ap->a_uio->uio_resid) 6471 return (ENAMETOOLONG); 6472 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6473 if (error) { 6474 if (ap->a_ncookies != NULL) { 6475 if (ap->a_cookies != NULL) 6476 free(ap->a_cookies, M_TEMP); 6477 ap->a_cookies = NULL; 6478 *ap->a_ncookies = 0; 6479 } 6480 return (error); 6481 } 6482 if (ap->a_ncookies == NULL) 6483 return (0); 6484 6485 KASSERT(ap->a_cookies, 6486 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6487 6488 *ap->a_cookies = realloc(*ap->a_cookies, 6489 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6490 (*ap->a_cookies)[*ap->a_ncookies] = off; 6491 *ap->a_ncookies += 1; 6492 return (0); 6493 } 6494 6495 /* 6496 * The purpose of this routine is to remove granularity from accmode_t, 6497 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6498 * VADMIN and VAPPEND. 6499 * 6500 * If it returns 0, the caller is supposed to continue with the usual 6501 * access checks using 'accmode' as modified by this routine. If it 6502 * returns nonzero value, the caller is supposed to return that value 6503 * as errno. 6504 * 6505 * Note that after this routine runs, accmode may be zero. 6506 */ 6507 int 6508 vfs_unixify_accmode(accmode_t *accmode) 6509 { 6510 /* 6511 * There is no way to specify explicit "deny" rule using 6512 * file mode or POSIX.1e ACLs. 6513 */ 6514 if (*accmode & VEXPLICIT_DENY) { 6515 *accmode = 0; 6516 return (0); 6517 } 6518 6519 /* 6520 * None of these can be translated into usual access bits. 6521 * Also, the common case for NFSv4 ACLs is to not contain 6522 * either of these bits. Caller should check for VWRITE 6523 * on the containing directory instead. 6524 */ 6525 if (*accmode & (VDELETE_CHILD | VDELETE)) 6526 return (EPERM); 6527 6528 if (*accmode & VADMIN_PERMS) { 6529 *accmode &= ~VADMIN_PERMS; 6530 *accmode |= VADMIN; 6531 } 6532 6533 /* 6534 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6535 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6536 */ 6537 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6538 6539 return (0); 6540 } 6541 6542 /* 6543 * Clear out a doomed vnode (if any) and replace it with a new one as long 6544 * as the fs is not being unmounted. Return the root vnode to the caller. 6545 */ 6546 static int __noinline 6547 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6548 { 6549 struct vnode *vp; 6550 int error; 6551 6552 restart: 6553 if (mp->mnt_rootvnode != NULL) { 6554 MNT_ILOCK(mp); 6555 vp = mp->mnt_rootvnode; 6556 if (vp != NULL) { 6557 if (!VN_IS_DOOMED(vp)) { 6558 vrefact(vp); 6559 MNT_IUNLOCK(mp); 6560 error = vn_lock(vp, flags); 6561 if (error == 0) { 6562 *vpp = vp; 6563 return (0); 6564 } 6565 vrele(vp); 6566 goto restart; 6567 } 6568 /* 6569 * Clear the old one. 6570 */ 6571 mp->mnt_rootvnode = NULL; 6572 } 6573 MNT_IUNLOCK(mp); 6574 if (vp != NULL) { 6575 vfs_op_barrier_wait(mp); 6576 vrele(vp); 6577 } 6578 } 6579 error = VFS_CACHEDROOT(mp, flags, vpp); 6580 if (error != 0) 6581 return (error); 6582 if (mp->mnt_vfs_ops == 0) { 6583 MNT_ILOCK(mp); 6584 if (mp->mnt_vfs_ops != 0) { 6585 MNT_IUNLOCK(mp); 6586 return (0); 6587 } 6588 if (mp->mnt_rootvnode == NULL) { 6589 vrefact(*vpp); 6590 mp->mnt_rootvnode = *vpp; 6591 } else { 6592 if (mp->mnt_rootvnode != *vpp) { 6593 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6594 panic("%s: mismatch between vnode returned " 6595 " by VFS_CACHEDROOT and the one cached " 6596 " (%p != %p)", 6597 __func__, *vpp, mp->mnt_rootvnode); 6598 } 6599 } 6600 } 6601 MNT_IUNLOCK(mp); 6602 } 6603 return (0); 6604 } 6605 6606 int 6607 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6608 { 6609 struct mount_pcpu *mpcpu; 6610 struct vnode *vp; 6611 int error; 6612 6613 if (!vfs_op_thread_enter(mp, mpcpu)) 6614 return (vfs_cache_root_fallback(mp, flags, vpp)); 6615 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6616 if (vp == NULL || VN_IS_DOOMED(vp)) { 6617 vfs_op_thread_exit(mp, mpcpu); 6618 return (vfs_cache_root_fallback(mp, flags, vpp)); 6619 } 6620 vrefact(vp); 6621 vfs_op_thread_exit(mp, mpcpu); 6622 error = vn_lock(vp, flags); 6623 if (error != 0) { 6624 vrele(vp); 6625 return (vfs_cache_root_fallback(mp, flags, vpp)); 6626 } 6627 *vpp = vp; 6628 return (0); 6629 } 6630 6631 struct vnode * 6632 vfs_cache_root_clear(struct mount *mp) 6633 { 6634 struct vnode *vp; 6635 6636 /* 6637 * ops > 0 guarantees there is nobody who can see this vnode 6638 */ 6639 MPASS(mp->mnt_vfs_ops > 0); 6640 vp = mp->mnt_rootvnode; 6641 if (vp != NULL) 6642 vn_seqc_write_begin(vp); 6643 mp->mnt_rootvnode = NULL; 6644 return (vp); 6645 } 6646 6647 void 6648 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6649 { 6650 6651 MPASS(mp->mnt_vfs_ops > 0); 6652 vrefact(vp); 6653 mp->mnt_rootvnode = vp; 6654 } 6655 6656 /* 6657 * These are helper functions for filesystems to traverse all 6658 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6659 * 6660 * This interface replaces MNT_VNODE_FOREACH. 6661 */ 6662 6663 struct vnode * 6664 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6665 { 6666 struct vnode *vp; 6667 6668 if (should_yield()) 6669 kern_yield(PRI_USER); 6670 MNT_ILOCK(mp); 6671 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6672 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6673 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6674 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6675 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6676 continue; 6677 VI_LOCK(vp); 6678 if (VN_IS_DOOMED(vp)) { 6679 VI_UNLOCK(vp); 6680 continue; 6681 } 6682 break; 6683 } 6684 if (vp == NULL) { 6685 __mnt_vnode_markerfree_all(mvp, mp); 6686 /* MNT_IUNLOCK(mp); -- done in above function */ 6687 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6688 return (NULL); 6689 } 6690 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6691 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6692 MNT_IUNLOCK(mp); 6693 return (vp); 6694 } 6695 6696 struct vnode * 6697 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6698 { 6699 struct vnode *vp; 6700 6701 *mvp = vn_alloc_marker(mp); 6702 MNT_ILOCK(mp); 6703 MNT_REF(mp); 6704 6705 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6706 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6707 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6708 continue; 6709 VI_LOCK(vp); 6710 if (VN_IS_DOOMED(vp)) { 6711 VI_UNLOCK(vp); 6712 continue; 6713 } 6714 break; 6715 } 6716 if (vp == NULL) { 6717 MNT_REL(mp); 6718 MNT_IUNLOCK(mp); 6719 vn_free_marker(*mvp); 6720 *mvp = NULL; 6721 return (NULL); 6722 } 6723 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6724 MNT_IUNLOCK(mp); 6725 return (vp); 6726 } 6727 6728 void 6729 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6730 { 6731 6732 if (*mvp == NULL) { 6733 MNT_IUNLOCK(mp); 6734 return; 6735 } 6736 6737 mtx_assert(MNT_MTX(mp), MA_OWNED); 6738 6739 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6740 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6741 MNT_REL(mp); 6742 MNT_IUNLOCK(mp); 6743 vn_free_marker(*mvp); 6744 *mvp = NULL; 6745 } 6746 6747 /* 6748 * These are helper functions for filesystems to traverse their 6749 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6750 */ 6751 static void 6752 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6753 { 6754 6755 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6756 6757 MNT_ILOCK(mp); 6758 MNT_REL(mp); 6759 MNT_IUNLOCK(mp); 6760 vn_free_marker(*mvp); 6761 *mvp = NULL; 6762 } 6763 6764 /* 6765 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6766 * conventional lock order during mnt_vnode_next_lazy iteration. 6767 * 6768 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6769 * The list lock is dropped and reacquired. On success, both locks are held. 6770 * On failure, the mount vnode list lock is held but the vnode interlock is 6771 * not, and the procedure may have yielded. 6772 */ 6773 static bool 6774 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6775 struct vnode *vp) 6776 { 6777 6778 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6779 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6780 ("%s: bad marker", __func__)); 6781 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6782 ("%s: inappropriate vnode", __func__)); 6783 ASSERT_VI_UNLOCKED(vp, __func__); 6784 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6785 6786 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6787 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6788 6789 /* 6790 * Note we may be racing against vdrop which transitioned the hold 6791 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6792 * if we are the only user after we get the interlock we will just 6793 * vdrop. 6794 */ 6795 vhold(vp); 6796 mtx_unlock(&mp->mnt_listmtx); 6797 VI_LOCK(vp); 6798 if (VN_IS_DOOMED(vp)) { 6799 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6800 goto out_lost; 6801 } 6802 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6803 /* 6804 * There is nothing to do if we are the last user. 6805 */ 6806 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6807 goto out_lost; 6808 mtx_lock(&mp->mnt_listmtx); 6809 return (true); 6810 out_lost: 6811 vdropl(vp); 6812 maybe_yield(); 6813 mtx_lock(&mp->mnt_listmtx); 6814 return (false); 6815 } 6816 6817 static struct vnode * 6818 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6819 void *cbarg) 6820 { 6821 struct vnode *vp; 6822 6823 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6824 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6825 restart: 6826 vp = TAILQ_NEXT(*mvp, v_lazylist); 6827 while (vp != NULL) { 6828 if (vp->v_type == VMARKER) { 6829 vp = TAILQ_NEXT(vp, v_lazylist); 6830 continue; 6831 } 6832 /* 6833 * See if we want to process the vnode. Note we may encounter a 6834 * long string of vnodes we don't care about and hog the list 6835 * as a result. Check for it and requeue the marker. 6836 */ 6837 VNPASS(!VN_IS_DOOMED(vp), vp); 6838 if (!cb(vp, cbarg)) { 6839 if (!should_yield()) { 6840 vp = TAILQ_NEXT(vp, v_lazylist); 6841 continue; 6842 } 6843 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6844 v_lazylist); 6845 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6846 v_lazylist); 6847 mtx_unlock(&mp->mnt_listmtx); 6848 kern_yield(PRI_USER); 6849 mtx_lock(&mp->mnt_listmtx); 6850 goto restart; 6851 } 6852 /* 6853 * Try-lock because this is the wrong lock order. 6854 */ 6855 if (!VI_TRYLOCK(vp) && 6856 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6857 goto restart; 6858 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6859 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6860 ("alien vnode on the lazy list %p %p", vp, mp)); 6861 VNPASS(vp->v_mount == mp, vp); 6862 VNPASS(!VN_IS_DOOMED(vp), vp); 6863 break; 6864 } 6865 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6866 6867 /* Check if we are done */ 6868 if (vp == NULL) { 6869 mtx_unlock(&mp->mnt_listmtx); 6870 mnt_vnode_markerfree_lazy(mvp, mp); 6871 return (NULL); 6872 } 6873 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6874 mtx_unlock(&mp->mnt_listmtx); 6875 ASSERT_VI_LOCKED(vp, "lazy iter"); 6876 return (vp); 6877 } 6878 6879 struct vnode * 6880 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6881 void *cbarg) 6882 { 6883 6884 if (should_yield()) 6885 kern_yield(PRI_USER); 6886 mtx_lock(&mp->mnt_listmtx); 6887 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6888 } 6889 6890 struct vnode * 6891 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6892 void *cbarg) 6893 { 6894 struct vnode *vp; 6895 6896 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6897 return (NULL); 6898 6899 *mvp = vn_alloc_marker(mp); 6900 MNT_ILOCK(mp); 6901 MNT_REF(mp); 6902 MNT_IUNLOCK(mp); 6903 6904 mtx_lock(&mp->mnt_listmtx); 6905 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6906 if (vp == NULL) { 6907 mtx_unlock(&mp->mnt_listmtx); 6908 mnt_vnode_markerfree_lazy(mvp, mp); 6909 return (NULL); 6910 } 6911 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6912 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6913 } 6914 6915 void 6916 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6917 { 6918 6919 if (*mvp == NULL) 6920 return; 6921 6922 mtx_lock(&mp->mnt_listmtx); 6923 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6924 mtx_unlock(&mp->mnt_listmtx); 6925 mnt_vnode_markerfree_lazy(mvp, mp); 6926 } 6927 6928 int 6929 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 6930 { 6931 6932 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 6933 cnp->cn_flags &= ~NOEXECCHECK; 6934 return (0); 6935 } 6936 6937 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 6938 } 6939 6940 /* 6941 * Do not use this variant unless you have means other than the hold count 6942 * to prevent the vnode from getting freed. 6943 */ 6944 void 6945 vn_seqc_write_begin_locked(struct vnode *vp) 6946 { 6947 6948 ASSERT_VI_LOCKED(vp, __func__); 6949 VNPASS(vp->v_holdcnt > 0, vp); 6950 VNPASS(vp->v_seqc_users >= 0, vp); 6951 vp->v_seqc_users++; 6952 if (vp->v_seqc_users == 1) 6953 seqc_sleepable_write_begin(&vp->v_seqc); 6954 } 6955 6956 void 6957 vn_seqc_write_begin(struct vnode *vp) 6958 { 6959 6960 VI_LOCK(vp); 6961 vn_seqc_write_begin_locked(vp); 6962 VI_UNLOCK(vp); 6963 } 6964 6965 void 6966 vn_seqc_write_end_locked(struct vnode *vp) 6967 { 6968 6969 ASSERT_VI_LOCKED(vp, __func__); 6970 VNPASS(vp->v_seqc_users > 0, vp); 6971 vp->v_seqc_users--; 6972 if (vp->v_seqc_users == 0) 6973 seqc_sleepable_write_end(&vp->v_seqc); 6974 } 6975 6976 void 6977 vn_seqc_write_end(struct vnode *vp) 6978 { 6979 6980 VI_LOCK(vp); 6981 vn_seqc_write_end_locked(vp); 6982 VI_UNLOCK(vp); 6983 } 6984 6985 /* 6986 * Special case handling for allocating and freeing vnodes. 6987 * 6988 * The counter remains unchanged on free so that a doomed vnode will 6989 * keep testing as in modify as long as it is accessible with SMR. 6990 */ 6991 static void 6992 vn_seqc_init(struct vnode *vp) 6993 { 6994 6995 vp->v_seqc = 0; 6996 vp->v_seqc_users = 0; 6997 } 6998 6999 static void 7000 vn_seqc_write_end_free(struct vnode *vp) 7001 { 7002 7003 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7004 VNPASS(vp->v_seqc_users == 1, vp); 7005 } 7006 7007 void 7008 vn_irflag_set_locked(struct vnode *vp, short toset) 7009 { 7010 short flags; 7011 7012 ASSERT_VI_LOCKED(vp, __func__); 7013 flags = vn_irflag_read(vp); 7014 VNASSERT((flags & toset) == 0, vp, 7015 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7016 __func__, flags, toset)); 7017 atomic_store_short(&vp->v_irflag, flags | toset); 7018 } 7019 7020 void 7021 vn_irflag_set(struct vnode *vp, short toset) 7022 { 7023 7024 VI_LOCK(vp); 7025 vn_irflag_set_locked(vp, toset); 7026 VI_UNLOCK(vp); 7027 } 7028 7029 void 7030 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7031 { 7032 short flags; 7033 7034 ASSERT_VI_LOCKED(vp, __func__); 7035 flags = vn_irflag_read(vp); 7036 atomic_store_short(&vp->v_irflag, flags | toset); 7037 } 7038 7039 void 7040 vn_irflag_set_cond(struct vnode *vp, short toset) 7041 { 7042 7043 VI_LOCK(vp); 7044 vn_irflag_set_cond_locked(vp, toset); 7045 VI_UNLOCK(vp); 7046 } 7047 7048 void 7049 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7050 { 7051 short flags; 7052 7053 ASSERT_VI_LOCKED(vp, __func__); 7054 flags = vn_irflag_read(vp); 7055 VNASSERT((flags & tounset) == tounset, vp, 7056 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7057 __func__, flags, tounset)); 7058 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7059 } 7060 7061 void 7062 vn_irflag_unset(struct vnode *vp, short tounset) 7063 { 7064 7065 VI_LOCK(vp); 7066 vn_irflag_unset_locked(vp, tounset); 7067 VI_UNLOCK(vp); 7068 } 7069