/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
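 * (Added note, derived from the code below: new vnodes are linked in just
 * before vnode_list_free_marker by vnode_init(), and the free and reclaim
 * markers are used as scan cursors by the vnlru code.)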
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.
 * Thus, filesystems mounted on block devices are delayed only about half
 * the time that file data is delayed.  Similarly, directory updates are
 * more critical, so are only delayed about a third the time that file data
 * is delayed.  Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process).  The syncer_delayno variable indicates the next queue
 * that is to be processed.  Items that need to be processed soon are
 * placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	long freevnodes;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static u_long vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
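 * (Added note, based on the handlers below: the new values are stored
 * verbatim, so e.g. "sysctl kern.maxvnodes=100" is accepted even though it
 * is far below any workable value; the related tunables are exposed as
 * kern.maxvnodes and vfs.wantfreevnodes.)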
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru and
	 * getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.
		 * Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
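 * (Worked example, added for illustration: MAXVNODES_MAX below is
 * 512UL * 1024 * 1024 / 64 = 8,388,608 vnodes ("8M"); 512GB is
 * 512 * 1024 * 1024 KB, i.e. one vnode per 64KB of memory at that cap.)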
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	deferred_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *		|
 *		+->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.
	 * vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount point it tried to busy is no
	 * longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In worst case we may
 * get pointer to unmounted or even different filesystem, so we have to
 * check what we got, and go the slow way if so.
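 * (Added note, describing the code below: a mismatch or a failed vfs_busy()
 * simply invalidates the cache slot and falls back to the locked mountlist
 * scan.)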
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
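 *
 * (Added note, describing the packing done below: the minor number carries
 * the filesystem type in bits 24..31 and the 16-bit mntid_base counter split
 * across bits 16..23 and 0..7, with a fixed major number of 255.)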
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
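 *
 * (Added note, describing the code below: the scan uses
 * vnode_list_reclaim_marker as a cursor; before dropping vnode_list_mtx the
 * marker is re-inserted after the current vnode so the scan can restart from
 * that point.)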
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		if (should_yield())
			kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0, "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	ocount = count;
	vp = mvp;
	for (;;) {
		if (count == 0) {
			break;
		}
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled while it claims otherwise to the
		 * caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out whether
		 * writes are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp);
		count--;
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	return (ocount - count);
}

static int
vnlru_free_locked(int count)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	return (vnlru_free_impl(count, NULL, vnode_list_free_marker));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp);
	mtx_unlock(&vnode_list_mtx);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

/*
 * The main freevnodes counter is only updated when threads requeue their vnode
 * batches.  CPUs are conditionally walked to compute a more accurate total.
 *
 * Limit how much slop we are willing to tolerate.  Note: the actual value at
 * any given moment can still exceed slop, but it should not be by a
 * significant margin in practice.
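 *
 * (Added note, describing vnlru_read_freevnodes() below: the per-CPU batch
 * counters are only folded into the published total once the global counter
 * has drifted by at least VNLRU_FREEVNODES_SLOP, i.e. 128, since the last
 * walk.)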
 */
#define VNLRU_FREEVNODES_SLOP 128

static __inline void
vfs_freevnodes_inc(void)
{
	struct vdbatch *vd;

	critical_enter();
	vd = DPCPU_PTR(vd);
	vd->freevnodes++;
	critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	struct vdbatch *vd;

	critical_enter();
	vd = DPCPU_PTR(vd);
	vd->freevnodes--;
	critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	struct vdbatch *vd;
	long slop;
	int cpu;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (freevnodes > freevnodes_old)
		slop = freevnodes - freevnodes_old;
	else
		slop = freevnodes_old - freevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (freevnodes >= 0 ? freevnodes : 0);
	freevnodes_old = freevnodes;
	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		freevnodes_old += vd->freevnodes;
	}
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static bool
vnlru_under_unlocked(u_long rnumvnodes, u_long limit)
{
	long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = atomic_load_long(&freevnodes);
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
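		 * (Added note: the 4% and 9% figures correspond to the
		 * vlowat and vhiwat watermarks computed in vnlru_recalc()
		 * above, i.e. gapvnodes / 11 and half of that.)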
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		vdrop_recycle(vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		vdrop_recycle(vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if (!VN_IS_DOOMED(vp)) {
		counter_u64_add(recycles_free_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp);
	vdropl_recycle(vp);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Allocate a new vnode.
 *
 * The operation never returns an error.  Returning an error was disabled
 * in r145385 (dated 2005) with the following comment:
 *
 * XXX Not all VFS_VGET/ffs_vget callers check returns.
 *
 * Given the age of this commit (almost 15 years at the time of writing this
 * comment) restoring the ability to fail requires a significant audit of
 * all codepaths.
 *
 * The routine can try to free a vnode or stall for up to 1 second waiting for
 * vnlru to clear things up, but ultimately always performs an M_WAITOK
 * allocation.
 */
static u_long vn_alloc_cyclecount;

static struct vnode * __noinline
vn_alloc_hard(struct mount *mp)
{
	u_long rnumvnodes, rfreevnodes;

	mtx_lock(&vnode_list_mtx);
	rnumvnodes = atomic_load_long(&numvnodes);
	if (rnumvnodes + 1 < desiredvnodes) {
		vn_alloc_cyclecount = 0;
		goto alloc;
	}
	rfreevnodes = vnlru_read_freevnodes();
	if (vn_alloc_cyclecount++ >= rfreevnodes) {
		vn_alloc_cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (vnlru_free_locked(1) > 0)
		goto alloc;
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		/*
		 * Wait for space for a new vnode.
		 */
		vnlru_kick();
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1);
	}
alloc:
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick();
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	u_long rnumvnodes;

	if (__predict_false(vn_alloc_cyclecount != 0))
		return (vn_alloc_hard(mp));
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
		atomic_subtract_long(&numvnodes, 1);
		return (vn_alloc_hard(mp));
	}

	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree_smr(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
#ifdef WITNESS
	if (lo->lo_name != tag) {
#endif
		lo->lo_name = tag;
#ifdef WITNESS
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
#endif
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
1820 */ 1821 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1822 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1823 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1824 vp->v_type = VNON; 1825 vp->v_op = vops; 1826 vp->v_irflag = 0; 1827 v_init_counters(vp); 1828 vn_seqc_init(vp); 1829 vp->v_bufobj.bo_ops = &buf_ops_bio; 1830 #ifdef DIAGNOSTIC 1831 if (mp == NULL && vops != &dead_vnodeops) 1832 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1833 #endif 1834 #ifdef MAC 1835 mac_vnode_init(vp); 1836 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1837 mac_vnode_associate_singlelabel(mp, vp); 1838 #endif 1839 if (mp != NULL) { 1840 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1841 } 1842 1843 /* 1844 * For the filesystems which do not use vfs_hash_insert(), 1845 * still initialize v_hash to have vfs_hash_index() useful. 1846 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1847 * its own hashing. 1848 */ 1849 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1850 1851 *vpp = vp; 1852 return (0); 1853 } 1854 1855 void 1856 getnewvnode_reserve(void) 1857 { 1858 struct thread *td; 1859 1860 td = curthread; 1861 MPASS(td->td_vp_reserved == NULL); 1862 td->td_vp_reserved = vn_alloc(NULL); 1863 } 1864 1865 void 1866 getnewvnode_drop_reserve(void) 1867 { 1868 struct thread *td; 1869 1870 td = curthread; 1871 if (td->td_vp_reserved != NULL) { 1872 vn_free(td->td_vp_reserved); 1873 td->td_vp_reserved = NULL; 1874 } 1875 } 1876 1877 static void __noinline 1878 freevnode(struct vnode *vp) 1879 { 1880 struct bufobj *bo; 1881 1882 /* 1883 * The vnode has been marked for destruction, so free it. 1884 * 1885 * The vnode will be returned to the zone where it will 1886 * normally remain until it is needed for another vnode. We 1887 * need to cleanup (or verify that the cleanup has already 1888 * been done) any residual data left from its current use 1889 * so as not to contaminate the freshly allocated vnode. 1890 */ 1891 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1892 /* 1893 * Paired with vgone. 
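 * vgonel() opened a v_seqc write sequence when it doomed the vnode;
 * ending it here marks the identity change as complete for any
 * seqc-based lockless readers before the memory is handed back to
 * the zone.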
1894 */ 1895 vn_seqc_write_end_free(vp); 1896 1897 bo = &vp->v_bufobj; 1898 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1899 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1900 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1901 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1902 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1903 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1904 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1905 ("clean blk trie not empty")); 1906 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1907 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1908 ("dirty blk trie not empty")); 1909 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1910 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1911 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1912 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1913 ("Dangling rangelock waiters")); 1914 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1915 ("Leaked inactivation")); 1916 VI_UNLOCK(vp); 1917 #ifdef MAC 1918 mac_vnode_destroy(vp); 1919 #endif 1920 if (vp->v_pollinfo != NULL) { 1921 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1922 destroy_vpollinfo(vp->v_pollinfo); 1923 VOP_UNLOCK(vp); 1924 vp->v_pollinfo = NULL; 1925 } 1926 vp->v_mountedhere = NULL; 1927 vp->v_unpcb = NULL; 1928 vp->v_rdev = NULL; 1929 vp->v_fifoinfo = NULL; 1930 vp->v_iflag = 0; 1931 vp->v_vflag = 0; 1932 bo->bo_flag = 0; 1933 vn_free(vp); 1934 } 1935 1936 /* 1937 * Delete from old mount point vnode list, if on one. 1938 */ 1939 static void 1940 delmntque(struct vnode *vp) 1941 { 1942 struct mount *mp; 1943 1944 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1945 1946 mp = vp->v_mount; 1947 if (mp == NULL) 1948 return; 1949 MNT_ILOCK(mp); 1950 VI_LOCK(vp); 1951 vp->v_mount = NULL; 1952 VI_UNLOCK(vp); 1953 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1954 ("bad mount point vnode list size")); 1955 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1956 mp->mnt_nvnodelistsize--; 1957 MNT_REL(mp); 1958 MNT_IUNLOCK(mp); 1959 } 1960 1961 static int 1962 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 1963 { 1964 1965 KASSERT(vp->v_mount == NULL, 1966 ("insmntque: vnode already on per mount vnode list")); 1967 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1968 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1969 1970 /* 1971 * We acquire the vnode interlock early to ensure that the 1972 * vnode cannot be recycled by another process releasing a 1973 * holdcnt on it before we get it on both the vnode list 1974 * and the active vnode list. The mount mutex protects only 1975 * manipulation of the vnode list and the vnode freelist 1976 * mutex protects only manipulation of the active vnode list. 1977 * Hence the need to hold the vnode interlock throughout. 
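 * As a rough usage sketch (identifiers are illustrative, not taken
 * from any particular filesystem), a VFS_VGET implementation does:
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 * On failure insmntque() has already reclaimed and released the
 * vnode, so the caller must not vput() it again; insmntque1() is for
 * callers which want to handle that cleanup themselves.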
1978 */ 1979 MNT_ILOCK(mp); 1980 VI_LOCK(vp); 1981 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1982 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1983 mp->mnt_nvnodelistsize == 0)) && 1984 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1985 VI_UNLOCK(vp); 1986 MNT_IUNLOCK(mp); 1987 if (dtr) { 1988 vp->v_data = NULL; 1989 vp->v_op = &dead_vnodeops; 1990 vgone(vp); 1991 vput(vp); 1992 } 1993 return (EBUSY); 1994 } 1995 vp->v_mount = mp; 1996 MNT_REF(mp); 1997 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1998 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1999 ("neg mount point vnode list size")); 2000 mp->mnt_nvnodelistsize++; 2001 VI_UNLOCK(vp); 2002 MNT_IUNLOCK(mp); 2003 return (0); 2004 } 2005 2006 /* 2007 * Insert into list of vnodes for the new mount point, if available. 2008 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2009 * leaves handling of the vnode to the caller. 2010 */ 2011 int 2012 insmntque(struct vnode *vp, struct mount *mp) 2013 { 2014 return (insmntque1_int(vp, mp, true)); 2015 } 2016 2017 int 2018 insmntque1(struct vnode *vp, struct mount *mp) 2019 { 2020 return (insmntque1_int(vp, mp, false)); 2021 } 2022 2023 /* 2024 * Flush out and invalidate all buffers associated with a bufobj 2025 * Called with the underlying object locked. 2026 */ 2027 int 2028 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2029 { 2030 int error; 2031 2032 BO_LOCK(bo); 2033 if (flags & V_SAVE) { 2034 error = bufobj_wwait(bo, slpflag, slptimeo); 2035 if (error) { 2036 BO_UNLOCK(bo); 2037 return (error); 2038 } 2039 if (bo->bo_dirty.bv_cnt > 0) { 2040 BO_UNLOCK(bo); 2041 do { 2042 error = BO_SYNC(bo, MNT_WAIT); 2043 } while (error == ERELOOKUP); 2044 if (error != 0) 2045 return (error); 2046 BO_LOCK(bo); 2047 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2048 BO_UNLOCK(bo); 2049 return (EBUSY); 2050 } 2051 } 2052 } 2053 /* 2054 * If you alter this loop please notice that interlock is dropped and 2055 * reacquired in flushbuflist. Special care is needed to ensure that 2056 * no race conditions occur from this. 2057 */ 2058 do { 2059 error = flushbuflist(&bo->bo_clean, 2060 flags, bo, slpflag, slptimeo); 2061 if (error == 0 && !(flags & V_CLEANONLY)) 2062 error = flushbuflist(&bo->bo_dirty, 2063 flags, bo, slpflag, slptimeo); 2064 if (error != 0 && error != EAGAIN) { 2065 BO_UNLOCK(bo); 2066 return (error); 2067 } 2068 } while (error != 0); 2069 2070 /* 2071 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2072 * have write I/O in-progress but if there is a VM object then the 2073 * VM object can also have read-I/O in-progress. 2074 */ 2075 do { 2076 bufobj_wwait(bo, 0, 0); 2077 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2078 BO_UNLOCK(bo); 2079 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2080 BO_LOCK(bo); 2081 } 2082 } while (bo->bo_numoutput > 0); 2083 BO_UNLOCK(bo); 2084 2085 /* 2086 * Destroy the copy in the VM cache, too. 2087 */ 2088 if (bo->bo_object != NULL && 2089 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2090 VM_OBJECT_WLOCK(bo->bo_object); 2091 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 
2092 OBJPR_CLEANONLY : 0); 2093 VM_OBJECT_WUNLOCK(bo->bo_object); 2094 } 2095 2096 #ifdef INVARIANTS 2097 BO_LOCK(bo); 2098 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2099 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2100 bo->bo_clean.bv_cnt > 0)) 2101 panic("vinvalbuf: flush failed"); 2102 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2103 bo->bo_dirty.bv_cnt > 0) 2104 panic("vinvalbuf: flush dirty failed"); 2105 BO_UNLOCK(bo); 2106 #endif 2107 return (0); 2108 } 2109 2110 /* 2111 * Flush out and invalidate all buffers associated with a vnode. 2112 * Called with the underlying object locked. 2113 */ 2114 int 2115 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2116 { 2117 2118 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2119 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2120 if (vp->v_object != NULL && vp->v_object->handle != vp) 2121 return (0); 2122 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2123 } 2124 2125 /* 2126 * Flush out buffers on the specified list. 2127 * 2128 */ 2129 static int 2130 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2131 int slptimeo) 2132 { 2133 struct buf *bp, *nbp; 2134 int retval, error; 2135 daddr_t lblkno; 2136 b_xflags_t xflags; 2137 2138 ASSERT_BO_WLOCKED(bo); 2139 2140 retval = 0; 2141 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2142 /* 2143 * If we are flushing both V_NORMAL and V_ALT buffers then 2144 * do not skip any buffers. If we are flushing only V_NORMAL 2145 * buffers then skip buffers marked as BX_ALTDATA. If we are 2146 * flushing only V_ALT buffers then skip buffers not marked 2147 * as BX_ALTDATA. 2148 */ 2149 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2150 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2151 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2152 continue; 2153 } 2154 if (nbp != NULL) { 2155 lblkno = nbp->b_lblkno; 2156 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2157 } 2158 retval = EAGAIN; 2159 error = BUF_TIMELOCK(bp, 2160 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2161 "flushbuf", slpflag, slptimeo); 2162 if (error) { 2163 BO_LOCK(bo); 2164 return (error != ENOLCK ? error : EAGAIN); 2165 } 2166 KASSERT(bp->b_bufobj == bo, 2167 ("bp %p wrong b_bufobj %p should be %p", 2168 bp, bp->b_bufobj, bo)); 2169 /* 2170 * XXX Since there are no node locks for NFS, I 2171 * believe there is a slight chance that a delayed 2172 * write will occur while sleeping just above, so 2173 * check for it. 2174 */ 2175 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2176 (flags & V_SAVE)) { 2177 bremfree(bp); 2178 bp->b_flags |= B_ASYNC; 2179 bwrite(bp); 2180 BO_LOCK(bo); 2181 return (EAGAIN); /* XXX: why not loop ? 
*/ 2182 } 2183 bremfree(bp); 2184 bp->b_flags |= (B_INVAL | B_RELBUF); 2185 bp->b_flags &= ~B_ASYNC; 2186 brelse(bp); 2187 BO_LOCK(bo); 2188 if (nbp == NULL) 2189 break; 2190 nbp = gbincore(bo, lblkno); 2191 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2192 != xflags) 2193 break; /* nbp invalid */ 2194 } 2195 return (retval); 2196 } 2197 2198 int 2199 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2200 { 2201 struct buf *bp; 2202 int error; 2203 daddr_t lblkno; 2204 2205 ASSERT_BO_LOCKED(bo); 2206 2207 for (lblkno = startn;;) { 2208 again: 2209 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2210 if (bp == NULL || bp->b_lblkno >= endn || 2211 bp->b_lblkno < startn) 2212 break; 2213 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2214 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2215 if (error != 0) { 2216 BO_RLOCK(bo); 2217 if (error == ENOLCK) 2218 goto again; 2219 return (error); 2220 } 2221 KASSERT(bp->b_bufobj == bo, 2222 ("bp %p wrong b_bufobj %p should be %p", 2223 bp, bp->b_bufobj, bo)); 2224 lblkno = bp->b_lblkno + 1; 2225 if ((bp->b_flags & B_MANAGED) == 0) 2226 bremfree(bp); 2227 bp->b_flags |= B_RELBUF; 2228 /* 2229 * In the VMIO case, use the B_NOREUSE flag to hint that the 2230 * pages backing each buffer in the range are unlikely to be 2231 * reused. Dirty buffers will have the hint applied once 2232 * they've been written. 2233 */ 2234 if ((bp->b_flags & B_VMIO) != 0) 2235 bp->b_flags |= B_NOREUSE; 2236 brelse(bp); 2237 BO_RLOCK(bo); 2238 } 2239 return (0); 2240 } 2241 2242 /* 2243 * Truncate a file's buffer and pages to a specified length. This 2244 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2245 * sync activity. 2246 */ 2247 int 2248 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2249 { 2250 struct buf *bp, *nbp; 2251 struct bufobj *bo; 2252 daddr_t startlbn; 2253 2254 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2255 vp, blksize, (uintmax_t)length); 2256 2257 /* 2258 * Round up to the *next* lbn. 2259 */ 2260 startlbn = howmany(length, blksize); 2261 2262 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2263 2264 bo = &vp->v_bufobj; 2265 restart_unlocked: 2266 BO_LOCK(bo); 2267 2268 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2269 ; 2270 2271 if (length > 0) { 2272 restartsync: 2273 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2274 if (bp->b_lblkno > 0) 2275 continue; 2276 /* 2277 * Since we hold the vnode lock this should only 2278 * fail if we're racing with the buf daemon. 2279 */ 2280 if (BUF_LOCK(bp, 2281 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2282 BO_LOCKPTR(bo)) == ENOLCK) 2283 goto restart_unlocked; 2284 2285 VNASSERT((bp->b_flags & B_DELWRI), vp, 2286 ("buf(%p) on dirty queue without DELWRI", bp)); 2287 2288 bremfree(bp); 2289 bawrite(bp); 2290 BO_LOCK(bo); 2291 goto restartsync; 2292 } 2293 } 2294 2295 bufobj_wwait(bo, 0, 0); 2296 BO_UNLOCK(bo); 2297 vnode_pager_setsize(vp, length); 2298 2299 return (0); 2300 } 2301 2302 /* 2303 * Invalidate the cached pages of a file's buffer within the range of block 2304 * numbers [startlbn, endlbn). 
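 * The vnode must be locked and blksize must match the bufobj block
 * size (both are asserted below); buffers in the range are discarded
 * and the corresponding pages are then removed via vn_pages_remove().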
2305 */ 2306 void 2307 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2308 int blksize) 2309 { 2310 struct bufobj *bo; 2311 off_t start, end; 2312 2313 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2314 2315 start = blksize * startlbn; 2316 end = blksize * endlbn; 2317 2318 bo = &vp->v_bufobj; 2319 BO_LOCK(bo); 2320 MPASS(blksize == bo->bo_bsize); 2321 2322 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2323 ; 2324 2325 BO_UNLOCK(bo); 2326 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2327 } 2328 2329 static int 2330 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2331 daddr_t startlbn, daddr_t endlbn) 2332 { 2333 struct buf *bp, *nbp; 2334 bool anyfreed; 2335 2336 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2337 ASSERT_BO_LOCKED(bo); 2338 2339 do { 2340 anyfreed = false; 2341 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2342 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2343 continue; 2344 if (BUF_LOCK(bp, 2345 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2346 BO_LOCKPTR(bo)) == ENOLCK) { 2347 BO_LOCK(bo); 2348 return (EAGAIN); 2349 } 2350 2351 bremfree(bp); 2352 bp->b_flags |= B_INVAL | B_RELBUF; 2353 bp->b_flags &= ~B_ASYNC; 2354 brelse(bp); 2355 anyfreed = true; 2356 2357 BO_LOCK(bo); 2358 if (nbp != NULL && 2359 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2360 nbp->b_vp != vp || 2361 (nbp->b_flags & B_DELWRI) != 0)) 2362 return (EAGAIN); 2363 } 2364 2365 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2366 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2367 continue; 2368 if (BUF_LOCK(bp, 2369 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2370 BO_LOCKPTR(bo)) == ENOLCK) { 2371 BO_LOCK(bo); 2372 return (EAGAIN); 2373 } 2374 bremfree(bp); 2375 bp->b_flags |= B_INVAL | B_RELBUF; 2376 bp->b_flags &= ~B_ASYNC; 2377 brelse(bp); 2378 anyfreed = true; 2379 2380 BO_LOCK(bo); 2381 if (nbp != NULL && 2382 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2383 (nbp->b_vp != vp) || 2384 (nbp->b_flags & B_DELWRI) == 0)) 2385 return (EAGAIN); 2386 } 2387 } while (anyfreed); 2388 return (0); 2389 } 2390 2391 static void 2392 buf_vlist_remove(struct buf *bp) 2393 { 2394 struct bufv *bv; 2395 b_xflags_t flags; 2396 2397 flags = bp->b_xflags; 2398 2399 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2400 ASSERT_BO_WLOCKED(bp->b_bufobj); 2401 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2402 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2403 ("%s: buffer %p has invalid queue state", __func__, bp)); 2404 2405 if ((flags & BX_VNDIRTY) != 0) 2406 bv = &bp->b_bufobj->bo_dirty; 2407 else 2408 bv = &bp->b_bufobj->bo_clean; 2409 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2410 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2411 bv->bv_cnt--; 2412 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2413 } 2414 2415 /* 2416 * Add the buffer to the sorted clean or dirty block list. 2417 * 2418 * NOTE: xflags is passed as a constant, optimizing this inline function! 
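 * Callers hold the bufobj write lock and pass exactly one of
 * BX_VNCLEAN or BX_VNDIRTY; for instance bgetvp() enters freshly
 * associated buffers on the clean list, while reassignbuf() moves a
 * buffer to the dirty list once B_DELWRI has been set on it.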
2419 */ 2420 static void 2421 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2422 { 2423 struct bufv *bv; 2424 struct buf *n; 2425 int error; 2426 2427 ASSERT_BO_WLOCKED(bo); 2428 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2429 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2430 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2431 ("dead bo %p", bo)); 2432 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2433 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2434 bp->b_xflags |= xflags; 2435 if (xflags & BX_VNDIRTY) 2436 bv = &bo->bo_dirty; 2437 else 2438 bv = &bo->bo_clean; 2439 2440 /* 2441 * Keep the list ordered. Optimize empty list insertion. Assume 2442 * we tend to grow at the tail so lookup_le should usually be cheaper 2443 * than _ge. 2444 */ 2445 if (bv->bv_cnt == 0 || 2446 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2447 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2448 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2449 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2450 else 2451 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2452 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2453 if (error) 2454 panic("buf_vlist_add: Preallocated nodes insufficient."); 2455 bv->bv_cnt++; 2456 } 2457 2458 /* 2459 * Look up a buffer using the buffer tries. 2460 */ 2461 struct buf * 2462 gbincore(struct bufobj *bo, daddr_t lblkno) 2463 { 2464 struct buf *bp; 2465 2466 ASSERT_BO_LOCKED(bo); 2467 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2468 if (bp != NULL) 2469 return (bp); 2470 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2471 } 2472 2473 /* 2474 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2475 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2476 * stability of the result. Like other lockless lookups, the found buf may 2477 * already be invalid by the time this function returns. 2478 */ 2479 struct buf * 2480 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2481 { 2482 struct buf *bp; 2483 2484 ASSERT_BO_UNLOCKED(bo); 2485 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2486 if (bp != NULL) 2487 return (bp); 2488 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2489 } 2490 2491 /* 2492 * Associate a buffer with a vnode. 2493 */ 2494 void 2495 bgetvp(struct vnode *vp, struct buf *bp) 2496 { 2497 struct bufobj *bo; 2498 2499 bo = &vp->v_bufobj; 2500 ASSERT_BO_WLOCKED(bo); 2501 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2502 2503 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2504 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2505 ("bgetvp: bp already attached! %p", bp)); 2506 2507 vhold(vp); 2508 bp->b_vp = vp; 2509 bp->b_bufobj = bo; 2510 /* 2511 * Insert onto list for new vnode. 2512 */ 2513 buf_vlist_add(bp, bo, BX_VNCLEAN); 2514 } 2515 2516 /* 2517 * Disassociate a buffer from a vnode. 2518 */ 2519 void 2520 brelvp(struct buf *bp) 2521 { 2522 struct bufobj *bo; 2523 struct vnode *vp; 2524 2525 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2526 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2527 2528 /* 2529 * Delete from old vnode list, if on one. 
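 * Besides taking the buffer off its clean/dirty list, this removes
 * the bufobj from the syncer worklist once the last dirty buffer is
 * gone and drops the hold reference acquired by bgetvp().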
2530 */ 2531 vp = bp->b_vp; /* XXX */ 2532 bo = bp->b_bufobj; 2533 BO_LOCK(bo); 2534 buf_vlist_remove(bp); 2535 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2536 bo->bo_flag &= ~BO_ONWORKLST; 2537 mtx_lock(&sync_mtx); 2538 LIST_REMOVE(bo, bo_synclist); 2539 syncer_worklist_len--; 2540 mtx_unlock(&sync_mtx); 2541 } 2542 bp->b_vp = NULL; 2543 bp->b_bufobj = NULL; 2544 BO_UNLOCK(bo); 2545 vdrop(vp); 2546 } 2547 2548 /* 2549 * Add an item to the syncer work queue. 2550 */ 2551 static void 2552 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2553 { 2554 int slot; 2555 2556 ASSERT_BO_WLOCKED(bo); 2557 2558 mtx_lock(&sync_mtx); 2559 if (bo->bo_flag & BO_ONWORKLST) 2560 LIST_REMOVE(bo, bo_synclist); 2561 else { 2562 bo->bo_flag |= BO_ONWORKLST; 2563 syncer_worklist_len++; 2564 } 2565 2566 if (delay > syncer_maxdelay - 2) 2567 delay = syncer_maxdelay - 2; 2568 slot = (syncer_delayno + delay) & syncer_mask; 2569 2570 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2571 mtx_unlock(&sync_mtx); 2572 } 2573 2574 static int 2575 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2576 { 2577 int error, len; 2578 2579 mtx_lock(&sync_mtx); 2580 len = syncer_worklist_len - sync_vnode_count; 2581 mtx_unlock(&sync_mtx); 2582 error = SYSCTL_OUT(req, &len, sizeof(len)); 2583 return (error); 2584 } 2585 2586 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2587 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2588 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2589 2590 static struct proc *updateproc; 2591 static void sched_sync(void); 2592 static struct kproc_desc up_kp = { 2593 "syncer", 2594 sched_sync, 2595 &updateproc 2596 }; 2597 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2598 2599 static int 2600 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2601 { 2602 struct vnode *vp; 2603 struct mount *mp; 2604 2605 *bo = LIST_FIRST(slp); 2606 if (*bo == NULL) 2607 return (0); 2608 vp = bo2vnode(*bo); 2609 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2610 return (1); 2611 /* 2612 * We use vhold in case the vnode does not 2613 * successfully sync. vhold prevents the vnode from 2614 * going away when we unlock the sync_mtx so that 2615 * we can acquire the vnode interlock. 2616 */ 2617 vholdl(vp); 2618 mtx_unlock(&sync_mtx); 2619 VI_UNLOCK(vp); 2620 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2621 vdrop(vp); 2622 mtx_lock(&sync_mtx); 2623 return (*bo == LIST_FIRST(slp)); 2624 } 2625 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2626 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2627 VOP_UNLOCK(vp); 2628 vn_finished_write(mp); 2629 BO_LOCK(*bo); 2630 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2631 /* 2632 * Put us back on the worklist. The worklist 2633 * routine will remove us from our current 2634 * position and then add us back in at a later 2635 * position. 2636 */ 2637 vn_syncer_add_to_worklist(*bo, syncdelay); 2638 } 2639 BO_UNLOCK(*bo); 2640 vdrop(vp); 2641 mtx_lock(&sync_mtx); 2642 return (0); 2643 } 2644 2645 static int first_printf = 1; 2646 2647 /* 2648 * System filesystem synchronizer daemon. 
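 * Roughly once per second the daemon advances to the next slot of
 * syncer_workitem_pending[] and pushes every bufobj found there via
 * VOP_FSYNC(..., MNT_LAZY, ...); vnodes which remain dirty are
 * re-entered into the wheel by vn_syncer_add_to_worklist() about
 * syncdelay seconds in the future.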
2649 */ 2650 static void 2651 sched_sync(void) 2652 { 2653 struct synclist *next, *slp; 2654 struct bufobj *bo; 2655 long starttime; 2656 struct thread *td = curthread; 2657 int last_work_seen; 2658 int net_worklist_len; 2659 int syncer_final_iter; 2660 int error; 2661 2662 last_work_seen = 0; 2663 syncer_final_iter = 0; 2664 syncer_state = SYNCER_RUNNING; 2665 starttime = time_uptime; 2666 td->td_pflags |= TDP_NORUNNINGBUF; 2667 2668 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2669 SHUTDOWN_PRI_LAST); 2670 2671 mtx_lock(&sync_mtx); 2672 for (;;) { 2673 if (syncer_state == SYNCER_FINAL_DELAY && 2674 syncer_final_iter == 0) { 2675 mtx_unlock(&sync_mtx); 2676 kproc_suspend_check(td->td_proc); 2677 mtx_lock(&sync_mtx); 2678 } 2679 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2680 if (syncer_state != SYNCER_RUNNING && 2681 starttime != time_uptime) { 2682 if (first_printf) { 2683 printf("\nSyncing disks, vnodes remaining... "); 2684 first_printf = 0; 2685 } 2686 printf("%d ", net_worklist_len); 2687 } 2688 starttime = time_uptime; 2689 2690 /* 2691 * Push files whose dirty time has expired. Be careful 2692 * of interrupt race on slp queue. 2693 * 2694 * Skip over empty worklist slots when shutting down. 2695 */ 2696 do { 2697 slp = &syncer_workitem_pending[syncer_delayno]; 2698 syncer_delayno += 1; 2699 if (syncer_delayno == syncer_maxdelay) 2700 syncer_delayno = 0; 2701 next = &syncer_workitem_pending[syncer_delayno]; 2702 /* 2703 * If the worklist has wrapped since the 2704 * it was emptied of all but syncer vnodes, 2705 * switch to the FINAL_DELAY state and run 2706 * for one more second. 2707 */ 2708 if (syncer_state == SYNCER_SHUTTING_DOWN && 2709 net_worklist_len == 0 && 2710 last_work_seen == syncer_delayno) { 2711 syncer_state = SYNCER_FINAL_DELAY; 2712 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2713 } 2714 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2715 syncer_worklist_len > 0); 2716 2717 /* 2718 * Keep track of the last time there was anything 2719 * on the worklist other than syncer vnodes. 2720 * Return to the SHUTTING_DOWN state if any 2721 * new work appears. 2722 */ 2723 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2724 last_work_seen = syncer_delayno; 2725 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2726 syncer_state = SYNCER_SHUTTING_DOWN; 2727 while (!LIST_EMPTY(slp)) { 2728 error = sync_vnode(slp, &bo, td); 2729 if (error == 1) { 2730 LIST_REMOVE(bo, bo_synclist); 2731 LIST_INSERT_HEAD(next, bo, bo_synclist); 2732 continue; 2733 } 2734 2735 if (first_printf == 0) { 2736 /* 2737 * Drop the sync mutex, because some watchdog 2738 * drivers need to sleep while patting 2739 */ 2740 mtx_unlock(&sync_mtx); 2741 wdog_kern_pat(WD_LASTVAL); 2742 mtx_lock(&sync_mtx); 2743 } 2744 } 2745 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2746 syncer_final_iter--; 2747 /* 2748 * The variable rushjob allows the kernel to speed up the 2749 * processing of the filesystem syncer process. A rushjob 2750 * value of N tells the filesystem syncer to process the next 2751 * N seconds worth of work on its queue ASAP. Currently rushjob 2752 * is used by the soft update code to speed up the filesystem 2753 * syncer process when the incore state is getting so far 2754 * ahead of the disk that the kernel memory pool is being 2755 * threatened with exhaustion. 
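 * For example, a rushjob value of 4 causes the loop below to run four
 * extra passes back to back (the "continue" skips the one second
 * pacing sleep), so roughly four seconds worth of worklist slots get
 * processed immediately.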
2756 */ 2757 if (rushjob > 0) { 2758 rushjob -= 1; 2759 continue; 2760 } 2761 /* 2762 * Just sleep for a short period of time between 2763 * iterations when shutting down to allow some I/O 2764 * to happen. 2765 * 2766 * If it has taken us less than a second to process the 2767 * current work, then wait. Otherwise start right over 2768 * again. We can still lose time if any single round 2769 * takes more than two seconds, but it does not really 2770 * matter as we are just trying to generally pace the 2771 * filesystem activity. 2772 */ 2773 if (syncer_state != SYNCER_RUNNING || 2774 time_uptime == starttime) { 2775 thread_lock(td); 2776 sched_prio(td, PPAUSE); 2777 thread_unlock(td); 2778 } 2779 if (syncer_state != SYNCER_RUNNING) 2780 cv_timedwait(&sync_wakeup, &sync_mtx, 2781 hz / SYNCER_SHUTDOWN_SPEEDUP); 2782 else if (time_uptime == starttime) 2783 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2784 } 2785 } 2786 2787 /* 2788 * Request the syncer daemon to speed up its work. 2789 * We never push it to speed up more than half of its 2790 * normal turn time, otherwise it could take over the cpu. 2791 */ 2792 int 2793 speedup_syncer(void) 2794 { 2795 int ret = 0; 2796 2797 mtx_lock(&sync_mtx); 2798 if (rushjob < syncdelay / 2) { 2799 rushjob += 1; 2800 stat_rush_requests += 1; 2801 ret = 1; 2802 } 2803 mtx_unlock(&sync_mtx); 2804 cv_broadcast(&sync_wakeup); 2805 return (ret); 2806 } 2807 2808 /* 2809 * Tell the syncer to speed up its work and run though its work 2810 * list several times, then tell it to shut down. 2811 */ 2812 static void 2813 syncer_shutdown(void *arg, int howto) 2814 { 2815 2816 if (howto & RB_NOSYNC) 2817 return; 2818 mtx_lock(&sync_mtx); 2819 syncer_state = SYNCER_SHUTTING_DOWN; 2820 rushjob = 0; 2821 mtx_unlock(&sync_mtx); 2822 cv_broadcast(&sync_wakeup); 2823 kproc_shutdown(arg, howto); 2824 } 2825 2826 void 2827 syncer_suspend(void) 2828 { 2829 2830 syncer_shutdown(updateproc, 0); 2831 } 2832 2833 void 2834 syncer_resume(void) 2835 { 2836 2837 mtx_lock(&sync_mtx); 2838 first_printf = 1; 2839 syncer_state = SYNCER_RUNNING; 2840 mtx_unlock(&sync_mtx); 2841 cv_broadcast(&sync_wakeup); 2842 kproc_resume(updateproc); 2843 } 2844 2845 /* 2846 * Move the buffer between the clean and dirty lists of its vnode. 2847 */ 2848 void 2849 reassignbuf(struct buf *bp) 2850 { 2851 struct vnode *vp; 2852 struct bufobj *bo; 2853 int delay; 2854 #ifdef INVARIANTS 2855 struct bufv *bv; 2856 #endif 2857 2858 vp = bp->b_vp; 2859 bo = bp->b_bufobj; 2860 2861 KASSERT((bp->b_flags & B_PAGING) == 0, 2862 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2863 2864 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2865 bp, bp->b_vp, bp->b_flags); 2866 2867 BO_LOCK(bo); 2868 buf_vlist_remove(bp); 2869 2870 /* 2871 * If dirty, put on list of dirty buffers; otherwise insert onto list 2872 * of clean buffers. 
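 * A buffer going dirty also puts the bufobj on the syncer worklist,
 * with the delay picked by vnode type (dirdelay for directories,
 * metadelay for device vnodes, filedelay for the rest); a buffer
 * going clean takes the bufobj back off the worklist once no dirty
 * buffers remain.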
2873 */ 2874 if (bp->b_flags & B_DELWRI) { 2875 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2876 switch (vp->v_type) { 2877 case VDIR: 2878 delay = dirdelay; 2879 break; 2880 case VCHR: 2881 delay = metadelay; 2882 break; 2883 default: 2884 delay = filedelay; 2885 } 2886 vn_syncer_add_to_worklist(bo, delay); 2887 } 2888 buf_vlist_add(bp, bo, BX_VNDIRTY); 2889 } else { 2890 buf_vlist_add(bp, bo, BX_VNCLEAN); 2891 2892 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2893 mtx_lock(&sync_mtx); 2894 LIST_REMOVE(bo, bo_synclist); 2895 syncer_worklist_len--; 2896 mtx_unlock(&sync_mtx); 2897 bo->bo_flag &= ~BO_ONWORKLST; 2898 } 2899 } 2900 #ifdef INVARIANTS 2901 bv = &bo->bo_clean; 2902 bp = TAILQ_FIRST(&bv->bv_hd); 2903 KASSERT(bp == NULL || bp->b_bufobj == bo, 2904 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2905 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2906 KASSERT(bp == NULL || bp->b_bufobj == bo, 2907 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2908 bv = &bo->bo_dirty; 2909 bp = TAILQ_FIRST(&bv->bv_hd); 2910 KASSERT(bp == NULL || bp->b_bufobj == bo, 2911 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2912 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2913 KASSERT(bp == NULL || bp->b_bufobj == bo, 2914 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2915 #endif 2916 BO_UNLOCK(bo); 2917 } 2918 2919 static void 2920 v_init_counters(struct vnode *vp) 2921 { 2922 2923 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2924 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2925 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2926 2927 refcount_init(&vp->v_holdcnt, 1); 2928 refcount_init(&vp->v_usecount, 1); 2929 } 2930 2931 /* 2932 * Grab a particular vnode from the free list, increment its 2933 * reference count and lock it. VIRF_DOOMED is set if the vnode 2934 * is being destroyed. Only callers who specify LK_RETRY will 2935 * see doomed vnodes. If inactive processing was delayed in 2936 * vput try to do it here. 2937 * 2938 * usecount is manipulated using atomics without holding any locks. 2939 * 2940 * holdcnt can be manipulated using atomics without holding any locks, 2941 * except when transitioning 1<->0, in which case the interlock is held. 2942 * 2943 * Consumers which don't guarantee liveness of the vnode can use SMR to 2944 * try to get a reference. Note this operation can fail since the vnode 2945 * may be awaiting getting freed by the time they get to it. 
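 * A rough consumer sketch (the lockless name cache is the real user
 * of this pattern; the lookup itself is elided here):
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<retry with locks>;
 *	error = vget_finish(vp, LK_SHARED, vs);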
2946 */ 2947 enum vgetstate 2948 vget_prep_smr(struct vnode *vp) 2949 { 2950 enum vgetstate vs; 2951 2952 VFS_SMR_ASSERT_ENTERED(); 2953 2954 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2955 vs = VGET_USECOUNT; 2956 } else { 2957 if (vhold_smr(vp)) 2958 vs = VGET_HOLDCNT; 2959 else 2960 vs = VGET_NONE; 2961 } 2962 return (vs); 2963 } 2964 2965 enum vgetstate 2966 vget_prep(struct vnode *vp) 2967 { 2968 enum vgetstate vs; 2969 2970 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2971 vs = VGET_USECOUNT; 2972 } else { 2973 vhold(vp); 2974 vs = VGET_HOLDCNT; 2975 } 2976 return (vs); 2977 } 2978 2979 void 2980 vget_abort(struct vnode *vp, enum vgetstate vs) 2981 { 2982 2983 switch (vs) { 2984 case VGET_USECOUNT: 2985 vrele(vp); 2986 break; 2987 case VGET_HOLDCNT: 2988 vdrop(vp); 2989 break; 2990 default: 2991 __assert_unreachable(); 2992 } 2993 } 2994 2995 int 2996 vget(struct vnode *vp, int flags) 2997 { 2998 enum vgetstate vs; 2999 3000 vs = vget_prep(vp); 3001 return (vget_finish(vp, flags, vs)); 3002 } 3003 3004 int 3005 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3006 { 3007 int error; 3008 3009 if ((flags & LK_INTERLOCK) != 0) 3010 ASSERT_VI_LOCKED(vp, __func__); 3011 else 3012 ASSERT_VI_UNLOCKED(vp, __func__); 3013 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3014 VNPASS(vp->v_holdcnt > 0, vp); 3015 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3016 3017 error = vn_lock(vp, flags); 3018 if (__predict_false(error != 0)) { 3019 vget_abort(vp, vs); 3020 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3021 vp); 3022 return (error); 3023 } 3024 3025 vget_finish_ref(vp, vs); 3026 return (0); 3027 } 3028 3029 void 3030 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3031 { 3032 int old; 3033 3034 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3035 VNPASS(vp->v_holdcnt > 0, vp); 3036 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3037 3038 if (vs == VGET_USECOUNT) 3039 return; 3040 3041 /* 3042 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3043 * the vnode around. Otherwise someone else lended their hold count and 3044 * we have to drop ours. 3045 */ 3046 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3047 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3048 if (old != 0) { 3049 #ifdef INVARIANTS 3050 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3051 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3052 #else 3053 refcount_release(&vp->v_holdcnt); 3054 #endif 3055 } 3056 } 3057 3058 void 3059 vref(struct vnode *vp) 3060 { 3061 enum vgetstate vs; 3062 3063 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3064 vs = vget_prep(vp); 3065 vget_finish_ref(vp, vs); 3066 } 3067 3068 void 3069 vrefact(struct vnode *vp) 3070 { 3071 3072 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3073 #ifdef INVARIANTS 3074 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3075 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3076 #else 3077 refcount_acquire(&vp->v_usecount); 3078 #endif 3079 } 3080 3081 void 3082 vlazy(struct vnode *vp) 3083 { 3084 struct mount *mp; 3085 3086 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3087 3088 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3089 return; 3090 /* 3091 * We may get here for inactive routines after the vnode got doomed. 
3092 */ 3093 if (VN_IS_DOOMED(vp)) 3094 return; 3095 mp = vp->v_mount; 3096 mtx_lock(&mp->mnt_listmtx); 3097 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3098 vp->v_mflag |= VMP_LAZYLIST; 3099 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3100 mp->mnt_lazyvnodelistsize++; 3101 } 3102 mtx_unlock(&mp->mnt_listmtx); 3103 } 3104 3105 static void 3106 vunlazy(struct vnode *vp) 3107 { 3108 struct mount *mp; 3109 3110 ASSERT_VI_LOCKED(vp, __func__); 3111 VNPASS(!VN_IS_DOOMED(vp), vp); 3112 3113 mp = vp->v_mount; 3114 mtx_lock(&mp->mnt_listmtx); 3115 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3116 /* 3117 * Don't remove the vnode from the lazy list if another thread 3118 * has increased the hold count. It may have re-enqueued the 3119 * vnode to the lazy list and is now responsible for its 3120 * removal. 3121 */ 3122 if (vp->v_holdcnt == 0) { 3123 vp->v_mflag &= ~VMP_LAZYLIST; 3124 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3125 mp->mnt_lazyvnodelistsize--; 3126 } 3127 mtx_unlock(&mp->mnt_listmtx); 3128 } 3129 3130 /* 3131 * This routine is only meant to be called from vgonel prior to dooming 3132 * the vnode. 3133 */ 3134 static void 3135 vunlazy_gone(struct vnode *vp) 3136 { 3137 struct mount *mp; 3138 3139 ASSERT_VOP_ELOCKED(vp, __func__); 3140 ASSERT_VI_LOCKED(vp, __func__); 3141 VNPASS(!VN_IS_DOOMED(vp), vp); 3142 3143 if (vp->v_mflag & VMP_LAZYLIST) { 3144 mp = vp->v_mount; 3145 mtx_lock(&mp->mnt_listmtx); 3146 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3147 vp->v_mflag &= ~VMP_LAZYLIST; 3148 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3149 mp->mnt_lazyvnodelistsize--; 3150 mtx_unlock(&mp->mnt_listmtx); 3151 } 3152 } 3153 3154 static void 3155 vdefer_inactive(struct vnode *vp) 3156 { 3157 3158 ASSERT_VI_LOCKED(vp, __func__); 3159 VNASSERT(vp->v_holdcnt > 0, vp, 3160 ("%s: vnode without hold count", __func__)); 3161 if (VN_IS_DOOMED(vp)) { 3162 vdropl(vp); 3163 return; 3164 } 3165 if (vp->v_iflag & VI_DEFINACT) { 3166 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3167 vdropl(vp); 3168 return; 3169 } 3170 if (vp->v_usecount > 0) { 3171 vp->v_iflag &= ~VI_OWEINACT; 3172 vdropl(vp); 3173 return; 3174 } 3175 vlazy(vp); 3176 vp->v_iflag |= VI_DEFINACT; 3177 VI_UNLOCK(vp); 3178 counter_u64_add(deferred_inact, 1); 3179 } 3180 3181 static void 3182 vdefer_inactive_unlocked(struct vnode *vp) 3183 { 3184 3185 VI_LOCK(vp); 3186 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3187 vdropl(vp); 3188 return; 3189 } 3190 vdefer_inactive(vp); 3191 } 3192 3193 enum vput_op { VRELE, VPUT, VUNREF }; 3194 3195 /* 3196 * Handle ->v_usecount transitioning to 0. 3197 * 3198 * By releasing the last usecount we take ownership of the hold count which 3199 * provides liveness of the vnode, meaning we have to vdrop. 3200 * 3201 * For all vnodes we may need to perform inactive processing. It requires an 3202 * exclusive lock on the vnode, while it is legal to call here with only a 3203 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3204 * inactive processing gets deferred to the syncer. 3205 * 3206 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3207 * on the lock being held all the way until VOP_INACTIVE. This in particular 3208 * happens with UFS which adds half-constructed vnodes to the hash, where they 3209 * can be found by other code. 
3210 */ 3211 static void 3212 vput_final(struct vnode *vp, enum vput_op func) 3213 { 3214 int error; 3215 bool want_unlock; 3216 3217 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3218 VNPASS(vp->v_holdcnt > 0, vp); 3219 3220 VI_LOCK(vp); 3221 3222 /* 3223 * By the time we got here someone else might have transitioned 3224 * the count back to > 0. 3225 */ 3226 if (vp->v_usecount > 0) 3227 goto out; 3228 3229 /* 3230 * If the vnode is doomed vgone already performed inactive processing 3231 * (if needed). 3232 */ 3233 if (VN_IS_DOOMED(vp)) 3234 goto out; 3235 3236 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3237 goto out; 3238 3239 if (vp->v_iflag & VI_DOINGINACT) 3240 goto out; 3241 3242 /* 3243 * Locking operations here will drop the interlock and possibly the 3244 * vnode lock, opening a window where the vnode can get doomed all the 3245 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3246 * perform inactive. 3247 */ 3248 vp->v_iflag |= VI_OWEINACT; 3249 want_unlock = false; 3250 error = 0; 3251 switch (func) { 3252 case VRELE: 3253 switch (VOP_ISLOCKED(vp)) { 3254 case LK_EXCLUSIVE: 3255 break; 3256 case LK_EXCLOTHER: 3257 case 0: 3258 want_unlock = true; 3259 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3260 VI_LOCK(vp); 3261 break; 3262 default: 3263 /* 3264 * The lock has at least one sharer, but we have no way 3265 * to conclude whether this is us. Play it safe and 3266 * defer processing. 3267 */ 3268 error = EAGAIN; 3269 break; 3270 } 3271 break; 3272 case VPUT: 3273 want_unlock = true; 3274 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3275 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3276 LK_NOWAIT); 3277 VI_LOCK(vp); 3278 } 3279 break; 3280 case VUNREF: 3281 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3282 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3283 VI_LOCK(vp); 3284 } 3285 break; 3286 } 3287 if (error == 0) { 3288 if (func == VUNREF) { 3289 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3290 ("recursive vunref")); 3291 vp->v_vflag |= VV_UNREF; 3292 } 3293 for (;;) { 3294 error = vinactive(vp); 3295 if (want_unlock) 3296 VOP_UNLOCK(vp); 3297 if (error != ERELOOKUP || !want_unlock) 3298 break; 3299 VOP_LOCK(vp, LK_EXCLUSIVE); 3300 } 3301 if (func == VUNREF) 3302 vp->v_vflag &= ~VV_UNREF; 3303 vdropl(vp); 3304 } else { 3305 vdefer_inactive(vp); 3306 } 3307 return; 3308 out: 3309 if (func == VPUT) 3310 VOP_UNLOCK(vp); 3311 vdropl(vp); 3312 } 3313 3314 /* 3315 * Decrement ->v_usecount for a vnode. 3316 * 3317 * Releasing the last use count requires additional processing, see vput_final 3318 * above for details. 3319 * 3320 * Comment above each variant denotes lock state on entry and exit. 
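 * As a quick illustration: a caller which locked the vnode and took a
 * use reference typically finishes with vput(vp), which unlocks and
 * releases in one go; vrele(vp) works regardless of the lock state
 * and leaves it unchanged; vunref(vp) releases the use count but
 * keeps the vnode locked for further work.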
3321 */ 3322 3323 /* 3324 * in: any 3325 * out: same as passed in 3326 */ 3327 void 3328 vrele(struct vnode *vp) 3329 { 3330 3331 ASSERT_VI_UNLOCKED(vp, __func__); 3332 if (!refcount_release(&vp->v_usecount)) 3333 return; 3334 vput_final(vp, VRELE); 3335 } 3336 3337 /* 3338 * in: locked 3339 * out: unlocked 3340 */ 3341 void 3342 vput(struct vnode *vp) 3343 { 3344 3345 ASSERT_VOP_LOCKED(vp, __func__); 3346 ASSERT_VI_UNLOCKED(vp, __func__); 3347 if (!refcount_release(&vp->v_usecount)) { 3348 VOP_UNLOCK(vp); 3349 return; 3350 } 3351 vput_final(vp, VPUT); 3352 } 3353 3354 /* 3355 * in: locked 3356 * out: locked 3357 */ 3358 void 3359 vunref(struct vnode *vp) 3360 { 3361 3362 ASSERT_VOP_LOCKED(vp, __func__); 3363 ASSERT_VI_UNLOCKED(vp, __func__); 3364 if (!refcount_release(&vp->v_usecount)) 3365 return; 3366 vput_final(vp, VUNREF); 3367 } 3368 3369 void 3370 vhold(struct vnode *vp) 3371 { 3372 int old; 3373 3374 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3375 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3376 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3377 ("%s: wrong hold count %d", __func__, old)); 3378 if (old == 0) 3379 vfs_freevnodes_dec(); 3380 } 3381 3382 void 3383 vholdnz(struct vnode *vp) 3384 { 3385 3386 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3387 #ifdef INVARIANTS 3388 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3389 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3390 ("%s: wrong hold count %d", __func__, old)); 3391 #else 3392 atomic_add_int(&vp->v_holdcnt, 1); 3393 #endif 3394 } 3395 3396 /* 3397 * Grab a hold count unless the vnode is freed. 3398 * 3399 * Only use this routine if vfs smr is the only protection you have against 3400 * freeing the vnode. 3401 * 3402 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3403 * is not set. After the flag is set the vnode becomes immutable to anyone but 3404 * the thread which managed to set the flag. 3405 * 3406 * It may be tempting to replace the loop with: 3407 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3408 * if (count & VHOLD_NO_SMR) { 3409 * backpedal and error out; 3410 * } 3411 * 3412 * However, while this is more performant, it hinders debugging by eliminating 3413 * the previously mentioned invariant. 3414 */ 3415 bool 3416 vhold_smr(struct vnode *vp) 3417 { 3418 int count; 3419 3420 VFS_SMR_ASSERT_ENTERED(); 3421 3422 count = atomic_load_int(&vp->v_holdcnt); 3423 for (;;) { 3424 if (count & VHOLD_NO_SMR) { 3425 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3426 ("non-zero hold count with flags %d\n", count)); 3427 return (false); 3428 } 3429 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3430 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3431 if (count == 0) 3432 vfs_freevnodes_dec(); 3433 return (true); 3434 } 3435 } 3436 } 3437 3438 /* 3439 * Hold a free vnode for recycling. 3440 * 3441 * Note: vnode_init references this comment. 3442 * 3443 * Attempts to recycle only need the global vnode list lock and have no use for 3444 * SMR. 3445 * 3446 * However, vnodes get inserted into the global list before they get fully 3447 * initialized and stay there until UMA decides to free the memory. This in 3448 * particular means the target can be found before it becomes usable and after 3449 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3450 * VHOLD_NO_SMR. 3451 * 3452 * Note: the vnode may gain more references after we transition the count 0->1. 
3453 */ 3454 static bool 3455 vhold_recycle_free(struct vnode *vp) 3456 { 3457 int count; 3458 3459 mtx_assert(&vnode_list_mtx, MA_OWNED); 3460 3461 count = atomic_load_int(&vp->v_holdcnt); 3462 for (;;) { 3463 if (count & VHOLD_NO_SMR) { 3464 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3465 ("non-zero hold count with flags %d\n", count)); 3466 return (false); 3467 } 3468 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3469 if (count > 0) { 3470 return (false); 3471 } 3472 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3473 vfs_freevnodes_dec(); 3474 return (true); 3475 } 3476 } 3477 } 3478 3479 static void __noinline 3480 vdbatch_process(struct vdbatch *vd) 3481 { 3482 struct vnode *vp; 3483 int i; 3484 3485 mtx_assert(&vd->lock, MA_OWNED); 3486 MPASS(curthread->td_pinned > 0); 3487 MPASS(vd->index == VDBATCH_SIZE); 3488 3489 mtx_lock(&vnode_list_mtx); 3490 critical_enter(); 3491 freevnodes += vd->freevnodes; 3492 for (i = 0; i < VDBATCH_SIZE; i++) { 3493 vp = vd->tab[i]; 3494 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3495 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3496 MPASS(vp->v_dbatchcpu != NOCPU); 3497 vp->v_dbatchcpu = NOCPU; 3498 } 3499 mtx_unlock(&vnode_list_mtx); 3500 vd->freevnodes = 0; 3501 bzero(vd->tab, sizeof(vd->tab)); 3502 vd->index = 0; 3503 critical_exit(); 3504 } 3505 3506 static void 3507 vdbatch_enqueue(struct vnode *vp) 3508 { 3509 struct vdbatch *vd; 3510 3511 ASSERT_VI_LOCKED(vp, __func__); 3512 VNASSERT(!VN_IS_DOOMED(vp), vp, 3513 ("%s: deferring requeue of a doomed vnode", __func__)); 3514 3515 if (vp->v_dbatchcpu != NOCPU) { 3516 VI_UNLOCK(vp); 3517 return; 3518 } 3519 3520 sched_pin(); 3521 vd = DPCPU_PTR(vd); 3522 mtx_lock(&vd->lock); 3523 MPASS(vd->index < VDBATCH_SIZE); 3524 MPASS(vd->tab[vd->index] == NULL); 3525 /* 3526 * A hack: we depend on being pinned so that we know what to put in 3527 * ->v_dbatchcpu. 3528 */ 3529 vp->v_dbatchcpu = curcpu; 3530 vd->tab[vd->index] = vp; 3531 vd->index++; 3532 VI_UNLOCK(vp); 3533 if (vd->index == VDBATCH_SIZE) 3534 vdbatch_process(vd); 3535 mtx_unlock(&vd->lock); 3536 sched_unpin(); 3537 } 3538 3539 /* 3540 * This routine must only be called for vnodes which are about to be 3541 * deallocated. Supporting dequeue for arbitrary vnodes would require 3542 * validating that the locked batch matches. 3543 */ 3544 static void 3545 vdbatch_dequeue(struct vnode *vp) 3546 { 3547 struct vdbatch *vd; 3548 int i; 3549 short cpu; 3550 3551 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3552 ("%s: called for a used vnode\n", __func__)); 3553 3554 cpu = vp->v_dbatchcpu; 3555 if (cpu == NOCPU) 3556 return; 3557 3558 vd = DPCPU_ID_PTR(cpu, vd); 3559 mtx_lock(&vd->lock); 3560 for (i = 0; i < vd->index; i++) { 3561 if (vd->tab[i] != vp) 3562 continue; 3563 vp->v_dbatchcpu = NOCPU; 3564 vd->index--; 3565 vd->tab[i] = vd->tab[vd->index]; 3566 vd->tab[vd->index] = NULL; 3567 break; 3568 } 3569 mtx_unlock(&vd->lock); 3570 /* 3571 * Either we dequeued the vnode above or the target CPU beat us to it. 3572 */ 3573 MPASS(vp->v_dbatchcpu == NOCPU); 3574 } 3575 3576 /* 3577 * Drop the hold count of the vnode. If this is the last reference to 3578 * the vnode we place it on the free list unless it has been vgone'd 3579 * (marked VIRF_DOOMED) in which case we will free it. 3580 * 3581 * Because the vnode vm object keeps a hold reference on the vnode if 3582 * there is at least one resident non-cached page, the vnode cannot 3583 * leave the active list without the page cleanup done.
3584 */ 3585 static void __noinline 3586 vdropl_final(struct vnode *vp) 3587 { 3588 3589 ASSERT_VI_LOCKED(vp, __func__); 3590 VNPASS(VN_IS_DOOMED(vp), vp); 3591 /* 3592 * Set the VHOLD_NO_SMR flag. 3593 * 3594 * We may be racing against vhold_smr. If they win we can just pretend 3595 * we never got this far, they will vdrop later. 3596 */ 3597 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3598 vfs_freevnodes_inc(); 3599 VI_UNLOCK(vp); 3600 /* 3601 * We lost the aforementioned race. Any subsequent access is 3602 * invalid as they might have managed to vdropl on their own. 3603 */ 3604 return; 3605 } 3606 /* 3607 * Don't bump freevnodes as this one is going away. 3608 */ 3609 freevnode(vp); 3610 } 3611 3612 void 3613 vdrop(struct vnode *vp) 3614 { 3615 3616 ASSERT_VI_UNLOCKED(vp, __func__); 3617 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3618 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3619 return; 3620 VI_LOCK(vp); 3621 vdropl(vp); 3622 } 3623 3624 static void __always_inline 3625 vdropl_impl(struct vnode *vp, bool enqueue) 3626 { 3627 3628 ASSERT_VI_LOCKED(vp, __func__); 3629 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3630 if (!refcount_release(&vp->v_holdcnt)) { 3631 VI_UNLOCK(vp); 3632 return; 3633 } 3634 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3635 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3636 if (VN_IS_DOOMED(vp)) { 3637 vdropl_final(vp); 3638 return; 3639 } 3640 3641 vfs_freevnodes_inc(); 3642 if (vp->v_mflag & VMP_LAZYLIST) { 3643 vunlazy(vp); 3644 } 3645 /* 3646 * Also unlocks the interlock. We can't assert on it as we 3647 * released our hold and by now the vnode might have been 3648 * freed. 3649 */ 3650 vdbatch_enqueue(vp); 3651 } 3652 3653 void 3654 vdropl(struct vnode *vp) 3655 { 3656 3657 vdropl_impl(vp, true); 3658 } 3659 3660 /* 3661 * vdrop a vnode when recycling 3662 * 3663 * This is a special case routine only to be used when recycling; it differs 3664 * from regular vdrop by not requeueing the vnode on the LRU. 3665 * 3666 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3667 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3668 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3669 * loop which can last for as long as writes are frozen. 3670 */ 3671 static void 3672 vdropl_recycle(struct vnode *vp) 3673 { 3674 3675 vdropl_impl(vp, false); 3676 } 3677 3678 static void 3679 vdrop_recycle(struct vnode *vp) 3680 { 3681 3682 VI_LOCK(vp); 3683 vdropl_recycle(vp); 3684 } 3685 3686 /* 3687 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3688 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3689 */ 3690 static int 3691 vinactivef(struct vnode *vp) 3692 { 3693 struct vm_object *obj; 3694 int error; 3695 3696 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3697 ASSERT_VI_LOCKED(vp, "vinactive"); 3698 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3699 ("vinactive: recursed on VI_DOINGINACT")); 3700 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3701 vp->v_iflag |= VI_DOINGINACT; 3702 vp->v_iflag &= ~VI_OWEINACT; 3703 VI_UNLOCK(vp); 3704 /* 3705 * Before moving off the active list, we must be sure that any 3706 * modified pages are converted into the vnode's dirty 3707 * buffers, since these will no longer be checked once the 3708 * vnode is on the inactive list. 3709 * 3710 * The write-out of the dirty pages is asynchronous.
At the 3711 * point that VOP_INACTIVE() is called, there could still be 3712 * pending I/O and dirty pages in the object. 3713 */ 3714 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3715 vm_object_mightbedirty(obj)) { 3716 VM_OBJECT_WLOCK(obj); 3717 vm_object_page_clean(obj, 0, 0, 0); 3718 VM_OBJECT_WUNLOCK(obj); 3719 } 3720 error = VOP_INACTIVE(vp); 3721 VI_LOCK(vp); 3722 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3723 ("vinactive: lost VI_DOINGINACT")); 3724 vp->v_iflag &= ~VI_DOINGINACT; 3725 return (error); 3726 } 3727 3728 int 3729 vinactive(struct vnode *vp) 3730 { 3731 3732 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3733 ASSERT_VI_LOCKED(vp, "vinactive"); 3734 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3735 3736 if ((vp->v_iflag & VI_OWEINACT) == 0) 3737 return (0); 3738 if (vp->v_iflag & VI_DOINGINACT) 3739 return (0); 3740 if (vp->v_usecount > 0) { 3741 vp->v_iflag &= ~VI_OWEINACT; 3742 return (0); 3743 } 3744 return (vinactivef(vp)); 3745 } 3746 3747 /* 3748 * Remove any vnodes in the vnode table belonging to mount point mp. 3749 * 3750 * If FORCECLOSE is not specified, there should not be any active ones, 3751 * return error if any are found (nb: this is a user error, not a 3752 * system error). If FORCECLOSE is specified, detach any active vnodes 3753 * that are found. 3754 * 3755 * If WRITECLOSE is set, only flush out regular file vnodes open for 3756 * writing. 3757 * 3758 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3759 * 3760 * `rootrefs' specifies the base reference count for the root vnode 3761 * of this filesystem. The root vnode is considered busy if its 3762 * v_usecount exceeds this value. On a successful return, vflush(, td) 3763 * will call vrele() on the root vnode exactly rootrefs times. 3764 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3765 * be zero. 3766 */ 3767 #ifdef DIAGNOSTIC 3768 static int busyprt = 0; /* print out busy vnodes */ 3769 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3770 #endif 3771 3772 int 3773 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3774 { 3775 struct vnode *vp, *mvp, *rootvp = NULL; 3776 struct vattr vattr; 3777 int busy = 0, error; 3778 3779 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3780 rootrefs, flags); 3781 if (rootrefs > 0) { 3782 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3783 ("vflush: bad args")); 3784 /* 3785 * Get the filesystem root vnode. We can vput() it 3786 * immediately, since with rootrefs > 0, it won't go away. 3787 */ 3788 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3789 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3790 __func__, error); 3791 return (error); 3792 } 3793 vput(rootvp); 3794 } 3795 loop: 3796 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3797 vholdl(vp); 3798 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3799 if (error) { 3800 vdrop(vp); 3801 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3802 goto loop; 3803 } 3804 /* 3805 * Skip over a vnodes marked VV_SYSTEM. 3806 */ 3807 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3808 VOP_UNLOCK(vp); 3809 vdrop(vp); 3810 continue; 3811 } 3812 /* 3813 * If WRITECLOSE is set, flush out unlinked but still open 3814 * files (even if open only for reading) and regular file 3815 * vnodes open for writing. 
3816 */ 3817 if (flags & WRITECLOSE) { 3818 if (vp->v_object != NULL) { 3819 VM_OBJECT_WLOCK(vp->v_object); 3820 vm_object_page_clean(vp->v_object, 0, 0, 0); 3821 VM_OBJECT_WUNLOCK(vp->v_object); 3822 } 3823 do { 3824 error = VOP_FSYNC(vp, MNT_WAIT, td); 3825 } while (error == ERELOOKUP); 3826 if (error != 0) { 3827 VOP_UNLOCK(vp); 3828 vdrop(vp); 3829 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3830 return (error); 3831 } 3832 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3833 VI_LOCK(vp); 3834 3835 if ((vp->v_type == VNON || 3836 (error == 0 && vattr.va_nlink > 0)) && 3837 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3838 VOP_UNLOCK(vp); 3839 vdropl(vp); 3840 continue; 3841 } 3842 } else 3843 VI_LOCK(vp); 3844 /* 3845 * With v_usecount == 0, all we need to do is clear out the 3846 * vnode data structures and we are done. 3847 * 3848 * If FORCECLOSE is set, forcibly close the vnode. 3849 */ 3850 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3851 vgonel(vp); 3852 } else { 3853 busy++; 3854 #ifdef DIAGNOSTIC 3855 if (busyprt) 3856 vn_printf(vp, "vflush: busy vnode "); 3857 #endif 3858 } 3859 VOP_UNLOCK(vp); 3860 vdropl(vp); 3861 } 3862 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3863 /* 3864 * If just the root vnode is busy, and if its refcount 3865 * is equal to `rootrefs', then go ahead and kill it. 3866 */ 3867 VI_LOCK(rootvp); 3868 KASSERT(busy > 0, ("vflush: not busy")); 3869 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3870 ("vflush: usecount %d < rootrefs %d", 3871 rootvp->v_usecount, rootrefs)); 3872 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3873 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3874 vgone(rootvp); 3875 VOP_UNLOCK(rootvp); 3876 busy = 0; 3877 } else 3878 VI_UNLOCK(rootvp); 3879 } 3880 if (busy) { 3881 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3882 busy); 3883 return (EBUSY); 3884 } 3885 for (; rootrefs > 0; rootrefs--) 3886 vrele(rootvp); 3887 return (0); 3888 } 3889 3890 /* 3891 * Recycle an unused vnode to the front of the free list. 3892 */ 3893 int 3894 vrecycle(struct vnode *vp) 3895 { 3896 int recycled; 3897 3898 VI_LOCK(vp); 3899 recycled = vrecyclel(vp); 3900 VI_UNLOCK(vp); 3901 return (recycled); 3902 } 3903 3904 /* 3905 * vrecycle, with the vp interlock held. 3906 */ 3907 int 3908 vrecyclel(struct vnode *vp) 3909 { 3910 int recycled; 3911 3912 ASSERT_VOP_ELOCKED(vp, __func__); 3913 ASSERT_VI_LOCKED(vp, __func__); 3914 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3915 recycled = 0; 3916 if (vp->v_usecount == 0) { 3917 recycled = 1; 3918 vgonel(vp); 3919 } 3920 return (recycled); 3921 } 3922 3923 /* 3924 * Eliminate all activity associated with a vnode 3925 * in preparation for reuse. 3926 */ 3927 void 3928 vgone(struct vnode *vp) 3929 { 3930 VI_LOCK(vp); 3931 vgonel(vp); 3932 VI_UNLOCK(vp); 3933 } 3934 3935 /* 3936 * Notify upper mounts about reclaimed or unlinked vnode. 
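 * Stacked filesystems (nullfs being the usual example) register on
 * the lower mount's mnt_notify list; VFS_RECLAIM_LOWERVP() and
 * VFS_UNLINK_LOWERVP() give them a chance to purge their aliases of
 * the lower vnode before it is reclaimed or after it has been
 * unlinked.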
3937 */ 3938 void 3939 vfs_notify_upper(struct vnode *vp, int event) 3940 { 3941 struct mount *mp; 3942 struct mount_upper_node *ump; 3943 3944 mp = atomic_load_ptr(&vp->v_mount); 3945 if (mp == NULL) 3946 return; 3947 if (TAILQ_EMPTY(&mp->mnt_notify)) 3948 return; 3949 3950 MNT_ILOCK(mp); 3951 mp->mnt_upper_pending++; 3952 KASSERT(mp->mnt_upper_pending > 0, 3953 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 3954 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 3955 MNT_IUNLOCK(mp); 3956 switch (event) { 3957 case VFS_NOTIFY_UPPER_RECLAIM: 3958 VFS_RECLAIM_LOWERVP(ump->mp, vp); 3959 break; 3960 case VFS_NOTIFY_UPPER_UNLINK: 3961 VFS_UNLINK_LOWERVP(ump->mp, vp); 3962 break; 3963 default: 3964 KASSERT(0, ("invalid event %d", event)); 3965 break; 3966 } 3967 MNT_ILOCK(mp); 3968 } 3969 mp->mnt_upper_pending--; 3970 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 3971 mp->mnt_upper_pending == 0) { 3972 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 3973 wakeup(&mp->mnt_uppers); 3974 } 3975 MNT_IUNLOCK(mp); 3976 } 3977 3978 /* 3979 * vgone, with the vp interlock held. 3980 */ 3981 static void 3982 vgonel(struct vnode *vp) 3983 { 3984 struct thread *td; 3985 struct mount *mp; 3986 vm_object_t object; 3987 bool active, doinginact, oweinact; 3988 3989 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3990 ASSERT_VI_LOCKED(vp, "vgonel"); 3991 VNASSERT(vp->v_holdcnt, vp, 3992 ("vgonel: vp %p has no reference.", vp)); 3993 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3994 td = curthread; 3995 3996 /* 3997 * Don't vgonel if we're already doomed. 3998 */ 3999 if (VN_IS_DOOMED(vp)) 4000 return; 4001 /* 4002 * Paired with freevnode. 4003 */ 4004 vn_seqc_write_begin_locked(vp); 4005 vunlazy_gone(vp); 4006 vn_irflag_set_locked(vp, VIRF_DOOMED); 4007 4008 /* 4009 * Check to see if the vnode is in use. If so, we have to 4010 * call VOP_CLOSE() and VOP_INACTIVE(). 4011 * 4012 * It could be that VOP_INACTIVE() requested reclamation, in 4013 * which case we should avoid recursion, so check 4014 * VI_DOINGINACT. This is not precise but good enough. 4015 */ 4016 active = vp->v_usecount > 0; 4017 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4018 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4019 4020 /* 4021 * If we need to do inactive VI_OWEINACT will be set. 4022 */ 4023 if (vp->v_iflag & VI_DEFINACT) { 4024 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4025 vp->v_iflag &= ~VI_DEFINACT; 4026 vdropl(vp); 4027 } else { 4028 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4029 VI_UNLOCK(vp); 4030 } 4031 cache_purge_vgone(vp); 4032 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4033 4034 /* 4035 * If purging an active vnode, it must be closed and 4036 * deactivated before being reclaimed. 4037 */ 4038 if (active) 4039 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4040 if (!doinginact) { 4041 do { 4042 if (oweinact || active) { 4043 VI_LOCK(vp); 4044 vinactivef(vp); 4045 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4046 VI_UNLOCK(vp); 4047 } 4048 } while (oweinact); 4049 } 4050 if (vp->v_type == VSOCK) 4051 vfs_unp_reclaim(vp); 4052 4053 /* 4054 * Clean out any buffers associated with the vnode. 4055 * If the flush fails, just toss the buffers. 
4056 */ 4057 mp = NULL; 4058 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4059 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4060 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4061 while (vinvalbuf(vp, 0, 0, 0) != 0) 4062 ; 4063 } 4064 4065 BO_LOCK(&vp->v_bufobj); 4066 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4067 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4068 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4069 vp->v_bufobj.bo_clean.bv_cnt == 0, 4070 ("vp %p bufobj not invalidated", vp)); 4071 4072 /* 4073 * For VMIO bufobj, BO_DEAD is set later, or in 4074 * vm_object_terminate() after the object's page queue is 4075 * flushed. 4076 */ 4077 object = vp->v_bufobj.bo_object; 4078 if (object == NULL) 4079 vp->v_bufobj.bo_flag |= BO_DEAD; 4080 BO_UNLOCK(&vp->v_bufobj); 4081 4082 /* 4083 * Handle the VM part. Tmpfs handles v_object on its own (the 4084 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4085 * should not touch the object borrowed from the lower vnode 4086 * (the handle check). 4087 */ 4088 if (object != NULL && object->type == OBJT_VNODE && 4089 object->handle == vp) 4090 vnode_destroy_vobject(vp); 4091 4092 /* 4093 * Reclaim the vnode. 4094 */ 4095 if (VOP_RECLAIM(vp)) 4096 panic("vgone: cannot reclaim"); 4097 if (mp != NULL) 4098 vn_finished_secondary_write(mp); 4099 VNASSERT(vp->v_object == NULL, vp, 4100 ("vop_reclaim left v_object vp=%p", vp)); 4101 /* 4102 * Clear the advisory locks and wake up waiting threads. 4103 */ 4104 (void)VOP_ADVLOCKPURGE(vp); 4105 vp->v_lockf = NULL; 4106 /* 4107 * Delete from old mount point vnode list. 4108 */ 4109 delmntque(vp); 4110 /* 4111 * Done with purge, reset to the standard lock and invalidate 4112 * the vnode. 4113 */ 4114 VI_LOCK(vp); 4115 vp->v_vnlock = &vp->v_lock; 4116 vp->v_op = &dead_vnodeops; 4117 vp->v_type = VBAD; 4118 } 4119 4120 /* 4121 * Print out a description of a vnode. 4122 */ 4123 static const char * const typename[] = 4124 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4125 "VMARKER"}; 4126 4127 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4128 "new hold count flag not added to vn_printf"); 4129 4130 void 4131 vn_printf(struct vnode *vp, const char *fmt, ...) 
4132 { 4133 va_list ap; 4134 char buf[256], buf2[16]; 4135 u_long flags; 4136 u_int holdcnt; 4137 short irflag; 4138 4139 va_start(ap, fmt); 4140 vprintf(fmt, ap); 4141 va_end(ap); 4142 printf("%p: ", (void *)vp); 4143 printf("type %s\n", typename[vp->v_type]); 4144 holdcnt = atomic_load_int(&vp->v_holdcnt); 4145 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4146 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4147 vp->v_seqc_users); 4148 switch (vp->v_type) { 4149 case VDIR: 4150 printf(" mountedhere %p\n", vp->v_mountedhere); 4151 break; 4152 case VCHR: 4153 printf(" rdev %p\n", vp->v_rdev); 4154 break; 4155 case VSOCK: 4156 printf(" socket %p\n", vp->v_unpcb); 4157 break; 4158 case VFIFO: 4159 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4160 break; 4161 default: 4162 printf("\n"); 4163 break; 4164 } 4165 buf[0] = '\0'; 4166 buf[1] = '\0'; 4167 if (holdcnt & VHOLD_NO_SMR) 4168 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4169 printf(" hold count flags (%s)\n", buf + 1); 4170 4171 buf[0] = '\0'; 4172 buf[1] = '\0'; 4173 irflag = vn_irflag_read(vp); 4174 if (irflag & VIRF_DOOMED) 4175 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4176 if (irflag & VIRF_PGREAD) 4177 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4178 if (irflag & VIRF_MOUNTPOINT) 4179 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4180 if (irflag & VIRF_TEXT_REF) 4181 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4182 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4183 if (flags != 0) { 4184 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4185 strlcat(buf, buf2, sizeof(buf)); 4186 } 4187 if (vp->v_vflag & VV_ROOT) 4188 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4189 if (vp->v_vflag & VV_ISTTY) 4190 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4191 if (vp->v_vflag & VV_NOSYNC) 4192 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4193 if (vp->v_vflag & VV_ETERNALDEV) 4194 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4195 if (vp->v_vflag & VV_CACHEDLABEL) 4196 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4197 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4198 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4199 if (vp->v_vflag & VV_COPYONWRITE) 4200 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4201 if (vp->v_vflag & VV_SYSTEM) 4202 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4203 if (vp->v_vflag & VV_PROCDEP) 4204 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4205 if (vp->v_vflag & VV_DELETED) 4206 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4207 if (vp->v_vflag & VV_MD) 4208 strlcat(buf, "|VV_MD", sizeof(buf)); 4209 if (vp->v_vflag & VV_FORCEINSMQ) 4210 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4211 if (vp->v_vflag & VV_READLINK) 4212 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4213 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4214 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4215 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4216 if (flags != 0) { 4217 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4218 strlcat(buf, buf2, sizeof(buf)); 4219 } 4220 if (vp->v_iflag & VI_MOUNT) 4221 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4222 if (vp->v_iflag & VI_DOINGINACT) 4223 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4224 if (vp->v_iflag & VI_OWEINACT) 4225 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4226 if (vp->v_iflag & VI_DEFINACT) 4227 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4228 if (vp->v_iflag & VI_FOPENING) 4229 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4230 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4231 VI_OWEINACT | 
VI_DEFINACT | VI_FOPENING); 4232 if (flags != 0) { 4233 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4234 strlcat(buf, buf2, sizeof(buf)); 4235 } 4236 if (vp->v_mflag & VMP_LAZYLIST) 4237 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4238 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4239 if (flags != 0) { 4240 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4241 strlcat(buf, buf2, sizeof(buf)); 4242 } 4243 printf(" flags (%s)", buf + 1); 4244 if (mtx_owned(VI_MTX(vp))) 4245 printf(" VI_LOCKed"); 4246 printf("\n"); 4247 if (vp->v_object != NULL) 4248 printf(" v_object %p ref %d pages %d " 4249 "cleanbuf %d dirtybuf %d\n", 4250 vp->v_object, vp->v_object->ref_count, 4251 vp->v_object->resident_page_count, 4252 vp->v_bufobj.bo_clean.bv_cnt, 4253 vp->v_bufobj.bo_dirty.bv_cnt); 4254 printf(" "); 4255 lockmgr_printinfo(vp->v_vnlock); 4256 if (vp->v_data != NULL) 4257 VOP_PRINT(vp); 4258 } 4259 4260 #ifdef DDB 4261 /* 4262 * List all of the locked vnodes in the system. 4263 * Called when debugging the kernel. 4264 */ 4265 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4266 { 4267 struct mount *mp; 4268 struct vnode *vp; 4269 4270 /* 4271 * Note: because this is DDB, we can't obey the locking semantics 4272 * for these structures, which means we could catch an inconsistent 4273 * state and dereference a nasty pointer. Not much to be done 4274 * about that. 4275 */ 4276 db_printf("Locked vnodes\n"); 4277 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4278 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4279 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4280 vn_printf(vp, "vnode "); 4281 } 4282 } 4283 } 4284 4285 /* 4286 * Show details about the given vnode. 4287 */ 4288 DB_SHOW_COMMAND(vnode, db_show_vnode) 4289 { 4290 struct vnode *vp; 4291 4292 if (!have_addr) 4293 return; 4294 vp = (struct vnode *)addr; 4295 vn_printf(vp, "vnode "); 4296 } 4297 4298 /* 4299 * Show details about the given mount point. 4300 */ 4301 DB_SHOW_COMMAND(mount, db_show_mount) 4302 { 4303 struct mount *mp; 4304 struct vfsopt *opt; 4305 struct statfs *sp; 4306 struct vnode *vp; 4307 char buf[512]; 4308 uint64_t mflags; 4309 u_int flags; 4310 4311 if (!have_addr) { 4312 /* No address given, print short info about all mount points. 
*/ 4313 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4314 db_printf("%p %s on %s (%s)\n", mp, 4315 mp->mnt_stat.f_mntfromname, 4316 mp->mnt_stat.f_mntonname, 4317 mp->mnt_stat.f_fstypename); 4318 if (db_pager_quit) 4319 break; 4320 } 4321 db_printf("\nMore info: show mount <addr>\n"); 4322 return; 4323 } 4324 4325 mp = (struct mount *)addr; 4326 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4327 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4328 4329 buf[0] = '\0'; 4330 mflags = mp->mnt_flag; 4331 #define MNT_FLAG(flag) do { \ 4332 if (mflags & (flag)) { \ 4333 if (buf[0] != '\0') \ 4334 strlcat(buf, ", ", sizeof(buf)); \ 4335 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4336 mflags &= ~(flag); \ 4337 } \ 4338 } while (0) 4339 MNT_FLAG(MNT_RDONLY); 4340 MNT_FLAG(MNT_SYNCHRONOUS); 4341 MNT_FLAG(MNT_NOEXEC); 4342 MNT_FLAG(MNT_NOSUID); 4343 MNT_FLAG(MNT_NFS4ACLS); 4344 MNT_FLAG(MNT_UNION); 4345 MNT_FLAG(MNT_ASYNC); 4346 MNT_FLAG(MNT_SUIDDIR); 4347 MNT_FLAG(MNT_SOFTDEP); 4348 MNT_FLAG(MNT_NOSYMFOLLOW); 4349 MNT_FLAG(MNT_GJOURNAL); 4350 MNT_FLAG(MNT_MULTILABEL); 4351 MNT_FLAG(MNT_ACLS); 4352 MNT_FLAG(MNT_NOATIME); 4353 MNT_FLAG(MNT_NOCLUSTERR); 4354 MNT_FLAG(MNT_NOCLUSTERW); 4355 MNT_FLAG(MNT_SUJ); 4356 MNT_FLAG(MNT_EXRDONLY); 4357 MNT_FLAG(MNT_EXPORTED); 4358 MNT_FLAG(MNT_DEFEXPORTED); 4359 MNT_FLAG(MNT_EXPORTANON); 4360 MNT_FLAG(MNT_EXKERB); 4361 MNT_FLAG(MNT_EXPUBLIC); 4362 MNT_FLAG(MNT_LOCAL); 4363 MNT_FLAG(MNT_QUOTA); 4364 MNT_FLAG(MNT_ROOTFS); 4365 MNT_FLAG(MNT_USER); 4366 MNT_FLAG(MNT_IGNORE); 4367 MNT_FLAG(MNT_UPDATE); 4368 MNT_FLAG(MNT_DELEXPORT); 4369 MNT_FLAG(MNT_RELOAD); 4370 MNT_FLAG(MNT_FORCE); 4371 MNT_FLAG(MNT_SNAPSHOT); 4372 MNT_FLAG(MNT_BYFSID); 4373 #undef MNT_FLAG 4374 if (mflags != 0) { 4375 if (buf[0] != '\0') 4376 strlcat(buf, ", ", sizeof(buf)); 4377 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4378 "0x%016jx", mflags); 4379 } 4380 db_printf(" mnt_flag = %s\n", buf); 4381 4382 buf[0] = '\0'; 4383 flags = mp->mnt_kern_flag; 4384 #define MNT_KERN_FLAG(flag) do { \ 4385 if (flags & (flag)) { \ 4386 if (buf[0] != '\0') \ 4387 strlcat(buf, ", ", sizeof(buf)); \ 4388 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4389 flags &= ~(flag); \ 4390 } \ 4391 } while (0) 4392 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4393 MNT_KERN_FLAG(MNTK_ASYNC); 4394 MNT_KERN_FLAG(MNTK_SOFTDEP); 4395 MNT_KERN_FLAG(MNTK_DRAINING); 4396 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4397 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4398 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4399 MNT_KERN_FLAG(MNTK_NO_IOPF); 4400 MNT_KERN_FLAG(MNTK_RECURSE); 4401 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4402 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4403 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4404 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4405 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4406 MNT_KERN_FLAG(MNTK_NOASYNC); 4407 MNT_KERN_FLAG(MNTK_UNMOUNT); 4408 MNT_KERN_FLAG(MNTK_MWAIT); 4409 MNT_KERN_FLAG(MNTK_SUSPEND); 4410 MNT_KERN_FLAG(MNTK_SUSPEND2); 4411 MNT_KERN_FLAG(MNTK_SUSPENDED); 4412 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4413 #undef MNT_KERN_FLAG 4414 if (flags != 0) { 4415 if (buf[0] != '\0') 4416 strlcat(buf, ", ", sizeof(buf)); 4417 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4418 "0x%08x", flags); 4419 } 4420 db_printf(" mnt_kern_flag = %s\n", buf); 4421 4422 db_printf(" mnt_opt = "); 4423 opt = TAILQ_FIRST(mp->mnt_opt); 4424 if (opt != NULL) { 4425 db_printf("%s", opt->name); 4426 opt = TAILQ_NEXT(opt, link); 4427 while (opt != NULL) { 4428 db_printf(", %s", opt->name); 4429 opt = TAILQ_NEXT(opt, link); 4430 } 4431 } 4432 db_printf("\n"); 
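/* What follows dumps the mount's cached statfs snapshot, credentials, and per-mount counters; as with the rest of this DDB command the fields are read without locking. */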
4433 4434 sp = &mp->mnt_stat; 4435 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4436 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4437 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4438 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4439 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4440 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4441 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4442 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4443 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4444 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4445 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4446 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4447 4448 db_printf(" mnt_cred = { uid=%u ruid=%u", 4449 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4450 if (jailed(mp->mnt_cred)) 4451 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4452 db_printf(" }\n"); 4453 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4454 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4455 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4456 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4457 db_printf(" mnt_lazyvnodelistsize = %d\n", 4458 mp->mnt_lazyvnodelistsize); 4459 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4460 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4461 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4462 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4463 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4464 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4465 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4466 db_printf(" mnt_secondary_accwrites = %d\n", 4467 mp->mnt_secondary_accwrites); 4468 db_printf(" mnt_gjprovider = %s\n", 4469 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4470 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4471 4472 db_printf("\n\nList of active vnodes\n"); 4473 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4474 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4475 vn_printf(vp, "vnode "); 4476 if (db_pager_quit) 4477 break; 4478 } 4479 } 4480 db_printf("\n\nList of inactive vnodes\n"); 4481 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4482 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4483 vn_printf(vp, "vnode "); 4484 if (db_pager_quit) 4485 break; 4486 } 4487 } 4488 } 4489 #endif /* DDB */ 4490 4491 /* 4492 * Fill in a struct xvfsconf based on a struct vfsconf. 4493 */ 4494 static int 4495 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4496 { 4497 struct xvfsconf xvfsp; 4498 4499 bzero(&xvfsp, sizeof(xvfsp)); 4500 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4501 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4502 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4503 xvfsp.vfc_flags = vfsp->vfc_flags; 4504 /* 4505 * These are unused in userland, we keep them 4506 * to not break binary compatibility. 
4507 */ 4508 xvfsp.vfc_vfsops = NULL; 4509 xvfsp.vfc_next = NULL; 4510 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4511 } 4512 4513 #ifdef COMPAT_FREEBSD32 4514 struct xvfsconf32 { 4515 uint32_t vfc_vfsops; 4516 char vfc_name[MFSNAMELEN]; 4517 int32_t vfc_typenum; 4518 int32_t vfc_refcount; 4519 int32_t vfc_flags; 4520 uint32_t vfc_next; 4521 }; 4522 4523 static int 4524 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4525 { 4526 struct xvfsconf32 xvfsp; 4527 4528 bzero(&xvfsp, sizeof(xvfsp)); 4529 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4530 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4531 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4532 xvfsp.vfc_flags = vfsp->vfc_flags; 4533 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4534 } 4535 #endif 4536 4537 /* 4538 * Top level filesystem related information gathering. 4539 */ 4540 static int 4541 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4542 { 4543 struct vfsconf *vfsp; 4544 int error; 4545 4546 error = 0; 4547 vfsconf_slock(); 4548 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4549 #ifdef COMPAT_FREEBSD32 4550 if (req->flags & SCTL_MASK32) 4551 error = vfsconf2x32(req, vfsp); 4552 else 4553 #endif 4554 error = vfsconf2x(req, vfsp); 4555 if (error) 4556 break; 4557 } 4558 vfsconf_sunlock(); 4559 return (error); 4560 } 4561 4562 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4563 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4564 "S,xvfsconf", "List of all configured filesystems"); 4565 4566 #ifndef BURN_BRIDGES 4567 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4568 4569 static int 4570 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4571 { 4572 int *name = (int *)arg1 - 1; /* XXX */ 4573 u_int namelen = arg2 + 1; /* XXX */ 4574 struct vfsconf *vfsp; 4575 4576 log(LOG_WARNING, "userland calling deprecated sysctl, " 4577 "please rebuild world\n"); 4578 4579 #if 1 || defined(COMPAT_PRELITE2) 4580 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4581 if (namelen == 1) 4582 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4583 #endif 4584 4585 switch (name[1]) { 4586 case VFS_MAXTYPENUM: 4587 if (namelen != 2) 4588 return (ENOTDIR); 4589 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4590 case VFS_CONF: 4591 if (namelen != 3) 4592 return (ENOTDIR); /* overloaded */ 4593 vfsconf_slock(); 4594 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4595 if (vfsp->vfc_typenum == name[2]) 4596 break; 4597 } 4598 vfsconf_sunlock(); 4599 if (vfsp == NULL) 4600 return (EOPNOTSUPP); 4601 #ifdef COMPAT_FREEBSD32 4602 if (req->flags & SCTL_MASK32) 4603 return (vfsconf2x32(req, vfsp)); 4604 else 4605 #endif 4606 return (vfsconf2x(req, vfsp)); 4607 } 4608 return (EOPNOTSUPP); 4609 } 4610 4611 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4612 CTLFLAG_MPSAFE, vfs_sysctl, 4613 "Generic filesystem"); 4614 4615 #if 1 || defined(COMPAT_PRELITE2) 4616 4617 static int 4618 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4619 { 4620 int error; 4621 struct vfsconf *vfsp; 4622 struct ovfsconf ovfs; 4623 4624 vfsconf_slock(); 4625 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4626 bzero(&ovfs, sizeof(ovfs)); 4627 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4628 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4629 ovfs.vfc_index = vfsp->vfc_typenum; 4630 ovfs.vfc_refcount = vfsp->vfc_refcount; 4631 ovfs.vfc_flags = vfsp->vfc_flags; 4632 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4633 if (error != 0) { 4634 vfsconf_sunlock(); 4635 return (error); 4636 } 4637 } 4638 vfsconf_sunlock(); 4639 return (0); 4640 } 4641 4642 #endif /* 1 || COMPAT_PRELITE2 */ 4643 #endif /* !BURN_BRIDGES */ 4644 4645 #define KINFO_VNODESLOP 10 4646 #ifdef notyet 4647 /* 4648 * Dump vnode list (via sysctl). 4649 */ 4650 /* ARGSUSED */ 4651 static int 4652 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4653 { 4654 struct xvnode *xvn; 4655 struct mount *mp; 4656 struct vnode *vp; 4657 int error, len, n; 4658 4659 /* 4660 * Stale numvnodes access is not fatal here. 4661 */ 4662 req->lock = 0; 4663 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4664 if (!req->oldptr) 4665 /* Make an estimate */ 4666 return (SYSCTL_OUT(req, 0, len)); 4667 4668 error = sysctl_wire_old_buffer(req, 0); 4669 if (error != 0) 4670 return (error); 4671 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4672 n = 0; 4673 mtx_lock(&mountlist_mtx); 4674 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4675 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4676 continue; 4677 MNT_ILOCK(mp); 4678 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4679 if (n == len) 4680 break; 4681 vref(vp); 4682 xvn[n].xv_size = sizeof *xvn; 4683 xvn[n].xv_vnode = vp; 4684 xvn[n].xv_id = 0; /* XXX compat */ 4685 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4686 XV_COPY(usecount); 4687 XV_COPY(writecount); 4688 XV_COPY(holdcnt); 4689 XV_COPY(mount); 4690 XV_COPY(numoutput); 4691 XV_COPY(type); 4692 #undef XV_COPY 4693 xvn[n].xv_flag = vp->v_vflag; 4694 4695 switch (vp->v_type) { 4696 case VREG: 4697 case VDIR: 4698 case VLNK: 4699 break; 4700 case VBLK: 4701 case VCHR: 4702 if (vp->v_rdev == NULL) { 4703 vrele(vp); 4704 continue; 4705 } 4706 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4707 break; 4708 case VSOCK: 4709 xvn[n].xv_socket = vp->v_socket; 4710 break; 4711 case VFIFO: 4712 xvn[n].xv_fifo = vp->v_fifoinfo; 4713 break; 4714 case VNON: 4715 case VBAD: 4716 default: 4717 /* shouldn't happen? 
*/ 4718 vrele(vp); 4719 continue; 4720 } 4721 vrele(vp); 4722 ++n; 4723 } 4724 MNT_IUNLOCK(mp); 4725 mtx_lock(&mountlist_mtx); 4726 vfs_unbusy(mp); 4727 if (n == len) 4728 break; 4729 } 4730 mtx_unlock(&mountlist_mtx); 4731 4732 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4733 free(xvn, M_TEMP); 4734 return (error); 4735 } 4736 4737 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4738 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4739 ""); 4740 #endif 4741 4742 static void 4743 unmount_or_warn(struct mount *mp) 4744 { 4745 int error; 4746 4747 error = dounmount(mp, MNT_FORCE, curthread); 4748 if (error != 0) { 4749 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4750 if (error == EBUSY) 4751 printf("BUSY)\n"); 4752 else 4753 printf("%d)\n", error); 4754 } 4755 } 4756 4757 /* 4758 * Unmount all filesystems. The list is traversed in reverse order 4759 * of mounting to avoid dependencies. 4760 */ 4761 void 4762 vfs_unmountall(void) 4763 { 4764 struct mount *mp, *tmp; 4765 4766 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4767 4768 /* 4769 * Since this only runs when rebooting, it is not interlocked. 4770 */ 4771 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4772 vfs_ref(mp); 4773 4774 /* 4775 * Forcibly unmounting "/dev" before "/" would prevent clean 4776 * unmount of the latter. 4777 */ 4778 if (mp == rootdevmp) 4779 continue; 4780 4781 unmount_or_warn(mp); 4782 } 4783 4784 if (rootdevmp != NULL) 4785 unmount_or_warn(rootdevmp); 4786 } 4787 4788 static void 4789 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4790 { 4791 4792 ASSERT_VI_LOCKED(vp, __func__); 4793 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4794 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4795 vdropl(vp); 4796 return; 4797 } 4798 if (vn_lock(vp, lkflags) == 0) { 4799 VI_LOCK(vp); 4800 vinactive(vp); 4801 VOP_UNLOCK(vp); 4802 vdropl(vp); 4803 return; 4804 } 4805 vdefer_inactive_unlocked(vp); 4806 } 4807 4808 static int 4809 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4810 { 4811 4812 return (vp->v_iflag & VI_DEFINACT); 4813 } 4814 4815 static void __noinline 4816 vfs_periodic_inactive(struct mount *mp, int flags) 4817 { 4818 struct vnode *vp, *mvp; 4819 int lkflags; 4820 4821 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4822 if (flags != MNT_WAIT) 4823 lkflags |= LK_NOWAIT; 4824 4825 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4826 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4827 VI_UNLOCK(vp); 4828 continue; 4829 } 4830 vp->v_iflag &= ~VI_DEFINACT; 4831 vfs_deferred_inactive(vp, lkflags); 4832 } 4833 } 4834 4835 static inline bool 4836 vfs_want_msync(struct vnode *vp) 4837 { 4838 struct vm_object *obj; 4839 4840 /* 4841 * This test may be performed without any locks held. 4842 * We rely on vm_object's type stability. 
4843 */ 4844 if (vp->v_vflag & VV_NOSYNC) 4845 return (false); 4846 obj = vp->v_object; 4847 return (obj != NULL && vm_object_mightbedirty(obj)); 4848 } 4849 4850 static int 4851 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4852 { 4853 4854 if (vp->v_vflag & VV_NOSYNC) 4855 return (false); 4856 if (vp->v_iflag & VI_DEFINACT) 4857 return (true); 4858 return (vfs_want_msync(vp)); 4859 } 4860 4861 static void __noinline 4862 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4863 { 4864 struct vnode *vp, *mvp; 4865 struct vm_object *obj; 4866 int lkflags, objflags; 4867 bool seen_defer; 4868 4869 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4870 if (flags != MNT_WAIT) { 4871 lkflags |= LK_NOWAIT; 4872 objflags = OBJPC_NOSYNC; 4873 } else { 4874 objflags = OBJPC_SYNC; 4875 } 4876 4877 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4878 seen_defer = false; 4879 if (vp->v_iflag & VI_DEFINACT) { 4880 vp->v_iflag &= ~VI_DEFINACT; 4881 seen_defer = true; 4882 } 4883 if (!vfs_want_msync(vp)) { 4884 if (seen_defer) 4885 vfs_deferred_inactive(vp, lkflags); 4886 else 4887 VI_UNLOCK(vp); 4888 continue; 4889 } 4890 if (vget(vp, lkflags) == 0) { 4891 obj = vp->v_object; 4892 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4893 VM_OBJECT_WLOCK(obj); 4894 vm_object_page_clean(obj, 0, 0, objflags); 4895 VM_OBJECT_WUNLOCK(obj); 4896 } 4897 vput(vp); 4898 if (seen_defer) 4899 vdrop(vp); 4900 } else { 4901 if (seen_defer) 4902 vdefer_inactive_unlocked(vp); 4903 } 4904 } 4905 } 4906 4907 void 4908 vfs_periodic(struct mount *mp, int flags) 4909 { 4910 4911 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4912 4913 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4914 vfs_periodic_inactive(mp, flags); 4915 else 4916 vfs_periodic_msync_inactive(mp, flags); 4917 } 4918 4919 static void 4920 destroy_vpollinfo_free(struct vpollinfo *vi) 4921 { 4922 4923 knlist_destroy(&vi->vpi_selinfo.si_note); 4924 mtx_destroy(&vi->vpi_lock); 4925 free(vi, M_VNODEPOLL); 4926 } 4927 4928 static void 4929 destroy_vpollinfo(struct vpollinfo *vi) 4930 { 4931 4932 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4933 seldrain(&vi->vpi_selinfo); 4934 destroy_vpollinfo_free(vi); 4935 } 4936 4937 /* 4938 * Initialize per-vnode helper structure to hold poll-related state. 4939 */ 4940 void 4941 v_addpollinfo(struct vnode *vp) 4942 { 4943 struct vpollinfo *vi; 4944 4945 if (vp->v_pollinfo != NULL) 4946 return; 4947 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4948 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4949 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4950 vfs_knlunlock, vfs_knl_assert_lock); 4951 VI_LOCK(vp); 4952 if (vp->v_pollinfo != NULL) { 4953 VI_UNLOCK(vp); 4954 destroy_vpollinfo_free(vi); 4955 return; 4956 } 4957 vp->v_pollinfo = vi; 4958 VI_UNLOCK(vp); 4959 } 4960 4961 /* 4962 * Record a process's interest in events which might happen to 4963 * a vnode. Because poll uses the historic select-style interface 4964 * internally, this routine serves as both the ``check for any 4965 * pending events'' and the ``record my interest in future events'' 4966 * functions. (These are done together, while the lock is held, 4967 * to avoid race conditions.) 
4968 */ 4969 int 4970 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4971 { 4972 4973 v_addpollinfo(vp); 4974 mtx_lock(&vp->v_pollinfo->vpi_lock); 4975 if (vp->v_pollinfo->vpi_revents & events) { 4976 /* 4977 * This leaves events we are not interested 4978 * in available for the other process which 4979 * which presumably had requested them 4980 * (otherwise they would never have been 4981 * recorded). 4982 */ 4983 events &= vp->v_pollinfo->vpi_revents; 4984 vp->v_pollinfo->vpi_revents &= ~events; 4985 4986 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4987 return (events); 4988 } 4989 vp->v_pollinfo->vpi_events |= events; 4990 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4991 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4992 return (0); 4993 } 4994 4995 /* 4996 * Routine to create and manage a filesystem syncer vnode. 4997 */ 4998 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4999 static int sync_fsync(struct vop_fsync_args *); 5000 static int sync_inactive(struct vop_inactive_args *); 5001 static int sync_reclaim(struct vop_reclaim_args *); 5002 5003 static struct vop_vector sync_vnodeops = { 5004 .vop_bypass = VOP_EOPNOTSUPP, 5005 .vop_close = sync_close, /* close */ 5006 .vop_fsync = sync_fsync, /* fsync */ 5007 .vop_inactive = sync_inactive, /* inactive */ 5008 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 5009 .vop_reclaim = sync_reclaim, /* reclaim */ 5010 .vop_lock1 = vop_stdlock, /* lock */ 5011 .vop_unlock = vop_stdunlock, /* unlock */ 5012 .vop_islocked = vop_stdislocked, /* islocked */ 5013 }; 5014 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5015 5016 /* 5017 * Create a new filesystem syncer vnode for the specified mount point. 5018 */ 5019 void 5020 vfs_allocate_syncvnode(struct mount *mp) 5021 { 5022 struct vnode *vp; 5023 struct bufobj *bo; 5024 static long start, incr, next; 5025 int error; 5026 5027 /* Allocate a new vnode */ 5028 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5029 if (error != 0) 5030 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5031 vp->v_type = VNON; 5032 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5033 vp->v_vflag |= VV_FORCEINSMQ; 5034 error = insmntque(vp, mp); 5035 if (error != 0) 5036 panic("vfs_allocate_syncvnode: insmntque() failed"); 5037 vp->v_vflag &= ~VV_FORCEINSMQ; 5038 VOP_UNLOCK(vp); 5039 /* 5040 * Place the vnode onto the syncer worklist. We attempt to 5041 * scatter them about on the list so that they will go off 5042 * at evenly distributed times even if all the filesystems 5043 * are mounted at once. 5044 */ 5045 next += incr; 5046 if (next == 0 || next > syncer_maxdelay) { 5047 start /= 2; 5048 incr /= 2; 5049 if (start == 0) { 5050 start = syncer_maxdelay / 2; 5051 incr = syncer_maxdelay; 5052 } 5053 next = start; 5054 } 5055 bo = &vp->v_bufobj; 5056 BO_LOCK(bo); 5057 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5058 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. 
*/
5059 mtx_lock(&sync_mtx);
5060 sync_vnode_count++;
5061 if (mp->mnt_syncer == NULL) {
5062 mp->mnt_syncer = vp;
5063 vp = NULL;
5064 }
5065 mtx_unlock(&sync_mtx);
5066 BO_UNLOCK(bo);
5067 if (vp != NULL) {
5068 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5069 vgone(vp);
5070 vput(vp);
5071 }
5072 }
5073
5074 void
5075 vfs_deallocate_syncvnode(struct mount *mp)
5076 {
5077 struct vnode *vp;
5078
5079 mtx_lock(&sync_mtx);
5080 vp = mp->mnt_syncer;
5081 if (vp != NULL)
5082 mp->mnt_syncer = NULL;
5083 mtx_unlock(&sync_mtx);
5084 if (vp != NULL)
5085 vrele(vp);
5086 }
5087
5088 /*
5089 * Do a lazy sync of the filesystem.
5090 */
5091 static int
5092 sync_fsync(struct vop_fsync_args *ap)
5093 {
5094 struct vnode *syncvp = ap->a_vp;
5095 struct mount *mp = syncvp->v_mount;
5096 int error, save;
5097 struct bufobj *bo;
5098
5099 /*
5100 * We only need to do something if this is a lazy evaluation.
5101 */
5102 if (ap->a_waitfor != MNT_LAZY)
5103 return (0);
5104
5105 /*
5106 * Move ourselves to the back of the sync list.
5107 */
5108 bo = &syncvp->v_bufobj;
5109 BO_LOCK(bo);
5110 vn_syncer_add_to_worklist(bo, syncdelay);
5111 BO_UNLOCK(bo);
5112
5113 /*
5114 * Walk the list of vnodes pushing all that are dirty and
5115 * not already on the sync list.
5116 */
5117 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5118 return (0);
5119 VOP_UNLOCK(syncvp);
5120 save = curthread_pflags_set(TDP_SYNCIO);
5121 /*
5122 * The filesystem at hand may be idle with free vnodes stored in the
5123 * batch. Return them instead of letting them stay there indefinitely.
5124 */
5125 vfs_periodic(mp, MNT_NOWAIT);
5126 error = VFS_SYNC(mp, MNT_LAZY);
5127 curthread_pflags_restore(save);
5128 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5129 vfs_unbusy(mp);
5130 return (error);
5131 }
5132
5133 /*
5134 * The syncer vnode is no longer referenced.
5135 */
5136 static int
5137 sync_inactive(struct vop_inactive_args *ap)
5138 {
5139
5140 vgone(ap->a_vp);
5141 return (0);
5142 }
5143
5144 /*
5145 * The syncer vnode is no longer needed and is being decommissioned.
5146 *
5147 * Modifications to the worklist must be protected by sync_mtx.
5148 */ 5149 static int 5150 sync_reclaim(struct vop_reclaim_args *ap) 5151 { 5152 struct vnode *vp = ap->a_vp; 5153 struct bufobj *bo; 5154 5155 bo = &vp->v_bufobj; 5156 BO_LOCK(bo); 5157 mtx_lock(&sync_mtx); 5158 if (vp->v_mount->mnt_syncer == vp) 5159 vp->v_mount->mnt_syncer = NULL; 5160 if (bo->bo_flag & BO_ONWORKLST) { 5161 LIST_REMOVE(bo, bo_synclist); 5162 syncer_worklist_len--; 5163 sync_vnode_count--; 5164 bo->bo_flag &= ~BO_ONWORKLST; 5165 } 5166 mtx_unlock(&sync_mtx); 5167 BO_UNLOCK(bo); 5168 5169 return (0); 5170 } 5171 5172 int 5173 vn_need_pageq_flush(struct vnode *vp) 5174 { 5175 struct vm_object *obj; 5176 5177 obj = vp->v_object; 5178 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5179 vm_object_mightbedirty(obj)); 5180 } 5181 5182 /* 5183 * Check if vnode represents a disk device 5184 */ 5185 bool 5186 vn_isdisk_error(struct vnode *vp, int *errp) 5187 { 5188 int error; 5189 5190 if (vp->v_type != VCHR) { 5191 error = ENOTBLK; 5192 goto out; 5193 } 5194 error = 0; 5195 dev_lock(); 5196 if (vp->v_rdev == NULL) 5197 error = ENXIO; 5198 else if (vp->v_rdev->si_devsw == NULL) 5199 error = ENXIO; 5200 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5201 error = ENOTBLK; 5202 dev_unlock(); 5203 out: 5204 *errp = error; 5205 return (error == 0); 5206 } 5207 5208 bool 5209 vn_isdisk(struct vnode *vp) 5210 { 5211 int error; 5212 5213 return (vn_isdisk_error(vp, &error)); 5214 } 5215 5216 /* 5217 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5218 * the comment above cache_fplookup for details. 5219 */ 5220 int 5221 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5222 { 5223 int error; 5224 5225 VFS_SMR_ASSERT_ENTERED(); 5226 5227 /* Check the owner. */ 5228 if (cred->cr_uid == file_uid) { 5229 if (file_mode & S_IXUSR) 5230 return (0); 5231 goto out_error; 5232 } 5233 5234 /* Otherwise, check the groups (first match) */ 5235 if (groupmember(file_gid, cred)) { 5236 if (file_mode & S_IXGRP) 5237 return (0); 5238 goto out_error; 5239 } 5240 5241 /* Otherwise, check everyone else. */ 5242 if (file_mode & S_IXOTH) 5243 return (0); 5244 out_error: 5245 /* 5246 * Permission check failed, but it is possible denial will get overwritten 5247 * (e.g., when root is traversing through a 700 directory owned by someone 5248 * else). 5249 * 5250 * vaccess() calls priv_check_cred which in turn can descent into MAC 5251 * modules overriding this result. It's quite unclear what semantics 5252 * are allowed for them to operate, thus for safety we don't call them 5253 * from within the SMR section. This also means if any such modules 5254 * are present, we have to let the regular lookup decide. 5255 */ 5256 error = priv_check_cred_vfs_lookup_nomac(cred); 5257 switch (error) { 5258 case 0: 5259 return (0); 5260 case EAGAIN: 5261 /* 5262 * MAC modules present. 5263 */ 5264 return (EAGAIN); 5265 case EPERM: 5266 return (EACCES); 5267 default: 5268 return (error); 5269 } 5270 } 5271 5272 /* 5273 * Common filesystem object access control check routine. Accepts a 5274 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5275 * Returns 0 on success, or an errno on failure. 
5276 */ 5277 int 5278 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5279 accmode_t accmode, struct ucred *cred) 5280 { 5281 accmode_t dac_granted; 5282 accmode_t priv_granted; 5283 5284 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5285 ("invalid bit in accmode")); 5286 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5287 ("VAPPEND without VWRITE")); 5288 5289 /* 5290 * Look for a normal, non-privileged way to access the file/directory 5291 * as requested. If it exists, go with that. 5292 */ 5293 5294 dac_granted = 0; 5295 5296 /* Check the owner. */ 5297 if (cred->cr_uid == file_uid) { 5298 dac_granted |= VADMIN; 5299 if (file_mode & S_IXUSR) 5300 dac_granted |= VEXEC; 5301 if (file_mode & S_IRUSR) 5302 dac_granted |= VREAD; 5303 if (file_mode & S_IWUSR) 5304 dac_granted |= (VWRITE | VAPPEND); 5305 5306 if ((accmode & dac_granted) == accmode) 5307 return (0); 5308 5309 goto privcheck; 5310 } 5311 5312 /* Otherwise, check the groups (first match) */ 5313 if (groupmember(file_gid, cred)) { 5314 if (file_mode & S_IXGRP) 5315 dac_granted |= VEXEC; 5316 if (file_mode & S_IRGRP) 5317 dac_granted |= VREAD; 5318 if (file_mode & S_IWGRP) 5319 dac_granted |= (VWRITE | VAPPEND); 5320 5321 if ((accmode & dac_granted) == accmode) 5322 return (0); 5323 5324 goto privcheck; 5325 } 5326 5327 /* Otherwise, check everyone else. */ 5328 if (file_mode & S_IXOTH) 5329 dac_granted |= VEXEC; 5330 if (file_mode & S_IROTH) 5331 dac_granted |= VREAD; 5332 if (file_mode & S_IWOTH) 5333 dac_granted |= (VWRITE | VAPPEND); 5334 if ((accmode & dac_granted) == accmode) 5335 return (0); 5336 5337 privcheck: 5338 /* 5339 * Build a privilege mask to determine if the set of privileges 5340 * satisfies the requirements when combined with the granted mask 5341 * from above. For each privilege, if the privilege is required, 5342 * bitwise or the request type onto the priv_granted mask. 5343 */ 5344 priv_granted = 0; 5345 5346 if (type == VDIR) { 5347 /* 5348 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5349 * requests, instead of PRIV_VFS_EXEC. 5350 */ 5351 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5352 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5353 priv_granted |= VEXEC; 5354 } else { 5355 /* 5356 * Ensure that at least one execute bit is on. Otherwise, 5357 * a privileged user will always succeed, and we don't want 5358 * this to happen unless the file really is executable. 5359 */ 5360 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5361 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5362 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5363 priv_granted |= VEXEC; 5364 } 5365 5366 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5367 !priv_check_cred(cred, PRIV_VFS_READ)) 5368 priv_granted |= VREAD; 5369 5370 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5371 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5372 priv_granted |= (VWRITE | VAPPEND); 5373 5374 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5375 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5376 priv_granted |= VADMIN; 5377 5378 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5379 return (0); 5380 } 5381 5382 return ((accmode & VADMIN) ? EPERM : EACCES); 5383 } 5384 5385 /* 5386 * Credential check based on process requesting service, and per-attribute 5387 * permissions. 
5388 */ 5389 int 5390 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5391 struct thread *td, accmode_t accmode) 5392 { 5393 5394 /* 5395 * Kernel-invoked always succeeds. 5396 */ 5397 if (cred == NOCRED) 5398 return (0); 5399 5400 /* 5401 * Do not allow privileged processes in jail to directly manipulate 5402 * system attributes. 5403 */ 5404 switch (attrnamespace) { 5405 case EXTATTR_NAMESPACE_SYSTEM: 5406 /* Potentially should be: return (EPERM); */ 5407 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5408 case EXTATTR_NAMESPACE_USER: 5409 return (VOP_ACCESS(vp, accmode, cred, td)); 5410 default: 5411 return (EPERM); 5412 } 5413 } 5414 5415 #ifdef DEBUG_VFS_LOCKS 5416 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5417 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5418 "Drop into debugger on lock violation"); 5419 5420 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5421 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5422 0, "Check for interlock across VOPs"); 5423 5424 int vfs_badlock_print = 1; /* Print lock violations. */ 5425 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5426 0, "Print lock violations"); 5427 5428 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5429 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5430 0, "Print vnode details on lock violations"); 5431 5432 #ifdef KDB 5433 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5434 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5435 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5436 #endif 5437 5438 static void 5439 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5440 { 5441 5442 #ifdef KDB 5443 if (vfs_badlock_backtrace) 5444 kdb_backtrace(); 5445 #endif 5446 if (vfs_badlock_vnode) 5447 vn_printf(vp, "vnode "); 5448 if (vfs_badlock_print) 5449 printf("%s: %p %s\n", str, (void *)vp, msg); 5450 if (vfs_badlock_ddb) 5451 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5452 } 5453 5454 void 5455 assert_vi_locked(struct vnode *vp, const char *str) 5456 { 5457 5458 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5459 vfs_badlock("interlock is not locked but should be", str, vp); 5460 } 5461 5462 void 5463 assert_vi_unlocked(struct vnode *vp, const char *str) 5464 { 5465 5466 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5467 vfs_badlock("interlock is locked but should not be", str, vp); 5468 } 5469 5470 void 5471 assert_vop_locked(struct vnode *vp, const char *str) 5472 { 5473 int locked; 5474 5475 if (KERNEL_PANICKED() || vp == NULL) 5476 return; 5477 5478 locked = VOP_ISLOCKED(vp); 5479 if (locked == 0 || locked == LK_EXCLOTHER) 5480 vfs_badlock("is not locked but should be", str, vp); 5481 } 5482 5483 void 5484 assert_vop_unlocked(struct vnode *vp, const char *str) 5485 { 5486 if (KERNEL_PANICKED() || vp == NULL) 5487 return; 5488 5489 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5490 vfs_badlock("is locked but should not be", str, vp); 5491 } 5492 5493 void 5494 assert_vop_elocked(struct vnode *vp, const char *str) 5495 { 5496 if (KERNEL_PANICKED() || vp == NULL) 5497 return; 5498 5499 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5500 vfs_badlock("is not exclusive locked but should be", str, vp); 5501 } 5502 #endif /* DEBUG_VFS_LOCKS */ 5503 5504 void 5505 vop_rename_fail(struct vop_rename_args *ap) 5506 { 5507 5508 if (ap->a_tvp != 
NULL) 5509 vput(ap->a_tvp); 5510 if (ap->a_tdvp == ap->a_tvp) 5511 vrele(ap->a_tdvp); 5512 else 5513 vput(ap->a_tdvp); 5514 vrele(ap->a_fdvp); 5515 vrele(ap->a_fvp); 5516 } 5517 5518 void 5519 vop_rename_pre(void *ap) 5520 { 5521 struct vop_rename_args *a = ap; 5522 5523 #ifdef DEBUG_VFS_LOCKS 5524 if (a->a_tvp) 5525 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5526 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5527 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5528 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5529 5530 /* Check the source (from). */ 5531 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5532 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5533 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5534 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5535 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5536 5537 /* Check the target. */ 5538 if (a->a_tvp) 5539 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5540 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5541 #endif 5542 /* 5543 * It may be tempting to add vn_seqc_write_begin/end calls here and 5544 * in vop_rename_post but that's not going to work out since some 5545 * filesystems relookup vnodes mid-rename. This is probably a bug. 5546 * 5547 * For now filesystems are expected to do the relevant calls after they 5548 * decide what vnodes to operate on. 5549 */ 5550 if (a->a_tdvp != a->a_fdvp) 5551 vhold(a->a_fdvp); 5552 if (a->a_tvp != a->a_fvp) 5553 vhold(a->a_fvp); 5554 vhold(a->a_tdvp); 5555 if (a->a_tvp) 5556 vhold(a->a_tvp); 5557 } 5558 5559 #ifdef DEBUG_VFS_LOCKS 5560 void 5561 vop_fplookup_vexec_debugpre(void *ap __unused) 5562 { 5563 5564 VFS_SMR_ASSERT_ENTERED(); 5565 } 5566 5567 void 5568 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5569 { 5570 5571 VFS_SMR_ASSERT_ENTERED(); 5572 } 5573 5574 void 5575 vop_fplookup_symlink_debugpre(void *ap __unused) 5576 { 5577 5578 VFS_SMR_ASSERT_ENTERED(); 5579 } 5580 5581 void 5582 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5583 { 5584 5585 VFS_SMR_ASSERT_ENTERED(); 5586 } 5587 5588 static void 5589 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5590 { 5591 if (vp->v_type == VCHR) 5592 ; 5593 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5594 ASSERT_VOP_LOCKED(vp, name); 5595 else 5596 ASSERT_VOP_ELOCKED(vp, name); 5597 } 5598 5599 void 5600 vop_fsync_debugpre(void *a) 5601 { 5602 struct vop_fsync_args *ap; 5603 5604 ap = a; 5605 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5606 } 5607 5608 void 5609 vop_fsync_debugpost(void *a, int rc __unused) 5610 { 5611 struct vop_fsync_args *ap; 5612 5613 ap = a; 5614 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5615 } 5616 5617 void 5618 vop_fdatasync_debugpre(void *a) 5619 { 5620 struct vop_fdatasync_args *ap; 5621 5622 ap = a; 5623 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5624 } 5625 5626 void 5627 vop_fdatasync_debugpost(void *a, int rc __unused) 5628 { 5629 struct vop_fdatasync_args *ap; 5630 5631 ap = a; 5632 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5633 } 5634 5635 void 5636 vop_strategy_debugpre(void *ap) 5637 { 5638 struct vop_strategy_args *a; 5639 struct buf *bp; 5640 5641 a = ap; 5642 bp = a->a_bp; 5643 5644 /* 5645 * Cluster ops lock their component buffers but not the IO container. 
5646 */ 5647 if ((bp->b_flags & B_CLUSTER) != 0) 5648 return; 5649 5650 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5651 if (vfs_badlock_print) 5652 printf( 5653 "VOP_STRATEGY: bp is not locked but should be\n"); 5654 if (vfs_badlock_ddb) 5655 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5656 } 5657 } 5658 5659 void 5660 vop_lock_debugpre(void *ap) 5661 { 5662 struct vop_lock1_args *a = ap; 5663 5664 if ((a->a_flags & LK_INTERLOCK) == 0) 5665 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5666 else 5667 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5668 } 5669 5670 void 5671 vop_lock_debugpost(void *ap, int rc) 5672 { 5673 struct vop_lock1_args *a = ap; 5674 5675 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5676 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5677 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5678 } 5679 5680 void 5681 vop_unlock_debugpre(void *ap) 5682 { 5683 struct vop_unlock_args *a = ap; 5684 5685 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5686 } 5687 5688 void 5689 vop_need_inactive_debugpre(void *ap) 5690 { 5691 struct vop_need_inactive_args *a = ap; 5692 5693 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5694 } 5695 5696 void 5697 vop_need_inactive_debugpost(void *ap, int rc) 5698 { 5699 struct vop_need_inactive_args *a = ap; 5700 5701 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5702 } 5703 #endif 5704 5705 void 5706 vop_create_pre(void *ap) 5707 { 5708 struct vop_create_args *a; 5709 struct vnode *dvp; 5710 5711 a = ap; 5712 dvp = a->a_dvp; 5713 vn_seqc_write_begin(dvp); 5714 } 5715 5716 void 5717 vop_create_post(void *ap, int rc) 5718 { 5719 struct vop_create_args *a; 5720 struct vnode *dvp; 5721 5722 a = ap; 5723 dvp = a->a_dvp; 5724 vn_seqc_write_end(dvp); 5725 if (!rc) 5726 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5727 } 5728 5729 void 5730 vop_whiteout_pre(void *ap) 5731 { 5732 struct vop_whiteout_args *a; 5733 struct vnode *dvp; 5734 5735 a = ap; 5736 dvp = a->a_dvp; 5737 vn_seqc_write_begin(dvp); 5738 } 5739 5740 void 5741 vop_whiteout_post(void *ap, int rc) 5742 { 5743 struct vop_whiteout_args *a; 5744 struct vnode *dvp; 5745 5746 a = ap; 5747 dvp = a->a_dvp; 5748 vn_seqc_write_end(dvp); 5749 } 5750 5751 void 5752 vop_deleteextattr_pre(void *ap) 5753 { 5754 struct vop_deleteextattr_args *a; 5755 struct vnode *vp; 5756 5757 a = ap; 5758 vp = a->a_vp; 5759 vn_seqc_write_begin(vp); 5760 } 5761 5762 void 5763 vop_deleteextattr_post(void *ap, int rc) 5764 { 5765 struct vop_deleteextattr_args *a; 5766 struct vnode *vp; 5767 5768 a = ap; 5769 vp = a->a_vp; 5770 vn_seqc_write_end(vp); 5771 if (!rc) 5772 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5773 } 5774 5775 void 5776 vop_link_pre(void *ap) 5777 { 5778 struct vop_link_args *a; 5779 struct vnode *vp, *tdvp; 5780 5781 a = ap; 5782 vp = a->a_vp; 5783 tdvp = a->a_tdvp; 5784 vn_seqc_write_begin(vp); 5785 vn_seqc_write_begin(tdvp); 5786 } 5787 5788 void 5789 vop_link_post(void *ap, int rc) 5790 { 5791 struct vop_link_args *a; 5792 struct vnode *vp, *tdvp; 5793 5794 a = ap; 5795 vp = a->a_vp; 5796 tdvp = a->a_tdvp; 5797 vn_seqc_write_end(vp); 5798 vn_seqc_write_end(tdvp); 5799 if (!rc) { 5800 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5801 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5802 } 5803 } 5804 5805 void 5806 vop_mkdir_pre(void *ap) 5807 { 5808 struct vop_mkdir_args *a; 5809 struct vnode *dvp; 5810 5811 a = ap; 5812 dvp = a->a_dvp; 5813 vn_seqc_write_begin(dvp); 5814 } 5815 5816 void 5817 vop_mkdir_post(void *ap, int rc) 5818 { 5819 struct vop_mkdir_args *a; 5820 struct vnode *dvp; 5821 5822 a = ap; 5823 dvp = a->a_dvp; 5824 vn_seqc_write_end(dvp); 
5825 if (!rc) 5826 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5827 } 5828 5829 #ifdef DEBUG_VFS_LOCKS 5830 void 5831 vop_mkdir_debugpost(void *ap, int rc) 5832 { 5833 struct vop_mkdir_args *a; 5834 5835 a = ap; 5836 if (!rc) 5837 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5838 } 5839 #endif 5840 5841 void 5842 vop_mknod_pre(void *ap) 5843 { 5844 struct vop_mknod_args *a; 5845 struct vnode *dvp; 5846 5847 a = ap; 5848 dvp = a->a_dvp; 5849 vn_seqc_write_begin(dvp); 5850 } 5851 5852 void 5853 vop_mknod_post(void *ap, int rc) 5854 { 5855 struct vop_mknod_args *a; 5856 struct vnode *dvp; 5857 5858 a = ap; 5859 dvp = a->a_dvp; 5860 vn_seqc_write_end(dvp); 5861 if (!rc) 5862 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5863 } 5864 5865 void 5866 vop_reclaim_post(void *ap, int rc) 5867 { 5868 struct vop_reclaim_args *a; 5869 struct vnode *vp; 5870 5871 a = ap; 5872 vp = a->a_vp; 5873 ASSERT_VOP_IN_SEQC(vp); 5874 if (!rc) 5875 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5876 } 5877 5878 void 5879 vop_remove_pre(void *ap) 5880 { 5881 struct vop_remove_args *a; 5882 struct vnode *dvp, *vp; 5883 5884 a = ap; 5885 dvp = a->a_dvp; 5886 vp = a->a_vp; 5887 vn_seqc_write_begin(dvp); 5888 vn_seqc_write_begin(vp); 5889 } 5890 5891 void 5892 vop_remove_post(void *ap, int rc) 5893 { 5894 struct vop_remove_args *a; 5895 struct vnode *dvp, *vp; 5896 5897 a = ap; 5898 dvp = a->a_dvp; 5899 vp = a->a_vp; 5900 vn_seqc_write_end(dvp); 5901 vn_seqc_write_end(vp); 5902 if (!rc) { 5903 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5904 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5905 } 5906 } 5907 5908 void 5909 vop_rename_post(void *ap, int rc) 5910 { 5911 struct vop_rename_args *a = ap; 5912 long hint; 5913 5914 if (!rc) { 5915 hint = NOTE_WRITE; 5916 if (a->a_fdvp == a->a_tdvp) { 5917 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5918 hint |= NOTE_LINK; 5919 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5920 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5921 } else { 5922 hint |= NOTE_EXTEND; 5923 if (a->a_fvp->v_type == VDIR) 5924 hint |= NOTE_LINK; 5925 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5926 5927 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5928 a->a_tvp->v_type == VDIR) 5929 hint &= ~NOTE_LINK; 5930 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5931 } 5932 5933 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5934 if (a->a_tvp) 5935 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5936 } 5937 if (a->a_tdvp != a->a_fdvp) 5938 vdrop(a->a_fdvp); 5939 if (a->a_tvp != a->a_fvp) 5940 vdrop(a->a_fvp); 5941 vdrop(a->a_tdvp); 5942 if (a->a_tvp) 5943 vdrop(a->a_tvp); 5944 } 5945 5946 void 5947 vop_rmdir_pre(void *ap) 5948 { 5949 struct vop_rmdir_args *a; 5950 struct vnode *dvp, *vp; 5951 5952 a = ap; 5953 dvp = a->a_dvp; 5954 vp = a->a_vp; 5955 vn_seqc_write_begin(dvp); 5956 vn_seqc_write_begin(vp); 5957 } 5958 5959 void 5960 vop_rmdir_post(void *ap, int rc) 5961 { 5962 struct vop_rmdir_args *a; 5963 struct vnode *dvp, *vp; 5964 5965 a = ap; 5966 dvp = a->a_dvp; 5967 vp = a->a_vp; 5968 vn_seqc_write_end(dvp); 5969 vn_seqc_write_end(vp); 5970 if (!rc) { 5971 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5972 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5973 } 5974 } 5975 5976 void 5977 vop_setattr_pre(void *ap) 5978 { 5979 struct vop_setattr_args *a; 5980 struct vnode *vp; 5981 5982 a = ap; 5983 vp = a->a_vp; 5984 vn_seqc_write_begin(vp); 5985 } 5986 5987 void 5988 vop_setattr_post(void *ap, int rc) 5989 { 5990 struct vop_setattr_args *a; 5991 struct vnode *vp; 5992 5993 a = ap; 5994 vp = a->a_vp; 5995 vn_seqc_write_end(vp); 5996 if (!rc) 5997 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5998 } 
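/*
 * Illustrative sketch (an annotation, not part of the original source):
 * the vop_*_pre/post pairs above are run by the VOP dispatch wrappers
 * (see vnode_if) around the filesystem's method, so a VOP_SETATTR()
 * call conceptually executes:
 *
 *	vn_seqc_write_begin(vp);		(vop_setattr_pre)
 *	error = <filesystem-specific setattr>;
 *	vn_seqc_write_end(vp);			(vop_setattr_post)
 *	if (error == 0)
 *		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
 *
 * The seqc write section lets lockless consumers (e.g. fast path lookup)
 * detect that the vnode's attributes may be changing; the knote fires
 * only when the operation succeeded.
 */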
5999 6000 void 6001 vop_setacl_pre(void *ap) 6002 { 6003 struct vop_setacl_args *a; 6004 struct vnode *vp; 6005 6006 a = ap; 6007 vp = a->a_vp; 6008 vn_seqc_write_begin(vp); 6009 } 6010 6011 void 6012 vop_setacl_post(void *ap, int rc __unused) 6013 { 6014 struct vop_setacl_args *a; 6015 struct vnode *vp; 6016 6017 a = ap; 6018 vp = a->a_vp; 6019 vn_seqc_write_end(vp); 6020 } 6021 6022 void 6023 vop_setextattr_pre(void *ap) 6024 { 6025 struct vop_setextattr_args *a; 6026 struct vnode *vp; 6027 6028 a = ap; 6029 vp = a->a_vp; 6030 vn_seqc_write_begin(vp); 6031 } 6032 6033 void 6034 vop_setextattr_post(void *ap, int rc) 6035 { 6036 struct vop_setextattr_args *a; 6037 struct vnode *vp; 6038 6039 a = ap; 6040 vp = a->a_vp; 6041 vn_seqc_write_end(vp); 6042 if (!rc) 6043 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6044 } 6045 6046 void 6047 vop_symlink_pre(void *ap) 6048 { 6049 struct vop_symlink_args *a; 6050 struct vnode *dvp; 6051 6052 a = ap; 6053 dvp = a->a_dvp; 6054 vn_seqc_write_begin(dvp); 6055 } 6056 6057 void 6058 vop_symlink_post(void *ap, int rc) 6059 { 6060 struct vop_symlink_args *a; 6061 struct vnode *dvp; 6062 6063 a = ap; 6064 dvp = a->a_dvp; 6065 vn_seqc_write_end(dvp); 6066 if (!rc) 6067 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6068 } 6069 6070 void 6071 vop_open_post(void *ap, int rc) 6072 { 6073 struct vop_open_args *a = ap; 6074 6075 if (!rc) 6076 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6077 } 6078 6079 void 6080 vop_close_post(void *ap, int rc) 6081 { 6082 struct vop_close_args *a = ap; 6083 6084 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6085 !VN_IS_DOOMED(a->a_vp))) { 6086 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6087 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6088 } 6089 } 6090 6091 void 6092 vop_read_post(void *ap, int rc) 6093 { 6094 struct vop_read_args *a = ap; 6095 6096 if (!rc) 6097 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6098 } 6099 6100 void 6101 vop_read_pgcache_post(void *ap, int rc) 6102 { 6103 struct vop_read_pgcache_args *a = ap; 6104 6105 if (!rc) 6106 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6107 } 6108 6109 void 6110 vop_readdir_post(void *ap, int rc) 6111 { 6112 struct vop_readdir_args *a = ap; 6113 6114 if (!rc) 6115 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6116 } 6117 6118 static struct knlist fs_knlist; 6119 6120 static void 6121 vfs_event_init(void *arg) 6122 { 6123 knlist_init_mtx(&fs_knlist, NULL); 6124 } 6125 /* XXX - correct order? 

static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* Ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");

/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int filt_vfsread(struct knote *kn, long hint);
static int filt_vfswrite(struct knote *kn, long hint);
static int filt_vfsvnode(struct knote *kn, long hint);
static void filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_lock(void *arg, int what)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	if (what == LA_LOCKED)
		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
	else
		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
	    kn->kn_filter != EVFILT_WRITE),
	    ("READ/WRITE filter on a FIFO leaked through"));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}

/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
	vdrop(vp);
}

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct vattr va;
	int res;

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		VI_LOCK(vp);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		VI_UNLOCK(vp);
		return (1);
	}

	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
		return (0);

	VI_LOCK(vp);
	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
	VI_UNLOCK(vp);
	return (res);
}

/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

/*
 * Returns whether the directory is empty or not.
 * If it is empty, the return value is 0; otherwise
 * the return value is an error value (which may
 * be ENOTEMPTY).
 */
int
vfs_emptydir(struct vnode *vp)
{
	struct uio uio;
	struct iovec iov;
	struct dirent *dirent, *dp, *endp;
	int error, eof;

	error = 0;
	eof = 0;

	ASSERT_VOP_LOCKED(vp, "vfs_emptydir");
	VNASSERT(vp->v_type == VDIR, vp, ("vp is not a directory"));

	dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK);
	iov.iov_base = dirent;
	iov.iov_len = sizeof(struct dirent);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(struct dirent);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	while (eof == 0 && error == 0) {
		error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof,
		    NULL, NULL);
		if (error != 0)
			break;
		endp = (void *)((uint8_t *)dirent +
		    sizeof(struct dirent) - uio.uio_resid);
		for (dp = dirent; dp < endp;
		    dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) {
			if (dp->d_type == DT_WHT)
				continue;
			if (dp->d_namlen == 0)
				continue;
			if (dp->d_type != DT_DIR &&
			    dp->d_type != DT_UNKNOWN) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen > 2) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] != '.') {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 2 &&
			    dp->d_name[1] != '.') {
				error = ENOTEMPTY;
				break;
			}
			uio.uio_resid = sizeof(struct dirent);
		}
	}
	free(dirent, M_TEMP);
	return (error);
}

int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL) {
				free(*ap->a_cookies, M_TEMP);
				*ap->a_cookies = NULL;
			}
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	*ap->a_ncookies += 1;
	return (0);
}
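
/*
 * Illustrative (hypothetical) use of vfs_read_dirent() from a filesystem's
 * VOP_READDIR implementation: each entry is packed into a struct dirent
 * and handed to vfs_read_dirent(), which copies it into the caller's uio
 * and records the seek cookie when cookies were requested.
 *
 *	struct dirent de;
 *	off_t off;
 *	int error;
 *
 *	(for each entry at directory offset 'off') {
 *		memset(&de, 0, sizeof(de));
 *		de.d_fileno = ino;
 *		de.d_type = DT_REG;
 *		de.d_namlen = strlen(name);
 *		strcpy(de.d_name, name);
 *		de.d_reclen = GENERIC_DIRSIZ(&de);
 *		dirent_terminate(&de);
 *		error = vfs_read_dirent(ap, &de, off);
 *		if (error != 0)
 *			break;	(ENAMETOOLONG: no room left in the uio)
 *	}
 *
 * Real implementations also advance ap->a_uio->uio_offset and set
 * *ap->a_eofflag; see devfs_readdir() for a complete example.
 */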

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine.  If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify an explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits.  Caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}

/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted. Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode returned "
					    "by VFS_CACHEDROOT and the one cached "
					    "(%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct mount_pcpu *mpcpu;
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp, mpcpu))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp, mpcpu);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp, mpcpu);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}

struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * ops > 0 guarantees there is nobody who can see this vnode
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */
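
/*
 * Typical traversal (sketch, modeled on existing callers such as vflush();
 * the vget() flags and the error handling vary by caller):
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		(vp is returned with its interlock held and is not doomed)
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) {
 *			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 *			return (EBUSY);
 *		}
 *		(... work on the locked, referenced vnode ...)
 *		vput(vp);
 *	}
 */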

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
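
/*
 * Usage sketch (modeled on vfs_periodic_msync_inactive() in this file;
 * the callback name below is illustrative).  The callback lets the
 * iterator skip uninteresting vnodes cheaply, without taking their
 * interlocks; a vnode that passes the filter is returned with its
 * interlock held.
 *
 *	static int
 *	example_want_vnode(struct vnode *vp, void *arg __unused)
 *	{
 *		return (vp->v_object != NULL);
 *	}
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, example_want_vnode, NULL) {
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT) != 0)
 *			continue;
 *		(... work ...)
 *		vput(vp);
 *	}
 */
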
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note we may be racing against vdrop which transitioned the hold
	 * count to 0 and now waits for the ->mnt_listmtx lock.  This is fine,
	 * if we are the only user after we get the interlock we will just
	 * vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}

static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note we may encounter
		 * a long string of vnodes we don't care about and hog the list
		 * as a result.  Check for it and requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		VNPASS(vp->v_mount == mp, vp);
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep reading as in-modify for as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}
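
/*
 * Illustrative (hypothetical) use of the vn_irflag interface above:
 * writers set and clear flags under the vnode interlock, while readers
 * may sample them locklessly with vn_irflag_read(), since updates are
 * plain atomic stores of the whole flag word.
 *
 *	(writer side, e.g. once the backing object is set up)
 *	vn_irflag_set(vp, VIRF_PGREAD);
 *
 *	(reader side, e.g. in a lockless fast path)
 *	if ((vn_irflag_read(vp) & VIRF_PGREAD) != 0) {
 *		(reads may be served from the page cache)
 *	}
 */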