/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
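/*
 * Illustrative arithmetic (example figures, not taken from this file):
 * with desiredvnodes = 1,000,000 and the default wantfreevnodes of 25%
 * (250,000), vnlru_recalc() below yields
 *
 *	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100) = 750,000
 *	vhiwat    = gapvnodes / 11 ~= 68,000	(roughly the 9% watermark)
 *	vlowat    = vhiwat / 2    ~= 34,000	(roughly the 4% watermark)
 *
 * The allocation path kicks vnlru_proc() once less than vlowat worth of
 * headroom remains; see vnlru_under() and vn_alloc_hard().
 */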
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so are only
 * delayed about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at a
 * rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	long freevnodes;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static u_long vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;
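/*
 * Exposition note (added, not part of the original source): the loop in
 * vntblinit() below leaves vnsz2log equal to floor(log2(sizeof(struct
 * vnode))), and getnewvnode() then seeds the hash as
 *
 *	vp->v_hash = (uintptr_t)vp >> vnsz2log;
 *
 * i.e. the vnode address divided by (roughly) the structure size, which
 * is what vfs_hash_index() builds on for filesystems that do not supply
 * their own hash.
 */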
/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields is permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ	148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ	92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;
	u_int i;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we cannot fail an insert.  reassignbuf() callers cannot
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	deferred_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Eventually, mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount point it tried to busy is no
	 * longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}
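/*
 * Usage sketch (illustrative, not a call site from this file): a typical
 * consumer busies the mount across the vnode operations it performs and
 * then releases it, e.g.
 *
 *	error = vfs_busy(mp, MBF_NOWAIT);
 *	if (error != 0)
 *		return (error);
 *	... work with vnodes belonging to mp ...
 *	vfs_unbusy(mp);
 *
 * MBF_MNTLSTLOCK indicates that the caller holds mountlist_mtx;
 * vfs_busy() then drops it before sleeping.
 */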
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we
 * have to check what we got, and take the slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
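/*
 * Worked example (illustrative values only, not from the source): for a
 * filesystem type with vfc_typenum = 5 and mntid_base = 0x1234, the loop
 * above builds
 *
 *	minor  = (5 << 24) | ((0x1234 & 0xff00) << 8) | (0x1234 & 0xff)
 *	       = 0x05120034
 *	val[0] = makedev(255, 0x05120034), val[1] = 5
 *
 * so the low 8 bits of the minor number cycle fastest, which is what
 * keeps val[0] unique mod 2^16 for the first 2^8 calls as described
 * above.
 */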
/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		if (should_yield())
			kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	ocount = count;
	vp = mvp;
	for (;;) {
		if (count == 0) {
			break;
		}
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		if (vtryrecycle(vp) == 0)
			count--;
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	return (ocount - count);
}

static int
vnlru_free_locked(int count)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	return (vnlru_free_impl(count, NULL, vnode_list_free_marker));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp);
	mtx_unlock(&vnode_list_mtx);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

/*
 * The main freevnodes counter is only updated when threads requeue their vnode
 * batches.  CPUs are conditionally walked to compute a more accurate total.
 *
 * Limit how much slop we are willing to tolerate.  Note: the actual value
 * at any given moment can still exceed slop, but it should not be by a
 * significant margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 128

static __inline void
vfs_freevnodes_inc(void)
{
	struct vdbatch *vd;

	critical_enter();
	vd = DPCPU_PTR(vd);
	vd->freevnodes++;
	critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	struct vdbatch *vd;

	critical_enter();
	vd = DPCPU_PTR(vd);
	vd->freevnodes--;
	critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	struct vdbatch *vd;
	long slop;
	int cpu;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (freevnodes > freevnodes_old)
		slop = freevnodes - freevnodes_old;
	else
		slop = freevnodes_old - freevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (freevnodes >= 0 ? freevnodes : 0);
	freevnodes_old = freevnodes;
	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		freevnodes_old += vd->freevnodes;
	}
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}
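/*
 * Explanatory note (added, not part of the original source): the two
 * helpers below answer "is there less than 'limit' worth of headroom left
 * in the vnode cache?".  Headroom is the unused portion of desiredvnodes
 * plus whatever part of the free list exceeds wantfreevnodes, since those
 * vnodes can be recycled on demand.  vnlru_under() expects vnode_list_mtx
 * so it may use the more precise vnlru_read_freevnodes(); the _unlocked
 * variant settles for the possibly stale global counter.
 */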
freevnodes_old : 0); 1443 } 1444 1445 static bool 1446 vnlru_under(u_long rnumvnodes, u_long limit) 1447 { 1448 u_long rfreevnodes, space; 1449 1450 if (__predict_false(rnumvnodes > desiredvnodes)) 1451 return (true); 1452 1453 space = desiredvnodes - rnumvnodes; 1454 if (space < limit) { 1455 rfreevnodes = vnlru_read_freevnodes(); 1456 if (rfreevnodes > wantfreevnodes) 1457 space += rfreevnodes - wantfreevnodes; 1458 } 1459 return (space < limit); 1460 } 1461 1462 static bool 1463 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1464 { 1465 long rfreevnodes, space; 1466 1467 if (__predict_false(rnumvnodes > desiredvnodes)) 1468 return (true); 1469 1470 space = desiredvnodes - rnumvnodes; 1471 if (space < limit) { 1472 rfreevnodes = atomic_load_long(&freevnodes); 1473 if (rfreevnodes > wantfreevnodes) 1474 space += rfreevnodes - wantfreevnodes; 1475 } 1476 return (space < limit); 1477 } 1478 1479 static void 1480 vnlru_kick(void) 1481 { 1482 1483 mtx_assert(&vnode_list_mtx, MA_OWNED); 1484 if (vnlruproc_sig == 0) { 1485 vnlruproc_sig = 1; 1486 wakeup(vnlruproc); 1487 } 1488 } 1489 1490 static void 1491 vnlru_proc(void) 1492 { 1493 u_long rnumvnodes, rfreevnodes, target; 1494 unsigned long onumvnodes; 1495 int done, force, trigger, usevnodes; 1496 bool reclaim_nc_src, want_reread; 1497 1498 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1499 SHUTDOWN_PRI_FIRST); 1500 1501 force = 0; 1502 want_reread = false; 1503 for (;;) { 1504 kproc_suspend_check(vnlruproc); 1505 mtx_lock(&vnode_list_mtx); 1506 rnumvnodes = atomic_load_long(&numvnodes); 1507 1508 if (want_reread) { 1509 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1510 want_reread = false; 1511 } 1512 1513 /* 1514 * If numvnodes is too large (due to desiredvnodes being 1515 * adjusted using its sysctl, or emergency growth), first 1516 * try to reduce it by discarding from the free list. 1517 */ 1518 if (rnumvnodes > desiredvnodes) { 1519 vnlru_free_locked(rnumvnodes - desiredvnodes); 1520 rnumvnodes = atomic_load_long(&numvnodes); 1521 } 1522 /* 1523 * Sleep if the vnode cache is in a good state. This is 1524 * when it is not over-full and has space for about a 4% 1525 * or 9% expansion (by growing its size or inexcessively 1526 * reducing its free list). Otherwise, try to reclaim 1527 * space for a 10% expansion. 1528 */ 1529 if (vstir && force == 0) { 1530 force = 1; 1531 vstir = 0; 1532 } 1533 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1534 vnlruproc_sig = 0; 1535 wakeup(&vnlruproc_sig); 1536 msleep(vnlruproc, &vnode_list_mtx, 1537 PVFS|PDROP, "vlruwt", hz); 1538 continue; 1539 } 1540 rfreevnodes = vnlru_read_freevnodes(); 1541 1542 onumvnodes = rnumvnodes; 1543 /* 1544 * Calculate parameters for recycling. These are the same 1545 * throughout the loop to give some semblance of fairness. 1546 * The trigger point is to avoid recycling vnodes with lots 1547 * of resident pages. We aren't trying to free memory; we 1548 * are trying to recycle or at least free vnodes. 1549 */ 1550 if (rnumvnodes <= desiredvnodes) 1551 usevnodes = rnumvnodes - rfreevnodes; 1552 else 1553 usevnodes = rnumvnodes; 1554 if (usevnodes <= 0) 1555 usevnodes = 1; 1556 /* 1557 * The trigger value is is chosen to give a conservatively 1558 * large value to ensure that it alone doesn't prevent 1559 * making progress. The value can easily be so large that 1560 * it is effectively infinite in some congested and 1561 * misconfigured cases, and this is necessary. Normally 1562 * it is about 8 to 100 (pages), which is quite large. 
1563 */ 1564 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1565 if (force < 2) 1566 trigger = vsmalltrigger; 1567 reclaim_nc_src = force >= 3; 1568 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1569 target = target / 10 + 1; 1570 done = vlrureclaim(reclaim_nc_src, trigger, target); 1571 mtx_unlock(&vnode_list_mtx); 1572 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1573 uma_reclaim(UMA_RECLAIM_DRAIN); 1574 if (done == 0) { 1575 if (force == 0 || force == 1) { 1576 force = 2; 1577 continue; 1578 } 1579 if (force == 2) { 1580 force = 3; 1581 continue; 1582 } 1583 want_reread = true; 1584 force = 0; 1585 vnlru_nowhere++; 1586 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1587 } else { 1588 want_reread = true; 1589 kern_yield(PRI_USER); 1590 } 1591 } 1592 } 1593 1594 static struct kproc_desc vnlru_kp = { 1595 "vnlru", 1596 vnlru_proc, 1597 &vnlruproc 1598 }; 1599 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1600 &vnlru_kp); 1601 1602 /* 1603 * Routines having to do with the management of the vnode table. 1604 */ 1605 1606 /* 1607 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1608 * before we actually vgone(). This function must be called with the vnode 1609 * held to prevent the vnode from being returned to the free list midway 1610 * through vgone(). 1611 */ 1612 static int 1613 vtryrecycle(struct vnode *vp) 1614 { 1615 struct mount *vnmp; 1616 1617 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1618 VNASSERT(vp->v_holdcnt, vp, 1619 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1620 /* 1621 * This vnode may found and locked via some other list, if so we 1622 * can't recycle it yet. 1623 */ 1624 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1625 CTR2(KTR_VFS, 1626 "%s: impossible to recycle, vp %p lock is already held", 1627 __func__, vp); 1628 vdrop(vp); 1629 return (EWOULDBLOCK); 1630 } 1631 /* 1632 * Don't recycle if its filesystem is being suspended. 1633 */ 1634 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1635 VOP_UNLOCK(vp); 1636 CTR2(KTR_VFS, 1637 "%s: impossible to recycle, cannot start the write for %p", 1638 __func__, vp); 1639 vdrop(vp); 1640 return (EBUSY); 1641 } 1642 /* 1643 * If we got this far, we need to acquire the interlock and see if 1644 * anyone picked up this vnode from another list. If not, we will 1645 * mark it with DOOMED via vgonel() so that anyone who does find it 1646 * will skip over it. 1647 */ 1648 VI_LOCK(vp); 1649 if (vp->v_usecount) { 1650 VOP_UNLOCK(vp); 1651 vdropl(vp); 1652 vn_finished_write(vnmp); 1653 CTR2(KTR_VFS, 1654 "%s: impossible to recycle, %p is already referenced", 1655 __func__, vp); 1656 return (EBUSY); 1657 } 1658 if (!VN_IS_DOOMED(vp)) { 1659 counter_u64_add(recycles_free_count, 1); 1660 vgonel(vp); 1661 } 1662 VOP_UNLOCK(vp); 1663 vdropl(vp); 1664 vn_finished_write(vnmp); 1665 return (0); 1666 } 1667 1668 /* 1669 * Allocate a new vnode. 1670 * 1671 * The operation never returns an error. Returning an error was disabled 1672 * in r145385 (dated 2005) with the following comment: 1673 * 1674 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1675 * 1676 * Given the age of this commit (almost 15 years at the time of writing this 1677 * comment) restoring the ability to fail requires a significant audit of 1678 * all codepaths. 1679 * 1680 * The routine can try to free a vnode or stall for up to 1 second waiting for 1681 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 
1682 */ 1683 static u_long vn_alloc_cyclecount; 1684 1685 static struct vnode * __noinline 1686 vn_alloc_hard(struct mount *mp) 1687 { 1688 u_long rnumvnodes, rfreevnodes; 1689 1690 mtx_lock(&vnode_list_mtx); 1691 rnumvnodes = atomic_load_long(&numvnodes); 1692 if (rnumvnodes + 1 < desiredvnodes) { 1693 vn_alloc_cyclecount = 0; 1694 goto alloc; 1695 } 1696 rfreevnodes = vnlru_read_freevnodes(); 1697 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1698 vn_alloc_cyclecount = 0; 1699 vstir = 1; 1700 } 1701 /* 1702 * Grow the vnode cache if it will not be above its target max 1703 * after growing. Otherwise, if the free list is nonempty, try 1704 * to reclaim 1 item from it before growing the cache (possibly 1705 * above its target max if the reclamation failed or is delayed). 1706 * Otherwise, wait for some space. In all cases, schedule 1707 * vnlru_proc() if we are getting short of space. The watermarks 1708 * should be chosen so that we never wait or even reclaim from 1709 * the free list to below its target minimum. 1710 */ 1711 if (vnlru_free_locked(1) > 0) 1712 goto alloc; 1713 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1714 /* 1715 * Wait for space for a new vnode. 1716 */ 1717 vnlru_kick(); 1718 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1719 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1720 vnlru_read_freevnodes() > 1) 1721 vnlru_free_locked(1); 1722 } 1723 alloc: 1724 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1725 if (vnlru_under(rnumvnodes, vlowat)) 1726 vnlru_kick(); 1727 mtx_unlock(&vnode_list_mtx); 1728 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1729 } 1730 1731 static struct vnode * 1732 vn_alloc(struct mount *mp) 1733 { 1734 u_long rnumvnodes; 1735 1736 if (__predict_false(vn_alloc_cyclecount != 0)) 1737 return (vn_alloc_hard(mp)); 1738 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1739 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1740 atomic_subtract_long(&numvnodes, 1); 1741 return (vn_alloc_hard(mp)); 1742 } 1743 1744 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1745 } 1746 1747 static void 1748 vn_free(struct vnode *vp) 1749 { 1750 1751 atomic_subtract_long(&numvnodes, 1); 1752 uma_zfree_smr(vnode_zone, vp); 1753 } 1754 1755 /* 1756 * Return the next vnode from the free list. 1757 */ 1758 int 1759 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1760 struct vnode **vpp) 1761 { 1762 struct vnode *vp; 1763 struct thread *td; 1764 struct lock_object *lo; 1765 1766 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1767 1768 KASSERT(vops->registered, 1769 ("%s: not registered vector op %p\n", __func__, vops)); 1770 1771 td = curthread; 1772 if (td->td_vp_reserved != NULL) { 1773 vp = td->td_vp_reserved; 1774 td->td_vp_reserved = NULL; 1775 } else { 1776 vp = vn_alloc(mp); 1777 } 1778 counter_u64_add(vnodes_created, 1); 1779 /* 1780 * Locks are given the generic name "vnode" when created. 1781 * Follow the historic practice of using the filesystem 1782 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1783 * 1784 * Locks live in a witness group keyed on their name. Thus, 1785 * when a lock is renamed, it must also move from the witness 1786 * group of its old name to the witness group of its new name. 1787 * 1788 * The change only needs to be made when the vnode moves 1789 * from one filesystem type to another. 
We ensure that each 1790 * filesystem use a single static name pointer for its tag so 1791 * that we can compare pointers rather than doing a strcmp(). 1792 */ 1793 lo = &vp->v_vnlock->lock_object; 1794 #ifdef WITNESS 1795 if (lo->lo_name != tag) { 1796 #endif 1797 lo->lo_name = tag; 1798 #ifdef WITNESS 1799 WITNESS_DESTROY(lo); 1800 WITNESS_INIT(lo, tag); 1801 } 1802 #endif 1803 /* 1804 * By default, don't allow shared locks unless filesystems opt-in. 1805 */ 1806 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1807 /* 1808 * Finalize various vnode identity bits. 1809 */ 1810 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1811 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1812 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1813 vp->v_type = VNON; 1814 vp->v_op = vops; 1815 vp->v_irflag = 0; 1816 v_init_counters(vp); 1817 vn_seqc_init(vp); 1818 vp->v_bufobj.bo_ops = &buf_ops_bio; 1819 #ifdef DIAGNOSTIC 1820 if (mp == NULL && vops != &dead_vnodeops) 1821 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1822 #endif 1823 #ifdef MAC 1824 mac_vnode_init(vp); 1825 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1826 mac_vnode_associate_singlelabel(mp, vp); 1827 #endif 1828 if (mp != NULL) { 1829 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1830 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1831 vp->v_vflag |= VV_NOKNOTE; 1832 } 1833 1834 /* 1835 * For the filesystems which do not use vfs_hash_insert(), 1836 * still initialize v_hash to have vfs_hash_index() useful. 1837 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1838 * its own hashing. 1839 */ 1840 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1841 1842 *vpp = vp; 1843 return (0); 1844 } 1845 1846 void 1847 getnewvnode_reserve(void) 1848 { 1849 struct thread *td; 1850 1851 td = curthread; 1852 MPASS(td->td_vp_reserved == NULL); 1853 td->td_vp_reserved = vn_alloc(NULL); 1854 } 1855 1856 void 1857 getnewvnode_drop_reserve(void) 1858 { 1859 struct thread *td; 1860 1861 td = curthread; 1862 if (td->td_vp_reserved != NULL) { 1863 vn_free(td->td_vp_reserved); 1864 td->td_vp_reserved = NULL; 1865 } 1866 } 1867 1868 static void __noinline 1869 freevnode(struct vnode *vp) 1870 { 1871 struct bufobj *bo; 1872 1873 /* 1874 * The vnode has been marked for destruction, so free it. 1875 * 1876 * The vnode will be returned to the zone where it will 1877 * normally remain until it is needed for another vnode. We 1878 * need to cleanup (or verify that the cleanup has already 1879 * been done) any residual data left from its current use 1880 * so as not to contaminate the freshly allocated vnode. 1881 */ 1882 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1883 /* 1884 * Paired with vgone. 
1885 */ 1886 vn_seqc_write_end_free(vp); 1887 1888 bo = &vp->v_bufobj; 1889 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1890 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1891 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1892 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1893 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1894 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1895 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1896 ("clean blk trie not empty")); 1897 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1898 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1899 ("dirty blk trie not empty")); 1900 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1901 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1902 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1903 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1904 ("Dangling rangelock waiters")); 1905 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1906 ("Leaked inactivation")); 1907 VI_UNLOCK(vp); 1908 #ifdef MAC 1909 mac_vnode_destroy(vp); 1910 #endif 1911 if (vp->v_pollinfo != NULL) { 1912 destroy_vpollinfo(vp->v_pollinfo); 1913 vp->v_pollinfo = NULL; 1914 } 1915 vp->v_mountedhere = NULL; 1916 vp->v_unpcb = NULL; 1917 vp->v_rdev = NULL; 1918 vp->v_fifoinfo = NULL; 1919 vp->v_iflag = 0; 1920 vp->v_vflag = 0; 1921 bo->bo_flag = 0; 1922 vn_free(vp); 1923 } 1924 1925 /* 1926 * Delete from old mount point vnode list, if on one. 1927 */ 1928 static void 1929 delmntque(struct vnode *vp) 1930 { 1931 struct mount *mp; 1932 1933 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1934 1935 mp = vp->v_mount; 1936 if (mp == NULL) 1937 return; 1938 MNT_ILOCK(mp); 1939 VI_LOCK(vp); 1940 vp->v_mount = NULL; 1941 VI_UNLOCK(vp); 1942 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1943 ("bad mount point vnode list size")); 1944 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1945 mp->mnt_nvnodelistsize--; 1946 MNT_REL(mp); 1947 MNT_IUNLOCK(mp); 1948 } 1949 1950 static void 1951 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1952 { 1953 1954 vp->v_data = NULL; 1955 vp->v_op = &dead_vnodeops; 1956 vgone(vp); 1957 vput(vp); 1958 } 1959 1960 /* 1961 * Insert into list of vnodes for the new mount point, if available. 1962 */ 1963 int 1964 insmntque1(struct vnode *vp, struct mount *mp, 1965 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1966 { 1967 1968 KASSERT(vp->v_mount == NULL, 1969 ("insmntque: vnode already on per mount vnode list")); 1970 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1971 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1972 1973 /* 1974 * We acquire the vnode interlock early to ensure that the 1975 * vnode cannot be recycled by another process releasing a 1976 * holdcnt on it before we get it on both the vnode list 1977 * and the active vnode list. The mount mutex protects only 1978 * manipulation of the vnode list and the vnode freelist 1979 * mutex protects only manipulation of the active vnode list. 1980 * Hence the need to hold the vnode interlock throughout. 
1981 */ 1982 MNT_ILOCK(mp); 1983 VI_LOCK(vp); 1984 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1985 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1986 mp->mnt_nvnodelistsize == 0)) && 1987 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1988 VI_UNLOCK(vp); 1989 MNT_IUNLOCK(mp); 1990 if (dtr != NULL) 1991 dtr(vp, dtr_arg); 1992 return (EBUSY); 1993 } 1994 vp->v_mount = mp; 1995 MNT_REF(mp); 1996 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1997 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1998 ("neg mount point vnode list size")); 1999 mp->mnt_nvnodelistsize++; 2000 VI_UNLOCK(vp); 2001 MNT_IUNLOCK(mp); 2002 return (0); 2003 } 2004 2005 int 2006 insmntque(struct vnode *vp, struct mount *mp) 2007 { 2008 2009 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 2010 } 2011 2012 /* 2013 * Flush out and invalidate all buffers associated with a bufobj 2014 * Called with the underlying object locked. 2015 */ 2016 int 2017 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2018 { 2019 int error; 2020 2021 BO_LOCK(bo); 2022 if (flags & V_SAVE) { 2023 error = bufobj_wwait(bo, slpflag, slptimeo); 2024 if (error) { 2025 BO_UNLOCK(bo); 2026 return (error); 2027 } 2028 if (bo->bo_dirty.bv_cnt > 0) { 2029 BO_UNLOCK(bo); 2030 do { 2031 error = BO_SYNC(bo, MNT_WAIT); 2032 } while (error == ERELOOKUP); 2033 if (error != 0) 2034 return (error); 2035 BO_LOCK(bo); 2036 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2037 BO_UNLOCK(bo); 2038 return (EBUSY); 2039 } 2040 } 2041 } 2042 /* 2043 * If you alter this loop please notice that interlock is dropped and 2044 * reacquired in flushbuflist. Special care is needed to ensure that 2045 * no race conditions occur from this. 2046 */ 2047 do { 2048 error = flushbuflist(&bo->bo_clean, 2049 flags, bo, slpflag, slptimeo); 2050 if (error == 0 && !(flags & V_CLEANONLY)) 2051 error = flushbuflist(&bo->bo_dirty, 2052 flags, bo, slpflag, slptimeo); 2053 if (error != 0 && error != EAGAIN) { 2054 BO_UNLOCK(bo); 2055 return (error); 2056 } 2057 } while (error != 0); 2058 2059 /* 2060 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2061 * have write I/O in-progress but if there is a VM object then the 2062 * VM object can also have read-I/O in-progress. 2063 */ 2064 do { 2065 bufobj_wwait(bo, 0, 0); 2066 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2067 BO_UNLOCK(bo); 2068 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2069 BO_LOCK(bo); 2070 } 2071 } while (bo->bo_numoutput > 0); 2072 BO_UNLOCK(bo); 2073 2074 /* 2075 * Destroy the copy in the VM cache, too. 2076 */ 2077 if (bo->bo_object != NULL && 2078 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2079 VM_OBJECT_WLOCK(bo->bo_object); 2080 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2081 OBJPR_CLEANONLY : 0); 2082 VM_OBJECT_WUNLOCK(bo->bo_object); 2083 } 2084 2085 #ifdef INVARIANTS 2086 BO_LOCK(bo); 2087 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2088 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2089 bo->bo_clean.bv_cnt > 0)) 2090 panic("vinvalbuf: flush failed"); 2091 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2092 bo->bo_dirty.bv_cnt > 0) 2093 panic("vinvalbuf: flush dirty failed"); 2094 BO_UNLOCK(bo); 2095 #endif 2096 return (0); 2097 } 2098 2099 /* 2100 * Flush out and invalidate all buffers associated with a vnode. 2101 * Called with the underlying object locked. 
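 *
 * An illustrative call pattern, a sketch only and not lifted from any
 * particular filesystem: the vnode lock is assumed to be held by the
 * caller (the ASSERT_VOP_LOCKED() below insists on it), and V_SAVE asks
 * for dirty buffers to be written out before everything is tossed:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *	VOP_UNLOCK(vp);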
2102 */ 2103 int 2104 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2105 { 2106 2107 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2108 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2109 if (vp->v_object != NULL && vp->v_object->handle != vp) 2110 return (0); 2111 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2112 } 2113 2114 /* 2115 * Flush out buffers on the specified list. 2116 * 2117 */ 2118 static int 2119 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2120 int slptimeo) 2121 { 2122 struct buf *bp, *nbp; 2123 int retval, error; 2124 daddr_t lblkno; 2125 b_xflags_t xflags; 2126 2127 ASSERT_BO_WLOCKED(bo); 2128 2129 retval = 0; 2130 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2131 /* 2132 * If we are flushing both V_NORMAL and V_ALT buffers then 2133 * do not skip any buffers. If we are flushing only V_NORMAL 2134 * buffers then skip buffers marked as BX_ALTDATA. If we are 2135 * flushing only V_ALT buffers then skip buffers not marked 2136 * as BX_ALTDATA. 2137 */ 2138 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2139 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2140 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2141 continue; 2142 } 2143 if (nbp != NULL) { 2144 lblkno = nbp->b_lblkno; 2145 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2146 } 2147 retval = EAGAIN; 2148 error = BUF_TIMELOCK(bp, 2149 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2150 "flushbuf", slpflag, slptimeo); 2151 if (error) { 2152 BO_LOCK(bo); 2153 return (error != ENOLCK ? error : EAGAIN); 2154 } 2155 KASSERT(bp->b_bufobj == bo, 2156 ("bp %p wrong b_bufobj %p should be %p", 2157 bp, bp->b_bufobj, bo)); 2158 /* 2159 * XXX Since there are no node locks for NFS, I 2160 * believe there is a slight chance that a delayed 2161 * write will occur while sleeping just above, so 2162 * check for it. 2163 */ 2164 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2165 (flags & V_SAVE)) { 2166 bremfree(bp); 2167 bp->b_flags |= B_ASYNC; 2168 bwrite(bp); 2169 BO_LOCK(bo); 2170 return (EAGAIN); /* XXX: why not loop ? */ 2171 } 2172 bremfree(bp); 2173 bp->b_flags |= (B_INVAL | B_RELBUF); 2174 bp->b_flags &= ~B_ASYNC; 2175 brelse(bp); 2176 BO_LOCK(bo); 2177 if (nbp == NULL) 2178 break; 2179 nbp = gbincore(bo, lblkno); 2180 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2181 != xflags) 2182 break; /* nbp invalid */ 2183 } 2184 return (retval); 2185 } 2186 2187 int 2188 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2189 { 2190 struct buf *bp; 2191 int error; 2192 daddr_t lblkno; 2193 2194 ASSERT_BO_LOCKED(bo); 2195 2196 for (lblkno = startn;;) { 2197 again: 2198 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2199 if (bp == NULL || bp->b_lblkno >= endn || 2200 bp->b_lblkno < startn) 2201 break; 2202 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2203 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2204 if (error != 0) { 2205 BO_RLOCK(bo); 2206 if (error == ENOLCK) 2207 goto again; 2208 return (error); 2209 } 2210 KASSERT(bp->b_bufobj == bo, 2211 ("bp %p wrong b_bufobj %p should be %p", 2212 bp, bp->b_bufobj, bo)); 2213 lblkno = bp->b_lblkno + 1; 2214 if ((bp->b_flags & B_MANAGED) == 0) 2215 bremfree(bp); 2216 bp->b_flags |= B_RELBUF; 2217 /* 2218 * In the VMIO case, use the B_NOREUSE flag to hint that the 2219 * pages backing each buffer in the range are unlikely to be 2220 * reused. 
Dirty buffers will have the hint applied once 2221 * they've been written. 2222 */ 2223 if ((bp->b_flags & B_VMIO) != 0) 2224 bp->b_flags |= B_NOREUSE; 2225 brelse(bp); 2226 BO_RLOCK(bo); 2227 } 2228 return (0); 2229 } 2230 2231 /* 2232 * Truncate a file's buffer and pages to a specified length. This 2233 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2234 * sync activity. 2235 */ 2236 int 2237 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2238 { 2239 struct buf *bp, *nbp; 2240 struct bufobj *bo; 2241 daddr_t startlbn; 2242 2243 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2244 vp, blksize, (uintmax_t)length); 2245 2246 /* 2247 * Round up to the *next* lbn. 2248 */ 2249 startlbn = howmany(length, blksize); 2250 2251 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2252 2253 bo = &vp->v_bufobj; 2254 restart_unlocked: 2255 BO_LOCK(bo); 2256 2257 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2258 ; 2259 2260 if (length > 0) { 2261 restartsync: 2262 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2263 if (bp->b_lblkno > 0) 2264 continue; 2265 /* 2266 * Since we hold the vnode lock this should only 2267 * fail if we're racing with the buf daemon. 2268 */ 2269 if (BUF_LOCK(bp, 2270 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2271 BO_LOCKPTR(bo)) == ENOLCK) 2272 goto restart_unlocked; 2273 2274 VNASSERT((bp->b_flags & B_DELWRI), vp, 2275 ("buf(%p) on dirty queue without DELWRI", bp)); 2276 2277 bremfree(bp); 2278 bawrite(bp); 2279 BO_LOCK(bo); 2280 goto restartsync; 2281 } 2282 } 2283 2284 bufobj_wwait(bo, 0, 0); 2285 BO_UNLOCK(bo); 2286 vnode_pager_setsize(vp, length); 2287 2288 return (0); 2289 } 2290 2291 /* 2292 * Invalidate the cached pages of a file's buffer within the range of block 2293 * numbers [startlbn, endlbn). 
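 *
 * A hedged usage sketch ("off" and "len" are illustrative names, not
 * parameters of this KPI): a filesystem wanting to drop the cached
 * blocks backing the byte range [off, off + len) might derive the block
 * range as below.  Note that blksize must match bo->bo_bsize and the
 * vnode lock must be held, as asserted in the function itself:
 *
 *	startlbn = off / blksize;
 *	endlbn = howmany(off + len, blksize);
 *	v_inval_buf_range(vp, startlbn, endlbn, blksize);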
2294 */ 2295 void 2296 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2297 int blksize) 2298 { 2299 struct bufobj *bo; 2300 off_t start, end; 2301 2302 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2303 2304 start = blksize * startlbn; 2305 end = blksize * endlbn; 2306 2307 bo = &vp->v_bufobj; 2308 BO_LOCK(bo); 2309 MPASS(blksize == bo->bo_bsize); 2310 2311 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2312 ; 2313 2314 BO_UNLOCK(bo); 2315 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2316 } 2317 2318 static int 2319 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2320 daddr_t startlbn, daddr_t endlbn) 2321 { 2322 struct buf *bp, *nbp; 2323 bool anyfreed; 2324 2325 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2326 ASSERT_BO_LOCKED(bo); 2327 2328 do { 2329 anyfreed = false; 2330 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2331 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2332 continue; 2333 if (BUF_LOCK(bp, 2334 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2335 BO_LOCKPTR(bo)) == ENOLCK) { 2336 BO_LOCK(bo); 2337 return (EAGAIN); 2338 } 2339 2340 bremfree(bp); 2341 bp->b_flags |= B_INVAL | B_RELBUF; 2342 bp->b_flags &= ~B_ASYNC; 2343 brelse(bp); 2344 anyfreed = true; 2345 2346 BO_LOCK(bo); 2347 if (nbp != NULL && 2348 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2349 nbp->b_vp != vp || 2350 (nbp->b_flags & B_DELWRI) != 0)) 2351 return (EAGAIN); 2352 } 2353 2354 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2355 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2356 continue; 2357 if (BUF_LOCK(bp, 2358 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2359 BO_LOCKPTR(bo)) == ENOLCK) { 2360 BO_LOCK(bo); 2361 return (EAGAIN); 2362 } 2363 bremfree(bp); 2364 bp->b_flags |= B_INVAL | B_RELBUF; 2365 bp->b_flags &= ~B_ASYNC; 2366 brelse(bp); 2367 anyfreed = true; 2368 2369 BO_LOCK(bo); 2370 if (nbp != NULL && 2371 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2372 (nbp->b_vp != vp) || 2373 (nbp->b_flags & B_DELWRI) == 0)) 2374 return (EAGAIN); 2375 } 2376 } while (anyfreed); 2377 return (0); 2378 } 2379 2380 static void 2381 buf_vlist_remove(struct buf *bp) 2382 { 2383 struct bufv *bv; 2384 b_xflags_t flags; 2385 2386 flags = bp->b_xflags; 2387 2388 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2389 ASSERT_BO_WLOCKED(bp->b_bufobj); 2390 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2391 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2392 ("%s: buffer %p has invalid queue state", __func__, bp)); 2393 2394 if ((flags & BX_VNDIRTY) != 0) 2395 bv = &bp->b_bufobj->bo_dirty; 2396 else 2397 bv = &bp->b_bufobj->bo_clean; 2398 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2399 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2400 bv->bv_cnt--; 2401 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2402 } 2403 2404 /* 2405 * Add the buffer to the sorted clean or dirty block list. 2406 * 2407 * NOTE: xflags is passed as a constant, optimizing this inline function! 
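 *
 * For example, the callers later in this file pass the flag as a literal
 * constant:
 *
 *	buf_vlist_add(bp, bo, BX_VNCLEAN);	(bgetvp())
 *	buf_vlist_add(bp, bo, BX_VNDIRTY);	(reassignbuf(), dirty case)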
2408 */ 2409 static void 2410 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2411 { 2412 struct bufv *bv; 2413 struct buf *n; 2414 int error; 2415 2416 ASSERT_BO_WLOCKED(bo); 2417 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2418 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2419 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2420 ("dead bo %p", bo)); 2421 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2422 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2423 bp->b_xflags |= xflags; 2424 if (xflags & BX_VNDIRTY) 2425 bv = &bo->bo_dirty; 2426 else 2427 bv = &bo->bo_clean; 2428 2429 /* 2430 * Keep the list ordered. Optimize empty list insertion. Assume 2431 * we tend to grow at the tail so lookup_le should usually be cheaper 2432 * than _ge. 2433 */ 2434 if (bv->bv_cnt == 0 || 2435 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2436 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2437 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2438 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2439 else 2440 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2441 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2442 if (error) 2443 panic("buf_vlist_add: Preallocated nodes insufficient."); 2444 bv->bv_cnt++; 2445 } 2446 2447 /* 2448 * Look up a buffer using the buffer tries. 2449 */ 2450 struct buf * 2451 gbincore(struct bufobj *bo, daddr_t lblkno) 2452 { 2453 struct buf *bp; 2454 2455 ASSERT_BO_LOCKED(bo); 2456 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2457 if (bp != NULL) 2458 return (bp); 2459 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2460 } 2461 2462 /* 2463 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2464 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2465 * stability of the result. Like other lockless lookups, the found buf may 2466 * already be invalid by the time this function returns. 2467 */ 2468 struct buf * 2469 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2470 { 2471 struct buf *bp; 2472 2473 ASSERT_BO_UNLOCKED(bo); 2474 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2475 if (bp != NULL) 2476 return (bp); 2477 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2478 } 2479 2480 /* 2481 * Associate a buffer with a vnode. 2482 */ 2483 void 2484 bgetvp(struct vnode *vp, struct buf *bp) 2485 { 2486 struct bufobj *bo; 2487 2488 bo = &vp->v_bufobj; 2489 ASSERT_BO_WLOCKED(bo); 2490 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2491 2492 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2493 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2494 ("bgetvp: bp already attached! %p", bp)); 2495 2496 vhold(vp); 2497 bp->b_vp = vp; 2498 bp->b_bufobj = bo; 2499 /* 2500 * Insert onto list for new vnode. 2501 */ 2502 buf_vlist_add(bp, bo, BX_VNCLEAN); 2503 } 2504 2505 /* 2506 * Disassociate a buffer from a vnode. 2507 */ 2508 void 2509 brelvp(struct buf *bp) 2510 { 2511 struct bufobj *bo; 2512 struct vnode *vp; 2513 2514 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2515 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2516 2517 /* 2518 * Delete from old vnode list, if on one. 
2519 */ 2520 vp = bp->b_vp; /* XXX */ 2521 bo = bp->b_bufobj; 2522 BO_LOCK(bo); 2523 buf_vlist_remove(bp); 2524 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2525 bo->bo_flag &= ~BO_ONWORKLST; 2526 mtx_lock(&sync_mtx); 2527 LIST_REMOVE(bo, bo_synclist); 2528 syncer_worklist_len--; 2529 mtx_unlock(&sync_mtx); 2530 } 2531 bp->b_vp = NULL; 2532 bp->b_bufobj = NULL; 2533 BO_UNLOCK(bo); 2534 vdrop(vp); 2535 } 2536 2537 /* 2538 * Add an item to the syncer work queue. 2539 */ 2540 static void 2541 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2542 { 2543 int slot; 2544 2545 ASSERT_BO_WLOCKED(bo); 2546 2547 mtx_lock(&sync_mtx); 2548 if (bo->bo_flag & BO_ONWORKLST) 2549 LIST_REMOVE(bo, bo_synclist); 2550 else { 2551 bo->bo_flag |= BO_ONWORKLST; 2552 syncer_worklist_len++; 2553 } 2554 2555 if (delay > syncer_maxdelay - 2) 2556 delay = syncer_maxdelay - 2; 2557 slot = (syncer_delayno + delay) & syncer_mask; 2558 2559 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2560 mtx_unlock(&sync_mtx); 2561 } 2562 2563 static int 2564 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2565 { 2566 int error, len; 2567 2568 mtx_lock(&sync_mtx); 2569 len = syncer_worklist_len - sync_vnode_count; 2570 mtx_unlock(&sync_mtx); 2571 error = SYSCTL_OUT(req, &len, sizeof(len)); 2572 return (error); 2573 } 2574 2575 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2576 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2577 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2578 2579 static struct proc *updateproc; 2580 static void sched_sync(void); 2581 static struct kproc_desc up_kp = { 2582 "syncer", 2583 sched_sync, 2584 &updateproc 2585 }; 2586 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2587 2588 static int 2589 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2590 { 2591 struct vnode *vp; 2592 struct mount *mp; 2593 2594 *bo = LIST_FIRST(slp); 2595 if (*bo == NULL) 2596 return (0); 2597 vp = bo2vnode(*bo); 2598 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2599 return (1); 2600 /* 2601 * We use vhold in case the vnode does not 2602 * successfully sync. vhold prevents the vnode from 2603 * going away when we unlock the sync_mtx so that 2604 * we can acquire the vnode interlock. 2605 */ 2606 vholdl(vp); 2607 mtx_unlock(&sync_mtx); 2608 VI_UNLOCK(vp); 2609 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2610 vdrop(vp); 2611 mtx_lock(&sync_mtx); 2612 return (*bo == LIST_FIRST(slp)); 2613 } 2614 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2615 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2616 VOP_UNLOCK(vp); 2617 vn_finished_write(mp); 2618 BO_LOCK(*bo); 2619 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2620 /* 2621 * Put us back on the worklist. The worklist 2622 * routine will remove us from our current 2623 * position and then add us back in at a later 2624 * position. 2625 */ 2626 vn_syncer_add_to_worklist(*bo, syncdelay); 2627 } 2628 BO_UNLOCK(*bo); 2629 vdrop(vp); 2630 mtx_lock(&sync_mtx); 2631 return (0); 2632 } 2633 2634 static int first_printf = 1; 2635 2636 /* 2637 * System filesystem synchronizer daemon. 
2638 */ 2639 static void 2640 sched_sync(void) 2641 { 2642 struct synclist *next, *slp; 2643 struct bufobj *bo; 2644 long starttime; 2645 struct thread *td = curthread; 2646 int last_work_seen; 2647 int net_worklist_len; 2648 int syncer_final_iter; 2649 int error; 2650 2651 last_work_seen = 0; 2652 syncer_final_iter = 0; 2653 syncer_state = SYNCER_RUNNING; 2654 starttime = time_uptime; 2655 td->td_pflags |= TDP_NORUNNINGBUF; 2656 2657 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2658 SHUTDOWN_PRI_LAST); 2659 2660 mtx_lock(&sync_mtx); 2661 for (;;) { 2662 if (syncer_state == SYNCER_FINAL_DELAY && 2663 syncer_final_iter == 0) { 2664 mtx_unlock(&sync_mtx); 2665 kproc_suspend_check(td->td_proc); 2666 mtx_lock(&sync_mtx); 2667 } 2668 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2669 if (syncer_state != SYNCER_RUNNING && 2670 starttime != time_uptime) { 2671 if (first_printf) { 2672 printf("\nSyncing disks, vnodes remaining... "); 2673 first_printf = 0; 2674 } 2675 printf("%d ", net_worklist_len); 2676 } 2677 starttime = time_uptime; 2678 2679 /* 2680 * Push files whose dirty time has expired. Be careful 2681 * of interrupt race on slp queue. 2682 * 2683 * Skip over empty worklist slots when shutting down. 2684 */ 2685 do { 2686 slp = &syncer_workitem_pending[syncer_delayno]; 2687 syncer_delayno += 1; 2688 if (syncer_delayno == syncer_maxdelay) 2689 syncer_delayno = 0; 2690 next = &syncer_workitem_pending[syncer_delayno]; 2691 /* 2692 * If the worklist has wrapped since the 2693 * it was emptied of all but syncer vnodes, 2694 * switch to the FINAL_DELAY state and run 2695 * for one more second. 2696 */ 2697 if (syncer_state == SYNCER_SHUTTING_DOWN && 2698 net_worklist_len == 0 && 2699 last_work_seen == syncer_delayno) { 2700 syncer_state = SYNCER_FINAL_DELAY; 2701 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2702 } 2703 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2704 syncer_worklist_len > 0); 2705 2706 /* 2707 * Keep track of the last time there was anything 2708 * on the worklist other than syncer vnodes. 2709 * Return to the SHUTTING_DOWN state if any 2710 * new work appears. 2711 */ 2712 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2713 last_work_seen = syncer_delayno; 2714 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2715 syncer_state = SYNCER_SHUTTING_DOWN; 2716 while (!LIST_EMPTY(slp)) { 2717 error = sync_vnode(slp, &bo, td); 2718 if (error == 1) { 2719 LIST_REMOVE(bo, bo_synclist); 2720 LIST_INSERT_HEAD(next, bo, bo_synclist); 2721 continue; 2722 } 2723 2724 if (first_printf == 0) { 2725 /* 2726 * Drop the sync mutex, because some watchdog 2727 * drivers need to sleep while patting 2728 */ 2729 mtx_unlock(&sync_mtx); 2730 wdog_kern_pat(WD_LASTVAL); 2731 mtx_lock(&sync_mtx); 2732 } 2733 } 2734 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2735 syncer_final_iter--; 2736 /* 2737 * The variable rushjob allows the kernel to speed up the 2738 * processing of the filesystem syncer process. A rushjob 2739 * value of N tells the filesystem syncer to process the next 2740 * N seconds worth of work on its queue ASAP. Currently rushjob 2741 * is used by the soft update code to speed up the filesystem 2742 * syncer process when the incore state is getting so far 2743 * ahead of the disk that the kernel memory pool is being 2744 * threatened with exhaustion. 
2745 */ 2746 if (rushjob > 0) { 2747 rushjob -= 1; 2748 continue; 2749 } 2750 /* 2751 * Just sleep for a short period of time between 2752 * iterations when shutting down to allow some I/O 2753 * to happen. 2754 * 2755 * If it has taken us less than a second to process the 2756 * current work, then wait. Otherwise start right over 2757 * again. We can still lose time if any single round 2758 * takes more than two seconds, but it does not really 2759 * matter as we are just trying to generally pace the 2760 * filesystem activity. 2761 */ 2762 if (syncer_state != SYNCER_RUNNING || 2763 time_uptime == starttime) { 2764 thread_lock(td); 2765 sched_prio(td, PPAUSE); 2766 thread_unlock(td); 2767 } 2768 if (syncer_state != SYNCER_RUNNING) 2769 cv_timedwait(&sync_wakeup, &sync_mtx, 2770 hz / SYNCER_SHUTDOWN_SPEEDUP); 2771 else if (time_uptime == starttime) 2772 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2773 } 2774 } 2775 2776 /* 2777 * Request the syncer daemon to speed up its work. 2778 * We never push it to speed up more than half of its 2779 * normal turn time, otherwise it could take over the cpu. 2780 */ 2781 int 2782 speedup_syncer(void) 2783 { 2784 int ret = 0; 2785 2786 mtx_lock(&sync_mtx); 2787 if (rushjob < syncdelay / 2) { 2788 rushjob += 1; 2789 stat_rush_requests += 1; 2790 ret = 1; 2791 } 2792 mtx_unlock(&sync_mtx); 2793 cv_broadcast(&sync_wakeup); 2794 return (ret); 2795 } 2796 2797 /* 2798 * Tell the syncer to speed up its work and run though its work 2799 * list several times, then tell it to shut down. 2800 */ 2801 static void 2802 syncer_shutdown(void *arg, int howto) 2803 { 2804 2805 if (howto & RB_NOSYNC) 2806 return; 2807 mtx_lock(&sync_mtx); 2808 syncer_state = SYNCER_SHUTTING_DOWN; 2809 rushjob = 0; 2810 mtx_unlock(&sync_mtx); 2811 cv_broadcast(&sync_wakeup); 2812 kproc_shutdown(arg, howto); 2813 } 2814 2815 void 2816 syncer_suspend(void) 2817 { 2818 2819 syncer_shutdown(updateproc, 0); 2820 } 2821 2822 void 2823 syncer_resume(void) 2824 { 2825 2826 mtx_lock(&sync_mtx); 2827 first_printf = 1; 2828 syncer_state = SYNCER_RUNNING; 2829 mtx_unlock(&sync_mtx); 2830 cv_broadcast(&sync_wakeup); 2831 kproc_resume(updateproc); 2832 } 2833 2834 /* 2835 * Move the buffer between the clean and dirty lists of its vnode. 2836 */ 2837 void 2838 reassignbuf(struct buf *bp) 2839 { 2840 struct vnode *vp; 2841 struct bufobj *bo; 2842 int delay; 2843 #ifdef INVARIANTS 2844 struct bufv *bv; 2845 #endif 2846 2847 vp = bp->b_vp; 2848 bo = bp->b_bufobj; 2849 2850 KASSERT((bp->b_flags & B_PAGING) == 0, 2851 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2852 2853 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2854 bp, bp->b_vp, bp->b_flags); 2855 2856 BO_LOCK(bo); 2857 buf_vlist_remove(bp); 2858 2859 /* 2860 * If dirty, put on list of dirty buffers; otherwise insert onto list 2861 * of clean buffers. 
2862 */ 2863 if (bp->b_flags & B_DELWRI) { 2864 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2865 switch (vp->v_type) { 2866 case VDIR: 2867 delay = dirdelay; 2868 break; 2869 case VCHR: 2870 delay = metadelay; 2871 break; 2872 default: 2873 delay = filedelay; 2874 } 2875 vn_syncer_add_to_worklist(bo, delay); 2876 } 2877 buf_vlist_add(bp, bo, BX_VNDIRTY); 2878 } else { 2879 buf_vlist_add(bp, bo, BX_VNCLEAN); 2880 2881 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2882 mtx_lock(&sync_mtx); 2883 LIST_REMOVE(bo, bo_synclist); 2884 syncer_worklist_len--; 2885 mtx_unlock(&sync_mtx); 2886 bo->bo_flag &= ~BO_ONWORKLST; 2887 } 2888 } 2889 #ifdef INVARIANTS 2890 bv = &bo->bo_clean; 2891 bp = TAILQ_FIRST(&bv->bv_hd); 2892 KASSERT(bp == NULL || bp->b_bufobj == bo, 2893 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2894 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2895 KASSERT(bp == NULL || bp->b_bufobj == bo, 2896 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2897 bv = &bo->bo_dirty; 2898 bp = TAILQ_FIRST(&bv->bv_hd); 2899 KASSERT(bp == NULL || bp->b_bufobj == bo, 2900 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2901 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2902 KASSERT(bp == NULL || bp->b_bufobj == bo, 2903 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2904 #endif 2905 BO_UNLOCK(bo); 2906 } 2907 2908 static void 2909 v_init_counters(struct vnode *vp) 2910 { 2911 2912 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2913 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2914 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2915 2916 refcount_init(&vp->v_holdcnt, 1); 2917 refcount_init(&vp->v_usecount, 1); 2918 } 2919 2920 /* 2921 * Grab a particular vnode from the free list, increment its 2922 * reference count and lock it. VIRF_DOOMED is set if the vnode 2923 * is being destroyed. Only callers who specify LK_RETRY will 2924 * see doomed vnodes. If inactive processing was delayed in 2925 * vput try to do it here. 2926 * 2927 * usecount is manipulated using atomics without holding any locks. 2928 * 2929 * holdcnt can be manipulated using atomics without holding any locks, 2930 * except when transitioning 1<->0, in which case the interlock is held. 2931 * 2932 * Consumers which don't guarantee liveness of the vnode can use SMR to 2933 * try to get a reference. Note this operation can fail since the vnode 2934 * may be awaiting getting freed by the time they get to it. 
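 *
 * An illustrative sequence for an SMR-protected, lockless lookup.  This
 * is only a sketch: the lookup itself is whatever lockless structure the
 * consumer uses, and the vfs_smr_enter()/vfs_smr_exit() pairing is
 * assumed to be the section the assertions below check for:
 *
 *	vfs_smr_enter();
 *	vp = <vnode found by a lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<the vnode was being freed; retry or fall back to a locked lookup>;
 *	error = vget_finish(vp, LK_SHARED, vs);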
2935 */ 2936 enum vgetstate 2937 vget_prep_smr(struct vnode *vp) 2938 { 2939 enum vgetstate vs; 2940 2941 VFS_SMR_ASSERT_ENTERED(); 2942 2943 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2944 vs = VGET_USECOUNT; 2945 } else { 2946 if (vhold_smr(vp)) 2947 vs = VGET_HOLDCNT; 2948 else 2949 vs = VGET_NONE; 2950 } 2951 return (vs); 2952 } 2953 2954 enum vgetstate 2955 vget_prep(struct vnode *vp) 2956 { 2957 enum vgetstate vs; 2958 2959 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2960 vs = VGET_USECOUNT; 2961 } else { 2962 vhold(vp); 2963 vs = VGET_HOLDCNT; 2964 } 2965 return (vs); 2966 } 2967 2968 void 2969 vget_abort(struct vnode *vp, enum vgetstate vs) 2970 { 2971 2972 switch (vs) { 2973 case VGET_USECOUNT: 2974 vrele(vp); 2975 break; 2976 case VGET_HOLDCNT: 2977 vdrop(vp); 2978 break; 2979 default: 2980 __assert_unreachable(); 2981 } 2982 } 2983 2984 int 2985 vget(struct vnode *vp, int flags) 2986 { 2987 enum vgetstate vs; 2988 2989 vs = vget_prep(vp); 2990 return (vget_finish(vp, flags, vs)); 2991 } 2992 2993 int 2994 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2995 { 2996 int error; 2997 2998 if ((flags & LK_INTERLOCK) != 0) 2999 ASSERT_VI_LOCKED(vp, __func__); 3000 else 3001 ASSERT_VI_UNLOCKED(vp, __func__); 3002 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3003 VNPASS(vp->v_holdcnt > 0, vp); 3004 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3005 3006 error = vn_lock(vp, flags); 3007 if (__predict_false(error != 0)) { 3008 vget_abort(vp, vs); 3009 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3010 vp); 3011 return (error); 3012 } 3013 3014 vget_finish_ref(vp, vs); 3015 return (0); 3016 } 3017 3018 void 3019 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3020 { 3021 int old; 3022 3023 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3024 VNPASS(vp->v_holdcnt > 0, vp); 3025 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3026 3027 if (vs == VGET_USECOUNT) 3028 return; 3029 3030 /* 3031 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3032 * the vnode around. Otherwise someone else lended their hold count and 3033 * we have to drop ours. 3034 */ 3035 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3036 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3037 if (old != 0) { 3038 #ifdef INVARIANTS 3039 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3040 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3041 #else 3042 refcount_release(&vp->v_holdcnt); 3043 #endif 3044 } 3045 } 3046 3047 void 3048 vref(struct vnode *vp) 3049 { 3050 enum vgetstate vs; 3051 3052 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3053 vs = vget_prep(vp); 3054 vget_finish_ref(vp, vs); 3055 } 3056 3057 void 3058 vrefact(struct vnode *vp) 3059 { 3060 3061 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3062 #ifdef INVARIANTS 3063 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3064 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3065 #else 3066 refcount_acquire(&vp->v_usecount); 3067 #endif 3068 } 3069 3070 void 3071 vlazy(struct vnode *vp) 3072 { 3073 struct mount *mp; 3074 3075 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3076 3077 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3078 return; 3079 /* 3080 * We may get here for inactive routines after the vnode got doomed. 
3081 */ 3082 if (VN_IS_DOOMED(vp)) 3083 return; 3084 mp = vp->v_mount; 3085 mtx_lock(&mp->mnt_listmtx); 3086 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3087 vp->v_mflag |= VMP_LAZYLIST; 3088 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3089 mp->mnt_lazyvnodelistsize++; 3090 } 3091 mtx_unlock(&mp->mnt_listmtx); 3092 } 3093 3094 static void 3095 vunlazy(struct vnode *vp) 3096 { 3097 struct mount *mp; 3098 3099 ASSERT_VI_LOCKED(vp, __func__); 3100 VNPASS(!VN_IS_DOOMED(vp), vp); 3101 3102 mp = vp->v_mount; 3103 mtx_lock(&mp->mnt_listmtx); 3104 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3105 /* 3106 * Don't remove the vnode from the lazy list if another thread 3107 * has increased the hold count. It may have re-enqueued the 3108 * vnode to the lazy list and is now responsible for its 3109 * removal. 3110 */ 3111 if (vp->v_holdcnt == 0) { 3112 vp->v_mflag &= ~VMP_LAZYLIST; 3113 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3114 mp->mnt_lazyvnodelistsize--; 3115 } 3116 mtx_unlock(&mp->mnt_listmtx); 3117 } 3118 3119 /* 3120 * This routine is only meant to be called from vgonel prior to dooming 3121 * the vnode. 3122 */ 3123 static void 3124 vunlazy_gone(struct vnode *vp) 3125 { 3126 struct mount *mp; 3127 3128 ASSERT_VOP_ELOCKED(vp, __func__); 3129 ASSERT_VI_LOCKED(vp, __func__); 3130 VNPASS(!VN_IS_DOOMED(vp), vp); 3131 3132 if (vp->v_mflag & VMP_LAZYLIST) { 3133 mp = vp->v_mount; 3134 mtx_lock(&mp->mnt_listmtx); 3135 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3136 vp->v_mflag &= ~VMP_LAZYLIST; 3137 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3138 mp->mnt_lazyvnodelistsize--; 3139 mtx_unlock(&mp->mnt_listmtx); 3140 } 3141 } 3142 3143 static void 3144 vdefer_inactive(struct vnode *vp) 3145 { 3146 3147 ASSERT_VI_LOCKED(vp, __func__); 3148 VNASSERT(vp->v_holdcnt > 0, vp, 3149 ("%s: vnode without hold count", __func__)); 3150 if (VN_IS_DOOMED(vp)) { 3151 vdropl(vp); 3152 return; 3153 } 3154 if (vp->v_iflag & VI_DEFINACT) { 3155 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3156 vdropl(vp); 3157 return; 3158 } 3159 if (vp->v_usecount > 0) { 3160 vp->v_iflag &= ~VI_OWEINACT; 3161 vdropl(vp); 3162 return; 3163 } 3164 vlazy(vp); 3165 vp->v_iflag |= VI_DEFINACT; 3166 VI_UNLOCK(vp); 3167 counter_u64_add(deferred_inact, 1); 3168 } 3169 3170 static void 3171 vdefer_inactive_unlocked(struct vnode *vp) 3172 { 3173 3174 VI_LOCK(vp); 3175 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3176 vdropl(vp); 3177 return; 3178 } 3179 vdefer_inactive(vp); 3180 } 3181 3182 enum vput_op { VRELE, VPUT, VUNREF }; 3183 3184 /* 3185 * Handle ->v_usecount transitioning to 0. 3186 * 3187 * By releasing the last usecount we take ownership of the hold count which 3188 * provides liveness of the vnode, meaning we have to vdrop. 3189 * 3190 * For all vnodes we may need to perform inactive processing. It requires an 3191 * exclusive lock on the vnode, while it is legal to call here with only a 3192 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3193 * inactive processing gets deferred to the syncer. 3194 * 3195 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3196 * on the lock being held all the way until VOP_INACTIVE. This in particular 3197 * happens with UFS which adds half-constructed vnodes to the hash, where they 3198 * can be found by other code. 
3199 */ 3200 static void 3201 vput_final(struct vnode *vp, enum vput_op func) 3202 { 3203 int error; 3204 bool want_unlock; 3205 3206 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3207 VNPASS(vp->v_holdcnt > 0, vp); 3208 3209 VI_LOCK(vp); 3210 3211 /* 3212 * By the time we got here someone else might have transitioned 3213 * the count back to > 0. 3214 */ 3215 if (vp->v_usecount > 0) 3216 goto out; 3217 3218 /* 3219 * If the vnode is doomed vgone already performed inactive processing 3220 * (if needed). 3221 */ 3222 if (VN_IS_DOOMED(vp)) 3223 goto out; 3224 3225 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3226 goto out; 3227 3228 if (vp->v_iflag & VI_DOINGINACT) 3229 goto out; 3230 3231 /* 3232 * Locking operations here will drop the interlock and possibly the 3233 * vnode lock, opening a window where the vnode can get doomed all the 3234 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3235 * perform inactive. 3236 */ 3237 vp->v_iflag |= VI_OWEINACT; 3238 want_unlock = false; 3239 error = 0; 3240 switch (func) { 3241 case VRELE: 3242 switch (VOP_ISLOCKED(vp)) { 3243 case LK_EXCLUSIVE: 3244 break; 3245 case LK_EXCLOTHER: 3246 case 0: 3247 want_unlock = true; 3248 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3249 VI_LOCK(vp); 3250 break; 3251 default: 3252 /* 3253 * The lock has at least one sharer, but we have no way 3254 * to conclude whether this is us. Play it safe and 3255 * defer processing. 3256 */ 3257 error = EAGAIN; 3258 break; 3259 } 3260 break; 3261 case VPUT: 3262 want_unlock = true; 3263 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3264 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3265 LK_NOWAIT); 3266 VI_LOCK(vp); 3267 } 3268 break; 3269 case VUNREF: 3270 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3271 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3272 VI_LOCK(vp); 3273 } 3274 break; 3275 } 3276 if (error == 0) { 3277 if (func == VUNREF) { 3278 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3279 ("recursive vunref")); 3280 vp->v_vflag |= VV_UNREF; 3281 } 3282 for (;;) { 3283 error = vinactive(vp); 3284 if (want_unlock) 3285 VOP_UNLOCK(vp); 3286 if (error != ERELOOKUP || !want_unlock) 3287 break; 3288 VOP_LOCK(vp, LK_EXCLUSIVE); 3289 } 3290 if (func == VUNREF) 3291 vp->v_vflag &= ~VV_UNREF; 3292 vdropl(vp); 3293 } else { 3294 vdefer_inactive(vp); 3295 } 3296 return; 3297 out: 3298 if (func == VPUT) 3299 VOP_UNLOCK(vp); 3300 vdropl(vp); 3301 } 3302 3303 /* 3304 * Decrement ->v_usecount for a vnode. 3305 * 3306 * Releasing the last use count requires additional processing, see vput_final 3307 * above for details. 3308 * 3309 * Comment above each variant denotes lock state on entry and exit. 
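 *
 * A hedged example of the common acquire/release pairing, assuming the
 * caller already has some guarantee that the vnode cannot be recycled
 * out from under it (e.g. an existing hold or a locked lookup
 * structure):
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... use the locked vnode ...
 *		vput(vp);	drops both the lock and the use count
 *	}
 *
 * whereas an unlocked reference is paired as vref(vp) ... vrele(vp).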
3310 */ 3311 3312 /* 3313 * in: any 3314 * out: same as passed in 3315 */ 3316 void 3317 vrele(struct vnode *vp) 3318 { 3319 3320 ASSERT_VI_UNLOCKED(vp, __func__); 3321 if (!refcount_release(&vp->v_usecount)) 3322 return; 3323 vput_final(vp, VRELE); 3324 } 3325 3326 /* 3327 * in: locked 3328 * out: unlocked 3329 */ 3330 void 3331 vput(struct vnode *vp) 3332 { 3333 3334 ASSERT_VOP_LOCKED(vp, __func__); 3335 ASSERT_VI_UNLOCKED(vp, __func__); 3336 if (!refcount_release(&vp->v_usecount)) { 3337 VOP_UNLOCK(vp); 3338 return; 3339 } 3340 vput_final(vp, VPUT); 3341 } 3342 3343 /* 3344 * in: locked 3345 * out: locked 3346 */ 3347 void 3348 vunref(struct vnode *vp) 3349 { 3350 3351 ASSERT_VOP_LOCKED(vp, __func__); 3352 ASSERT_VI_UNLOCKED(vp, __func__); 3353 if (!refcount_release(&vp->v_usecount)) 3354 return; 3355 vput_final(vp, VUNREF); 3356 } 3357 3358 void 3359 vhold(struct vnode *vp) 3360 { 3361 int old; 3362 3363 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3364 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3365 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3366 ("%s: wrong hold count %d", __func__, old)); 3367 if (old == 0) 3368 vfs_freevnodes_dec(); 3369 } 3370 3371 void 3372 vholdnz(struct vnode *vp) 3373 { 3374 3375 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3376 #ifdef INVARIANTS 3377 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3378 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3379 ("%s: wrong hold count %d", __func__, old)); 3380 #else 3381 atomic_add_int(&vp->v_holdcnt, 1); 3382 #endif 3383 } 3384 3385 /* 3386 * Grab a hold count unless the vnode is freed. 3387 * 3388 * Only use this routine if vfs smr is the only protection you have against 3389 * freeing the vnode. 3390 * 3391 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3392 * is not set. After the flag is set the vnode becomes immutable to anyone but 3393 * the thread which managed to set the flag. 3394 * 3395 * It may be tempting to replace the loop with: 3396 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3397 * if (count & VHOLD_NO_SMR) { 3398 * backpedal and error out; 3399 * } 3400 * 3401 * However, while this is more performant, it hinders debugging by eliminating 3402 * the previously mentioned invariant. 3403 */ 3404 bool 3405 vhold_smr(struct vnode *vp) 3406 { 3407 int count; 3408 3409 VFS_SMR_ASSERT_ENTERED(); 3410 3411 count = atomic_load_int(&vp->v_holdcnt); 3412 for (;;) { 3413 if (count & VHOLD_NO_SMR) { 3414 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3415 ("non-zero hold count with flags %d\n", count)); 3416 return (false); 3417 } 3418 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3419 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3420 if (count == 0) 3421 vfs_freevnodes_dec(); 3422 return (true); 3423 } 3424 } 3425 } 3426 3427 /* 3428 * Hold a free vnode for recycling. 3429 * 3430 * Note: vnode_init references this comment. 3431 * 3432 * Attempts to recycle only need the global vnode list lock and have no use for 3433 * SMR. 3434 * 3435 * However, vnodes get inserted into the global list before they get fully 3436 * initialized and stay there until UMA decides to free the memory. This in 3437 * particular means the target can be found before it becomes usable and after 3438 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3439 * VHOLD_NO_SMR. 3440 * 3441 * Note: the vnode may gain more references after we transition the count 0->1. 
3442 */
3443 static bool
3444 vhold_recycle_free(struct vnode *vp)
3445 {
3446 int count;
3447
3448 mtx_assert(&vnode_list_mtx, MA_OWNED);
3449
3450 count = atomic_load_int(&vp->v_holdcnt);
3451 for (;;) {
3452 if (count & VHOLD_NO_SMR) {
3453 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3454 ("non-zero hold count with flags %d\n", count));
3455 return (false);
3456 }
3457 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3458 if (count > 0) {
3459 return (false);
3460 }
3461 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3462 vfs_freevnodes_dec();
3463 return (true);
3464 }
3465 }
3466 }
3467
3468 static void __noinline
3469 vdbatch_process(struct vdbatch *vd)
3470 {
3471 struct vnode *vp;
3472 int i;
3473
3474 mtx_assert(&vd->lock, MA_OWNED);
3475 MPASS(curthread->td_pinned > 0);
3476 MPASS(vd->index == VDBATCH_SIZE);
3477
3478 mtx_lock(&vnode_list_mtx);
3479 critical_enter();
3480 freevnodes += vd->freevnodes;
3481 for (i = 0; i < VDBATCH_SIZE; i++) {
3482 vp = vd->tab[i];
3483 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3484 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3485 MPASS(vp->v_dbatchcpu != NOCPU);
3486 vp->v_dbatchcpu = NOCPU;
3487 }
3488 mtx_unlock(&vnode_list_mtx);
3489 vd->freevnodes = 0;
3490 bzero(vd->tab, sizeof(vd->tab));
3491 vd->index = 0;
3492 critical_exit();
3493 }
3494
3495 static void
3496 vdbatch_enqueue(struct vnode *vp)
3497 {
3498 struct vdbatch *vd;
3499
3500 ASSERT_VI_LOCKED(vp, __func__);
3501 VNASSERT(!VN_IS_DOOMED(vp), vp,
3502 ("%s: deferring requeue of a doomed vnode", __func__));
3503
3504 if (vp->v_dbatchcpu != NOCPU) {
3505 VI_UNLOCK(vp);
3506 return;
3507 }
3508
3509 sched_pin();
3510 vd = DPCPU_PTR(vd);
3511 mtx_lock(&vd->lock);
3512 MPASS(vd->index < VDBATCH_SIZE);
3513 MPASS(vd->tab[vd->index] == NULL);
3514 /*
3515 * A hack: we depend on being pinned so that we know what to put in
3516 * ->v_dbatchcpu.
3517 */
3518 vp->v_dbatchcpu = curcpu;
3519 vd->tab[vd->index] = vp;
3520 vd->index++;
3521 VI_UNLOCK(vp);
3522 if (vd->index == VDBATCH_SIZE)
3523 vdbatch_process(vd);
3524 mtx_unlock(&vd->lock);
3525 sched_unpin();
3526 }
3527
3528 /*
3529 * This routine must only be called for vnodes which are about to be
3530 * deallocated. Supporting dequeue for arbitrary vnodes would require
3531 * validating that the locked batch matches.
3532 */
3533 static void
3534 vdbatch_dequeue(struct vnode *vp)
3535 {
3536 struct vdbatch *vd;
3537 int i;
3538 short cpu;
3539
3540 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp,
3541 ("%s: called for a used vnode\n", __func__));
3542
3543 cpu = vp->v_dbatchcpu;
3544 if (cpu == NOCPU)
3545 return;
3546
3547 vd = DPCPU_ID_PTR(cpu, vd);
3548 mtx_lock(&vd->lock);
3549 for (i = 0; i < vd->index; i++) {
3550 if (vd->tab[i] != vp)
3551 continue;
3552 vp->v_dbatchcpu = NOCPU;
3553 vd->index--;
3554 vd->tab[i] = vd->tab[vd->index];
3555 vd->tab[vd->index] = NULL;
3556 break;
3557 }
3558 mtx_unlock(&vd->lock);
3559 /*
3560 * Either we dequeued the vnode above or the target CPU beat us to it.
3561 */
3562 MPASS(vp->v_dbatchcpu == NOCPU);
3563 }
3564
3565 /*
3566 * Drop the hold count of the vnode. If this is the last reference to
3567 * the vnode we place it on the free list unless it has been vgone'd
3568 * (marked VIRF_DOOMED) in which case we will free it.
3569 *
3570 * Because the vnode vm object keeps a hold reference on the vnode if
3571 * there is at least one resident non-cached page, the vnode cannot
3572 * leave the active list without the page cleanup done.
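 *
 * The usual pairing is sketched below (see sync_vnode() earlier in this
 * file for a real in-tree example): take the hold while the interlock or
 * another reference still protects the vnode, drop the locks, perform
 * the blocking work, then release the hold:
 *
 *	vholdl(vp);		with the interlock held
 *	VI_UNLOCK(vp);
 *	... blocking work that must not lose the vnode ...
 *	vdrop(vp);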
3573 */ 3574 static void __noinline 3575 vdropl_final(struct vnode *vp) 3576 { 3577 3578 ASSERT_VI_LOCKED(vp, __func__); 3579 VNPASS(VN_IS_DOOMED(vp), vp); 3580 /* 3581 * Set the VHOLD_NO_SMR flag. 3582 * 3583 * We may be racing against vhold_smr. If they win we can just pretend 3584 * we never got this far, they will vdrop later. 3585 */ 3586 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3587 vfs_freevnodes_inc(); 3588 VI_UNLOCK(vp); 3589 /* 3590 * We lost the aforementioned race. Any subsequent access is 3591 * invalid as they might have managed to vdropl on their own. 3592 */ 3593 return; 3594 } 3595 /* 3596 * Don't bump freevnodes as this one is going away. 3597 */ 3598 freevnode(vp); 3599 } 3600 3601 void 3602 vdrop(struct vnode *vp) 3603 { 3604 3605 ASSERT_VI_UNLOCKED(vp, __func__); 3606 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3607 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3608 return; 3609 VI_LOCK(vp); 3610 vdropl(vp); 3611 } 3612 3613 void 3614 vdropl(struct vnode *vp) 3615 { 3616 3617 ASSERT_VI_LOCKED(vp, __func__); 3618 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3619 if (!refcount_release(&vp->v_holdcnt)) { 3620 VI_UNLOCK(vp); 3621 return; 3622 } 3623 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3624 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3625 if (VN_IS_DOOMED(vp)) { 3626 vdropl_final(vp); 3627 return; 3628 } 3629 3630 vfs_freevnodes_inc(); 3631 if (vp->v_mflag & VMP_LAZYLIST) { 3632 vunlazy(vp); 3633 } 3634 /* 3635 * Also unlocks the interlock. We can't assert on it as we 3636 * released our hold and by now the vnode might have been 3637 * freed. 3638 */ 3639 vdbatch_enqueue(vp); 3640 } 3641 3642 /* 3643 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3644 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3645 */ 3646 static int 3647 vinactivef(struct vnode *vp) 3648 { 3649 struct vm_object *obj; 3650 int error; 3651 3652 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3653 ASSERT_VI_LOCKED(vp, "vinactive"); 3654 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3655 ("vinactive: recursed on VI_DOINGINACT")); 3656 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3657 vp->v_iflag |= VI_DOINGINACT; 3658 vp->v_iflag &= ~VI_OWEINACT; 3659 VI_UNLOCK(vp); 3660 /* 3661 * Before moving off the active list, we must be sure that any 3662 * modified pages are converted into the vnode's dirty 3663 * buffers, since these will no longer be checked once the 3664 * vnode is on the inactive list. 3665 * 3666 * The write-out of the dirty pages is asynchronous. At the 3667 * point that VOP_INACTIVE() is called, there could still be 3668 * pending I/O and dirty pages in the object. 
3669 */ 3670 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3671 vm_object_mightbedirty(obj)) { 3672 VM_OBJECT_WLOCK(obj); 3673 vm_object_page_clean(obj, 0, 0, 0); 3674 VM_OBJECT_WUNLOCK(obj); 3675 } 3676 error = VOP_INACTIVE(vp); 3677 VI_LOCK(vp); 3678 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3679 ("vinactive: lost VI_DOINGINACT")); 3680 vp->v_iflag &= ~VI_DOINGINACT; 3681 return (error); 3682 } 3683 3684 int 3685 vinactive(struct vnode *vp) 3686 { 3687 3688 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3689 ASSERT_VI_LOCKED(vp, "vinactive"); 3690 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3691 3692 if ((vp->v_iflag & VI_OWEINACT) == 0) 3693 return (0); 3694 if (vp->v_iflag & VI_DOINGINACT) 3695 return (0); 3696 if (vp->v_usecount > 0) { 3697 vp->v_iflag &= ~VI_OWEINACT; 3698 return (0); 3699 } 3700 return (vinactivef(vp)); 3701 } 3702 3703 /* 3704 * Remove any vnodes in the vnode table belonging to mount point mp. 3705 * 3706 * If FORCECLOSE is not specified, there should not be any active ones, 3707 * return error if any are found (nb: this is a user error, not a 3708 * system error). If FORCECLOSE is specified, detach any active vnodes 3709 * that are found. 3710 * 3711 * If WRITECLOSE is set, only flush out regular file vnodes open for 3712 * writing. 3713 * 3714 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3715 * 3716 * `rootrefs' specifies the base reference count for the root vnode 3717 * of this filesystem. The root vnode is considered busy if its 3718 * v_usecount exceeds this value. On a successful return, vflush(, td) 3719 * will call vrele() on the root vnode exactly rootrefs times. 3720 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3721 * be zero. 3722 */ 3723 #ifdef DIAGNOSTIC 3724 static int busyprt = 0; /* print out busy vnodes */ 3725 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3726 #endif 3727 3728 int 3729 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3730 { 3731 struct vnode *vp, *mvp, *rootvp = NULL; 3732 struct vattr vattr; 3733 int busy = 0, error; 3734 3735 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3736 rootrefs, flags); 3737 if (rootrefs > 0) { 3738 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3739 ("vflush: bad args")); 3740 /* 3741 * Get the filesystem root vnode. We can vput() it 3742 * immediately, since with rootrefs > 0, it won't go away. 3743 */ 3744 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3745 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3746 __func__, error); 3747 return (error); 3748 } 3749 vput(rootvp); 3750 } 3751 loop: 3752 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3753 vholdl(vp); 3754 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3755 if (error) { 3756 vdrop(vp); 3757 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3758 goto loop; 3759 } 3760 /* 3761 * Skip over a vnodes marked VV_SYSTEM. 3762 */ 3763 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3764 VOP_UNLOCK(vp); 3765 vdrop(vp); 3766 continue; 3767 } 3768 /* 3769 * If WRITECLOSE is set, flush out unlinked but still open 3770 * files (even if open only for reading) and regular file 3771 * vnodes open for writing. 
3772 */ 3773 if (flags & WRITECLOSE) { 3774 if (vp->v_object != NULL) { 3775 VM_OBJECT_WLOCK(vp->v_object); 3776 vm_object_page_clean(vp->v_object, 0, 0, 0); 3777 VM_OBJECT_WUNLOCK(vp->v_object); 3778 } 3779 do { 3780 error = VOP_FSYNC(vp, MNT_WAIT, td); 3781 } while (error == ERELOOKUP); 3782 if (error != 0) { 3783 VOP_UNLOCK(vp); 3784 vdrop(vp); 3785 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3786 return (error); 3787 } 3788 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3789 VI_LOCK(vp); 3790 3791 if ((vp->v_type == VNON || 3792 (error == 0 && vattr.va_nlink > 0)) && 3793 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3794 VOP_UNLOCK(vp); 3795 vdropl(vp); 3796 continue; 3797 } 3798 } else 3799 VI_LOCK(vp); 3800 /* 3801 * With v_usecount == 0, all we need to do is clear out the 3802 * vnode data structures and we are done. 3803 * 3804 * If FORCECLOSE is set, forcibly close the vnode. 3805 */ 3806 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3807 vgonel(vp); 3808 } else { 3809 busy++; 3810 #ifdef DIAGNOSTIC 3811 if (busyprt) 3812 vn_printf(vp, "vflush: busy vnode "); 3813 #endif 3814 } 3815 VOP_UNLOCK(vp); 3816 vdropl(vp); 3817 } 3818 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3819 /* 3820 * If just the root vnode is busy, and if its refcount 3821 * is equal to `rootrefs', then go ahead and kill it. 3822 */ 3823 VI_LOCK(rootvp); 3824 KASSERT(busy > 0, ("vflush: not busy")); 3825 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3826 ("vflush: usecount %d < rootrefs %d", 3827 rootvp->v_usecount, rootrefs)); 3828 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3829 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3830 vgone(rootvp); 3831 VOP_UNLOCK(rootvp); 3832 busy = 0; 3833 } else 3834 VI_UNLOCK(rootvp); 3835 } 3836 if (busy) { 3837 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3838 busy); 3839 return (EBUSY); 3840 } 3841 for (; rootrefs > 0; rootrefs--) 3842 vrele(rootvp); 3843 return (0); 3844 } 3845 3846 /* 3847 * Recycle an unused vnode to the front of the free list. 3848 */ 3849 int 3850 vrecycle(struct vnode *vp) 3851 { 3852 int recycled; 3853 3854 VI_LOCK(vp); 3855 recycled = vrecyclel(vp); 3856 VI_UNLOCK(vp); 3857 return (recycled); 3858 } 3859 3860 /* 3861 * vrecycle, with the vp interlock held. 3862 */ 3863 int 3864 vrecyclel(struct vnode *vp) 3865 { 3866 int recycled; 3867 3868 ASSERT_VOP_ELOCKED(vp, __func__); 3869 ASSERT_VI_LOCKED(vp, __func__); 3870 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3871 recycled = 0; 3872 if (vp->v_usecount == 0) { 3873 recycled = 1; 3874 vgonel(vp); 3875 } 3876 return (recycled); 3877 } 3878 3879 /* 3880 * Eliminate all activity associated with a vnode 3881 * in preparation for reuse. 3882 */ 3883 void 3884 vgone(struct vnode *vp) 3885 { 3886 VI_LOCK(vp); 3887 vgonel(vp); 3888 VI_UNLOCK(vp); 3889 } 3890 3891 static void 3892 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3893 struct vnode *lowervp __unused) 3894 { 3895 } 3896 3897 /* 3898 * Notify upper mounts about reclaimed or unlinked vnode. 
3899 */ 3900 void 3901 vfs_notify_upper(struct vnode *vp, int event) 3902 { 3903 static struct vfsops vgonel_vfsops = { 3904 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3905 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3906 }; 3907 struct mount *mp, *ump, *mmp; 3908 3909 mp = vp->v_mount; 3910 if (mp == NULL) 3911 return; 3912 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3913 return; 3914 3915 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3916 mmp->mnt_op = &vgonel_vfsops; 3917 mmp->mnt_kern_flag |= MNTK_MARKER; 3918 MNT_ILOCK(mp); 3919 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3920 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3921 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3922 ump = TAILQ_NEXT(ump, mnt_upper_link); 3923 continue; 3924 } 3925 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3926 MNT_IUNLOCK(mp); 3927 switch (event) { 3928 case VFS_NOTIFY_UPPER_RECLAIM: 3929 VFS_RECLAIM_LOWERVP(ump, vp); 3930 break; 3931 case VFS_NOTIFY_UPPER_UNLINK: 3932 VFS_UNLINK_LOWERVP(ump, vp); 3933 break; 3934 default: 3935 KASSERT(0, ("invalid event %d", event)); 3936 break; 3937 } 3938 MNT_ILOCK(mp); 3939 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3940 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3941 } 3942 free(mmp, M_TEMP); 3943 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3944 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3945 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3946 wakeup(&mp->mnt_uppers); 3947 } 3948 MNT_IUNLOCK(mp); 3949 } 3950 3951 /* 3952 * vgone, with the vp interlock held. 3953 */ 3954 static void 3955 vgonel(struct vnode *vp) 3956 { 3957 struct thread *td; 3958 struct mount *mp; 3959 vm_object_t object; 3960 bool active, doinginact, oweinact; 3961 3962 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3963 ASSERT_VI_LOCKED(vp, "vgonel"); 3964 VNASSERT(vp->v_holdcnt, vp, 3965 ("vgonel: vp %p has no reference.", vp)); 3966 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3967 td = curthread; 3968 3969 /* 3970 * Don't vgonel if we're already doomed. 3971 */ 3972 if (VN_IS_DOOMED(vp)) 3973 return; 3974 /* 3975 * Paired with freevnode. 3976 */ 3977 vn_seqc_write_begin_locked(vp); 3978 vunlazy_gone(vp); 3979 vn_irflag_set_locked(vp, VIRF_DOOMED); 3980 3981 /* 3982 * Check to see if the vnode is in use. If so, we have to 3983 * call VOP_CLOSE() and VOP_INACTIVE(). 3984 * 3985 * It could be that VOP_INACTIVE() requested reclamation, in 3986 * which case we should avoid recursion, so check 3987 * VI_DOINGINACT. This is not precise but good enough. 3988 */ 3989 active = vp->v_usecount > 0; 3990 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3991 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 3992 3993 /* 3994 * If we need to do inactive VI_OWEINACT will be set. 3995 */ 3996 if (vp->v_iflag & VI_DEFINACT) { 3997 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3998 vp->v_iflag &= ~VI_DEFINACT; 3999 vdropl(vp); 4000 } else { 4001 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4002 VI_UNLOCK(vp); 4003 } 4004 cache_purge_vgone(vp); 4005 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4006 4007 /* 4008 * If purging an active vnode, it must be closed and 4009 * deactivated before being reclaimed. 
4010 */ 4011 if (active) 4012 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4013 if (!doinginact) { 4014 do { 4015 if (oweinact || active) { 4016 VI_LOCK(vp); 4017 vinactivef(vp); 4018 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4019 VI_UNLOCK(vp); 4020 } 4021 } while (oweinact); 4022 } 4023 if (vp->v_type == VSOCK) 4024 vfs_unp_reclaim(vp); 4025 4026 /* 4027 * Clean out any buffers associated with the vnode. 4028 * If the flush fails, just toss the buffers. 4029 */ 4030 mp = NULL; 4031 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4032 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4033 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4034 while (vinvalbuf(vp, 0, 0, 0) != 0) 4035 ; 4036 } 4037 4038 BO_LOCK(&vp->v_bufobj); 4039 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4040 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4041 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4042 vp->v_bufobj.bo_clean.bv_cnt == 0, 4043 ("vp %p bufobj not invalidated", vp)); 4044 4045 /* 4046 * For VMIO bufobj, BO_DEAD is set later, or in 4047 * vm_object_terminate() after the object's page queue is 4048 * flushed. 4049 */ 4050 object = vp->v_bufobj.bo_object; 4051 if (object == NULL) 4052 vp->v_bufobj.bo_flag |= BO_DEAD; 4053 BO_UNLOCK(&vp->v_bufobj); 4054 4055 /* 4056 * Handle the VM part. Tmpfs handles v_object on its own (the 4057 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4058 * should not touch the object borrowed from the lower vnode 4059 * (the handle check). 4060 */ 4061 if (object != NULL && object->type == OBJT_VNODE && 4062 object->handle == vp) 4063 vnode_destroy_vobject(vp); 4064 4065 /* 4066 * Reclaim the vnode. 4067 */ 4068 if (VOP_RECLAIM(vp)) 4069 panic("vgone: cannot reclaim"); 4070 if (mp != NULL) 4071 vn_finished_secondary_write(mp); 4072 VNASSERT(vp->v_object == NULL, vp, 4073 ("vop_reclaim left v_object vp=%p", vp)); 4074 /* 4075 * Clear the advisory locks and wake up waiting threads. 4076 */ 4077 (void)VOP_ADVLOCKPURGE(vp); 4078 vp->v_lockf = NULL; 4079 /* 4080 * Delete from old mount point vnode list. 4081 */ 4082 delmntque(vp); 4083 /* 4084 * Done with purge, reset to the standard lock and invalidate 4085 * the vnode. 4086 */ 4087 VI_LOCK(vp); 4088 vp->v_vnlock = &vp->v_lock; 4089 vp->v_op = &dead_vnodeops; 4090 vp->v_type = VBAD; 4091 } 4092 4093 /* 4094 * Print out a description of a vnode. 4095 */ 4096 static const char * const typename[] = 4097 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4098 "VMARKER"}; 4099 4100 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4101 "new hold count flag not added to vn_printf"); 4102 4103 void 4104 vn_printf(struct vnode *vp, const char *fmt, ...) 
4105 { 4106 va_list ap; 4107 char buf[256], buf2[16]; 4108 u_long flags; 4109 u_int holdcnt; 4110 short irflag; 4111 4112 va_start(ap, fmt); 4113 vprintf(fmt, ap); 4114 va_end(ap); 4115 printf("%p: ", (void *)vp); 4116 printf("type %s\n", typename[vp->v_type]); 4117 holdcnt = atomic_load_int(&vp->v_holdcnt); 4118 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4119 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4120 vp->v_seqc_users); 4121 switch (vp->v_type) { 4122 case VDIR: 4123 printf(" mountedhere %p\n", vp->v_mountedhere); 4124 break; 4125 case VCHR: 4126 printf(" rdev %p\n", vp->v_rdev); 4127 break; 4128 case VSOCK: 4129 printf(" socket %p\n", vp->v_unpcb); 4130 break; 4131 case VFIFO: 4132 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4133 break; 4134 default: 4135 printf("\n"); 4136 break; 4137 } 4138 buf[0] = '\0'; 4139 buf[1] = '\0'; 4140 if (holdcnt & VHOLD_NO_SMR) 4141 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4142 printf(" hold count flags (%s)\n", buf + 1); 4143 4144 buf[0] = '\0'; 4145 buf[1] = '\0'; 4146 irflag = vn_irflag_read(vp); 4147 if (irflag & VIRF_DOOMED) 4148 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4149 if (irflag & VIRF_PGREAD) 4150 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4151 if (irflag & VIRF_MOUNTPOINT) 4152 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4153 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT); 4154 if (flags != 0) { 4155 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4156 strlcat(buf, buf2, sizeof(buf)); 4157 } 4158 if (vp->v_vflag & VV_ROOT) 4159 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4160 if (vp->v_vflag & VV_ISTTY) 4161 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4162 if (vp->v_vflag & VV_NOSYNC) 4163 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4164 if (vp->v_vflag & VV_ETERNALDEV) 4165 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4166 if (vp->v_vflag & VV_CACHEDLABEL) 4167 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4168 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4169 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4170 if (vp->v_vflag & VV_COPYONWRITE) 4171 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4172 if (vp->v_vflag & VV_SYSTEM) 4173 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4174 if (vp->v_vflag & VV_PROCDEP) 4175 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4176 if (vp->v_vflag & VV_NOKNOTE) 4177 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4178 if (vp->v_vflag & VV_DELETED) 4179 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4180 if (vp->v_vflag & VV_MD) 4181 strlcat(buf, "|VV_MD", sizeof(buf)); 4182 if (vp->v_vflag & VV_FORCEINSMQ) 4183 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4184 if (vp->v_vflag & VV_READLINK) 4185 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4186 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4187 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4188 VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | 4189 VV_READLINK); 4190 if (flags != 0) { 4191 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4192 strlcat(buf, buf2, sizeof(buf)); 4193 } 4194 if (vp->v_iflag & VI_TEXT_REF) 4195 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4196 if (vp->v_iflag & VI_MOUNT) 4197 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4198 if (vp->v_iflag & VI_DOINGINACT) 4199 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4200 if (vp->v_iflag & VI_OWEINACT) 4201 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4202 if (vp->v_iflag & VI_DEFINACT) 4203 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4204 if (vp->v_iflag & VI_FOPENING) 4205 strlcat(buf, "|VI_FOPENING", 
sizeof(buf)); 4206 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4207 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4208 if (flags != 0) { 4209 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4210 strlcat(buf, buf2, sizeof(buf)); 4211 } 4212 if (vp->v_mflag & VMP_LAZYLIST) 4213 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4214 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4215 if (flags != 0) { 4216 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4217 strlcat(buf, buf2, sizeof(buf)); 4218 } 4219 printf(" flags (%s)", buf + 1); 4220 if (mtx_owned(VI_MTX(vp))) 4221 printf(" VI_LOCKed"); 4222 printf("\n"); 4223 if (vp->v_object != NULL) 4224 printf(" v_object %p ref %d pages %d " 4225 "cleanbuf %d dirtybuf %d\n", 4226 vp->v_object, vp->v_object->ref_count, 4227 vp->v_object->resident_page_count, 4228 vp->v_bufobj.bo_clean.bv_cnt, 4229 vp->v_bufobj.bo_dirty.bv_cnt); 4230 printf(" "); 4231 lockmgr_printinfo(vp->v_vnlock); 4232 if (vp->v_data != NULL) 4233 VOP_PRINT(vp); 4234 } 4235 4236 #ifdef DDB 4237 /* 4238 * List all of the locked vnodes in the system. 4239 * Called when debugging the kernel. 4240 */ 4241 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4242 { 4243 struct mount *mp; 4244 struct vnode *vp; 4245 4246 /* 4247 * Note: because this is DDB, we can't obey the locking semantics 4248 * for these structures, which means we could catch an inconsistent 4249 * state and dereference a nasty pointer. Not much to be done 4250 * about that. 4251 */ 4252 db_printf("Locked vnodes\n"); 4253 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4254 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4255 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4256 vn_printf(vp, "vnode "); 4257 } 4258 } 4259 } 4260 4261 /* 4262 * Show details about the given vnode. 4263 */ 4264 DB_SHOW_COMMAND(vnode, db_show_vnode) 4265 { 4266 struct vnode *vp; 4267 4268 if (!have_addr) 4269 return; 4270 vp = (struct vnode *)addr; 4271 vn_printf(vp, "vnode "); 4272 } 4273 4274 /* 4275 * Show details about the given mount point. 4276 */ 4277 DB_SHOW_COMMAND(mount, db_show_mount) 4278 { 4279 struct mount *mp; 4280 struct vfsopt *opt; 4281 struct statfs *sp; 4282 struct vnode *vp; 4283 char buf[512]; 4284 uint64_t mflags; 4285 u_int flags; 4286 4287 if (!have_addr) { 4288 /* No address given, print short info about all mount points. 
*/ 4289 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4290 db_printf("%p %s on %s (%s)\n", mp, 4291 mp->mnt_stat.f_mntfromname, 4292 mp->mnt_stat.f_mntonname, 4293 mp->mnt_stat.f_fstypename); 4294 if (db_pager_quit) 4295 break; 4296 } 4297 db_printf("\nMore info: show mount <addr>\n"); 4298 return; 4299 } 4300 4301 mp = (struct mount *)addr; 4302 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4303 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4304 4305 buf[0] = '\0'; 4306 mflags = mp->mnt_flag; 4307 #define MNT_FLAG(flag) do { \ 4308 if (mflags & (flag)) { \ 4309 if (buf[0] != '\0') \ 4310 strlcat(buf, ", ", sizeof(buf)); \ 4311 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4312 mflags &= ~(flag); \ 4313 } \ 4314 } while (0) 4315 MNT_FLAG(MNT_RDONLY); 4316 MNT_FLAG(MNT_SYNCHRONOUS); 4317 MNT_FLAG(MNT_NOEXEC); 4318 MNT_FLAG(MNT_NOSUID); 4319 MNT_FLAG(MNT_NFS4ACLS); 4320 MNT_FLAG(MNT_UNION); 4321 MNT_FLAG(MNT_ASYNC); 4322 MNT_FLAG(MNT_SUIDDIR); 4323 MNT_FLAG(MNT_SOFTDEP); 4324 MNT_FLAG(MNT_NOSYMFOLLOW); 4325 MNT_FLAG(MNT_GJOURNAL); 4326 MNT_FLAG(MNT_MULTILABEL); 4327 MNT_FLAG(MNT_ACLS); 4328 MNT_FLAG(MNT_NOATIME); 4329 MNT_FLAG(MNT_NOCLUSTERR); 4330 MNT_FLAG(MNT_NOCLUSTERW); 4331 MNT_FLAG(MNT_SUJ); 4332 MNT_FLAG(MNT_EXRDONLY); 4333 MNT_FLAG(MNT_EXPORTED); 4334 MNT_FLAG(MNT_DEFEXPORTED); 4335 MNT_FLAG(MNT_EXPORTANON); 4336 MNT_FLAG(MNT_EXKERB); 4337 MNT_FLAG(MNT_EXPUBLIC); 4338 MNT_FLAG(MNT_LOCAL); 4339 MNT_FLAG(MNT_QUOTA); 4340 MNT_FLAG(MNT_ROOTFS); 4341 MNT_FLAG(MNT_USER); 4342 MNT_FLAG(MNT_IGNORE); 4343 MNT_FLAG(MNT_UPDATE); 4344 MNT_FLAG(MNT_DELEXPORT); 4345 MNT_FLAG(MNT_RELOAD); 4346 MNT_FLAG(MNT_FORCE); 4347 MNT_FLAG(MNT_SNAPSHOT); 4348 MNT_FLAG(MNT_BYFSID); 4349 #undef MNT_FLAG 4350 if (mflags != 0) { 4351 if (buf[0] != '\0') 4352 strlcat(buf, ", ", sizeof(buf)); 4353 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4354 "0x%016jx", mflags); 4355 } 4356 db_printf(" mnt_flag = %s\n", buf); 4357 4358 buf[0] = '\0'; 4359 flags = mp->mnt_kern_flag; 4360 #define MNT_KERN_FLAG(flag) do { \ 4361 if (flags & (flag)) { \ 4362 if (buf[0] != '\0') \ 4363 strlcat(buf, ", ", sizeof(buf)); \ 4364 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4365 flags &= ~(flag); \ 4366 } \ 4367 } while (0) 4368 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4369 MNT_KERN_FLAG(MNTK_ASYNC); 4370 MNT_KERN_FLAG(MNTK_SOFTDEP); 4371 MNT_KERN_FLAG(MNTK_DRAINING); 4372 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4373 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4374 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4375 MNT_KERN_FLAG(MNTK_NO_IOPF); 4376 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4377 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4378 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4379 MNT_KERN_FLAG(MNTK_MARKER); 4380 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4381 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4382 MNT_KERN_FLAG(MNTK_NOASYNC); 4383 MNT_KERN_FLAG(MNTK_UNMOUNT); 4384 MNT_KERN_FLAG(MNTK_MWAIT); 4385 MNT_KERN_FLAG(MNTK_SUSPEND); 4386 MNT_KERN_FLAG(MNTK_SUSPEND2); 4387 MNT_KERN_FLAG(MNTK_SUSPENDED); 4388 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4389 MNT_KERN_FLAG(MNTK_NOKNOTE); 4390 #undef MNT_KERN_FLAG 4391 if (flags != 0) { 4392 if (buf[0] != '\0') 4393 strlcat(buf, ", ", sizeof(buf)); 4394 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4395 "0x%08x", flags); 4396 } 4397 db_printf(" mnt_kern_flag = %s\n", buf); 4398 4399 db_printf(" mnt_opt = "); 4400 opt = TAILQ_FIRST(mp->mnt_opt); 4401 if (opt != NULL) { 4402 db_printf("%s", opt->name); 4403 opt = TAILQ_NEXT(opt, link); 4404 while (opt != NULL) { 4405 db_printf(", %s", opt->name); 4406 opt = TAILQ_NEXT(opt, link); 4407 } 4408 
} 4409 db_printf("\n"); 4410 4411 sp = &mp->mnt_stat; 4412 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4413 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4414 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4415 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4416 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4417 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4418 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4419 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4420 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4421 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4422 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4423 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4424 4425 db_printf(" mnt_cred = { uid=%u ruid=%u", 4426 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4427 if (jailed(mp->mnt_cred)) 4428 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4429 db_printf(" }\n"); 4430 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4431 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4432 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4433 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4434 db_printf(" mnt_lazyvnodelistsize = %d\n", 4435 mp->mnt_lazyvnodelistsize); 4436 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4437 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4438 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4439 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4440 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4441 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4442 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4443 db_printf(" mnt_secondary_accwrites = %d\n", 4444 mp->mnt_secondary_accwrites); 4445 db_printf(" mnt_gjprovider = %s\n", 4446 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4447 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4448 4449 db_printf("\n\nList of active vnodes\n"); 4450 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4451 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4452 vn_printf(vp, "vnode "); 4453 if (db_pager_quit) 4454 break; 4455 } 4456 } 4457 db_printf("\n\nList of inactive vnodes\n"); 4458 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4459 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4460 vn_printf(vp, "vnode "); 4461 if (db_pager_quit) 4462 break; 4463 } 4464 } 4465 } 4466 #endif /* DDB */ 4467 4468 /* 4469 * Fill in a struct xvfsconf based on a struct vfsconf. 4470 */ 4471 static int 4472 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4473 { 4474 struct xvfsconf xvfsp; 4475 4476 bzero(&xvfsp, sizeof(xvfsp)); 4477 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4478 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4479 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4480 xvfsp.vfc_flags = vfsp->vfc_flags; 4481 /* 4482 * These are unused in userland, we keep them 4483 * to not break binary compatibility. 
4484 */ 4485 xvfsp.vfc_vfsops = NULL; 4486 xvfsp.vfc_next = NULL; 4487 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4488 } 4489 4490 #ifdef COMPAT_FREEBSD32 4491 struct xvfsconf32 { 4492 uint32_t vfc_vfsops; 4493 char vfc_name[MFSNAMELEN]; 4494 int32_t vfc_typenum; 4495 int32_t vfc_refcount; 4496 int32_t vfc_flags; 4497 uint32_t vfc_next; 4498 }; 4499 4500 static int 4501 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4502 { 4503 struct xvfsconf32 xvfsp; 4504 4505 bzero(&xvfsp, sizeof(xvfsp)); 4506 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4507 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4508 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4509 xvfsp.vfc_flags = vfsp->vfc_flags; 4510 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4511 } 4512 #endif 4513 4514 /* 4515 * Top level filesystem related information gathering. 4516 */ 4517 static int 4518 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4519 { 4520 struct vfsconf *vfsp; 4521 int error; 4522 4523 error = 0; 4524 vfsconf_slock(); 4525 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4526 #ifdef COMPAT_FREEBSD32 4527 if (req->flags & SCTL_MASK32) 4528 error = vfsconf2x32(req, vfsp); 4529 else 4530 #endif 4531 error = vfsconf2x(req, vfsp); 4532 if (error) 4533 break; 4534 } 4535 vfsconf_sunlock(); 4536 return (error); 4537 } 4538 4539 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4540 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4541 "S,xvfsconf", "List of all configured filesystems"); 4542 4543 #ifndef BURN_BRIDGES 4544 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4545 4546 static int 4547 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4548 { 4549 int *name = (int *)arg1 - 1; /* XXX */ 4550 u_int namelen = arg2 + 1; /* XXX */ 4551 struct vfsconf *vfsp; 4552 4553 log(LOG_WARNING, "userland calling deprecated sysctl, " 4554 "please rebuild world\n"); 4555 4556 #if 1 || defined(COMPAT_PRELITE2) 4557 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4558 if (namelen == 1) 4559 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4560 #endif 4561 4562 switch (name[1]) { 4563 case VFS_MAXTYPENUM: 4564 if (namelen != 2) 4565 return (ENOTDIR); 4566 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4567 case VFS_CONF: 4568 if (namelen != 3) 4569 return (ENOTDIR); /* overloaded */ 4570 vfsconf_slock(); 4571 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4572 if (vfsp->vfc_typenum == name[2]) 4573 break; 4574 } 4575 vfsconf_sunlock(); 4576 if (vfsp == NULL) 4577 return (EOPNOTSUPP); 4578 #ifdef COMPAT_FREEBSD32 4579 if (req->flags & SCTL_MASK32) 4580 return (vfsconf2x32(req, vfsp)); 4581 else 4582 #endif 4583 return (vfsconf2x(req, vfsp)); 4584 } 4585 return (EOPNOTSUPP); 4586 } 4587 4588 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4589 CTLFLAG_MPSAFE, vfs_sysctl, 4590 "Generic filesystem"); 4591 4592 #if 1 || defined(COMPAT_PRELITE2) 4593 4594 static int 4595 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4596 { 4597 int error; 4598 struct vfsconf *vfsp; 4599 struct ovfsconf ovfs; 4600 4601 vfsconf_slock(); 4602 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4603 bzero(&ovfs, sizeof(ovfs)); 4604 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4605 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4606 ovfs.vfc_index = vfsp->vfc_typenum; 4607 ovfs.vfc_refcount = vfsp->vfc_refcount; 4608 ovfs.vfc_flags = vfsp->vfc_flags; 4609 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4610 if (error != 0) { 4611 vfsconf_sunlock(); 4612 return (error); 4613 } 4614 } 4615 vfsconf_sunlock(); 4616 return (0); 4617 } 4618 4619 #endif /* 1 || COMPAT_PRELITE2 */ 4620 #endif /* !BURN_BRIDGES */ 4621 4622 #define KINFO_VNODESLOP 10 4623 #ifdef notyet 4624 /* 4625 * Dump vnode list (via sysctl). 4626 */ 4627 /* ARGSUSED */ 4628 static int 4629 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4630 { 4631 struct xvnode *xvn; 4632 struct mount *mp; 4633 struct vnode *vp; 4634 int error, len, n; 4635 4636 /* 4637 * Stale numvnodes access is not fatal here. 4638 */ 4639 req->lock = 0; 4640 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4641 if (!req->oldptr) 4642 /* Make an estimate */ 4643 return (SYSCTL_OUT(req, 0, len)); 4644 4645 error = sysctl_wire_old_buffer(req, 0); 4646 if (error != 0) 4647 return (error); 4648 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4649 n = 0; 4650 mtx_lock(&mountlist_mtx); 4651 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4652 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4653 continue; 4654 MNT_ILOCK(mp); 4655 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4656 if (n == len) 4657 break; 4658 vref(vp); 4659 xvn[n].xv_size = sizeof *xvn; 4660 xvn[n].xv_vnode = vp; 4661 xvn[n].xv_id = 0; /* XXX compat */ 4662 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4663 XV_COPY(usecount); 4664 XV_COPY(writecount); 4665 XV_COPY(holdcnt); 4666 XV_COPY(mount); 4667 XV_COPY(numoutput); 4668 XV_COPY(type); 4669 #undef XV_COPY 4670 xvn[n].xv_flag = vp->v_vflag; 4671 4672 switch (vp->v_type) { 4673 case VREG: 4674 case VDIR: 4675 case VLNK: 4676 break; 4677 case VBLK: 4678 case VCHR: 4679 if (vp->v_rdev == NULL) { 4680 vrele(vp); 4681 continue; 4682 } 4683 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4684 break; 4685 case VSOCK: 4686 xvn[n].xv_socket = vp->v_socket; 4687 break; 4688 case VFIFO: 4689 xvn[n].xv_fifo = vp->v_fifoinfo; 4690 break; 4691 case VNON: 4692 case VBAD: 4693 default: 4694 /* shouldn't happen? 
*/ 4695 vrele(vp); 4696 continue; 4697 } 4698 vrele(vp); 4699 ++n; 4700 } 4701 MNT_IUNLOCK(mp); 4702 mtx_lock(&mountlist_mtx); 4703 vfs_unbusy(mp); 4704 if (n == len) 4705 break; 4706 } 4707 mtx_unlock(&mountlist_mtx); 4708 4709 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4710 free(xvn, M_TEMP); 4711 return (error); 4712 } 4713 4714 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4715 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4716 ""); 4717 #endif 4718 4719 static void 4720 unmount_or_warn(struct mount *mp) 4721 { 4722 int error; 4723 4724 error = dounmount(mp, MNT_FORCE, curthread); 4725 if (error != 0) { 4726 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4727 if (error == EBUSY) 4728 printf("BUSY)\n"); 4729 else 4730 printf("%d)\n", error); 4731 } 4732 } 4733 4734 /* 4735 * Unmount all filesystems. The list is traversed in reverse order 4736 * of mounting to avoid dependencies. 4737 */ 4738 void 4739 vfs_unmountall(void) 4740 { 4741 struct mount *mp, *tmp; 4742 4743 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4744 4745 /* 4746 * Since this only runs when rebooting, it is not interlocked. 4747 */ 4748 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4749 vfs_ref(mp); 4750 4751 /* 4752 * Forcibly unmounting "/dev" before "/" would prevent clean 4753 * unmount of the latter. 4754 */ 4755 if (mp == rootdevmp) 4756 continue; 4757 4758 unmount_or_warn(mp); 4759 } 4760 4761 if (rootdevmp != NULL) 4762 unmount_or_warn(rootdevmp); 4763 } 4764 4765 static void 4766 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4767 { 4768 4769 ASSERT_VI_LOCKED(vp, __func__); 4770 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4771 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4772 vdropl(vp); 4773 return; 4774 } 4775 if (vn_lock(vp, lkflags) == 0) { 4776 VI_LOCK(vp); 4777 vinactive(vp); 4778 VOP_UNLOCK(vp); 4779 vdropl(vp); 4780 return; 4781 } 4782 vdefer_inactive_unlocked(vp); 4783 } 4784 4785 static int 4786 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4787 { 4788 4789 return (vp->v_iflag & VI_DEFINACT); 4790 } 4791 4792 static void __noinline 4793 vfs_periodic_inactive(struct mount *mp, int flags) 4794 { 4795 struct vnode *vp, *mvp; 4796 int lkflags; 4797 4798 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4799 if (flags != MNT_WAIT) 4800 lkflags |= LK_NOWAIT; 4801 4802 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4803 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4804 VI_UNLOCK(vp); 4805 continue; 4806 } 4807 vp->v_iflag &= ~VI_DEFINACT; 4808 vfs_deferred_inactive(vp, lkflags); 4809 } 4810 } 4811 4812 static inline bool 4813 vfs_want_msync(struct vnode *vp) 4814 { 4815 struct vm_object *obj; 4816 4817 /* 4818 * This test may be performed without any locks held. 4819 * We rely on vm_object's type stability. 
4820 */ 4821 if (vp->v_vflag & VV_NOSYNC) 4822 return (false); 4823 obj = vp->v_object; 4824 return (obj != NULL && vm_object_mightbedirty(obj)); 4825 } 4826 4827 static int 4828 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4829 { 4830 4831 if (vp->v_vflag & VV_NOSYNC) 4832 return (false); 4833 if (vp->v_iflag & VI_DEFINACT) 4834 return (true); 4835 return (vfs_want_msync(vp)); 4836 } 4837 4838 static void __noinline 4839 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4840 { 4841 struct vnode *vp, *mvp; 4842 struct vm_object *obj; 4843 int lkflags, objflags; 4844 bool seen_defer; 4845 4846 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4847 if (flags != MNT_WAIT) { 4848 lkflags |= LK_NOWAIT; 4849 objflags = OBJPC_NOSYNC; 4850 } else { 4851 objflags = OBJPC_SYNC; 4852 } 4853 4854 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4855 seen_defer = false; 4856 if (vp->v_iflag & VI_DEFINACT) { 4857 vp->v_iflag &= ~VI_DEFINACT; 4858 seen_defer = true; 4859 } 4860 if (!vfs_want_msync(vp)) { 4861 if (seen_defer) 4862 vfs_deferred_inactive(vp, lkflags); 4863 else 4864 VI_UNLOCK(vp); 4865 continue; 4866 } 4867 if (vget(vp, lkflags) == 0) { 4868 obj = vp->v_object; 4869 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4870 VM_OBJECT_WLOCK(obj); 4871 vm_object_page_clean(obj, 0, 0, objflags); 4872 VM_OBJECT_WUNLOCK(obj); 4873 } 4874 vput(vp); 4875 if (seen_defer) 4876 vdrop(vp); 4877 } else { 4878 if (seen_defer) 4879 vdefer_inactive_unlocked(vp); 4880 } 4881 } 4882 } 4883 4884 void 4885 vfs_periodic(struct mount *mp, int flags) 4886 { 4887 4888 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4889 4890 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4891 vfs_periodic_inactive(mp, flags); 4892 else 4893 vfs_periodic_msync_inactive(mp, flags); 4894 } 4895 4896 static void 4897 destroy_vpollinfo_free(struct vpollinfo *vi) 4898 { 4899 4900 knlist_destroy(&vi->vpi_selinfo.si_note); 4901 mtx_destroy(&vi->vpi_lock); 4902 free(vi, M_VNODEPOLL); 4903 } 4904 4905 static void 4906 destroy_vpollinfo(struct vpollinfo *vi) 4907 { 4908 4909 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4910 seldrain(&vi->vpi_selinfo); 4911 destroy_vpollinfo_free(vi); 4912 } 4913 4914 /* 4915 * Initialize per-vnode helper structure to hold poll-related state. 4916 */ 4917 void 4918 v_addpollinfo(struct vnode *vp) 4919 { 4920 struct vpollinfo *vi; 4921 4922 if (vp->v_pollinfo != NULL) 4923 return; 4924 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4925 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4926 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4927 vfs_knlunlock, vfs_knl_assert_lock); 4928 VI_LOCK(vp); 4929 if (vp->v_pollinfo != NULL) { 4930 VI_UNLOCK(vp); 4931 destroy_vpollinfo_free(vi); 4932 return; 4933 } 4934 vp->v_pollinfo = vi; 4935 VI_UNLOCK(vp); 4936 } 4937 4938 /* 4939 * Record a process's interest in events which might happen to 4940 * a vnode. Because poll uses the historic select-style interface 4941 * internally, this routine serves as both the ``check for any 4942 * pending events'' and the ``record my interest in future events'' 4943 * functions. (These are done together, while the lock is held, 4944 * to avoid race conditions.) 
4945 */ 4946 int 4947 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4948 { 4949 4950 v_addpollinfo(vp); 4951 mtx_lock(&vp->v_pollinfo->vpi_lock); 4952 if (vp->v_pollinfo->vpi_revents & events) { 4953 /* 4954 * This leaves events we are not interested 4955 * in available for the other process which 4956 * presumably had requested them 4957 * (otherwise they would never have been 4958 * recorded). 4959 */ 4960 events &= vp->v_pollinfo->vpi_revents; 4961 vp->v_pollinfo->vpi_revents &= ~events; 4962 4963 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4964 return (events); 4965 } 4966 vp->v_pollinfo->vpi_events |= events; 4967 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4968 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4969 return (0); 4970 } 4971 4972 /* 4973 * Routine to create and manage a filesystem syncer vnode. 4974 */ 4975 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4976 static int sync_fsync(struct vop_fsync_args *); 4977 static int sync_inactive(struct vop_inactive_args *); 4978 static int sync_reclaim(struct vop_reclaim_args *); 4979 4980 static struct vop_vector sync_vnodeops = { 4981 .vop_bypass = VOP_EOPNOTSUPP, 4982 .vop_close = sync_close, /* close */ 4983 .vop_fsync = sync_fsync, /* fsync */ 4984 .vop_inactive = sync_inactive, /* inactive */ 4985 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4986 .vop_reclaim = sync_reclaim, /* reclaim */ 4987 .vop_lock1 = vop_stdlock, /* lock */ 4988 .vop_unlock = vop_stdunlock, /* unlock */ 4989 .vop_islocked = vop_stdislocked, /* islocked */ 4990 }; 4991 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4992 4993 /* 4994 * Create a new filesystem syncer vnode for the specified mount point. 4995 */ 4996 void 4997 vfs_allocate_syncvnode(struct mount *mp) 4998 { 4999 struct vnode *vp; 5000 struct bufobj *bo; 5001 static long start, incr, next; 5002 int error; 5003 5004 /* Allocate a new vnode */ 5005 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5006 if (error != 0) 5007 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5008 vp->v_type = VNON; 5009 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5010 vp->v_vflag |= VV_FORCEINSMQ; 5011 error = insmntque(vp, mp); 5012 if (error != 0) 5013 panic("vfs_allocate_syncvnode: insmntque() failed"); 5014 vp->v_vflag &= ~VV_FORCEINSMQ; 5015 VOP_UNLOCK(vp); 5016 /* 5017 * Place the vnode onto the syncer worklist. We attempt to 5018 * scatter them about on the list so that they will go off 5019 * at evenly distributed times even if all the filesystems 5020 * are mounted at once. 5021 */ 5022 next += incr; 5023 if (next == 0 || next > syncer_maxdelay) { 5024 start /= 2; 5025 incr /= 2; 5026 if (start == 0) { 5027 start = syncer_maxdelay / 2; 5028 incr = syncer_maxdelay; 5029 } 5030 next = start; 5031 } 5032 bo = &vp->v_bufobj; 5033 BO_LOCK(bo); 5034 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5035 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
*/ 5036 mtx_lock(&sync_mtx); 5037 sync_vnode_count++; 5038 if (mp->mnt_syncer == NULL) { 5039 mp->mnt_syncer = vp; 5040 vp = NULL; 5041 } 5042 mtx_unlock(&sync_mtx); 5043 BO_UNLOCK(bo); 5044 if (vp != NULL) { 5045 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5046 vgone(vp); 5047 vput(vp); 5048 } 5049 } 5050 5051 void 5052 vfs_deallocate_syncvnode(struct mount *mp) 5053 { 5054 struct vnode *vp; 5055 5056 mtx_lock(&sync_mtx); 5057 vp = mp->mnt_syncer; 5058 if (vp != NULL) 5059 mp->mnt_syncer = NULL; 5060 mtx_unlock(&sync_mtx); 5061 if (vp != NULL) 5062 vrele(vp); 5063 } 5064 5065 /* 5066 * Do a lazy sync of the filesystem. 5067 */ 5068 static int 5069 sync_fsync(struct vop_fsync_args *ap) 5070 { 5071 struct vnode *syncvp = ap->a_vp; 5072 struct mount *mp = syncvp->v_mount; 5073 int error, save; 5074 struct bufobj *bo; 5075 5076 /* 5077 * We only need to do something if this is a lazy evaluation. 5078 */ 5079 if (ap->a_waitfor != MNT_LAZY) 5080 return (0); 5081 5082 /* 5083 * Move ourselves to the back of the sync list. 5084 */ 5085 bo = &syncvp->v_bufobj; 5086 BO_LOCK(bo); 5087 vn_syncer_add_to_worklist(bo, syncdelay); 5088 BO_UNLOCK(bo); 5089 5090 /* 5091 * Walk the list of vnodes pushing all that are dirty and 5092 * not already on the sync list. 5093 */ 5094 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5095 return (0); 5096 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 5097 vfs_unbusy(mp); 5098 return (0); 5099 } 5100 save = curthread_pflags_set(TDP_SYNCIO); 5101 /* 5102 * The filesystem at hand may be idle with free vnodes stored in the 5103 * batch. Return them instead of letting them stay there indefinitely. 5104 */ 5105 vfs_periodic(mp, MNT_NOWAIT); 5106 error = VFS_SYNC(mp, MNT_LAZY); 5107 curthread_pflags_restore(save); 5108 vn_finished_write(mp); 5109 vfs_unbusy(mp); 5110 return (error); 5111 } 5112 5113 /* 5114 * The syncer vnode is no longer referenced. 5115 */ 5116 static int 5117 sync_inactive(struct vop_inactive_args *ap) 5118 { 5119 5120 vgone(ap->a_vp); 5121 return (0); 5122 } 5123 5124 /* 5125 * The syncer vnode is no longer needed and is being decommissioned. 5126 * 5127 * Modifications to the worklist must be protected by sync_mtx.
5128 */ 5129 static int 5130 sync_reclaim(struct vop_reclaim_args *ap) 5131 { 5132 struct vnode *vp = ap->a_vp; 5133 struct bufobj *bo; 5134 5135 bo = &vp->v_bufobj; 5136 BO_LOCK(bo); 5137 mtx_lock(&sync_mtx); 5138 if (vp->v_mount->mnt_syncer == vp) 5139 vp->v_mount->mnt_syncer = NULL; 5140 if (bo->bo_flag & BO_ONWORKLST) { 5141 LIST_REMOVE(bo, bo_synclist); 5142 syncer_worklist_len--; 5143 sync_vnode_count--; 5144 bo->bo_flag &= ~BO_ONWORKLST; 5145 } 5146 mtx_unlock(&sync_mtx); 5147 BO_UNLOCK(bo); 5148 5149 return (0); 5150 } 5151 5152 int 5153 vn_need_pageq_flush(struct vnode *vp) 5154 { 5155 struct vm_object *obj; 5156 5157 obj = vp->v_object; 5158 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5159 vm_object_mightbedirty(obj)); 5160 } 5161 5162 /* 5163 * Check if vnode represents a disk device 5164 */ 5165 bool 5166 vn_isdisk_error(struct vnode *vp, int *errp) 5167 { 5168 int error; 5169 5170 if (vp->v_type != VCHR) { 5171 error = ENOTBLK; 5172 goto out; 5173 } 5174 error = 0; 5175 dev_lock(); 5176 if (vp->v_rdev == NULL) 5177 error = ENXIO; 5178 else if (vp->v_rdev->si_devsw == NULL) 5179 error = ENXIO; 5180 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5181 error = ENOTBLK; 5182 dev_unlock(); 5183 out: 5184 *errp = error; 5185 return (error == 0); 5186 } 5187 5188 bool 5189 vn_isdisk(struct vnode *vp) 5190 { 5191 int error; 5192 5193 return (vn_isdisk_error(vp, &error)); 5194 } 5195 5196 /* 5197 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5198 * the comment above cache_fplookup for details. 5199 */ 5200 int 5201 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5202 { 5203 int error; 5204 5205 VFS_SMR_ASSERT_ENTERED(); 5206 5207 /* Check the owner. */ 5208 if (cred->cr_uid == file_uid) { 5209 if (file_mode & S_IXUSR) 5210 return (0); 5211 goto out_error; 5212 } 5213 5214 /* Otherwise, check the groups (first match) */ 5215 if (groupmember(file_gid, cred)) { 5216 if (file_mode & S_IXGRP) 5217 return (0); 5218 goto out_error; 5219 } 5220 5221 /* Otherwise, check everyone else. */ 5222 if (file_mode & S_IXOTH) 5223 return (0); 5224 out_error: 5225 /* 5226 * Permission check failed, but it is possible denial will get overwritten 5227 * (e.g., when root is traversing through a 700 directory owned by someone 5228 * else). 5229 * 5230 * vaccess() calls priv_check_cred which in turn can descend into MAC 5231 * modules overriding this result. It's quite unclear what semantics 5232 * are allowed for them to operate, thus for safety we don't call them 5233 * from within the SMR section. This also means if any such modules 5234 * are present, we have to let the regular lookup decide. 5235 */ 5236 error = priv_check_cred_vfs_lookup_nomac(cred); 5237 switch (error) { 5238 case 0: 5239 return (0); 5240 case EAGAIN: 5241 /* 5242 * MAC modules present. 5243 */ 5244 return (EAGAIN); 5245 case EPERM: 5246 return (EACCES); 5247 default: 5248 return (error); 5249 } 5250 } 5251 5252 /* 5253 * Common filesystem object access control check routine. Accepts a 5254 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5255 * Returns 0 on success, or an errno on failure.
5256 */ 5257 int 5258 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5259 accmode_t accmode, struct ucred *cred) 5260 { 5261 accmode_t dac_granted; 5262 accmode_t priv_granted; 5263 5264 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5265 ("invalid bit in accmode")); 5266 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5267 ("VAPPEND without VWRITE")); 5268 5269 /* 5270 * Look for a normal, non-privileged way to access the file/directory 5271 * as requested. If it exists, go with that. 5272 */ 5273 5274 dac_granted = 0; 5275 5276 /* Check the owner. */ 5277 if (cred->cr_uid == file_uid) { 5278 dac_granted |= VADMIN; 5279 if (file_mode & S_IXUSR) 5280 dac_granted |= VEXEC; 5281 if (file_mode & S_IRUSR) 5282 dac_granted |= VREAD; 5283 if (file_mode & S_IWUSR) 5284 dac_granted |= (VWRITE | VAPPEND); 5285 5286 if ((accmode & dac_granted) == accmode) 5287 return (0); 5288 5289 goto privcheck; 5290 } 5291 5292 /* Otherwise, check the groups (first match) */ 5293 if (groupmember(file_gid, cred)) { 5294 if (file_mode & S_IXGRP) 5295 dac_granted |= VEXEC; 5296 if (file_mode & S_IRGRP) 5297 dac_granted |= VREAD; 5298 if (file_mode & S_IWGRP) 5299 dac_granted |= (VWRITE | VAPPEND); 5300 5301 if ((accmode & dac_granted) == accmode) 5302 return (0); 5303 5304 goto privcheck; 5305 } 5306 5307 /* Otherwise, check everyone else. */ 5308 if (file_mode & S_IXOTH) 5309 dac_granted |= VEXEC; 5310 if (file_mode & S_IROTH) 5311 dac_granted |= VREAD; 5312 if (file_mode & S_IWOTH) 5313 dac_granted |= (VWRITE | VAPPEND); 5314 if ((accmode & dac_granted) == accmode) 5315 return (0); 5316 5317 privcheck: 5318 /* 5319 * Build a privilege mask to determine if the set of privileges 5320 * satisfies the requirements when combined with the granted mask 5321 * from above. For each privilege, if the privilege is required, 5322 * bitwise or the request type onto the priv_granted mask. 5323 */ 5324 priv_granted = 0; 5325 5326 if (type == VDIR) { 5327 /* 5328 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5329 * requests, instead of PRIV_VFS_EXEC. 5330 */ 5331 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5332 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5333 priv_granted |= VEXEC; 5334 } else { 5335 /* 5336 * Ensure that at least one execute bit is on. Otherwise, 5337 * a privileged user will always succeed, and we don't want 5338 * this to happen unless the file really is executable. 5339 */ 5340 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5341 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5342 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5343 priv_granted |= VEXEC; 5344 } 5345 5346 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5347 !priv_check_cred(cred, PRIV_VFS_READ)) 5348 priv_granted |= VREAD; 5349 5350 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5351 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5352 priv_granted |= (VWRITE | VAPPEND); 5353 5354 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5355 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5356 priv_granted |= VADMIN; 5357 5358 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5359 return (0); 5360 } 5361 5362 return ((accmode & VADMIN) ? EPERM : EACCES); 5363 } 5364 5365 /* 5366 * Credential check based on process requesting service, and per-attribute 5367 * permissions. 
5368 */ 5369 int 5370 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5371 struct thread *td, accmode_t accmode) 5372 { 5373 5374 /* 5375 * Kernel-invoked always succeeds. 5376 */ 5377 if (cred == NOCRED) 5378 return (0); 5379 5380 /* 5381 * Do not allow privileged processes in jail to directly manipulate 5382 * system attributes. 5383 */ 5384 switch (attrnamespace) { 5385 case EXTATTR_NAMESPACE_SYSTEM: 5386 /* Potentially should be: return (EPERM); */ 5387 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5388 case EXTATTR_NAMESPACE_USER: 5389 return (VOP_ACCESS(vp, accmode, cred, td)); 5390 default: 5391 return (EPERM); 5392 } 5393 } 5394 5395 #ifdef DEBUG_VFS_LOCKS 5396 /* 5397 * This only exists to suppress warnings from unlocked specfs accesses. It is 5398 * no longer ok to have an unlocked VFS. 5399 */ 5400 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5401 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5402 5403 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5404 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5405 "Drop into debugger on lock violation"); 5406 5407 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5408 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5409 0, "Check for interlock across VOPs"); 5410 5411 int vfs_badlock_print = 1; /* Print lock violations. */ 5412 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5413 0, "Print lock violations"); 5414 5415 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5416 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5417 0, "Print vnode details on lock violations"); 5418 5419 #ifdef KDB 5420 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 5421 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5422 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5423 #endif 5424 5425 static void 5426 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5427 { 5428 5429 #ifdef KDB 5430 if (vfs_badlock_backtrace) 5431 kdb_backtrace(); 5432 #endif 5433 if (vfs_badlock_vnode) 5434 vn_printf(vp, "vnode "); 5435 if (vfs_badlock_print) 5436 printf("%s: %p %s\n", str, (void *)vp, msg); 5437 if (vfs_badlock_ddb) 5438 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5439 } 5440 5441 void 5442 assert_vi_locked(struct vnode *vp, const char *str) 5443 { 5444 5445 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5446 vfs_badlock("interlock is not locked but should be", str, vp); 5447 } 5448 5449 void 5450 assert_vi_unlocked(struct vnode *vp, const char *str) 5451 { 5452 5453 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5454 vfs_badlock("interlock is locked but should not be", str, vp); 5455 } 5456 5457 void 5458 assert_vop_locked(struct vnode *vp, const char *str) 5459 { 5460 int locked; 5461 5462 if (!IGNORE_LOCK(vp)) { 5463 locked = VOP_ISLOCKED(vp); 5464 if (locked == 0 || locked == LK_EXCLOTHER) 5465 vfs_badlock("is not locked but should be", str, vp); 5466 } 5467 } 5468 5469 void 5470 assert_vop_unlocked(struct vnode *vp, const char *str) 5471 { 5472 5473 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5474 vfs_badlock("is locked but should not be", str, vp); 5475 } 5476 5477 void 5478 assert_vop_elocked(struct vnode *vp, const char *str) 5479 { 5480 5481 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5482 vfs_badlock("is not exclusive locked but should be", str, vp); 5483 } 5484 #endif /* DEBUG_VFS_LOCKS */ 5485 5486 void 5487 vop_rename_fail(struct vop_rename_args *ap) 5488 { 5489 5490 if (ap->a_tvp != NULL) 5491 vput(ap->a_tvp); 5492 if (ap->a_tdvp == ap->a_tvp) 5493 vrele(ap->a_tdvp); 5494 else 5495 vput(ap->a_tdvp); 5496 vrele(ap->a_fdvp); 5497 vrele(ap->a_fvp); 5498 } 5499 5500 void 5501 vop_rename_pre(void *ap) 5502 { 5503 struct vop_rename_args *a = ap; 5504 5505 #ifdef DEBUG_VFS_LOCKS 5506 if (a->a_tvp) 5507 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5508 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5509 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5510 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5511 5512 /* Check the source (from). */ 5513 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5514 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5515 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5516 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5517 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5518 5519 /* Check the target. */ 5520 if (a->a_tvp) 5521 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5522 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5523 #endif 5524 /* 5525 * It may be tempting to add vn_seqc_write_begin/end calls here and 5526 * in vop_rename_post but that's not going to work out since some 5527 * filesystems relookup vnodes mid-rename. This is probably a bug. 5528 * 5529 * For now filesystems are expected to do the relevant calls after they 5530 * decide what vnodes to operate on. 
5531 */ 5532 if (a->a_tdvp != a->a_fdvp) 5533 vhold(a->a_fdvp); 5534 if (a->a_tvp != a->a_fvp) 5535 vhold(a->a_fvp); 5536 vhold(a->a_tdvp); 5537 if (a->a_tvp) 5538 vhold(a->a_tvp); 5539 } 5540 5541 #ifdef DEBUG_VFS_LOCKS 5542 void 5543 vop_fplookup_vexec_debugpre(void *ap __unused) 5544 { 5545 5546 VFS_SMR_ASSERT_ENTERED(); 5547 } 5548 5549 void 5550 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5551 { 5552 5553 VFS_SMR_ASSERT_ENTERED(); 5554 } 5555 5556 void 5557 vop_fplookup_symlink_debugpre(void *ap __unused) 5558 { 5559 5560 VFS_SMR_ASSERT_ENTERED(); 5561 } 5562 5563 void 5564 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5565 { 5566 5567 VFS_SMR_ASSERT_ENTERED(); 5568 } 5569 void 5570 vop_strategy_debugpre(void *ap) 5571 { 5572 struct vop_strategy_args *a; 5573 struct buf *bp; 5574 5575 a = ap; 5576 bp = a->a_bp; 5577 5578 /* 5579 * Cluster ops lock their component buffers but not the IO container. 5580 */ 5581 if ((bp->b_flags & B_CLUSTER) != 0) 5582 return; 5583 5584 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5585 if (vfs_badlock_print) 5586 printf( 5587 "VOP_STRATEGY: bp is not locked but should be\n"); 5588 if (vfs_badlock_ddb) 5589 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5590 } 5591 } 5592 5593 void 5594 vop_lock_debugpre(void *ap) 5595 { 5596 struct vop_lock1_args *a = ap; 5597 5598 if ((a->a_flags & LK_INTERLOCK) == 0) 5599 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5600 else 5601 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5602 } 5603 5604 void 5605 vop_lock_debugpost(void *ap, int rc) 5606 { 5607 struct vop_lock1_args *a = ap; 5608 5609 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5610 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5611 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5612 } 5613 5614 void 5615 vop_unlock_debugpre(void *ap) 5616 { 5617 struct vop_unlock_args *a = ap; 5618 5619 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5620 } 5621 5622 void 5623 vop_need_inactive_debugpre(void *ap) 5624 { 5625 struct vop_need_inactive_args *a = ap; 5626 5627 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5628 } 5629 5630 void 5631 vop_need_inactive_debugpost(void *ap, int rc) 5632 { 5633 struct vop_need_inactive_args *a = ap; 5634 5635 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5636 } 5637 #endif 5638 5639 void 5640 vop_create_pre(void *ap) 5641 { 5642 struct vop_create_args *a; 5643 struct vnode *dvp; 5644 5645 a = ap; 5646 dvp = a->a_dvp; 5647 vn_seqc_write_begin(dvp); 5648 } 5649 5650 void 5651 vop_create_post(void *ap, int rc) 5652 { 5653 struct vop_create_args *a; 5654 struct vnode *dvp; 5655 5656 a = ap; 5657 dvp = a->a_dvp; 5658 vn_seqc_write_end(dvp); 5659 if (!rc) 5660 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5661 } 5662 5663 void 5664 vop_whiteout_pre(void *ap) 5665 { 5666 struct vop_whiteout_args *a; 5667 struct vnode *dvp; 5668 5669 a = ap; 5670 dvp = a->a_dvp; 5671 vn_seqc_write_begin(dvp); 5672 } 5673 5674 void 5675 vop_whiteout_post(void *ap, int rc) 5676 { 5677 struct vop_whiteout_args *a; 5678 struct vnode *dvp; 5679 5680 a = ap; 5681 dvp = a->a_dvp; 5682 vn_seqc_write_end(dvp); 5683 } 5684 5685 void 5686 vop_deleteextattr_pre(void *ap) 5687 { 5688 struct vop_deleteextattr_args *a; 5689 struct vnode *vp; 5690 5691 a = ap; 5692 vp = a->a_vp; 5693 vn_seqc_write_begin(vp); 5694 } 5695 5696 void 5697 vop_deleteextattr_post(void *ap, int rc) 5698 { 5699 struct vop_deleteextattr_args *a; 5700 struct vnode *vp; 5701 5702 a = ap; 5703 vp = a->a_vp; 5704 vn_seqc_write_end(vp); 5705 if (!rc) 5706 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 
5707 } 5708 5709 void 5710 vop_link_pre(void *ap) 5711 { 5712 struct vop_link_args *a; 5713 struct vnode *vp, *tdvp; 5714 5715 a = ap; 5716 vp = a->a_vp; 5717 tdvp = a->a_tdvp; 5718 vn_seqc_write_begin(vp); 5719 vn_seqc_write_begin(tdvp); 5720 } 5721 5722 void 5723 vop_link_post(void *ap, int rc) 5724 { 5725 struct vop_link_args *a; 5726 struct vnode *vp, *tdvp; 5727 5728 a = ap; 5729 vp = a->a_vp; 5730 tdvp = a->a_tdvp; 5731 vn_seqc_write_end(vp); 5732 vn_seqc_write_end(tdvp); 5733 if (!rc) { 5734 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5735 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5736 } 5737 } 5738 5739 void 5740 vop_mkdir_pre(void *ap) 5741 { 5742 struct vop_mkdir_args *a; 5743 struct vnode *dvp; 5744 5745 a = ap; 5746 dvp = a->a_dvp; 5747 vn_seqc_write_begin(dvp); 5748 } 5749 5750 void 5751 vop_mkdir_post(void *ap, int rc) 5752 { 5753 struct vop_mkdir_args *a; 5754 struct vnode *dvp; 5755 5756 a = ap; 5757 dvp = a->a_dvp; 5758 vn_seqc_write_end(dvp); 5759 if (!rc) 5760 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5761 } 5762 5763 #ifdef DEBUG_VFS_LOCKS 5764 void 5765 vop_mkdir_debugpost(void *ap, int rc) 5766 { 5767 struct vop_mkdir_args *a; 5768 5769 a = ap; 5770 if (!rc) 5771 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5772 } 5773 #endif 5774 5775 void 5776 vop_mknod_pre(void *ap) 5777 { 5778 struct vop_mknod_args *a; 5779 struct vnode *dvp; 5780 5781 a = ap; 5782 dvp = a->a_dvp; 5783 vn_seqc_write_begin(dvp); 5784 } 5785 5786 void 5787 vop_mknod_post(void *ap, int rc) 5788 { 5789 struct vop_mknod_args *a; 5790 struct vnode *dvp; 5791 5792 a = ap; 5793 dvp = a->a_dvp; 5794 vn_seqc_write_end(dvp); 5795 if (!rc) 5796 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5797 } 5798 5799 void 5800 vop_reclaim_post(void *ap, int rc) 5801 { 5802 struct vop_reclaim_args *a; 5803 struct vnode *vp; 5804 5805 a = ap; 5806 vp = a->a_vp; 5807 ASSERT_VOP_IN_SEQC(vp); 5808 if (!rc) 5809 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5810 } 5811 5812 void 5813 vop_remove_pre(void *ap) 5814 { 5815 struct vop_remove_args *a; 5816 struct vnode *dvp, *vp; 5817 5818 a = ap; 5819 dvp = a->a_dvp; 5820 vp = a->a_vp; 5821 vn_seqc_write_begin(dvp); 5822 vn_seqc_write_begin(vp); 5823 } 5824 5825 void 5826 vop_remove_post(void *ap, int rc) 5827 { 5828 struct vop_remove_args *a; 5829 struct vnode *dvp, *vp; 5830 5831 a = ap; 5832 dvp = a->a_dvp; 5833 vp = a->a_vp; 5834 vn_seqc_write_end(dvp); 5835 vn_seqc_write_end(vp); 5836 if (!rc) { 5837 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5838 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5839 } 5840 } 5841 5842 void 5843 vop_rename_post(void *ap, int rc) 5844 { 5845 struct vop_rename_args *a = ap; 5846 long hint; 5847 5848 if (!rc) { 5849 hint = NOTE_WRITE; 5850 if (a->a_fdvp == a->a_tdvp) { 5851 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5852 hint |= NOTE_LINK; 5853 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5854 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5855 } else { 5856 hint |= NOTE_EXTEND; 5857 if (a->a_fvp->v_type == VDIR) 5858 hint |= NOTE_LINK; 5859 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5860 5861 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5862 a->a_tvp->v_type == VDIR) 5863 hint &= ~NOTE_LINK; 5864 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5865 } 5866 5867 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5868 if (a->a_tvp) 5869 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5870 } 5871 if (a->a_tdvp != a->a_fdvp) 5872 vdrop(a->a_fdvp); 5873 if (a->a_tvp != a->a_fvp) 5874 vdrop(a->a_fvp); 5875 vdrop(a->a_tdvp); 5876 if (a->a_tvp) 5877 vdrop(a->a_tvp); 5878 } 5879 5880 void 5881 vop_rmdir_pre(void *ap) 5882 { 5883 struct 
vop_rmdir_args *a; 5884 struct vnode *dvp, *vp; 5885 5886 a = ap; 5887 dvp = a->a_dvp; 5888 vp = a->a_vp; 5889 vn_seqc_write_begin(dvp); 5890 vn_seqc_write_begin(vp); 5891 } 5892 5893 void 5894 vop_rmdir_post(void *ap, int rc) 5895 { 5896 struct vop_rmdir_args *a; 5897 struct vnode *dvp, *vp; 5898 5899 a = ap; 5900 dvp = a->a_dvp; 5901 vp = a->a_vp; 5902 vn_seqc_write_end(dvp); 5903 vn_seqc_write_end(vp); 5904 if (!rc) { 5905 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5906 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5907 } 5908 } 5909 5910 void 5911 vop_setattr_pre(void *ap) 5912 { 5913 struct vop_setattr_args *a; 5914 struct vnode *vp; 5915 5916 a = ap; 5917 vp = a->a_vp; 5918 vn_seqc_write_begin(vp); 5919 } 5920 5921 void 5922 vop_setattr_post(void *ap, int rc) 5923 { 5924 struct vop_setattr_args *a; 5925 struct vnode *vp; 5926 5927 a = ap; 5928 vp = a->a_vp; 5929 vn_seqc_write_end(vp); 5930 if (!rc) 5931 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5932 } 5933 5934 void 5935 vop_setacl_pre(void *ap) 5936 { 5937 struct vop_setacl_args *a; 5938 struct vnode *vp; 5939 5940 a = ap; 5941 vp = a->a_vp; 5942 vn_seqc_write_begin(vp); 5943 } 5944 5945 void 5946 vop_setacl_post(void *ap, int rc __unused) 5947 { 5948 struct vop_setacl_args *a; 5949 struct vnode *vp; 5950 5951 a = ap; 5952 vp = a->a_vp; 5953 vn_seqc_write_end(vp); 5954 } 5955 5956 void 5957 vop_setextattr_pre(void *ap) 5958 { 5959 struct vop_setextattr_args *a; 5960 struct vnode *vp; 5961 5962 a = ap; 5963 vp = a->a_vp; 5964 vn_seqc_write_begin(vp); 5965 } 5966 5967 void 5968 vop_setextattr_post(void *ap, int rc) 5969 { 5970 struct vop_setextattr_args *a; 5971 struct vnode *vp; 5972 5973 a = ap; 5974 vp = a->a_vp; 5975 vn_seqc_write_end(vp); 5976 if (!rc) 5977 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5978 } 5979 5980 void 5981 vop_symlink_pre(void *ap) 5982 { 5983 struct vop_symlink_args *a; 5984 struct vnode *dvp; 5985 5986 a = ap; 5987 dvp = a->a_dvp; 5988 vn_seqc_write_begin(dvp); 5989 } 5990 5991 void 5992 vop_symlink_post(void *ap, int rc) 5993 { 5994 struct vop_symlink_args *a; 5995 struct vnode *dvp; 5996 5997 a = ap; 5998 dvp = a->a_dvp; 5999 vn_seqc_write_end(dvp); 6000 if (!rc) 6001 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6002 } 6003 6004 void 6005 vop_open_post(void *ap, int rc) 6006 { 6007 struct vop_open_args *a = ap; 6008 6009 if (!rc) 6010 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6011 } 6012 6013 void 6014 vop_close_post(void *ap, int rc) 6015 { 6016 struct vop_close_args *a = ap; 6017 6018 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6019 !VN_IS_DOOMED(a->a_vp))) { 6020 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6021 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6022 } 6023 } 6024 6025 void 6026 vop_read_post(void *ap, int rc) 6027 { 6028 struct vop_read_args *a = ap; 6029 6030 if (!rc) 6031 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6032 } 6033 6034 void 6035 vop_read_pgcache_post(void *ap, int rc) 6036 { 6037 struct vop_read_pgcache_args *a = ap; 6038 6039 if (!rc) 6040 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6041 } 6042 6043 void 6044 vop_readdir_post(void *ap, int rc) 6045 { 6046 struct vop_readdir_args *a = ap; 6047 6048 if (!rc) 6049 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6050 } 6051 6052 static struct knlist fs_knlist; 6053 6054 static void 6055 vfs_event_init(void *arg) 6056 { 6057 knlist_init_mtx(&fs_knlist, NULL); 6058 } 6059 /* XXX - correct order? 
*/ 6060 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6061 6062 void 6063 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6064 { 6065 6066 KNOTE_UNLOCKED(&fs_knlist, event); 6067 } 6068 6069 static int filt_fsattach(struct knote *kn); 6070 static void filt_fsdetach(struct knote *kn); 6071 static int filt_fsevent(struct knote *kn, long hint); 6072 6073 struct filterops fs_filtops = { 6074 .f_isfd = 0, 6075 .f_attach = filt_fsattach, 6076 .f_detach = filt_fsdetach, 6077 .f_event = filt_fsevent 6078 }; 6079 6080 static int 6081 filt_fsattach(struct knote *kn) 6082 { 6083 6084 kn->kn_flags |= EV_CLEAR; 6085 knlist_add(&fs_knlist, kn, 0); 6086 return (0); 6087 } 6088 6089 static void 6090 filt_fsdetach(struct knote *kn) 6091 { 6092 6093 knlist_remove(&fs_knlist, kn, 0); 6094 } 6095 6096 static int 6097 filt_fsevent(struct knote *kn, long hint) 6098 { 6099 6100 kn->kn_fflags |= kn->kn_sfflags & hint; 6101 6102 return (kn->kn_fflags != 0); 6103 } 6104 6105 static int 6106 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6107 { 6108 struct vfsidctl vc; 6109 int error; 6110 struct mount *mp; 6111 6112 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6113 if (error) 6114 return (error); 6115 if (vc.vc_vers != VFS_CTL_VERS1) 6116 return (EINVAL); 6117 mp = vfs_getvfs(&vc.vc_fsid); 6118 if (mp == NULL) 6119 return (ENOENT); 6120 /* ensure that a specific sysctl goes to the right filesystem. */ 6121 if (strcmp(vc.vc_fstypename, "*") != 0 && 6122 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6123 vfs_rel(mp); 6124 return (EINVAL); 6125 } 6126 VCTLTOREQ(&vc, req); 6127 error = VFS_SYSCTL(mp, vc.vc_op, req); 6128 vfs_rel(mp); 6129 return (error); 6130 } 6131 6132 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6133 NULL, 0, sysctl_vfs_ctl, "", 6134 "Sysctl by fsid"); 6135 6136 /* 6137 * Function to initialize a va_filerev field sensibly. 6138 * XXX: Wouldn't a random number make a lot more sense ?? 
6139 */ 6140 u_quad_t 6141 init_va_filerev(void) 6142 { 6143 struct bintime bt; 6144 6145 getbinuptime(&bt); 6146 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6147 } 6148 6149 static int filt_vfsread(struct knote *kn, long hint); 6150 static int filt_vfswrite(struct knote *kn, long hint); 6151 static int filt_vfsvnode(struct knote *kn, long hint); 6152 static void filt_vfsdetach(struct knote *kn); 6153 static struct filterops vfsread_filtops = { 6154 .f_isfd = 1, 6155 .f_detach = filt_vfsdetach, 6156 .f_event = filt_vfsread 6157 }; 6158 static struct filterops vfswrite_filtops = { 6159 .f_isfd = 1, 6160 .f_detach = filt_vfsdetach, 6161 .f_event = filt_vfswrite 6162 }; 6163 static struct filterops vfsvnode_filtops = { 6164 .f_isfd = 1, 6165 .f_detach = filt_vfsdetach, 6166 .f_event = filt_vfsvnode 6167 }; 6168 6169 static void 6170 vfs_knllock(void *arg) 6171 { 6172 struct vnode *vp = arg; 6173 6174 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6175 } 6176 6177 static void 6178 vfs_knlunlock(void *arg) 6179 { 6180 struct vnode *vp = arg; 6181 6182 VOP_UNLOCK(vp); 6183 } 6184 6185 static void 6186 vfs_knl_assert_lock(void *arg, int what) 6187 { 6188 #ifdef DEBUG_VFS_LOCKS 6189 struct vnode *vp = arg; 6190 6191 if (what == LA_LOCKED) 6192 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6193 else 6194 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6195 #endif 6196 } 6197 6198 int 6199 vfs_kqfilter(struct vop_kqfilter_args *ap) 6200 { 6201 struct vnode *vp = ap->a_vp; 6202 struct knote *kn = ap->a_kn; 6203 struct knlist *knl; 6204 6205 switch (kn->kn_filter) { 6206 case EVFILT_READ: 6207 kn->kn_fop = &vfsread_filtops; 6208 break; 6209 case EVFILT_WRITE: 6210 kn->kn_fop = &vfswrite_filtops; 6211 break; 6212 case EVFILT_VNODE: 6213 kn->kn_fop = &vfsvnode_filtops; 6214 break; 6215 default: 6216 return (EINVAL); 6217 } 6218 6219 kn->kn_hook = (caddr_t)vp; 6220 6221 v_addpollinfo(vp); 6222 if (vp->v_pollinfo == NULL) 6223 return (ENOMEM); 6224 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6225 vhold(vp); 6226 knlist_add(knl, kn, 0); 6227 6228 return (0); 6229 } 6230 6231 /* 6232 * Detach knote from vnode 6233 */ 6234 static void 6235 filt_vfsdetach(struct knote *kn) 6236 { 6237 struct vnode *vp = (struct vnode *)kn->kn_hook; 6238 6239 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6240 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6241 vdrop(vp); 6242 } 6243 6244 /*ARGSUSED*/ 6245 static int 6246 filt_vfsread(struct knote *kn, long hint) 6247 { 6248 struct vnode *vp = (struct vnode *)kn->kn_hook; 6249 struct vattr va; 6250 int res; 6251 6252 /* 6253 * filesystem is gone, so set the EOF flag and schedule 6254 * the knote for deletion. 6255 */ 6256 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6257 VI_LOCK(vp); 6258 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6259 VI_UNLOCK(vp); 6260 return (1); 6261 } 6262 6263 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6264 return (0); 6265 6266 VI_LOCK(vp); 6267 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6268 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6269 VI_UNLOCK(vp); 6270 return (res); 6271 } 6272 6273 /*ARGSUSED*/ 6274 static int 6275 filt_vfswrite(struct knote *kn, long hint) 6276 { 6277 struct vnode *vp = (struct vnode *)kn->kn_hook; 6278 6279 VI_LOCK(vp); 6280 6281 /* 6282 * filesystem is gone, so set the EOF flag and schedule 6283 * the knote for deletion. 
/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

/*
 * Returns whether the directory is empty or not.
 * If it is empty, the return value is 0; otherwise
 * the return value is an error value (which may
 * be ENOTEMPTY).
 */
int
vfs_emptydir(struct vnode *vp)
{
	struct uio uio;
	struct iovec iov;
	struct dirent *dirent, *dp, *endp;
	int error, eof;

	error = 0;
	eof = 0;

	ASSERT_VOP_LOCKED(vp, "vfs_emptydir");

	dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK);
	iov.iov_base = dirent;
	iov.iov_len = sizeof(struct dirent);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(struct dirent);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	while (eof == 0 && error == 0) {
		error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof,
		    NULL, NULL);
		if (error != 0)
			break;
		endp = (void *)((uint8_t *)dirent +
		    sizeof(struct dirent) - uio.uio_resid);
		for (dp = dirent; dp < endp;
		     dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) {
			if (dp->d_type == DT_WHT)
				continue;
			if (dp->d_namlen == 0)
				continue;
			if (dp->d_type != DT_DIR &&
			    dp->d_type != DT_UNKNOWN) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen > 2) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] != '.') {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 2 &&
			    dp->d_name[1] != '.') {
				error = ENOTEMPTY;
				break;
			}
			uio.uio_resid = sizeof(struct dirent);
		}
	}
	free(dirent, M_TEMP);
	return (error);
}

int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL)
				free(ap->a_cookies, M_TEMP);
			ap->a_cookies = NULL;
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	*ap->a_ncookies += 1;
	return (0);
}

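/*
 * Example (sketch, details vary by filesystem): a VOP_READDIR
 * implementation can emit one entry at a time through vfs_read_dirent(),
 * which copies the entry out and records the seek cookie when the caller
 * asked for cookies.  "ino", "name", "namelen" and "next_off" below are
 * placeholders:
 *
 *	struct dirent de;
 *
 *	bzero(&de, sizeof(de));
 *	de.d_fileno = ino;
 *	de.d_type = DT_REG;
 *	de.d_namlen = namelen;
 *	bcopy(name, de.d_name, namelen);
 *	de.d_reclen = GENERIC_DIRSIZ(&de);
 *	error = vfs_read_dirent(ap, &de, next_off);
 *
 * ENAMETOOLONG from vfs_read_dirent() means the entry no longer fits in
 * the caller's buffer; the usual reaction is to stop and return success,
 * resuming from the recorded offset on the next call.
 */
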
/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine. If it
 * returns nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits. Caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}

/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted. Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode returned "
					    "by VFS_CACHEDROOT and the one cached "
					    "(%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct mount_pcpu *mpcpu;
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp, mpcpu))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp, mpcpu);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp, mpcpu);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}

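/*
 * Example (sketch): a caller needing the root vnode of a mounted
 * filesystem, e.g. when crossing a mount point, would typically do:
 *
 *	error = vfs_cache_root(mp, LK_SHARED, &vp);
 *	if (error != 0)
 *		return (error);
 *	... use the vnode ...
 *	vput(vp);
 *
 * On success the vnode is returned referenced and locked with the
 * requested flags; vput() drops both.
 */
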
struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * ops > 0 guarantees there is nobody who can see this vnode
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

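/*
 * Example (sketch): filesystems are expected to iterate through the
 * MNT_VNODE_FOREACH_ALL() wrapper from sys/mount.h rather than call the
 * helpers above directly.  The loop body runs with the vnode interlock
 * held on vp:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		... take a reference or the vnode lock, then do work ...
 *	}
 *
 * MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp) is the documented way to leave the
 * loop early without leaking the marker.
 */
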
/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired. On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note we may be racing against vdrop which transitioned the hold
	 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine:
	 * if we are the only user after we get the interlock we will just
	 * vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}

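/*
 * Example (sketch): consumers drive the lazy iterator below through
 * MNT_VNODE_FOREACH_LAZY() from sys/mount.h.  The callback pre-filters
 * vnodes without the interlock held; vnodes it accepts are handed back to
 * the loop body with the interlock held.  The callback name here is
 * illustrative only:
 *
 *	static int
 *	wants_work(struct vnode *vp, void *arg __unused)
 *	{
 *		return (vp->v_object != NULL &&
 *		    vm_object_mightbedirty(vp->v_object));
 *	}
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, wants_work, NULL) {
 *		... vp is interlocked here ...
 *		VI_UNLOCK(vp);
 *	}
 *
 * The periodic filesystem sync is the typical in-tree consumer.
 */
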
6771 */ 6772 if (!VI_TRYLOCK(vp) && 6773 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6774 goto restart; 6775 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6776 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6777 ("alien vnode on the lazy list %p %p", vp, mp)); 6778 VNPASS(vp->v_mount == mp, vp); 6779 VNPASS(!VN_IS_DOOMED(vp), vp); 6780 break; 6781 } 6782 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6783 6784 /* Check if we are done */ 6785 if (vp == NULL) { 6786 mtx_unlock(&mp->mnt_listmtx); 6787 mnt_vnode_markerfree_lazy(mvp, mp); 6788 return (NULL); 6789 } 6790 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6791 mtx_unlock(&mp->mnt_listmtx); 6792 ASSERT_VI_LOCKED(vp, "lazy iter"); 6793 return (vp); 6794 } 6795 6796 struct vnode * 6797 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6798 void *cbarg) 6799 { 6800 6801 if (should_yield()) 6802 kern_yield(PRI_USER); 6803 mtx_lock(&mp->mnt_listmtx); 6804 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6805 } 6806 6807 struct vnode * 6808 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6809 void *cbarg) 6810 { 6811 struct vnode *vp; 6812 6813 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6814 return (NULL); 6815 6816 *mvp = vn_alloc_marker(mp); 6817 MNT_ILOCK(mp); 6818 MNT_REF(mp); 6819 MNT_IUNLOCK(mp); 6820 6821 mtx_lock(&mp->mnt_listmtx); 6822 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6823 if (vp == NULL) { 6824 mtx_unlock(&mp->mnt_listmtx); 6825 mnt_vnode_markerfree_lazy(mvp, mp); 6826 return (NULL); 6827 } 6828 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6829 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6830 } 6831 6832 void 6833 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6834 { 6835 6836 if (*mvp == NULL) 6837 return; 6838 6839 mtx_lock(&mp->mnt_listmtx); 6840 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6841 mtx_unlock(&mp->mnt_listmtx); 6842 mnt_vnode_markerfree_lazy(mvp, mp); 6843 } 6844 6845 int 6846 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 6847 { 6848 6849 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 6850 cnp->cn_flags &= ~NOEXECCHECK; 6851 return (0); 6852 } 6853 6854 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread)); 6855 } 6856 6857 /* 6858 * Do not use this variant unless you have means other than the hold count 6859 * to prevent the vnode from getting freed. 6860 */ 6861 void 6862 vn_seqc_write_begin_locked(struct vnode *vp) 6863 { 6864 6865 ASSERT_VI_LOCKED(vp, __func__); 6866 VNPASS(vp->v_holdcnt > 0, vp); 6867 VNPASS(vp->v_seqc_users >= 0, vp); 6868 vp->v_seqc_users++; 6869 if (vp->v_seqc_users == 1) 6870 seqc_sleepable_write_begin(&vp->v_seqc); 6871 } 6872 6873 void 6874 vn_seqc_write_begin(struct vnode *vp) 6875 { 6876 6877 VI_LOCK(vp); 6878 vn_seqc_write_begin_locked(vp); 6879 VI_UNLOCK(vp); 6880 } 6881 6882 void 6883 vn_seqc_write_end_locked(struct vnode *vp) 6884 { 6885 6886 ASSERT_VI_LOCKED(vp, __func__); 6887 VNPASS(vp->v_seqc_users > 0, vp); 6888 vp->v_seqc_users--; 6889 if (vp->v_seqc_users == 0) 6890 seqc_sleepable_write_end(&vp->v_seqc); 6891 } 6892 6893 void 6894 vn_seqc_write_end(struct vnode *vp) 6895 { 6896 6897 VI_LOCK(vp); 6898 vn_seqc_write_end_locked(vp); 6899 VI_UNLOCK(vp); 6900 } 6901 6902 /* 6903 * Special case handling for allocating and freeing vnodes. 6904 * 6905 * The counter remains unchanged on free so that a doomed vnode will 6906 * keep testing as in modify as long as it is accessible with SMR. 
6907 */ 6908 static void 6909 vn_seqc_init(struct vnode *vp) 6910 { 6911 6912 vp->v_seqc = 0; 6913 vp->v_seqc_users = 0; 6914 } 6915 6916 static void 6917 vn_seqc_write_end_free(struct vnode *vp) 6918 { 6919 6920 VNPASS(seqc_in_modify(vp->v_seqc), vp); 6921 VNPASS(vp->v_seqc_users == 1, vp); 6922 } 6923 6924 void 6925 vn_irflag_set_locked(struct vnode *vp, short toset) 6926 { 6927 short flags; 6928 6929 ASSERT_VI_LOCKED(vp, __func__); 6930 flags = vn_irflag_read(vp); 6931 VNASSERT((flags & toset) == 0, vp, 6932 ("%s: some of the passed flags already set (have %d, passed %d)\n", 6933 __func__, flags, toset)); 6934 atomic_store_short(&vp->v_irflag, flags | toset); 6935 } 6936 6937 void 6938 vn_irflag_set(struct vnode *vp, short toset) 6939 { 6940 6941 VI_LOCK(vp); 6942 vn_irflag_set_locked(vp, toset); 6943 VI_UNLOCK(vp); 6944 } 6945 6946 void 6947 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 6948 { 6949 short flags; 6950 6951 ASSERT_VI_LOCKED(vp, __func__); 6952 flags = vn_irflag_read(vp); 6953 atomic_store_short(&vp->v_irflag, flags | toset); 6954 } 6955 6956 void 6957 vn_irflag_set_cond(struct vnode *vp, short toset) 6958 { 6959 6960 VI_LOCK(vp); 6961 vn_irflag_set_cond_locked(vp, toset); 6962 VI_UNLOCK(vp); 6963 } 6964 6965 void 6966 vn_irflag_unset_locked(struct vnode *vp, short tounset) 6967 { 6968 short flags; 6969 6970 ASSERT_VI_LOCKED(vp, __func__); 6971 flags = vn_irflag_read(vp); 6972 VNASSERT((flags & tounset) == tounset, vp, 6973 ("%s: some of the passed flags not set (have %d, passed %d)\n", 6974 __func__, flags, tounset)); 6975 atomic_store_short(&vp->v_irflag, flags & ~tounset); 6976 } 6977 6978 void 6979 vn_irflag_unset(struct vnode *vp, short tounset) 6980 { 6981 6982 VI_LOCK(vp); 6983 vn_irflag_unset_locked(vp, tounset); 6984 VI_UNLOCK(vp); 6985 } 6986