1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/asan.h> 52 #include <sys/bio.h> 53 #include <sys/buf.h> 54 #include <sys/capsicum.h> 55 #include <sys/condvar.h> 56 #include <sys/conf.h> 57 #include <sys/counter.h> 58 #include <sys/dirent.h> 59 #include <sys/event.h> 60 #include <sys/eventhandler.h> 61 #include <sys/extattr.h> 62 #include <sys/file.h> 63 #include <sys/fcntl.h> 64 #include <sys/jail.h> 65 #include <sys/kdb.h> 66 #include <sys/kernel.h> 67 #include <sys/kthread.h> 68 #include <sys/ktr.h> 69 #include <sys/lockf.h> 70 #include <sys/malloc.h> 71 #include <sys/mount.h> 72 #include <sys/namei.h> 73 #include <sys/pctrie.h> 74 #include <sys/priv.h> 75 #include <sys/reboot.h> 76 #include <sys/refcount.h> 77 #include <sys/rwlock.h> 78 #include <sys/sched.h> 79 #include <sys/sleepqueue.h> 80 #include <sys/smr.h> 81 #include <sys/smp.h> 82 #include <sys/stat.h> 83 #include <sys/sysctl.h> 84 #include <sys/syslog.h> 85 #include <sys/vmmeter.h> 86 #include <sys/vnode.h> 87 #include <sys/watchdog.h> 88 89 #include <machine/stdarg.h> 90 91 #include <security/mac/mac_framework.h> 92 93 #include <vm/vm.h> 94 #include <vm/vm_object.h> 95 #include <vm/vm_extern.h> 96 #include <vm/pmap.h> 97 #include <vm/vm_map.h> 98 #include <vm/vm_page.h> 99 #include <vm/vm_kern.h> 100 #include <vm/uma.h> 101 102 #ifdef DDB 103 #include <ddb/ddb.h> 104 #endif 105 106 static void delmntque(struct vnode *vp); 107 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 108 int slpflag, int slptimeo); 109 static void syncer_shutdown(void *arg, int howto); 110 static int vtryrecycle(struct vnode *vp); 111 static void v_init_counters(struct vnode *); 112 static void vn_seqc_init(struct vnode *); 113 static void vn_seqc_write_end_free(struct vnode *vp); 114 static void vgonel(struct vnode *); 115 static bool vhold_recycle_free(struct vnode *); 116 static void vfs_knllock(void *arg); 117 static void vfs_knlunlock(void *arg); 118 static void vfs_knl_assert_lock(void *arg, int what); 119 static void destroy_vpollinfo(struct vpollinfo *vi); 120 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 121 daddr_t startlbn, daddr_t endlbn); 122 static void vnlru_recalc(void); 123 124 /* 125 * Number of vnodes in existence. Increased whenever getnewvnode() 126 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 127 */ 128 static u_long __exclusive_cache_line numvnodes; 129 130 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 131 "Number of vnodes in existence"); 132 133 static counter_u64_t vnodes_created; 134 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 135 "Number of vnodes created by getnewvnode"); 136 137 /* 138 * Conversion tables for conversion from vnode types to inode formats 139 * and back. 140 */ 141 enum vtype iftovt_tab[16] = { 142 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 143 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 144 }; 145 int vttoif_tab[10] = { 146 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 147 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 148 }; 149 150 /* 151 * List of allocates vnodes in the system. 
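 * Two marker vnodes (declared below) are threaded onto the list so that
 * vnlru_free_impl() and vlrureclaim() can remember where their previous
 * scans stopped; they are of type VMARKER and every walker of the list
 * skips them.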
152 */ 153 static TAILQ_HEAD(freelst, vnode) vnode_list; 154 static struct vnode *vnode_list_free_marker; 155 static struct vnode *vnode_list_reclaim_marker; 156 157 /* 158 * "Free" vnode target. Free vnodes are rarely completely free, but are 159 * just ones that are cheap to recycle. Usually they are for files which 160 * have been stat'd but not read; these usually have inode and namecache 161 * data attached to them. This target is the preferred minimum size of a 162 * sub-cache consisting mostly of such files. The system balances the size 163 * of this sub-cache with its complement to try to prevent either from 164 * thrashing while the other is relatively inactive. The targets express 165 * a preference for the best balance. 166 * 167 * "Above" this target there are 2 further targets (watermarks) related 168 * to recyling of free vnodes. In the best-operating case, the cache is 169 * exactly full, the free list has size between vlowat and vhiwat above the 170 * free target, and recycling from it and normal use maintains this state. 171 * Sometimes the free list is below vlowat or even empty, but this state 172 * is even better for immediate use provided the cache is not full. 173 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 174 * ones) to reach one of these states. The watermarks are currently hard- 175 * coded as 4% and 9% of the available space higher. These and the default 176 * of 25% for wantfreevnodes are too large if the memory size is large. 177 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 178 * whenever vnlru_proc() becomes active. 179 */ 180 static long wantfreevnodes; 181 static long __exclusive_cache_line freevnodes; 182 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 183 &freevnodes, 0, "Number of \"free\" vnodes"); 184 static long freevnodes_old; 185 186 static counter_u64_t recycles_count; 187 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 188 "Number of vnodes recycled to meet vnode cache targets"); 189 190 static counter_u64_t recycles_free_count; 191 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 192 "Number of free vnodes recycled to meet vnode cache targets"); 193 194 static counter_u64_t deferred_inact; 195 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact, 196 "Number of times inactive processing was deferred"); 197 198 /* To keep more than one thread at a time from running vfs_getnewfsid */ 199 static struct mtx mntid_mtx; 200 201 /* 202 * Lock for any access to the following: 203 * vnode_list 204 * numvnodes 205 * freevnodes 206 */ 207 static struct mtx __exclusive_cache_line vnode_list_mtx; 208 209 /* Publicly exported FS */ 210 struct nfs_public nfs_pub; 211 212 static uma_zone_t buf_trie_zone; 213 static smr_t buf_trie_smr; 214 215 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 216 static uma_zone_t vnode_zone; 217 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 218 219 __read_frequently smr_t vfs_smr; 220 221 /* 222 * The workitem queue. 223 * 224 * It is useful to delay writes of file data and filesystem metadata 225 * for tens of seconds so that quickly created and deleted files need 226 * not waste disk bandwidth being created and removed. To realize this, 227 * we append vnodes to a "workitem" queue. When running with a soft 228 * updates implementation, most pending metadata dependencies should 229 * not wait for more than a few seconds. 
Thus, mounted on block devices 230 * are delayed only about a half the time that file data is delayed. 231 * Similarly, directory updates are more critical, so are only delayed 232 * about a third the time that file data is delayed. Thus, there are 233 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 234 * one each second (driven off the filesystem syncer process). The 235 * syncer_delayno variable indicates the next queue that is to be processed. 236 * Items that need to be processed soon are placed in this queue: 237 * 238 * syncer_workitem_pending[syncer_delayno] 239 * 240 * A delay of fifteen seconds is done by placing the request fifteen 241 * entries later in the queue: 242 * 243 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 244 * 245 */ 246 static int syncer_delayno; 247 static long syncer_mask; 248 LIST_HEAD(synclist, bufobj); 249 static struct synclist *syncer_workitem_pending; 250 /* 251 * The sync_mtx protects: 252 * bo->bo_synclist 253 * sync_vnode_count 254 * syncer_delayno 255 * syncer_state 256 * syncer_workitem_pending 257 * syncer_worklist_len 258 * rushjob 259 */ 260 static struct mtx sync_mtx; 261 static struct cv sync_wakeup; 262 263 #define SYNCER_MAXDELAY 32 264 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 265 static int syncdelay = 30; /* max time to delay syncing data */ 266 static int filedelay = 30; /* time to delay syncing files */ 267 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 268 "Time to delay syncing files (in seconds)"); 269 static int dirdelay = 29; /* time to delay syncing directories */ 270 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 271 "Time to delay syncing directories (in seconds)"); 272 static int metadelay = 28; /* time to delay syncing metadata */ 273 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 274 "Time to delay syncing metadata (in seconds)"); 275 static int rushjob; /* number of slots to run ASAP */ 276 static int stat_rush_requests; /* number of times I/O speeded up */ 277 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 278 "Number of times I/O speeded up (rush requests)"); 279 280 #define VDBATCH_SIZE 8 281 struct vdbatch { 282 u_int index; 283 long freevnodes; 284 struct mtx lock; 285 struct vnode *tab[VDBATCH_SIZE]; 286 }; 287 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 288 289 static void vdbatch_dequeue(struct vnode *vp); 290 291 /* 292 * When shutting down the syncer, run it at four times normal speed. 293 */ 294 #define SYNCER_SHUTDOWN_SPEEDUP 4 295 static int sync_vnode_count; 296 static int syncer_worklist_len; 297 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 298 syncer_state; 299 300 /* Target for maximum number of vnodes. */ 301 u_long desiredvnodes; 302 static u_long gapvnodes; /* gap between wanted and desired */ 303 static u_long vhiwat; /* enough extras after expansion */ 304 static u_long vlowat; /* minimal extras before expansion */ 305 static u_long vstir; /* nonzero to stir non-free vnodes */ 306 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 307 308 static u_long vnlru_read_freevnodes(void); 309 310 /* 311 * Note that no attempt is made to sanitize these parameters. 
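 *
 * A small usage sketch (the shell values below are purely illustrative):
 * the target can be inspected and resized at run time with sysctl(8), and
 * the handler below then re-derives wantfreevnodes and the vnlru
 * watermarks via vnlru_recalc():
 *
 *	# sysctl kern.maxvnodes
 *	kern.maxvnodes: 404288
 *	# sysctl kern.maxvnodes=500000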
312 */ 313 static int 314 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 315 { 316 u_long val; 317 int error; 318 319 val = desiredvnodes; 320 error = sysctl_handle_long(oidp, &val, 0, req); 321 if (error != 0 || req->newptr == NULL) 322 return (error); 323 324 if (val == desiredvnodes) 325 return (0); 326 mtx_lock(&vnode_list_mtx); 327 desiredvnodes = val; 328 wantfreevnodes = desiredvnodes / 4; 329 vnlru_recalc(); 330 mtx_unlock(&vnode_list_mtx); 331 /* 332 * XXX There is no protection against multiple threads changing 333 * desiredvnodes at the same time. Locking above only helps vnlru and 334 * getnewvnode. 335 */ 336 vfs_hash_changesize(desiredvnodes); 337 cache_changesize(desiredvnodes); 338 return (0); 339 } 340 341 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 342 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 343 "LU", "Target for maximum number of vnodes"); 344 345 static int 346 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 347 { 348 u_long val; 349 int error; 350 351 val = wantfreevnodes; 352 error = sysctl_handle_long(oidp, &val, 0, req); 353 if (error != 0 || req->newptr == NULL) 354 return (error); 355 356 if (val == wantfreevnodes) 357 return (0); 358 mtx_lock(&vnode_list_mtx); 359 wantfreevnodes = val; 360 vnlru_recalc(); 361 mtx_unlock(&vnode_list_mtx); 362 return (0); 363 } 364 365 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 366 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 367 "LU", "Target for minimum number of \"free\" vnodes"); 368 369 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 370 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 371 static int vnlru_nowhere; 372 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 373 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 374 375 static int 376 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 377 { 378 struct vnode *vp; 379 struct nameidata nd; 380 char *buf; 381 unsigned long ndflags; 382 int error; 383 384 if (req->newptr == NULL) 385 return (EINVAL); 386 if (req->newlen >= PATH_MAX) 387 return (E2BIG); 388 389 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 390 error = SYSCTL_IN(req, buf, req->newlen); 391 if (error != 0) 392 goto out; 393 394 buf[req->newlen] = '\0'; 395 396 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME; 397 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 398 if ((error = namei(&nd)) != 0) 399 goto out; 400 vp = nd.ni_vp; 401 402 if (VN_IS_DOOMED(vp)) { 403 /* 404 * This vnode is being recycled. Return != 0 to let the caller 405 * know that the sysctl had no effect. 
Return EAGAIN because a 406 * subsequent call will likely succeed (since namei will create 407 * a new vnode if necessary) 408 */ 409 error = EAGAIN; 410 goto putvnode; 411 } 412 413 counter_u64_add(recycles_count, 1); 414 vgone(vp); 415 putvnode: 416 NDFREE(&nd, 0); 417 out: 418 free(buf, M_TEMP); 419 return (error); 420 } 421 422 static int 423 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 424 { 425 struct thread *td = curthread; 426 struct vnode *vp; 427 struct file *fp; 428 int error; 429 int fd; 430 431 if (req->newptr == NULL) 432 return (EBADF); 433 434 error = sysctl_handle_int(oidp, &fd, 0, req); 435 if (error != 0) 436 return (error); 437 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 438 if (error != 0) 439 return (error); 440 vp = fp->f_vnode; 441 442 error = vn_lock(vp, LK_EXCLUSIVE); 443 if (error != 0) 444 goto drop; 445 446 counter_u64_add(recycles_count, 1); 447 vgone(vp); 448 VOP_UNLOCK(vp); 449 drop: 450 fdrop(fp, td); 451 return (error); 452 } 453 454 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 455 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 456 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 457 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 458 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 459 sysctl_ftry_reclaim_vnode, "I", 460 "Try to reclaim a vnode by its file descriptor"); 461 462 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 463 static int vnsz2log; 464 465 /* 466 * Support for the bufobj clean & dirty pctrie. 467 */ 468 static void * 469 buf_trie_alloc(struct pctrie *ptree) 470 { 471 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 472 } 473 474 static void 475 buf_trie_free(struct pctrie *ptree, void *node) 476 { 477 uma_zfree_smr(buf_trie_zone, node); 478 } 479 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 480 buf_trie_smr); 481 482 /* 483 * Initialize the vnode management data structures. 484 * 485 * Reevaluate the following cap on the number of vnodes after the physical 486 * memory size exceeds 512GB. In the limit, as the physical memory size 487 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 
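 *
 * A worked example of that limit: 512GB of physical memory is
 * 512 * 1024 * 1024 KB, and dividing by the 64KB-per-vnode ratio gives
 * the 8M vnode cap encoded in MAXVNODES_MAX below.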
488 */ 489 #ifndef MAXVNODES_MAX 490 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 491 #endif 492 493 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 494 495 static struct vnode * 496 vn_alloc_marker(struct mount *mp) 497 { 498 struct vnode *vp; 499 500 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 501 vp->v_type = VMARKER; 502 vp->v_mount = mp; 503 504 return (vp); 505 } 506 507 static void 508 vn_free_marker(struct vnode *vp) 509 { 510 511 MPASS(vp->v_type == VMARKER); 512 free(vp, M_VNODE_MARKER); 513 } 514 515 #ifdef KASAN 516 static int 517 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 518 { 519 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 520 return (0); 521 } 522 523 static void 524 vnode_dtor(void *mem, int size, void *arg __unused) 525 { 526 size_t end1, end2, off1, off2; 527 528 _Static_assert(offsetof(struct vnode, v_vnodelist) < 529 offsetof(struct vnode, v_dbatchcpu), 530 "KASAN marks require updating"); 531 532 off1 = offsetof(struct vnode, v_vnodelist); 533 off2 = offsetof(struct vnode, v_dbatchcpu); 534 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 535 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 536 537 /* 538 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 539 * after the vnode has been freed. Try to get some KASAN coverage by 540 * marking everything except those two fields as invalid. Because 541 * KASAN's tracking is not byte-granular, any preceding fields sharing 542 * the same 8-byte aligned word must also be marked valid. 543 */ 544 545 /* Handle the area from the start until v_vnodelist... */ 546 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 547 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 548 549 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 550 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 551 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 552 if (off2 > off1) 553 kasan_mark((void *)((char *)mem + off1), off2 - off1, 554 off2 - off1, KASAN_UMA_FREED); 555 556 /* ... and finally the area from v_dbatchcpu to the end. */ 557 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 558 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 559 KASAN_UMA_FREED); 560 } 561 #endif /* KASAN */ 562 563 /* 564 * Initialize a vnode as it first enters the zone. 565 */ 566 static int 567 vnode_init(void *mem, int size, int flags) 568 { 569 struct vnode *vp; 570 571 vp = mem; 572 bzero(vp, size); 573 /* 574 * Setup locks. 575 */ 576 vp->v_vnlock = &vp->v_lock; 577 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 578 /* 579 * By default, don't allow shared locks unless filesystems opt-in. 580 */ 581 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 582 LK_NOSHARE | LK_IS_VNODE); 583 /* 584 * Initialize bufobj. 585 */ 586 bufobj_init(&vp->v_bufobj, vp); 587 /* 588 * Initialize namecache. 589 */ 590 cache_vnode_init(vp); 591 /* 592 * Initialize rangelocks. 593 */ 594 rangelock_init(&vp->v_rl); 595 596 vp->v_dbatchcpu = NOCPU; 597 598 /* 599 * Check vhold_recycle_free for an explanation. 600 */ 601 vp->v_holdcnt = VHOLD_NO_SMR; 602 vp->v_type = VNON; 603 mtx_lock(&vnode_list_mtx); 604 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 605 mtx_unlock(&vnode_list_mtx); 606 return (0); 607 } 608 609 /* 610 * Free a vnode when it is cleared from the zone. 
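 * The teardown mirrors vnode_init(): the vnode is dequeued from any pending
 * per-CPU vdbatch, unlinked from the global vnode_list, and its rangelock,
 * vnode lock, interlock and bufobj lock are destroyed.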
611 */ 612 static void 613 vnode_fini(void *mem, int size) 614 { 615 struct vnode *vp; 616 struct bufobj *bo; 617 618 vp = mem; 619 vdbatch_dequeue(vp); 620 mtx_lock(&vnode_list_mtx); 621 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 622 mtx_unlock(&vnode_list_mtx); 623 rangelock_destroy(&vp->v_rl); 624 lockdestroy(vp->v_vnlock); 625 mtx_destroy(&vp->v_interlock); 626 bo = &vp->v_bufobj; 627 rw_destroy(BO_LOCKPTR(bo)); 628 629 kasan_mark(mem, size, size, 0); 630 } 631 632 /* 633 * Provide the size of NFS nclnode and NFS fh for calculation of the 634 * vnode memory consumption. The size is specified directly to 635 * eliminate dependency on NFS-private header. 636 * 637 * Other filesystems may use bigger or smaller (like UFS and ZFS) 638 * private inode data, but the NFS-based estimation is ample enough. 639 * Still, we care about differences in the size between 64- and 32-bit 640 * platforms. 641 * 642 * Namecache structure size is heuristically 643 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 644 */ 645 #ifdef _LP64 646 #define NFS_NCLNODE_SZ (528 + 64) 647 #define NC_SZ 148 648 #else 649 #define NFS_NCLNODE_SZ (360 + 32) 650 #define NC_SZ 92 651 #endif 652 653 static void 654 vntblinit(void *dummy __unused) 655 { 656 struct vdbatch *vd; 657 uma_ctor ctor; 658 uma_dtor dtor; 659 int cpu, physvnodes, virtvnodes; 660 u_int i; 661 662 /* 663 * Desiredvnodes is a function of the physical memory size and the 664 * kernel's heap size. Generally speaking, it scales with the 665 * physical memory size. The ratio of desiredvnodes to the physical 666 * memory size is 1:16 until desiredvnodes exceeds 98,304. 667 * Thereafter, the 668 * marginal ratio of desiredvnodes to the physical memory size is 669 * 1:64. However, desiredvnodes is limited by the kernel's heap 670 * size. The memory required by desiredvnodes vnodes and vm objects 671 * must not exceed 1/10th of the kernel's heap size. 672 */ 673 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 674 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 675 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 676 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 677 desiredvnodes = min(physvnodes, virtvnodes); 678 if (desiredvnodes > MAXVNODES_MAX) { 679 if (bootverbose) 680 printf("Reducing kern.maxvnodes %lu -> %lu\n", 681 desiredvnodes, MAXVNODES_MAX); 682 desiredvnodes = MAXVNODES_MAX; 683 } 684 wantfreevnodes = desiredvnodes / 4; 685 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 686 TAILQ_INIT(&vnode_list); 687 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 688 /* 689 * The lock is taken to appease WITNESS. 690 */ 691 mtx_lock(&vnode_list_mtx); 692 vnlru_recalc(); 693 mtx_unlock(&vnode_list_mtx); 694 vnode_list_free_marker = vn_alloc_marker(NULL); 695 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 696 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 697 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 698 699 #ifdef KASAN 700 ctor = vnode_ctor; 701 dtor = vnode_dtor; 702 #else 703 ctor = NULL; 704 dtor = NULL; 705 #endif 706 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 707 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 708 uma_zone_set_smr(vnode_zone, vfs_smr); 709 710 /* 711 * Preallocate enough nodes to support one-per buf so that 712 * we can not fail an insert. reassignbuf() callers can not 713 * tolerate the insertion failure. 
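 * To that end, the uma_prealloc() call below reserves nbuf trie nodes up
 * front, buf_trie_alloc() allocates from the zone with M_NOWAIT, and
 * UMA_ZONE_NOFREE keeps the preallocated memory from ever being reclaimed.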
714 */ 715 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 716 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 717 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 718 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 719 uma_prealloc(buf_trie_zone, nbuf); 720 721 vnodes_created = counter_u64_alloc(M_WAITOK); 722 recycles_count = counter_u64_alloc(M_WAITOK); 723 recycles_free_count = counter_u64_alloc(M_WAITOK); 724 deferred_inact = counter_u64_alloc(M_WAITOK); 725 726 /* 727 * Initialize the filesystem syncer. 728 */ 729 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 730 &syncer_mask); 731 syncer_maxdelay = syncer_mask + 1; 732 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 733 cv_init(&sync_wakeup, "syncer"); 734 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 735 vnsz2log++; 736 vnsz2log--; 737 738 CPU_FOREACH(cpu) { 739 vd = DPCPU_ID_PTR((cpu), vd); 740 bzero(vd, sizeof(*vd)); 741 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 742 } 743 } 744 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 745 746 /* 747 * Mark a mount point as busy. Used to synchronize access and to delay 748 * unmounting. Eventually, mountlist_mtx is not released on failure. 749 * 750 * vfs_busy() is a custom lock, it can block the caller. 751 * vfs_busy() only sleeps if the unmount is active on the mount point. 752 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 753 * vnode belonging to mp. 754 * 755 * Lookup uses vfs_busy() to traverse mount points. 756 * root fs var fs 757 * / vnode lock A / vnode lock (/var) D 758 * /var vnode lock B /log vnode lock(/var/log) E 759 * vfs_busy lock C vfs_busy lock F 760 * 761 * Within each file system, the lock order is C->A->B and F->D->E. 762 * 763 * When traversing across mounts, the system follows that lock order: 764 * 765 * C->A->B 766 * | 767 * +->F->D->E 768 * 769 * The lookup() process for namei("/var") illustrates the process: 770 * VOP_LOOKUP() obtains B while A is held 771 * vfs_busy() obtains a shared lock on F while A and B are held 772 * vput() releases lock on B 773 * vput() releases lock on A 774 * VFS_ROOT() obtains lock on D while shared lock on F is held 775 * vfs_unbusy() releases shared lock on F 776 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 777 * Attempt to lock A (instead of vp_crossmp) while D is held would 778 * violate the global order, causing deadlocks. 779 * 780 * dounmount() locks B while F is drained. 781 */ 782 int 783 vfs_busy(struct mount *mp, int flags) 784 { 785 struct mount_pcpu *mpcpu; 786 787 MPASS((flags & ~MBF_MASK) == 0); 788 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 789 790 if (vfs_op_thread_enter(mp, mpcpu)) { 791 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 792 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 793 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 794 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 795 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 796 vfs_op_thread_exit(mp, mpcpu); 797 if (flags & MBF_MNTLSTLOCK) 798 mtx_unlock(&mountlist_mtx); 799 return (0); 800 } 801 802 MNT_ILOCK(mp); 803 vfs_assert_mount_counters(mp); 804 MNT_REF(mp); 805 /* 806 * If mount point is currently being unmounted, sleep until the 807 * mount point fate is decided. If thread doing the unmounting fails, 808 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 809 * that this mount point has survived the unmount attempt and vfs_busy 810 * should retry. 
Otherwise the unmounter thread will set the MNTK_REFEXPIRE
 * flag in addition to MNTK_UNMOUNT, indicating that the mount point is
 * about to be really destroyed.  vfs_busy needs to release its
 * reference on the mount point in this case and return with ENOENT,
 * telling the caller that the mount it tried to busy is no longer
 * valid.
 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and fall back to the slow path if so.
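 *
 * The fast path below therefore goes: load the cache slot, reject it if the
 * fsid already differs, vfs_busy() the candidate, and then compare the fsid
 * again while holding the busy reference.  Only that second comparison
 * proves the slot was still valid; on any mismatch or a failed vfs_busy()
 * the lookup falls back to the locked mountlist walk.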
913 */ 914 struct mount * 915 vfs_busyfs(fsid_t *fsid) 916 { 917 #define FSID_CACHE_SIZE 256 918 typedef struct mount * volatile vmp_t; 919 static vmp_t cache[FSID_CACHE_SIZE]; 920 struct mount *mp; 921 int error; 922 uint32_t hash; 923 924 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 925 hash = fsid->val[0] ^ fsid->val[1]; 926 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 927 mp = cache[hash]; 928 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 929 goto slow; 930 if (vfs_busy(mp, 0) != 0) { 931 cache[hash] = NULL; 932 goto slow; 933 } 934 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 935 return (mp); 936 else 937 vfs_unbusy(mp); 938 939 slow: 940 mtx_lock(&mountlist_mtx); 941 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 942 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 943 error = vfs_busy(mp, MBF_MNTLSTLOCK); 944 if (error) { 945 cache[hash] = NULL; 946 mtx_unlock(&mountlist_mtx); 947 return (NULL); 948 } 949 cache[hash] = mp; 950 return (mp); 951 } 952 } 953 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 954 mtx_unlock(&mountlist_mtx); 955 return ((struct mount *) 0); 956 } 957 958 /* 959 * Check if a user can access privileged mount options. 960 */ 961 int 962 vfs_suser(struct mount *mp, struct thread *td) 963 { 964 int error; 965 966 if (jailed(td->td_ucred)) { 967 /* 968 * If the jail of the calling thread lacks permission for 969 * this type of file system, deny immediately. 970 */ 971 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 972 return (EPERM); 973 974 /* 975 * If the file system was mounted outside the jail of the 976 * calling thread, deny immediately. 977 */ 978 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 979 return (EPERM); 980 } 981 982 /* 983 * If file system supports delegated administration, we don't check 984 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 985 * by the file system itself. 986 * If this is not the user that did original mount, we check for 987 * the PRIV_VFS_MOUNT_OWNER privilege. 988 */ 989 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 990 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 991 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 992 return (error); 993 } 994 return (0); 995 } 996 997 /* 998 * Get a new unique fsid. Try to make its val[0] unique, since this value 999 * will be used to create fake device numbers for stat(). Also try (but 1000 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1001 * support 16-bit device numbers. We end up with unique val[0]'s for the 1002 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1003 * 1004 * Keep in mind that several mounts may be running in parallel. Starting 1005 * the search one past where the previous search terminated is both a 1006 * micro-optimization and a defense against returning the same fsid to 1007 * different mounts. 
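 *
 * A worked example (numbers are illustrative): for the very first mount
 * after boot, of a filesystem whose vfc_typenum is 1, mntid_base is still 0,
 * so the loop below produces val[1] = 1 and val[0] = makedev(255, 0x01000000);
 * the next mount increments mntid_base and so differs in the low bits of
 * val[0].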
1008 */ 1009 void 1010 vfs_getnewfsid(struct mount *mp) 1011 { 1012 static uint16_t mntid_base; 1013 struct mount *nmp; 1014 fsid_t tfsid; 1015 int mtype; 1016 1017 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1018 mtx_lock(&mntid_mtx); 1019 mtype = mp->mnt_vfc->vfc_typenum; 1020 tfsid.val[1] = mtype; 1021 mtype = (mtype & 0xFF) << 24; 1022 for (;;) { 1023 tfsid.val[0] = makedev(255, 1024 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1025 mntid_base++; 1026 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1027 break; 1028 vfs_rel(nmp); 1029 } 1030 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1031 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1032 mtx_unlock(&mntid_mtx); 1033 } 1034 1035 /* 1036 * Knob to control the precision of file timestamps: 1037 * 1038 * 0 = seconds only; nanoseconds zeroed. 1039 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1040 * 2 = seconds and nanoseconds, truncated to microseconds. 1041 * >=3 = seconds and nanoseconds, maximum precision. 1042 */ 1043 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1044 1045 static int timestamp_precision = TSP_USEC; 1046 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1047 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1048 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1049 "3+: sec + ns (max. precision))"); 1050 1051 /* 1052 * Get a current timestamp. 1053 */ 1054 void 1055 vfs_timestamp(struct timespec *tsp) 1056 { 1057 struct timeval tv; 1058 1059 switch (timestamp_precision) { 1060 case TSP_SEC: 1061 tsp->tv_sec = time_second; 1062 tsp->tv_nsec = 0; 1063 break; 1064 case TSP_HZ: 1065 getnanotime(tsp); 1066 break; 1067 case TSP_USEC: 1068 microtime(&tv); 1069 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1070 break; 1071 case TSP_NSEC: 1072 default: 1073 nanotime(tsp); 1074 break; 1075 } 1076 } 1077 1078 /* 1079 * Set vnode attributes to VNOVAL 1080 */ 1081 void 1082 vattr_null(struct vattr *vap) 1083 { 1084 1085 vap->va_type = VNON; 1086 vap->va_size = VNOVAL; 1087 vap->va_bytes = VNOVAL; 1088 vap->va_mode = VNOVAL; 1089 vap->va_nlink = VNOVAL; 1090 vap->va_uid = VNOVAL; 1091 vap->va_gid = VNOVAL; 1092 vap->va_fsid = VNOVAL; 1093 vap->va_fileid = VNOVAL; 1094 vap->va_blocksize = VNOVAL; 1095 vap->va_rdev = VNOVAL; 1096 vap->va_atime.tv_sec = VNOVAL; 1097 vap->va_atime.tv_nsec = VNOVAL; 1098 vap->va_mtime.tv_sec = VNOVAL; 1099 vap->va_mtime.tv_nsec = VNOVAL; 1100 vap->va_ctime.tv_sec = VNOVAL; 1101 vap->va_ctime.tv_nsec = VNOVAL; 1102 vap->va_birthtime.tv_sec = VNOVAL; 1103 vap->va_birthtime.tv_nsec = VNOVAL; 1104 vap->va_flags = VNOVAL; 1105 vap->va_gen = VNOVAL; 1106 vap->va_vaflags = 0; 1107 } 1108 1109 /* 1110 * Try to reduce the total number of vnodes. 1111 * 1112 * This routine (and its user) are buggy in at least the following ways: 1113 * - all parameters were picked years ago when RAM sizes were significantly 1114 * smaller 1115 * - it can pick vnodes based on pages used by the vm object, but filesystems 1116 * like ZFS don't use it making the pick broken 1117 * - since ZFS has its own aging policy it gets partially combated by this one 1118 * - a dedicated method should be provided for filesystems to let them decide 1119 * whether the vnode should be recycled 1120 * 1121 * This routine is called when we have too many vnodes. It attempts 1122 * to free <count> vnodes and will potentially free vnodes that still 1123 * have VM backing store (VM backing store is typically the cause 1124 * of a vnode blowout so we want to do this). Therefore, this operation 1125 * is not considered cheap. 
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation. Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
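		 * Concretely, the sequence below is: VI_TRYLOCK() the
		 * interlock, recheck v_type and v_mount under it, take a
		 * hold reference, and only then move the marker past the
		 * candidate and drop vnode_list_mtx for the expensive part
		 * of the reclaim.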
1191 */ 1192 if (!VI_TRYLOCK(vp)) 1193 goto next_iter; 1194 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1195 VI_UNLOCK(vp); 1196 goto next_iter; 1197 } 1198 if (vp->v_mount == NULL) { 1199 VI_UNLOCK(vp); 1200 goto next_iter; 1201 } 1202 vholdl(vp); 1203 VI_UNLOCK(vp); 1204 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1205 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1206 mtx_unlock(&vnode_list_mtx); 1207 1208 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1209 vdrop(vp); 1210 goto next_iter_unlocked; 1211 } 1212 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1213 vdrop(vp); 1214 vn_finished_write(mp); 1215 goto next_iter_unlocked; 1216 } 1217 1218 VI_LOCK(vp); 1219 if (vp->v_usecount > 0 || 1220 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1221 (vp->v_object != NULL && vp->v_object->handle == vp && 1222 vp->v_object->resident_page_count > trigger)) { 1223 VOP_UNLOCK(vp); 1224 vdropl(vp); 1225 vn_finished_write(mp); 1226 goto next_iter_unlocked; 1227 } 1228 counter_u64_add(recycles_count, 1); 1229 vgonel(vp); 1230 VOP_UNLOCK(vp); 1231 vdropl(vp); 1232 vn_finished_write(mp); 1233 done++; 1234 next_iter_unlocked: 1235 if (should_yield()) 1236 kern_yield(PRI_USER); 1237 mtx_lock(&vnode_list_mtx); 1238 goto restart; 1239 next_iter: 1240 MPASS(vp->v_type != VMARKER); 1241 if (!should_yield()) 1242 continue; 1243 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1244 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1245 mtx_unlock(&vnode_list_mtx); 1246 kern_yield(PRI_USER); 1247 mtx_lock(&vnode_list_mtx); 1248 goto restart; 1249 } 1250 if (done == 0 && !retried) { 1251 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1252 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1253 retried = true; 1254 goto restart; 1255 } 1256 return (done); 1257 } 1258 1259 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1260 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1261 0, 1262 "limit on vnode free requests per call to the vnlru_free routine"); 1263 1264 /* 1265 * Attempt to reduce the free list by the requested amount. 1266 */ 1267 static int 1268 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1269 { 1270 struct vnode *vp; 1271 struct mount *mp; 1272 int ocount; 1273 1274 mtx_assert(&vnode_list_mtx, MA_OWNED); 1275 if (count > max_vnlru_free) 1276 count = max_vnlru_free; 1277 ocount = count; 1278 vp = mvp; 1279 for (;;) { 1280 if (count == 0) { 1281 break; 1282 } 1283 vp = TAILQ_NEXT(vp, v_vnodelist); 1284 if (__predict_false(vp == NULL)) { 1285 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1286 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1287 break; 1288 } 1289 if (__predict_false(vp->v_type == VMARKER)) 1290 continue; 1291 if (vp->v_holdcnt > 0) 1292 continue; 1293 /* 1294 * Don't recycle if our vnode is from different type 1295 * of mount point. Note that mp is type-safe, the 1296 * check does not reach unmapped address even if 1297 * vnode is reclaimed. 
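 * (Type safety here means struct mount memory is never returned to the
 * system, so even a stale v_mount pointer still references a struct mount
 * and the mnt_op dereference below cannot fault.)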
1298 */ 1299 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1300 mp->mnt_op != mnt_op) { 1301 continue; 1302 } 1303 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1304 continue; 1305 } 1306 if (!vhold_recycle_free(vp)) 1307 continue; 1308 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1309 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1310 mtx_unlock(&vnode_list_mtx); 1311 if (vtryrecycle(vp) == 0) 1312 count--; 1313 mtx_lock(&vnode_list_mtx); 1314 vp = mvp; 1315 } 1316 return (ocount - count); 1317 } 1318 1319 static int 1320 vnlru_free_locked(int count) 1321 { 1322 1323 mtx_assert(&vnode_list_mtx, MA_OWNED); 1324 return (vnlru_free_impl(count, NULL, vnode_list_free_marker)); 1325 } 1326 1327 void 1328 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1329 { 1330 1331 MPASS(mnt_op != NULL); 1332 MPASS(mvp != NULL); 1333 VNPASS(mvp->v_type == VMARKER, mvp); 1334 mtx_lock(&vnode_list_mtx); 1335 vnlru_free_impl(count, mnt_op, mvp); 1336 mtx_unlock(&vnode_list_mtx); 1337 } 1338 1339 struct vnode * 1340 vnlru_alloc_marker(void) 1341 { 1342 struct vnode *mvp; 1343 1344 mvp = vn_alloc_marker(NULL); 1345 mtx_lock(&vnode_list_mtx); 1346 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1347 mtx_unlock(&vnode_list_mtx); 1348 return (mvp); 1349 } 1350 1351 void 1352 vnlru_free_marker(struct vnode *mvp) 1353 { 1354 mtx_lock(&vnode_list_mtx); 1355 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1356 mtx_unlock(&vnode_list_mtx); 1357 vn_free_marker(mvp); 1358 } 1359 1360 static void 1361 vnlru_recalc(void) 1362 { 1363 1364 mtx_assert(&vnode_list_mtx, MA_OWNED); 1365 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1366 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1367 vlowat = vhiwat / 2; 1368 } 1369 1370 /* 1371 * Attempt to recycle vnodes in a context that is always safe to block. 1372 * Calling vlrurecycle() from the bowels of filesystem code has some 1373 * interesting deadlock problems. 1374 */ 1375 static struct proc *vnlruproc; 1376 static int vnlruproc_sig; 1377 1378 /* 1379 * The main freevnodes counter is only updated when threads requeue their vnode 1380 * batches. CPUs are conditionally walked to compute a more accurate total. 1381 * 1382 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1383 * at any given moment can still exceed slop, but it should not be by significant 1384 * margin in practice. 1385 */ 1386 #define VNLRU_FREEVNODES_SLOP 128 1387 1388 static __inline void 1389 vfs_freevnodes_inc(void) 1390 { 1391 struct vdbatch *vd; 1392 1393 critical_enter(); 1394 vd = DPCPU_PTR(vd); 1395 vd->freevnodes++; 1396 critical_exit(); 1397 } 1398 1399 static __inline void 1400 vfs_freevnodes_dec(void) 1401 { 1402 struct vdbatch *vd; 1403 1404 critical_enter(); 1405 vd = DPCPU_PTR(vd); 1406 vd->freevnodes--; 1407 critical_exit(); 1408 } 1409 1410 static u_long 1411 vnlru_read_freevnodes(void) 1412 { 1413 struct vdbatch *vd; 1414 long slop; 1415 int cpu; 1416 1417 mtx_assert(&vnode_list_mtx, MA_OWNED); 1418 if (freevnodes > freevnodes_old) 1419 slop = freevnodes - freevnodes_old; 1420 else 1421 slop = freevnodes_old - freevnodes; 1422 if (slop < VNLRU_FREEVNODES_SLOP) 1423 return (freevnodes >= 0 ? freevnodes : 0); 1424 freevnodes_old = freevnodes; 1425 CPU_FOREACH(cpu) { 1426 vd = DPCPU_ID_PTR((cpu), vd); 1427 freevnodes_old += vd->freevnodes; 1428 } 1429 return (freevnodes_old >= 0 ? 
freevnodes_old : 0); 1430 } 1431 1432 static bool 1433 vnlru_under(u_long rnumvnodes, u_long limit) 1434 { 1435 u_long rfreevnodes, space; 1436 1437 if (__predict_false(rnumvnodes > desiredvnodes)) 1438 return (true); 1439 1440 space = desiredvnodes - rnumvnodes; 1441 if (space < limit) { 1442 rfreevnodes = vnlru_read_freevnodes(); 1443 if (rfreevnodes > wantfreevnodes) 1444 space += rfreevnodes - wantfreevnodes; 1445 } 1446 return (space < limit); 1447 } 1448 1449 static bool 1450 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1451 { 1452 long rfreevnodes, space; 1453 1454 if (__predict_false(rnumvnodes > desiredvnodes)) 1455 return (true); 1456 1457 space = desiredvnodes - rnumvnodes; 1458 if (space < limit) { 1459 rfreevnodes = atomic_load_long(&freevnodes); 1460 if (rfreevnodes > wantfreevnodes) 1461 space += rfreevnodes - wantfreevnodes; 1462 } 1463 return (space < limit); 1464 } 1465 1466 static void 1467 vnlru_kick(void) 1468 { 1469 1470 mtx_assert(&vnode_list_mtx, MA_OWNED); 1471 if (vnlruproc_sig == 0) { 1472 vnlruproc_sig = 1; 1473 wakeup(vnlruproc); 1474 } 1475 } 1476 1477 static void 1478 vnlru_proc(void) 1479 { 1480 u_long rnumvnodes, rfreevnodes, target; 1481 unsigned long onumvnodes; 1482 int done, force, trigger, usevnodes; 1483 bool reclaim_nc_src, want_reread; 1484 1485 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1486 SHUTDOWN_PRI_FIRST); 1487 1488 force = 0; 1489 want_reread = false; 1490 for (;;) { 1491 kproc_suspend_check(vnlruproc); 1492 mtx_lock(&vnode_list_mtx); 1493 rnumvnodes = atomic_load_long(&numvnodes); 1494 1495 if (want_reread) { 1496 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1497 want_reread = false; 1498 } 1499 1500 /* 1501 * If numvnodes is too large (due to desiredvnodes being 1502 * adjusted using its sysctl, or emergency growth), first 1503 * try to reduce it by discarding from the free list. 1504 */ 1505 if (rnumvnodes > desiredvnodes) { 1506 vnlru_free_locked(rnumvnodes - desiredvnodes); 1507 rnumvnodes = atomic_load_long(&numvnodes); 1508 } 1509 /* 1510 * Sleep if the vnode cache is in a good state. This is 1511 * when it is not over-full and has space for about a 4% 1512 * or 9% expansion (by growing its size or inexcessively 1513 * reducing its free list). Otherwise, try to reclaim 1514 * space for a 10% expansion. 1515 */ 1516 if (vstir && force == 0) { 1517 force = 1; 1518 vstir = 0; 1519 } 1520 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1521 vnlruproc_sig = 0; 1522 wakeup(&vnlruproc_sig); 1523 msleep(vnlruproc, &vnode_list_mtx, 1524 PVFS|PDROP, "vlruwt", hz); 1525 continue; 1526 } 1527 rfreevnodes = vnlru_read_freevnodes(); 1528 1529 onumvnodes = rnumvnodes; 1530 /* 1531 * Calculate parameters for recycling. These are the same 1532 * throughout the loop to give some semblance of fairness. 1533 * The trigger point is to avoid recycling vnodes with lots 1534 * of resident pages. We aren't trying to free memory; we 1535 * are trying to recycle or at least free vnodes. 1536 */ 1537 if (rnumvnodes <= desiredvnodes) 1538 usevnodes = rnumvnodes - rfreevnodes; 1539 else 1540 usevnodes = rnumvnodes; 1541 if (usevnodes <= 0) 1542 usevnodes = 1; 1543 /* 1544 * The trigger value is is chosen to give a conservatively 1545 * large value to ensure that it alone doesn't prevent 1546 * making progress. The value can easily be so large that 1547 * it is effectively infinite in some congested and 1548 * misconfigured cases, and this is necessary. Normally 1549 * it is about 8 to 100 (pages), which is quite large. 
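 *
 * Worked example (illustrative numbers): with vm_cnt.v_page_count at 4M
 * pages (16GB of 4KB pages) and 100000 in-use vnodes, the computation
 * below yields 4M * 2 / 100000, roughly 80 pages.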
1550 */ 1551 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1552 if (force < 2) 1553 trigger = vsmalltrigger; 1554 reclaim_nc_src = force >= 3; 1555 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1556 target = target / 10 + 1; 1557 done = vlrureclaim(reclaim_nc_src, trigger, target); 1558 mtx_unlock(&vnode_list_mtx); 1559 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1560 uma_reclaim(UMA_RECLAIM_DRAIN); 1561 if (done == 0) { 1562 if (force == 0 || force == 1) { 1563 force = 2; 1564 continue; 1565 } 1566 if (force == 2) { 1567 force = 3; 1568 continue; 1569 } 1570 want_reread = true; 1571 force = 0; 1572 vnlru_nowhere++; 1573 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1574 } else { 1575 want_reread = true; 1576 kern_yield(PRI_USER); 1577 } 1578 } 1579 } 1580 1581 static struct kproc_desc vnlru_kp = { 1582 "vnlru", 1583 vnlru_proc, 1584 &vnlruproc 1585 }; 1586 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1587 &vnlru_kp); 1588 1589 /* 1590 * Routines having to do with the management of the vnode table. 1591 */ 1592 1593 /* 1594 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1595 * before we actually vgone(). This function must be called with the vnode 1596 * held to prevent the vnode from being returned to the free list midway 1597 * through vgone(). 1598 */ 1599 static int 1600 vtryrecycle(struct vnode *vp) 1601 { 1602 struct mount *vnmp; 1603 1604 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1605 VNASSERT(vp->v_holdcnt, vp, 1606 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1607 /* 1608 * This vnode may found and locked via some other list, if so we 1609 * can't recycle it yet. 1610 */ 1611 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1612 CTR2(KTR_VFS, 1613 "%s: impossible to recycle, vp %p lock is already held", 1614 __func__, vp); 1615 vdrop(vp); 1616 return (EWOULDBLOCK); 1617 } 1618 /* 1619 * Don't recycle if its filesystem is being suspended. 1620 */ 1621 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1622 VOP_UNLOCK(vp); 1623 CTR2(KTR_VFS, 1624 "%s: impossible to recycle, cannot start the write for %p", 1625 __func__, vp); 1626 vdrop(vp); 1627 return (EBUSY); 1628 } 1629 /* 1630 * If we got this far, we need to acquire the interlock and see if 1631 * anyone picked up this vnode from another list. If not, we will 1632 * mark it with DOOMED via vgonel() so that anyone who does find it 1633 * will skip over it. 1634 */ 1635 VI_LOCK(vp); 1636 if (vp->v_usecount) { 1637 VOP_UNLOCK(vp); 1638 vdropl(vp); 1639 vn_finished_write(vnmp); 1640 CTR2(KTR_VFS, 1641 "%s: impossible to recycle, %p is already referenced", 1642 __func__, vp); 1643 return (EBUSY); 1644 } 1645 if (!VN_IS_DOOMED(vp)) { 1646 counter_u64_add(recycles_free_count, 1); 1647 vgonel(vp); 1648 } 1649 VOP_UNLOCK(vp); 1650 vdropl(vp); 1651 vn_finished_write(vnmp); 1652 return (0); 1653 } 1654 1655 /* 1656 * Allocate a new vnode. 1657 * 1658 * The operation never returns an error. Returning an error was disabled 1659 * in r145385 (dated 2005) with the following comment: 1660 * 1661 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1662 * 1663 * Given the age of this commit (almost 15 years at the time of writing this 1664 * comment) restoring the ability to fail requires a significant audit of 1665 * all codepaths. 1666 * 1667 * The routine can try to free a vnode or stall for up to 1 second waiting for 1668 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 
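 *
 * A usage sketch (the filesystem identifiers are hypothetical, not a real
 * consumer of this interface):
 *
 *	error = getnewvnode("examplefs", mp, &examplefs_vnodeops, &vp);
 *	KASSERT(error == 0, ("getnewvnode() is documented not to fail"));
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *
 * When insmntque() fails it has already handed the new vnode to its
 * destructor (insmntque_stddtr() below calls vgone() and vput()), so the
 * caller must not reference vp afterwards.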
1669 */ 1670 static u_long vn_alloc_cyclecount; 1671 1672 static struct vnode * __noinline 1673 vn_alloc_hard(struct mount *mp) 1674 { 1675 u_long rnumvnodes, rfreevnodes; 1676 1677 mtx_lock(&vnode_list_mtx); 1678 rnumvnodes = atomic_load_long(&numvnodes); 1679 if (rnumvnodes + 1 < desiredvnodes) { 1680 vn_alloc_cyclecount = 0; 1681 goto alloc; 1682 } 1683 rfreevnodes = vnlru_read_freevnodes(); 1684 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1685 vn_alloc_cyclecount = 0; 1686 vstir = 1; 1687 } 1688 /* 1689 * Grow the vnode cache if it will not be above its target max 1690 * after growing. Otherwise, if the free list is nonempty, try 1691 * to reclaim 1 item from it before growing the cache (possibly 1692 * above its target max if the reclamation failed or is delayed). 1693 * Otherwise, wait for some space. In all cases, schedule 1694 * vnlru_proc() if we are getting short of space. The watermarks 1695 * should be chosen so that we never wait or even reclaim from 1696 * the free list to below its target minimum. 1697 */ 1698 if (vnlru_free_locked(1) > 0) 1699 goto alloc; 1700 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1701 /* 1702 * Wait for space for a new vnode. 1703 */ 1704 vnlru_kick(); 1705 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1706 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1707 vnlru_read_freevnodes() > 1) 1708 vnlru_free_locked(1); 1709 } 1710 alloc: 1711 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1712 if (vnlru_under(rnumvnodes, vlowat)) 1713 vnlru_kick(); 1714 mtx_unlock(&vnode_list_mtx); 1715 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1716 } 1717 1718 static struct vnode * 1719 vn_alloc(struct mount *mp) 1720 { 1721 u_long rnumvnodes; 1722 1723 if (__predict_false(vn_alloc_cyclecount != 0)) 1724 return (vn_alloc_hard(mp)); 1725 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1726 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1727 atomic_subtract_long(&numvnodes, 1); 1728 return (vn_alloc_hard(mp)); 1729 } 1730 1731 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1732 } 1733 1734 static void 1735 vn_free(struct vnode *vp) 1736 { 1737 1738 atomic_subtract_long(&numvnodes, 1); 1739 uma_zfree_smr(vnode_zone, vp); 1740 } 1741 1742 /* 1743 * Return the next vnode from the free list. 1744 */ 1745 int 1746 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1747 struct vnode **vpp) 1748 { 1749 struct vnode *vp; 1750 struct thread *td; 1751 struct lock_object *lo; 1752 1753 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1754 1755 KASSERT(vops->registered, 1756 ("%s: not registered vector op %p\n", __func__, vops)); 1757 1758 td = curthread; 1759 if (td->td_vp_reserved != NULL) { 1760 vp = td->td_vp_reserved; 1761 td->td_vp_reserved = NULL; 1762 } else { 1763 vp = vn_alloc(mp); 1764 } 1765 counter_u64_add(vnodes_created, 1); 1766 /* 1767 * Locks are given the generic name "vnode" when created. 1768 * Follow the historic practice of using the filesystem 1769 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1770 * 1771 * Locks live in a witness group keyed on their name. Thus, 1772 * when a lock is renamed, it must also move from the witness 1773 * group of its old name to the witness group of its new name. 1774 * 1775 * The change only needs to be made when the vnode moves 1776 * from one filesystem type to another. 
We ensure that each 1777 * filesystem use a single static name pointer for its tag so 1778 * that we can compare pointers rather than doing a strcmp(). 1779 */ 1780 lo = &vp->v_vnlock->lock_object; 1781 #ifdef WITNESS 1782 if (lo->lo_name != tag) { 1783 #endif 1784 lo->lo_name = tag; 1785 #ifdef WITNESS 1786 WITNESS_DESTROY(lo); 1787 WITNESS_INIT(lo, tag); 1788 } 1789 #endif 1790 /* 1791 * By default, don't allow shared locks unless filesystems opt-in. 1792 */ 1793 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1794 /* 1795 * Finalize various vnode identity bits. 1796 */ 1797 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1798 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1799 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1800 vp->v_type = VNON; 1801 vp->v_op = vops; 1802 vp->v_irflag = 0; 1803 v_init_counters(vp); 1804 vn_seqc_init(vp); 1805 vp->v_bufobj.bo_ops = &buf_ops_bio; 1806 #ifdef DIAGNOSTIC 1807 if (mp == NULL && vops != &dead_vnodeops) 1808 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1809 #endif 1810 #ifdef MAC 1811 mac_vnode_init(vp); 1812 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1813 mac_vnode_associate_singlelabel(mp, vp); 1814 #endif 1815 if (mp != NULL) { 1816 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1817 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1818 vp->v_vflag |= VV_NOKNOTE; 1819 } 1820 1821 /* 1822 * For the filesystems which do not use vfs_hash_insert(), 1823 * still initialize v_hash to have vfs_hash_index() useful. 1824 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1825 * its own hashing. 1826 */ 1827 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1828 1829 *vpp = vp; 1830 return (0); 1831 } 1832 1833 void 1834 getnewvnode_reserve(void) 1835 { 1836 struct thread *td; 1837 1838 td = curthread; 1839 MPASS(td->td_vp_reserved == NULL); 1840 td->td_vp_reserved = vn_alloc(NULL); 1841 } 1842 1843 void 1844 getnewvnode_drop_reserve(void) 1845 { 1846 struct thread *td; 1847 1848 td = curthread; 1849 if (td->td_vp_reserved != NULL) { 1850 vn_free(td->td_vp_reserved); 1851 td->td_vp_reserved = NULL; 1852 } 1853 } 1854 1855 static void __noinline 1856 freevnode(struct vnode *vp) 1857 { 1858 struct bufobj *bo; 1859 1860 /* 1861 * The vnode has been marked for destruction, so free it. 1862 * 1863 * The vnode will be returned to the zone where it will 1864 * normally remain until it is needed for another vnode. We 1865 * need to cleanup (or verify that the cleanup has already 1866 * been done) any residual data left from its current use 1867 * so as not to contaminate the freshly allocated vnode. 1868 */ 1869 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1870 /* 1871 * Paired with vgone. 
1872 */ 1873 vn_seqc_write_end_free(vp); 1874 1875 bo = &vp->v_bufobj; 1876 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1877 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1878 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1879 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1880 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1881 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1882 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1883 ("clean blk trie not empty")); 1884 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1885 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1886 ("dirty blk trie not empty")); 1887 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1888 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1889 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1890 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1891 ("Dangling rangelock waiters")); 1892 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1893 ("Leaked inactivation")); 1894 VI_UNLOCK(vp); 1895 #ifdef MAC 1896 mac_vnode_destroy(vp); 1897 #endif 1898 if (vp->v_pollinfo != NULL) { 1899 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1900 destroy_vpollinfo(vp->v_pollinfo); 1901 VOP_UNLOCK(vp); 1902 vp->v_pollinfo = NULL; 1903 } 1904 vp->v_mountedhere = NULL; 1905 vp->v_unpcb = NULL; 1906 vp->v_rdev = NULL; 1907 vp->v_fifoinfo = NULL; 1908 vp->v_iflag = 0; 1909 vp->v_vflag = 0; 1910 bo->bo_flag = 0; 1911 vn_free(vp); 1912 } 1913 1914 /* 1915 * Delete from old mount point vnode list, if on one. 1916 */ 1917 static void 1918 delmntque(struct vnode *vp) 1919 { 1920 struct mount *mp; 1921 1922 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1923 1924 mp = vp->v_mount; 1925 if (mp == NULL) 1926 return; 1927 MNT_ILOCK(mp); 1928 VI_LOCK(vp); 1929 vp->v_mount = NULL; 1930 VI_UNLOCK(vp); 1931 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1932 ("bad mount point vnode list size")); 1933 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1934 mp->mnt_nvnodelistsize--; 1935 MNT_REL(mp); 1936 MNT_IUNLOCK(mp); 1937 } 1938 1939 static void 1940 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1941 { 1942 1943 vp->v_data = NULL; 1944 vp->v_op = &dead_vnodeops; 1945 vgone(vp); 1946 vput(vp); 1947 } 1948 1949 /* 1950 * Insert into list of vnodes for the new mount point, if available. 1951 */ 1952 int 1953 insmntque1(struct vnode *vp, struct mount *mp, 1954 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1955 { 1956 1957 KASSERT(vp->v_mount == NULL, 1958 ("insmntque: vnode already on per mount vnode list")); 1959 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1960 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1961 1962 /* 1963 * We acquire the vnode interlock early to ensure that the 1964 * vnode cannot be recycled by another process releasing a 1965 * holdcnt on it before we get it on both the vnode list 1966 * and the active vnode list. The mount mutex protects only 1967 * manipulation of the vnode list and the vnode freelist 1968 * mutex protects only manipulation of the active vnode list. 1969 * Hence the need to hold the vnode interlock throughout. 
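 *
 * A typical consumer calls this right after getnewvnode(), with the
 * new vnode exclusively locked.  A minimal sketch (the "foofs" names
 * are purely illustrative):
 *
 *	error = getnewvnode("foofs", mp, &foofs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *
 * On failure insmntque() has already run insmntque_stddtr(), which
 * marked the vnode dead and released it, so the caller must not
 * vput() it again.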
1970 */ 1971 MNT_ILOCK(mp); 1972 VI_LOCK(vp); 1973 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1974 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1975 mp->mnt_nvnodelistsize == 0)) && 1976 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1977 VI_UNLOCK(vp); 1978 MNT_IUNLOCK(mp); 1979 if (dtr != NULL) 1980 dtr(vp, dtr_arg); 1981 return (EBUSY); 1982 } 1983 vp->v_mount = mp; 1984 MNT_REF(mp); 1985 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1986 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1987 ("neg mount point vnode list size")); 1988 mp->mnt_nvnodelistsize++; 1989 VI_UNLOCK(vp); 1990 MNT_IUNLOCK(mp); 1991 return (0); 1992 } 1993 1994 int 1995 insmntque(struct vnode *vp, struct mount *mp) 1996 { 1997 1998 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1999 } 2000 2001 /* 2002 * Flush out and invalidate all buffers associated with a bufobj 2003 * Called with the underlying object locked. 2004 */ 2005 int 2006 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2007 { 2008 int error; 2009 2010 BO_LOCK(bo); 2011 if (flags & V_SAVE) { 2012 error = bufobj_wwait(bo, slpflag, slptimeo); 2013 if (error) { 2014 BO_UNLOCK(bo); 2015 return (error); 2016 } 2017 if (bo->bo_dirty.bv_cnt > 0) { 2018 BO_UNLOCK(bo); 2019 do { 2020 error = BO_SYNC(bo, MNT_WAIT); 2021 } while (error == ERELOOKUP); 2022 if (error != 0) 2023 return (error); 2024 BO_LOCK(bo); 2025 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2026 BO_UNLOCK(bo); 2027 return (EBUSY); 2028 } 2029 } 2030 } 2031 /* 2032 * If you alter this loop please notice that interlock is dropped and 2033 * reacquired in flushbuflist. Special care is needed to ensure that 2034 * no race conditions occur from this. 2035 */ 2036 do { 2037 error = flushbuflist(&bo->bo_clean, 2038 flags, bo, slpflag, slptimeo); 2039 if (error == 0 && !(flags & V_CLEANONLY)) 2040 error = flushbuflist(&bo->bo_dirty, 2041 flags, bo, slpflag, slptimeo); 2042 if (error != 0 && error != EAGAIN) { 2043 BO_UNLOCK(bo); 2044 return (error); 2045 } 2046 } while (error != 0); 2047 2048 /* 2049 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2050 * have write I/O in-progress but if there is a VM object then the 2051 * VM object can also have read-I/O in-progress. 2052 */ 2053 do { 2054 bufobj_wwait(bo, 0, 0); 2055 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2056 BO_UNLOCK(bo); 2057 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2058 BO_LOCK(bo); 2059 } 2060 } while (bo->bo_numoutput > 0); 2061 BO_UNLOCK(bo); 2062 2063 /* 2064 * Destroy the copy in the VM cache, too. 2065 */ 2066 if (bo->bo_object != NULL && 2067 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2068 VM_OBJECT_WLOCK(bo->bo_object); 2069 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2070 OBJPR_CLEANONLY : 0); 2071 VM_OBJECT_WUNLOCK(bo->bo_object); 2072 } 2073 2074 #ifdef INVARIANTS 2075 BO_LOCK(bo); 2076 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2077 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2078 bo->bo_clean.bv_cnt > 0)) 2079 panic("vinvalbuf: flush failed"); 2080 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2081 bo->bo_dirty.bv_cnt > 0) 2082 panic("vinvalbuf: flush dirty failed"); 2083 BO_UNLOCK(bo); 2084 #endif 2085 return (0); 2086 } 2087 2088 /* 2089 * Flush out and invalidate all buffers associated with a vnode. 2090 * Called with the underlying object locked. 
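 *
 * With V_SAVE the dirty buffers are synced to disk before being
 * released; without it they are simply discarded, as vgonel() does on
 * its fallback path when the V_SAVE flush fails.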
2091 */ 2092 int 2093 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2094 { 2095 2096 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2097 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2098 if (vp->v_object != NULL && vp->v_object->handle != vp) 2099 return (0); 2100 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2101 } 2102 2103 /* 2104 * Flush out buffers on the specified list. 2105 * 2106 */ 2107 static int 2108 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2109 int slptimeo) 2110 { 2111 struct buf *bp, *nbp; 2112 int retval, error; 2113 daddr_t lblkno; 2114 b_xflags_t xflags; 2115 2116 ASSERT_BO_WLOCKED(bo); 2117 2118 retval = 0; 2119 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2120 /* 2121 * If we are flushing both V_NORMAL and V_ALT buffers then 2122 * do not skip any buffers. If we are flushing only V_NORMAL 2123 * buffers then skip buffers marked as BX_ALTDATA. If we are 2124 * flushing only V_ALT buffers then skip buffers not marked 2125 * as BX_ALTDATA. 2126 */ 2127 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2128 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2129 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2130 continue; 2131 } 2132 if (nbp != NULL) { 2133 lblkno = nbp->b_lblkno; 2134 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2135 } 2136 retval = EAGAIN; 2137 error = BUF_TIMELOCK(bp, 2138 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2139 "flushbuf", slpflag, slptimeo); 2140 if (error) { 2141 BO_LOCK(bo); 2142 return (error != ENOLCK ? error : EAGAIN); 2143 } 2144 KASSERT(bp->b_bufobj == bo, 2145 ("bp %p wrong b_bufobj %p should be %p", 2146 bp, bp->b_bufobj, bo)); 2147 /* 2148 * XXX Since there are no node locks for NFS, I 2149 * believe there is a slight chance that a delayed 2150 * write will occur while sleeping just above, so 2151 * check for it. 2152 */ 2153 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2154 (flags & V_SAVE)) { 2155 bremfree(bp); 2156 bp->b_flags |= B_ASYNC; 2157 bwrite(bp); 2158 BO_LOCK(bo); 2159 return (EAGAIN); /* XXX: why not loop ? */ 2160 } 2161 bremfree(bp); 2162 bp->b_flags |= (B_INVAL | B_RELBUF); 2163 bp->b_flags &= ~B_ASYNC; 2164 brelse(bp); 2165 BO_LOCK(bo); 2166 if (nbp == NULL) 2167 break; 2168 nbp = gbincore(bo, lblkno); 2169 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2170 != xflags) 2171 break; /* nbp invalid */ 2172 } 2173 return (retval); 2174 } 2175 2176 int 2177 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2178 { 2179 struct buf *bp; 2180 int error; 2181 daddr_t lblkno; 2182 2183 ASSERT_BO_LOCKED(bo); 2184 2185 for (lblkno = startn;;) { 2186 again: 2187 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2188 if (bp == NULL || bp->b_lblkno >= endn || 2189 bp->b_lblkno < startn) 2190 break; 2191 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2192 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2193 if (error != 0) { 2194 BO_RLOCK(bo); 2195 if (error == ENOLCK) 2196 goto again; 2197 return (error); 2198 } 2199 KASSERT(bp->b_bufobj == bo, 2200 ("bp %p wrong b_bufobj %p should be %p", 2201 bp, bp->b_bufobj, bo)); 2202 lblkno = bp->b_lblkno + 1; 2203 if ((bp->b_flags & B_MANAGED) == 0) 2204 bremfree(bp); 2205 bp->b_flags |= B_RELBUF; 2206 /* 2207 * In the VMIO case, use the B_NOREUSE flag to hint that the 2208 * pages backing each buffer in the range are unlikely to be 2209 * reused. 
Dirty buffers will have the hint applied once 2210 * they've been written. 2211 */ 2212 if ((bp->b_flags & B_VMIO) != 0) 2213 bp->b_flags |= B_NOREUSE; 2214 brelse(bp); 2215 BO_RLOCK(bo); 2216 } 2217 return (0); 2218 } 2219 2220 /* 2221 * Truncate a file's buffer and pages to a specified length. This 2222 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2223 * sync activity. 2224 */ 2225 int 2226 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2227 { 2228 struct buf *bp, *nbp; 2229 struct bufobj *bo; 2230 daddr_t startlbn; 2231 2232 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2233 vp, blksize, (uintmax_t)length); 2234 2235 /* 2236 * Round up to the *next* lbn. 2237 */ 2238 startlbn = howmany(length, blksize); 2239 2240 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2241 2242 bo = &vp->v_bufobj; 2243 restart_unlocked: 2244 BO_LOCK(bo); 2245 2246 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2247 ; 2248 2249 if (length > 0) { 2250 restartsync: 2251 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2252 if (bp->b_lblkno > 0) 2253 continue; 2254 /* 2255 * Since we hold the vnode lock this should only 2256 * fail if we're racing with the buf daemon. 2257 */ 2258 if (BUF_LOCK(bp, 2259 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2260 BO_LOCKPTR(bo)) == ENOLCK) 2261 goto restart_unlocked; 2262 2263 VNASSERT((bp->b_flags & B_DELWRI), vp, 2264 ("buf(%p) on dirty queue without DELWRI", bp)); 2265 2266 bremfree(bp); 2267 bawrite(bp); 2268 BO_LOCK(bo); 2269 goto restartsync; 2270 } 2271 } 2272 2273 bufobj_wwait(bo, 0, 0); 2274 BO_UNLOCK(bo); 2275 vnode_pager_setsize(vp, length); 2276 2277 return (0); 2278 } 2279 2280 /* 2281 * Invalidate the cached pages of a file's buffer within the range of block 2282 * numbers [startlbn, endlbn). 
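 *
 * As a worked example (assuming a 16384-byte block size and 4096-byte
 * pages), a call with [startlbn, endlbn) = [2, 4) computes the byte
 * range [32768, 65536) below and removes the pages backing it along
 * with any buffers for logical blocks 2 and 3.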
2283 */ 2284 void 2285 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2286 int blksize) 2287 { 2288 struct bufobj *bo; 2289 off_t start, end; 2290 2291 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2292 2293 start = blksize * startlbn; 2294 end = blksize * endlbn; 2295 2296 bo = &vp->v_bufobj; 2297 BO_LOCK(bo); 2298 MPASS(blksize == bo->bo_bsize); 2299 2300 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2301 ; 2302 2303 BO_UNLOCK(bo); 2304 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2305 } 2306 2307 static int 2308 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2309 daddr_t startlbn, daddr_t endlbn) 2310 { 2311 struct buf *bp, *nbp; 2312 bool anyfreed; 2313 2314 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2315 ASSERT_BO_LOCKED(bo); 2316 2317 do { 2318 anyfreed = false; 2319 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2320 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2321 continue; 2322 if (BUF_LOCK(bp, 2323 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2324 BO_LOCKPTR(bo)) == ENOLCK) { 2325 BO_LOCK(bo); 2326 return (EAGAIN); 2327 } 2328 2329 bremfree(bp); 2330 bp->b_flags |= B_INVAL | B_RELBUF; 2331 bp->b_flags &= ~B_ASYNC; 2332 brelse(bp); 2333 anyfreed = true; 2334 2335 BO_LOCK(bo); 2336 if (nbp != NULL && 2337 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2338 nbp->b_vp != vp || 2339 (nbp->b_flags & B_DELWRI) != 0)) 2340 return (EAGAIN); 2341 } 2342 2343 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2344 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2345 continue; 2346 if (BUF_LOCK(bp, 2347 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2348 BO_LOCKPTR(bo)) == ENOLCK) { 2349 BO_LOCK(bo); 2350 return (EAGAIN); 2351 } 2352 bremfree(bp); 2353 bp->b_flags |= B_INVAL | B_RELBUF; 2354 bp->b_flags &= ~B_ASYNC; 2355 brelse(bp); 2356 anyfreed = true; 2357 2358 BO_LOCK(bo); 2359 if (nbp != NULL && 2360 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2361 (nbp->b_vp != vp) || 2362 (nbp->b_flags & B_DELWRI) == 0)) 2363 return (EAGAIN); 2364 } 2365 } while (anyfreed); 2366 return (0); 2367 } 2368 2369 static void 2370 buf_vlist_remove(struct buf *bp) 2371 { 2372 struct bufv *bv; 2373 b_xflags_t flags; 2374 2375 flags = bp->b_xflags; 2376 2377 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2378 ASSERT_BO_WLOCKED(bp->b_bufobj); 2379 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2380 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2381 ("%s: buffer %p has invalid queue state", __func__, bp)); 2382 2383 if ((flags & BX_VNDIRTY) != 0) 2384 bv = &bp->b_bufobj->bo_dirty; 2385 else 2386 bv = &bp->b_bufobj->bo_clean; 2387 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2388 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2389 bv->bv_cnt--; 2390 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2391 } 2392 2393 /* 2394 * Add the buffer to the sorted clean or dirty block list. 2395 * 2396 * NOTE: xflags is passed as a constant, optimizing this inline function! 
2397 */ 2398 static void 2399 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2400 { 2401 struct bufv *bv; 2402 struct buf *n; 2403 int error; 2404 2405 ASSERT_BO_WLOCKED(bo); 2406 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2407 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2408 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2409 ("dead bo %p", bo)); 2410 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2411 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2412 bp->b_xflags |= xflags; 2413 if (xflags & BX_VNDIRTY) 2414 bv = &bo->bo_dirty; 2415 else 2416 bv = &bo->bo_clean; 2417 2418 /* 2419 * Keep the list ordered. Optimize empty list insertion. Assume 2420 * we tend to grow at the tail so lookup_le should usually be cheaper 2421 * than _ge. 2422 */ 2423 if (bv->bv_cnt == 0 || 2424 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2425 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2426 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2427 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2428 else 2429 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2430 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2431 if (error) 2432 panic("buf_vlist_add: Preallocated nodes insufficient."); 2433 bv->bv_cnt++; 2434 } 2435 2436 /* 2437 * Look up a buffer using the buffer tries. 2438 */ 2439 struct buf * 2440 gbincore(struct bufobj *bo, daddr_t lblkno) 2441 { 2442 struct buf *bp; 2443 2444 ASSERT_BO_LOCKED(bo); 2445 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2446 if (bp != NULL) 2447 return (bp); 2448 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2449 } 2450 2451 /* 2452 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2453 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2454 * stability of the result. Like other lockless lookups, the found buf may 2455 * already be invalid by the time this function returns. 2456 */ 2457 struct buf * 2458 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2459 { 2460 struct buf *bp; 2461 2462 ASSERT_BO_UNLOCKED(bo); 2463 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2464 if (bp != NULL) 2465 return (bp); 2466 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2467 } 2468 2469 /* 2470 * Associate a buffer with a vnode. 2471 */ 2472 void 2473 bgetvp(struct vnode *vp, struct buf *bp) 2474 { 2475 struct bufobj *bo; 2476 2477 bo = &vp->v_bufobj; 2478 ASSERT_BO_WLOCKED(bo); 2479 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2480 2481 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2482 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2483 ("bgetvp: bp already attached! %p", bp)); 2484 2485 vhold(vp); 2486 bp->b_vp = vp; 2487 bp->b_bufobj = bo; 2488 /* 2489 * Insert onto list for new vnode. 2490 */ 2491 buf_vlist_add(bp, bo, BX_VNCLEAN); 2492 } 2493 2494 /* 2495 * Disassociate a buffer from a vnode. 2496 */ 2497 void 2498 brelvp(struct buf *bp) 2499 { 2500 struct bufobj *bo; 2501 struct vnode *vp; 2502 2503 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2504 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2505 2506 /* 2507 * Delete from old vnode list, if on one. 
2508 */ 2509 vp = bp->b_vp; /* XXX */ 2510 bo = bp->b_bufobj; 2511 BO_LOCK(bo); 2512 buf_vlist_remove(bp); 2513 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2514 bo->bo_flag &= ~BO_ONWORKLST; 2515 mtx_lock(&sync_mtx); 2516 LIST_REMOVE(bo, bo_synclist); 2517 syncer_worklist_len--; 2518 mtx_unlock(&sync_mtx); 2519 } 2520 bp->b_vp = NULL; 2521 bp->b_bufobj = NULL; 2522 BO_UNLOCK(bo); 2523 vdrop(vp); 2524 } 2525 2526 /* 2527 * Add an item to the syncer work queue. 2528 */ 2529 static void 2530 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2531 { 2532 int slot; 2533 2534 ASSERT_BO_WLOCKED(bo); 2535 2536 mtx_lock(&sync_mtx); 2537 if (bo->bo_flag & BO_ONWORKLST) 2538 LIST_REMOVE(bo, bo_synclist); 2539 else { 2540 bo->bo_flag |= BO_ONWORKLST; 2541 syncer_worklist_len++; 2542 } 2543 2544 if (delay > syncer_maxdelay - 2) 2545 delay = syncer_maxdelay - 2; 2546 slot = (syncer_delayno + delay) & syncer_mask; 2547 2548 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2549 mtx_unlock(&sync_mtx); 2550 } 2551 2552 static int 2553 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2554 { 2555 int error, len; 2556 2557 mtx_lock(&sync_mtx); 2558 len = syncer_worklist_len - sync_vnode_count; 2559 mtx_unlock(&sync_mtx); 2560 error = SYSCTL_OUT(req, &len, sizeof(len)); 2561 return (error); 2562 } 2563 2564 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2565 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2566 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2567 2568 static struct proc *updateproc; 2569 static void sched_sync(void); 2570 static struct kproc_desc up_kp = { 2571 "syncer", 2572 sched_sync, 2573 &updateproc 2574 }; 2575 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2576 2577 static int 2578 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2579 { 2580 struct vnode *vp; 2581 struct mount *mp; 2582 2583 *bo = LIST_FIRST(slp); 2584 if (*bo == NULL) 2585 return (0); 2586 vp = bo2vnode(*bo); 2587 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2588 return (1); 2589 /* 2590 * We use vhold in case the vnode does not 2591 * successfully sync. vhold prevents the vnode from 2592 * going away when we unlock the sync_mtx so that 2593 * we can acquire the vnode interlock. 2594 */ 2595 vholdl(vp); 2596 mtx_unlock(&sync_mtx); 2597 VI_UNLOCK(vp); 2598 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2599 vdrop(vp); 2600 mtx_lock(&sync_mtx); 2601 return (*bo == LIST_FIRST(slp)); 2602 } 2603 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2604 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2605 VOP_UNLOCK(vp); 2606 vn_finished_write(mp); 2607 BO_LOCK(*bo); 2608 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2609 /* 2610 * Put us back on the worklist. The worklist 2611 * routine will remove us from our current 2612 * position and then add us back in at a later 2613 * position. 2614 */ 2615 vn_syncer_add_to_worklist(*bo, syncdelay); 2616 } 2617 BO_UNLOCK(*bo); 2618 vdrop(vp); 2619 mtx_lock(&sync_mtx); 2620 return (0); 2621 } 2622 2623 static int first_printf = 1; 2624 2625 /* 2626 * System filesystem synchronizer daemon. 
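 *
 * Once a second (faster while shutting down) the daemon advances one
 * slot around the syncer_workitem_pending[] wheel and flushes every
 * bufobj found there; vn_syncer_add_to_worklist() queues a dirty
 * bufobj "delay" slots ahead of the current position, so it is
 * normally synced about that many seconds after being dirtied.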
2627 */ 2628 static void 2629 sched_sync(void) 2630 { 2631 struct synclist *next, *slp; 2632 struct bufobj *bo; 2633 long starttime; 2634 struct thread *td = curthread; 2635 int last_work_seen; 2636 int net_worklist_len; 2637 int syncer_final_iter; 2638 int error; 2639 2640 last_work_seen = 0; 2641 syncer_final_iter = 0; 2642 syncer_state = SYNCER_RUNNING; 2643 starttime = time_uptime; 2644 td->td_pflags |= TDP_NORUNNINGBUF; 2645 2646 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2647 SHUTDOWN_PRI_LAST); 2648 2649 mtx_lock(&sync_mtx); 2650 for (;;) { 2651 if (syncer_state == SYNCER_FINAL_DELAY && 2652 syncer_final_iter == 0) { 2653 mtx_unlock(&sync_mtx); 2654 kproc_suspend_check(td->td_proc); 2655 mtx_lock(&sync_mtx); 2656 } 2657 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2658 if (syncer_state != SYNCER_RUNNING && 2659 starttime != time_uptime) { 2660 if (first_printf) { 2661 printf("\nSyncing disks, vnodes remaining... "); 2662 first_printf = 0; 2663 } 2664 printf("%d ", net_worklist_len); 2665 } 2666 starttime = time_uptime; 2667 2668 /* 2669 * Push files whose dirty time has expired. Be careful 2670 * of interrupt race on slp queue. 2671 * 2672 * Skip over empty worklist slots when shutting down. 2673 */ 2674 do { 2675 slp = &syncer_workitem_pending[syncer_delayno]; 2676 syncer_delayno += 1; 2677 if (syncer_delayno == syncer_maxdelay) 2678 syncer_delayno = 0; 2679 next = &syncer_workitem_pending[syncer_delayno]; 2680 /* 2681 * If the worklist has wrapped since the 2682 * it was emptied of all but syncer vnodes, 2683 * switch to the FINAL_DELAY state and run 2684 * for one more second. 2685 */ 2686 if (syncer_state == SYNCER_SHUTTING_DOWN && 2687 net_worklist_len == 0 && 2688 last_work_seen == syncer_delayno) { 2689 syncer_state = SYNCER_FINAL_DELAY; 2690 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2691 } 2692 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2693 syncer_worklist_len > 0); 2694 2695 /* 2696 * Keep track of the last time there was anything 2697 * on the worklist other than syncer vnodes. 2698 * Return to the SHUTTING_DOWN state if any 2699 * new work appears. 2700 */ 2701 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2702 last_work_seen = syncer_delayno; 2703 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2704 syncer_state = SYNCER_SHUTTING_DOWN; 2705 while (!LIST_EMPTY(slp)) { 2706 error = sync_vnode(slp, &bo, td); 2707 if (error == 1) { 2708 LIST_REMOVE(bo, bo_synclist); 2709 LIST_INSERT_HEAD(next, bo, bo_synclist); 2710 continue; 2711 } 2712 2713 if (first_printf == 0) { 2714 /* 2715 * Drop the sync mutex, because some watchdog 2716 * drivers need to sleep while patting 2717 */ 2718 mtx_unlock(&sync_mtx); 2719 wdog_kern_pat(WD_LASTVAL); 2720 mtx_lock(&sync_mtx); 2721 } 2722 } 2723 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2724 syncer_final_iter--; 2725 /* 2726 * The variable rushjob allows the kernel to speed up the 2727 * processing of the filesystem syncer process. A rushjob 2728 * value of N tells the filesystem syncer to process the next 2729 * N seconds worth of work on its queue ASAP. Currently rushjob 2730 * is used by the soft update code to speed up the filesystem 2731 * syncer process when the incore state is getting so far 2732 * ahead of the disk that the kernel memory pool is being 2733 * threatened with exhaustion. 
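 *
 * Concretely, each unit of rushjob skips the sleep at the bottom of
 * the loop for one iteration, and speedup_syncer() below increments
 * rushjob by one per call (capped at syncdelay / 2), so N granted
 * requests buy roughly N seconds' worth of immediate work.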
2734 */ 2735 if (rushjob > 0) { 2736 rushjob -= 1; 2737 continue; 2738 } 2739 /* 2740 * Just sleep for a short period of time between 2741 * iterations when shutting down to allow some I/O 2742 * to happen. 2743 * 2744 * If it has taken us less than a second to process the 2745 * current work, then wait. Otherwise start right over 2746 * again. We can still lose time if any single round 2747 * takes more than two seconds, but it does not really 2748 * matter as we are just trying to generally pace the 2749 * filesystem activity. 2750 */ 2751 if (syncer_state != SYNCER_RUNNING || 2752 time_uptime == starttime) { 2753 thread_lock(td); 2754 sched_prio(td, PPAUSE); 2755 thread_unlock(td); 2756 } 2757 if (syncer_state != SYNCER_RUNNING) 2758 cv_timedwait(&sync_wakeup, &sync_mtx, 2759 hz / SYNCER_SHUTDOWN_SPEEDUP); 2760 else if (time_uptime == starttime) 2761 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2762 } 2763 } 2764 2765 /* 2766 * Request the syncer daemon to speed up its work. 2767 * We never push it to speed up more than half of its 2768 * normal turn time, otherwise it could take over the cpu. 2769 */ 2770 int 2771 speedup_syncer(void) 2772 { 2773 int ret = 0; 2774 2775 mtx_lock(&sync_mtx); 2776 if (rushjob < syncdelay / 2) { 2777 rushjob += 1; 2778 stat_rush_requests += 1; 2779 ret = 1; 2780 } 2781 mtx_unlock(&sync_mtx); 2782 cv_broadcast(&sync_wakeup); 2783 return (ret); 2784 } 2785 2786 /* 2787 * Tell the syncer to speed up its work and run though its work 2788 * list several times, then tell it to shut down. 2789 */ 2790 static void 2791 syncer_shutdown(void *arg, int howto) 2792 { 2793 2794 if (howto & RB_NOSYNC) 2795 return; 2796 mtx_lock(&sync_mtx); 2797 syncer_state = SYNCER_SHUTTING_DOWN; 2798 rushjob = 0; 2799 mtx_unlock(&sync_mtx); 2800 cv_broadcast(&sync_wakeup); 2801 kproc_shutdown(arg, howto); 2802 } 2803 2804 void 2805 syncer_suspend(void) 2806 { 2807 2808 syncer_shutdown(updateproc, 0); 2809 } 2810 2811 void 2812 syncer_resume(void) 2813 { 2814 2815 mtx_lock(&sync_mtx); 2816 first_printf = 1; 2817 syncer_state = SYNCER_RUNNING; 2818 mtx_unlock(&sync_mtx); 2819 cv_broadcast(&sync_wakeup); 2820 kproc_resume(updateproc); 2821 } 2822 2823 /* 2824 * Move the buffer between the clean and dirty lists of its vnode. 2825 */ 2826 void 2827 reassignbuf(struct buf *bp) 2828 { 2829 struct vnode *vp; 2830 struct bufobj *bo; 2831 int delay; 2832 #ifdef INVARIANTS 2833 struct bufv *bv; 2834 #endif 2835 2836 vp = bp->b_vp; 2837 bo = bp->b_bufobj; 2838 2839 KASSERT((bp->b_flags & B_PAGING) == 0, 2840 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2841 2842 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2843 bp, bp->b_vp, bp->b_flags); 2844 2845 BO_LOCK(bo); 2846 buf_vlist_remove(bp); 2847 2848 /* 2849 * If dirty, put on list of dirty buffers; otherwise insert onto list 2850 * of clean buffers. 
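 *
 * The delay picked below (dirdelay, metadelay or filedelay seconds,
 * depending on the vnode type) determines how far ahead in the syncer
 * wheel the bufobj is queued, and therefore how long a delayed write
 * may sit before the syncer pushes it out.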
2851 */ 2852 if (bp->b_flags & B_DELWRI) { 2853 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2854 switch (vp->v_type) { 2855 case VDIR: 2856 delay = dirdelay; 2857 break; 2858 case VCHR: 2859 delay = metadelay; 2860 break; 2861 default: 2862 delay = filedelay; 2863 } 2864 vn_syncer_add_to_worklist(bo, delay); 2865 } 2866 buf_vlist_add(bp, bo, BX_VNDIRTY); 2867 } else { 2868 buf_vlist_add(bp, bo, BX_VNCLEAN); 2869 2870 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2871 mtx_lock(&sync_mtx); 2872 LIST_REMOVE(bo, bo_synclist); 2873 syncer_worklist_len--; 2874 mtx_unlock(&sync_mtx); 2875 bo->bo_flag &= ~BO_ONWORKLST; 2876 } 2877 } 2878 #ifdef INVARIANTS 2879 bv = &bo->bo_clean; 2880 bp = TAILQ_FIRST(&bv->bv_hd); 2881 KASSERT(bp == NULL || bp->b_bufobj == bo, 2882 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2883 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2884 KASSERT(bp == NULL || bp->b_bufobj == bo, 2885 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2886 bv = &bo->bo_dirty; 2887 bp = TAILQ_FIRST(&bv->bv_hd); 2888 KASSERT(bp == NULL || bp->b_bufobj == bo, 2889 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2890 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2891 KASSERT(bp == NULL || bp->b_bufobj == bo, 2892 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2893 #endif 2894 BO_UNLOCK(bo); 2895 } 2896 2897 static void 2898 v_init_counters(struct vnode *vp) 2899 { 2900 2901 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2902 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2903 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2904 2905 refcount_init(&vp->v_holdcnt, 1); 2906 refcount_init(&vp->v_usecount, 1); 2907 } 2908 2909 /* 2910 * Grab a particular vnode from the free list, increment its 2911 * reference count and lock it. VIRF_DOOMED is set if the vnode 2912 * is being destroyed. Only callers who specify LK_RETRY will 2913 * see doomed vnodes. If inactive processing was delayed in 2914 * vput try to do it here. 2915 * 2916 * usecount is manipulated using atomics without holding any locks. 2917 * 2918 * holdcnt can be manipulated using atomics without holding any locks, 2919 * except when transitioning 1<->0, in which case the interlock is held. 2920 * 2921 * Consumers which don't guarantee liveness of the vnode can use SMR to 2922 * try to get a reference. Note this operation can fail since the vnode 2923 * may be awaiting getting freed by the time they get to it. 
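 *
 * A lockless consumer is expected to follow roughly this pattern
 * (sketch only; how the vnode pointer itself is found is up to the
 * caller's SMR-safe data structure):
 *
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<retry or fall back to a locked lookup>;
 *	error = vget_finish(vp, LK_SHARED, vs);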
2924 */ 2925 enum vgetstate 2926 vget_prep_smr(struct vnode *vp) 2927 { 2928 enum vgetstate vs; 2929 2930 VFS_SMR_ASSERT_ENTERED(); 2931 2932 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2933 vs = VGET_USECOUNT; 2934 } else { 2935 if (vhold_smr(vp)) 2936 vs = VGET_HOLDCNT; 2937 else 2938 vs = VGET_NONE; 2939 } 2940 return (vs); 2941 } 2942 2943 enum vgetstate 2944 vget_prep(struct vnode *vp) 2945 { 2946 enum vgetstate vs; 2947 2948 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2949 vs = VGET_USECOUNT; 2950 } else { 2951 vhold(vp); 2952 vs = VGET_HOLDCNT; 2953 } 2954 return (vs); 2955 } 2956 2957 void 2958 vget_abort(struct vnode *vp, enum vgetstate vs) 2959 { 2960 2961 switch (vs) { 2962 case VGET_USECOUNT: 2963 vrele(vp); 2964 break; 2965 case VGET_HOLDCNT: 2966 vdrop(vp); 2967 break; 2968 default: 2969 __assert_unreachable(); 2970 } 2971 } 2972 2973 int 2974 vget(struct vnode *vp, int flags) 2975 { 2976 enum vgetstate vs; 2977 2978 vs = vget_prep(vp); 2979 return (vget_finish(vp, flags, vs)); 2980 } 2981 2982 int 2983 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2984 { 2985 int error; 2986 2987 if ((flags & LK_INTERLOCK) != 0) 2988 ASSERT_VI_LOCKED(vp, __func__); 2989 else 2990 ASSERT_VI_UNLOCKED(vp, __func__); 2991 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2992 VNPASS(vp->v_holdcnt > 0, vp); 2993 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2994 2995 error = vn_lock(vp, flags); 2996 if (__predict_false(error != 0)) { 2997 vget_abort(vp, vs); 2998 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2999 vp); 3000 return (error); 3001 } 3002 3003 vget_finish_ref(vp, vs); 3004 return (0); 3005 } 3006 3007 void 3008 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3009 { 3010 int old; 3011 3012 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3013 VNPASS(vp->v_holdcnt > 0, vp); 3014 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3015 3016 if (vs == VGET_USECOUNT) 3017 return; 3018 3019 /* 3020 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3021 * the vnode around. Otherwise someone else lended their hold count and 3022 * we have to drop ours. 3023 */ 3024 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3025 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3026 if (old != 0) { 3027 #ifdef INVARIANTS 3028 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3029 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3030 #else 3031 refcount_release(&vp->v_holdcnt); 3032 #endif 3033 } 3034 } 3035 3036 void 3037 vref(struct vnode *vp) 3038 { 3039 enum vgetstate vs; 3040 3041 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3042 vs = vget_prep(vp); 3043 vget_finish_ref(vp, vs); 3044 } 3045 3046 void 3047 vrefact(struct vnode *vp) 3048 { 3049 3050 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3051 #ifdef INVARIANTS 3052 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3053 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3054 #else 3055 refcount_acquire(&vp->v_usecount); 3056 #endif 3057 } 3058 3059 void 3060 vlazy(struct vnode *vp) 3061 { 3062 struct mount *mp; 3063 3064 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3065 3066 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3067 return; 3068 /* 3069 * We may get here for inactive routines after the vnode got doomed. 
3070 */ 3071 if (VN_IS_DOOMED(vp)) 3072 return; 3073 mp = vp->v_mount; 3074 mtx_lock(&mp->mnt_listmtx); 3075 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3076 vp->v_mflag |= VMP_LAZYLIST; 3077 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3078 mp->mnt_lazyvnodelistsize++; 3079 } 3080 mtx_unlock(&mp->mnt_listmtx); 3081 } 3082 3083 static void 3084 vunlazy(struct vnode *vp) 3085 { 3086 struct mount *mp; 3087 3088 ASSERT_VI_LOCKED(vp, __func__); 3089 VNPASS(!VN_IS_DOOMED(vp), vp); 3090 3091 mp = vp->v_mount; 3092 mtx_lock(&mp->mnt_listmtx); 3093 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3094 /* 3095 * Don't remove the vnode from the lazy list if another thread 3096 * has increased the hold count. It may have re-enqueued the 3097 * vnode to the lazy list and is now responsible for its 3098 * removal. 3099 */ 3100 if (vp->v_holdcnt == 0) { 3101 vp->v_mflag &= ~VMP_LAZYLIST; 3102 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3103 mp->mnt_lazyvnodelistsize--; 3104 } 3105 mtx_unlock(&mp->mnt_listmtx); 3106 } 3107 3108 /* 3109 * This routine is only meant to be called from vgonel prior to dooming 3110 * the vnode. 3111 */ 3112 static void 3113 vunlazy_gone(struct vnode *vp) 3114 { 3115 struct mount *mp; 3116 3117 ASSERT_VOP_ELOCKED(vp, __func__); 3118 ASSERT_VI_LOCKED(vp, __func__); 3119 VNPASS(!VN_IS_DOOMED(vp), vp); 3120 3121 if (vp->v_mflag & VMP_LAZYLIST) { 3122 mp = vp->v_mount; 3123 mtx_lock(&mp->mnt_listmtx); 3124 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3125 vp->v_mflag &= ~VMP_LAZYLIST; 3126 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3127 mp->mnt_lazyvnodelistsize--; 3128 mtx_unlock(&mp->mnt_listmtx); 3129 } 3130 } 3131 3132 static void 3133 vdefer_inactive(struct vnode *vp) 3134 { 3135 3136 ASSERT_VI_LOCKED(vp, __func__); 3137 VNASSERT(vp->v_holdcnt > 0, vp, 3138 ("%s: vnode without hold count", __func__)); 3139 if (VN_IS_DOOMED(vp)) { 3140 vdropl(vp); 3141 return; 3142 } 3143 if (vp->v_iflag & VI_DEFINACT) { 3144 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3145 vdropl(vp); 3146 return; 3147 } 3148 if (vp->v_usecount > 0) { 3149 vp->v_iflag &= ~VI_OWEINACT; 3150 vdropl(vp); 3151 return; 3152 } 3153 vlazy(vp); 3154 vp->v_iflag |= VI_DEFINACT; 3155 VI_UNLOCK(vp); 3156 counter_u64_add(deferred_inact, 1); 3157 } 3158 3159 static void 3160 vdefer_inactive_unlocked(struct vnode *vp) 3161 { 3162 3163 VI_LOCK(vp); 3164 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3165 vdropl(vp); 3166 return; 3167 } 3168 vdefer_inactive(vp); 3169 } 3170 3171 enum vput_op { VRELE, VPUT, VUNREF }; 3172 3173 /* 3174 * Handle ->v_usecount transitioning to 0. 3175 * 3176 * By releasing the last usecount we take ownership of the hold count which 3177 * provides liveness of the vnode, meaning we have to vdrop. 3178 * 3179 * For all vnodes we may need to perform inactive processing. It requires an 3180 * exclusive lock on the vnode, while it is legal to call here with only a 3181 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3182 * inactive processing gets deferred to the syncer. 3183 * 3184 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3185 * on the lock being held all the way until VOP_INACTIVE. This in particular 3186 * happens with UFS which adds half-constructed vnodes to the hash, where they 3187 * can be found by other code. 
3188 */ 3189 static void 3190 vput_final(struct vnode *vp, enum vput_op func) 3191 { 3192 int error; 3193 bool want_unlock; 3194 3195 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3196 VNPASS(vp->v_holdcnt > 0, vp); 3197 3198 VI_LOCK(vp); 3199 3200 /* 3201 * By the time we got here someone else might have transitioned 3202 * the count back to > 0. 3203 */ 3204 if (vp->v_usecount > 0) 3205 goto out; 3206 3207 /* 3208 * If the vnode is doomed vgone already performed inactive processing 3209 * (if needed). 3210 */ 3211 if (VN_IS_DOOMED(vp)) 3212 goto out; 3213 3214 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3215 goto out; 3216 3217 if (vp->v_iflag & VI_DOINGINACT) 3218 goto out; 3219 3220 /* 3221 * Locking operations here will drop the interlock and possibly the 3222 * vnode lock, opening a window where the vnode can get doomed all the 3223 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3224 * perform inactive. 3225 */ 3226 vp->v_iflag |= VI_OWEINACT; 3227 want_unlock = false; 3228 error = 0; 3229 switch (func) { 3230 case VRELE: 3231 switch (VOP_ISLOCKED(vp)) { 3232 case LK_EXCLUSIVE: 3233 break; 3234 case LK_EXCLOTHER: 3235 case 0: 3236 want_unlock = true; 3237 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3238 VI_LOCK(vp); 3239 break; 3240 default: 3241 /* 3242 * The lock has at least one sharer, but we have no way 3243 * to conclude whether this is us. Play it safe and 3244 * defer processing. 3245 */ 3246 error = EAGAIN; 3247 break; 3248 } 3249 break; 3250 case VPUT: 3251 want_unlock = true; 3252 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3253 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3254 LK_NOWAIT); 3255 VI_LOCK(vp); 3256 } 3257 break; 3258 case VUNREF: 3259 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3260 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3261 VI_LOCK(vp); 3262 } 3263 break; 3264 } 3265 if (error == 0) { 3266 if (func == VUNREF) { 3267 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3268 ("recursive vunref")); 3269 vp->v_vflag |= VV_UNREF; 3270 } 3271 for (;;) { 3272 error = vinactive(vp); 3273 if (want_unlock) 3274 VOP_UNLOCK(vp); 3275 if (error != ERELOOKUP || !want_unlock) 3276 break; 3277 VOP_LOCK(vp, LK_EXCLUSIVE); 3278 } 3279 if (func == VUNREF) 3280 vp->v_vflag &= ~VV_UNREF; 3281 vdropl(vp); 3282 } else { 3283 vdefer_inactive(vp); 3284 } 3285 return; 3286 out: 3287 if (func == VPUT) 3288 VOP_UNLOCK(vp); 3289 vdropl(vp); 3290 } 3291 3292 /* 3293 * Decrement ->v_usecount for a vnode. 3294 * 3295 * Releasing the last use count requires additional processing, see vput_final 3296 * above for details. 3297 * 3298 * Comment above each variant denotes lock state on entry and exit. 
3299 */ 3300 3301 /* 3302 * in: any 3303 * out: same as passed in 3304 */ 3305 void 3306 vrele(struct vnode *vp) 3307 { 3308 3309 ASSERT_VI_UNLOCKED(vp, __func__); 3310 if (!refcount_release(&vp->v_usecount)) 3311 return; 3312 vput_final(vp, VRELE); 3313 } 3314 3315 /* 3316 * in: locked 3317 * out: unlocked 3318 */ 3319 void 3320 vput(struct vnode *vp) 3321 { 3322 3323 ASSERT_VOP_LOCKED(vp, __func__); 3324 ASSERT_VI_UNLOCKED(vp, __func__); 3325 if (!refcount_release(&vp->v_usecount)) { 3326 VOP_UNLOCK(vp); 3327 return; 3328 } 3329 vput_final(vp, VPUT); 3330 } 3331 3332 /* 3333 * in: locked 3334 * out: locked 3335 */ 3336 void 3337 vunref(struct vnode *vp) 3338 { 3339 3340 ASSERT_VOP_LOCKED(vp, __func__); 3341 ASSERT_VI_UNLOCKED(vp, __func__); 3342 if (!refcount_release(&vp->v_usecount)) 3343 return; 3344 vput_final(vp, VUNREF); 3345 } 3346 3347 void 3348 vhold(struct vnode *vp) 3349 { 3350 int old; 3351 3352 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3353 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3354 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3355 ("%s: wrong hold count %d", __func__, old)); 3356 if (old == 0) 3357 vfs_freevnodes_dec(); 3358 } 3359 3360 void 3361 vholdnz(struct vnode *vp) 3362 { 3363 3364 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3365 #ifdef INVARIANTS 3366 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3367 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3368 ("%s: wrong hold count %d", __func__, old)); 3369 #else 3370 atomic_add_int(&vp->v_holdcnt, 1); 3371 #endif 3372 } 3373 3374 /* 3375 * Grab a hold count unless the vnode is freed. 3376 * 3377 * Only use this routine if vfs smr is the only protection you have against 3378 * freeing the vnode. 3379 * 3380 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3381 * is not set. After the flag is set the vnode becomes immutable to anyone but 3382 * the thread which managed to set the flag. 3383 * 3384 * It may be tempting to replace the loop with: 3385 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3386 * if (count & VHOLD_NO_SMR) { 3387 * backpedal and error out; 3388 * } 3389 * 3390 * However, while this is more performant, it hinders debugging by eliminating 3391 * the previously mentioned invariant. 3392 */ 3393 bool 3394 vhold_smr(struct vnode *vp) 3395 { 3396 int count; 3397 3398 VFS_SMR_ASSERT_ENTERED(); 3399 3400 count = atomic_load_int(&vp->v_holdcnt); 3401 for (;;) { 3402 if (count & VHOLD_NO_SMR) { 3403 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3404 ("non-zero hold count with flags %d\n", count)); 3405 return (false); 3406 } 3407 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3408 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3409 if (count == 0) 3410 vfs_freevnodes_dec(); 3411 return (true); 3412 } 3413 } 3414 } 3415 3416 /* 3417 * Hold a free vnode for recycling. 3418 * 3419 * Note: vnode_init references this comment. 3420 * 3421 * Attempts to recycle only need the global vnode list lock and have no use for 3422 * SMR. 3423 * 3424 * However, vnodes get inserted into the global list before they get fully 3425 * initialized and stay there until UMA decides to free the memory. This in 3426 * particular means the target can be found before it becomes usable and after 3427 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3428 * VHOLD_NO_SMR. 3429 * 3430 * Note: the vnode may gain more references after we transition the count 0->1. 
3431 */ 3432 static bool 3433 vhold_recycle_free(struct vnode *vp) 3434 { 3435 int count; 3436 3437 mtx_assert(&vnode_list_mtx, MA_OWNED); 3438 3439 count = atomic_load_int(&vp->v_holdcnt); 3440 for (;;) { 3441 if (count & VHOLD_NO_SMR) { 3442 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3443 ("non-zero hold count with flags %d\n", count)); 3444 return (false); 3445 } 3446 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3447 if (count > 0) { 3448 return (false); 3449 } 3450 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3451 vfs_freevnodes_dec(); 3452 return (true); 3453 } 3454 } 3455 } 3456 3457 static void __noinline 3458 vdbatch_process(struct vdbatch *vd) 3459 { 3460 struct vnode *vp; 3461 int i; 3462 3463 mtx_assert(&vd->lock, MA_OWNED); 3464 MPASS(curthread->td_pinned > 0); 3465 MPASS(vd->index == VDBATCH_SIZE); 3466 3467 mtx_lock(&vnode_list_mtx); 3468 critical_enter(); 3469 freevnodes += vd->freevnodes; 3470 for (i = 0; i < VDBATCH_SIZE; i++) { 3471 vp = vd->tab[i]; 3472 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3473 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3474 MPASS(vp->v_dbatchcpu != NOCPU); 3475 vp->v_dbatchcpu = NOCPU; 3476 } 3477 mtx_unlock(&vnode_list_mtx); 3478 vd->freevnodes = 0; 3479 bzero(vd->tab, sizeof(vd->tab)); 3480 vd->index = 0; 3481 critical_exit(); 3482 } 3483 3484 static void 3485 vdbatch_enqueue(struct vnode *vp) 3486 { 3487 struct vdbatch *vd; 3488 3489 ASSERT_VI_LOCKED(vp, __func__); 3490 VNASSERT(!VN_IS_DOOMED(vp), vp, 3491 ("%s: deferring requeue of a doomed vnode", __func__)); 3492 3493 if (vp->v_dbatchcpu != NOCPU) { 3494 VI_UNLOCK(vp); 3495 return; 3496 } 3497 3498 sched_pin(); 3499 vd = DPCPU_PTR(vd); 3500 mtx_lock(&vd->lock); 3501 MPASS(vd->index < VDBATCH_SIZE); 3502 MPASS(vd->tab[vd->index] == NULL); 3503 /* 3504 * A hack: we depend on being pinned so that we know what to put in 3505 * ->v_dbatchcpu. 3506 */ 3507 vp->v_dbatchcpu = curcpu; 3508 vd->tab[vd->index] = vp; 3509 vd->index++; 3510 VI_UNLOCK(vp); 3511 if (vd->index == VDBATCH_SIZE) 3512 vdbatch_process(vd); 3513 mtx_unlock(&vd->lock); 3514 sched_unpin(); 3515 } 3516 3517 /* 3518 * This routine must only be called for vnodes which are about to be 3519 * deallocated. Supporting dequeue for arbitrary vndoes would require 3520 * validating that the locked batch matches. 3521 */ 3522 static void 3523 vdbatch_dequeue(struct vnode *vp) 3524 { 3525 struct vdbatch *vd; 3526 int i; 3527 short cpu; 3528 3529 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3530 ("%s: called for a used vnode\n", __func__)); 3531 3532 cpu = vp->v_dbatchcpu; 3533 if (cpu == NOCPU) 3534 return; 3535 3536 vd = DPCPU_ID_PTR(cpu, vd); 3537 mtx_lock(&vd->lock); 3538 for (i = 0; i < vd->index; i++) { 3539 if (vd->tab[i] != vp) 3540 continue; 3541 vp->v_dbatchcpu = NOCPU; 3542 vd->index--; 3543 vd->tab[i] = vd->tab[vd->index]; 3544 vd->tab[vd->index] = NULL; 3545 break; 3546 } 3547 mtx_unlock(&vd->lock); 3548 /* 3549 * Either we dequeued the vnode above or the target CPU beat us to it. 3550 */ 3551 MPASS(vp->v_dbatchcpu == NOCPU); 3552 } 3553 3554 /* 3555 * Drop the hold count of the vnode. If this is the last reference to 3556 * the vnode we place it on the free list unless it has been vgone'd 3557 * (marked VIRF_DOOMED) in which case we will free it. 3558 * 3559 * Because the vnode vm object keeps a hold reference on the vnode if 3560 * there is at least one resident non-cached page, the vnode cannot 3561 * leave the active list without the page cleanup done. 
3562 */ 3563 static void __noinline 3564 vdropl_final(struct vnode *vp) 3565 { 3566 3567 ASSERT_VI_LOCKED(vp, __func__); 3568 VNPASS(VN_IS_DOOMED(vp), vp); 3569 /* 3570 * Set the VHOLD_NO_SMR flag. 3571 * 3572 * We may be racing against vhold_smr. If they win we can just pretend 3573 * we never got this far, they will vdrop later. 3574 */ 3575 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3576 vfs_freevnodes_inc(); 3577 VI_UNLOCK(vp); 3578 /* 3579 * We lost the aforementioned race. Any subsequent access is 3580 * invalid as they might have managed to vdropl on their own. 3581 */ 3582 return; 3583 } 3584 /* 3585 * Don't bump freevnodes as this one is going away. 3586 */ 3587 freevnode(vp); 3588 } 3589 3590 void 3591 vdrop(struct vnode *vp) 3592 { 3593 3594 ASSERT_VI_UNLOCKED(vp, __func__); 3595 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3596 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3597 return; 3598 VI_LOCK(vp); 3599 vdropl(vp); 3600 } 3601 3602 void 3603 vdropl(struct vnode *vp) 3604 { 3605 3606 ASSERT_VI_LOCKED(vp, __func__); 3607 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3608 if (!refcount_release(&vp->v_holdcnt)) { 3609 VI_UNLOCK(vp); 3610 return; 3611 } 3612 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3613 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3614 if (VN_IS_DOOMED(vp)) { 3615 vdropl_final(vp); 3616 return; 3617 } 3618 3619 vfs_freevnodes_inc(); 3620 if (vp->v_mflag & VMP_LAZYLIST) { 3621 vunlazy(vp); 3622 } 3623 /* 3624 * Also unlocks the interlock. We can't assert on it as we 3625 * released our hold and by now the vnode might have been 3626 * freed. 3627 */ 3628 vdbatch_enqueue(vp); 3629 } 3630 3631 /* 3632 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3633 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3634 */ 3635 static int 3636 vinactivef(struct vnode *vp) 3637 { 3638 struct vm_object *obj; 3639 int error; 3640 3641 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3642 ASSERT_VI_LOCKED(vp, "vinactive"); 3643 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3644 ("vinactive: recursed on VI_DOINGINACT")); 3645 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3646 vp->v_iflag |= VI_DOINGINACT; 3647 vp->v_iflag &= ~VI_OWEINACT; 3648 VI_UNLOCK(vp); 3649 /* 3650 * Before moving off the active list, we must be sure that any 3651 * modified pages are converted into the vnode's dirty 3652 * buffers, since these will no longer be checked once the 3653 * vnode is on the inactive list. 3654 * 3655 * The write-out of the dirty pages is asynchronous. At the 3656 * point that VOP_INACTIVE() is called, there could still be 3657 * pending I/O and dirty pages in the object. 
3658 */ 3659 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3660 vm_object_mightbedirty(obj)) { 3661 VM_OBJECT_WLOCK(obj); 3662 vm_object_page_clean(obj, 0, 0, 0); 3663 VM_OBJECT_WUNLOCK(obj); 3664 } 3665 error = VOP_INACTIVE(vp); 3666 VI_LOCK(vp); 3667 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3668 ("vinactive: lost VI_DOINGINACT")); 3669 vp->v_iflag &= ~VI_DOINGINACT; 3670 return (error); 3671 } 3672 3673 int 3674 vinactive(struct vnode *vp) 3675 { 3676 3677 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3678 ASSERT_VI_LOCKED(vp, "vinactive"); 3679 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3680 3681 if ((vp->v_iflag & VI_OWEINACT) == 0) 3682 return (0); 3683 if (vp->v_iflag & VI_DOINGINACT) 3684 return (0); 3685 if (vp->v_usecount > 0) { 3686 vp->v_iflag &= ~VI_OWEINACT; 3687 return (0); 3688 } 3689 return (vinactivef(vp)); 3690 } 3691 3692 /* 3693 * Remove any vnodes in the vnode table belonging to mount point mp. 3694 * 3695 * If FORCECLOSE is not specified, there should not be any active ones, 3696 * return error if any are found (nb: this is a user error, not a 3697 * system error). If FORCECLOSE is specified, detach any active vnodes 3698 * that are found. 3699 * 3700 * If WRITECLOSE is set, only flush out regular file vnodes open for 3701 * writing. 3702 * 3703 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3704 * 3705 * `rootrefs' specifies the base reference count for the root vnode 3706 * of this filesystem. The root vnode is considered busy if its 3707 * v_usecount exceeds this value. On a successful return, vflush(, td) 3708 * will call vrele() on the root vnode exactly rootrefs times. 3709 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3710 * be zero. 3711 */ 3712 #ifdef DIAGNOSTIC 3713 static int busyprt = 0; /* print out busy vnodes */ 3714 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3715 #endif 3716 3717 int 3718 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3719 { 3720 struct vnode *vp, *mvp, *rootvp = NULL; 3721 struct vattr vattr; 3722 int busy = 0, error; 3723 3724 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3725 rootrefs, flags); 3726 if (rootrefs > 0) { 3727 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3728 ("vflush: bad args")); 3729 /* 3730 * Get the filesystem root vnode. We can vput() it 3731 * immediately, since with rootrefs > 0, it won't go away. 3732 */ 3733 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3734 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3735 __func__, error); 3736 return (error); 3737 } 3738 vput(rootvp); 3739 } 3740 loop: 3741 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3742 vholdl(vp); 3743 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3744 if (error) { 3745 vdrop(vp); 3746 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3747 goto loop; 3748 } 3749 /* 3750 * Skip over a vnodes marked VV_SYSTEM. 3751 */ 3752 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3753 VOP_UNLOCK(vp); 3754 vdrop(vp); 3755 continue; 3756 } 3757 /* 3758 * If WRITECLOSE is set, flush out unlinked but still open 3759 * files (even if open only for reading) and regular file 3760 * vnodes open for writing. 
3761 */ 3762 if (flags & WRITECLOSE) { 3763 if (vp->v_object != NULL) { 3764 VM_OBJECT_WLOCK(vp->v_object); 3765 vm_object_page_clean(vp->v_object, 0, 0, 0); 3766 VM_OBJECT_WUNLOCK(vp->v_object); 3767 } 3768 do { 3769 error = VOP_FSYNC(vp, MNT_WAIT, td); 3770 } while (error == ERELOOKUP); 3771 if (error != 0) { 3772 VOP_UNLOCK(vp); 3773 vdrop(vp); 3774 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3775 return (error); 3776 } 3777 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3778 VI_LOCK(vp); 3779 3780 if ((vp->v_type == VNON || 3781 (error == 0 && vattr.va_nlink > 0)) && 3782 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3783 VOP_UNLOCK(vp); 3784 vdropl(vp); 3785 continue; 3786 } 3787 } else 3788 VI_LOCK(vp); 3789 /* 3790 * With v_usecount == 0, all we need to do is clear out the 3791 * vnode data structures and we are done. 3792 * 3793 * If FORCECLOSE is set, forcibly close the vnode. 3794 */ 3795 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3796 vgonel(vp); 3797 } else { 3798 busy++; 3799 #ifdef DIAGNOSTIC 3800 if (busyprt) 3801 vn_printf(vp, "vflush: busy vnode "); 3802 #endif 3803 } 3804 VOP_UNLOCK(vp); 3805 vdropl(vp); 3806 } 3807 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3808 /* 3809 * If just the root vnode is busy, and if its refcount 3810 * is equal to `rootrefs', then go ahead and kill it. 3811 */ 3812 VI_LOCK(rootvp); 3813 KASSERT(busy > 0, ("vflush: not busy")); 3814 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3815 ("vflush: usecount %d < rootrefs %d", 3816 rootvp->v_usecount, rootrefs)); 3817 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3818 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3819 vgone(rootvp); 3820 VOP_UNLOCK(rootvp); 3821 busy = 0; 3822 } else 3823 VI_UNLOCK(rootvp); 3824 } 3825 if (busy) { 3826 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3827 busy); 3828 return (EBUSY); 3829 } 3830 for (; rootrefs > 0; rootrefs--) 3831 vrele(rootvp); 3832 return (0); 3833 } 3834 3835 /* 3836 * Recycle an unused vnode to the front of the free list. 3837 */ 3838 int 3839 vrecycle(struct vnode *vp) 3840 { 3841 int recycled; 3842 3843 VI_LOCK(vp); 3844 recycled = vrecyclel(vp); 3845 VI_UNLOCK(vp); 3846 return (recycled); 3847 } 3848 3849 /* 3850 * vrecycle, with the vp interlock held. 3851 */ 3852 int 3853 vrecyclel(struct vnode *vp) 3854 { 3855 int recycled; 3856 3857 ASSERT_VOP_ELOCKED(vp, __func__); 3858 ASSERT_VI_LOCKED(vp, __func__); 3859 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3860 recycled = 0; 3861 if (vp->v_usecount == 0) { 3862 recycled = 1; 3863 vgonel(vp); 3864 } 3865 return (recycled); 3866 } 3867 3868 /* 3869 * Eliminate all activity associated with a vnode 3870 * in preparation for reuse. 3871 */ 3872 void 3873 vgone(struct vnode *vp) 3874 { 3875 VI_LOCK(vp); 3876 vgonel(vp); 3877 VI_UNLOCK(vp); 3878 } 3879 3880 /* 3881 * Notify upper mounts about reclaimed or unlinked vnode. 
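 *
 * Stacked filesystems such as nullfs register on the lower mount's
 * mnt_notify list; the VFS_RECLAIM_LOWERVP() and VFS_UNLINK_LOWERVP()
 * calls below let them drop their references to the lower vnode before
 * it is reclaimed.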
3882 */ 3883 void 3884 vfs_notify_upper(struct vnode *vp, int event) 3885 { 3886 struct mount *mp; 3887 struct mount_upper_node *ump; 3888 3889 mp = atomic_load_ptr(&vp->v_mount); 3890 if (mp == NULL) 3891 return; 3892 if (TAILQ_EMPTY(&mp->mnt_notify)) 3893 return; 3894 3895 MNT_ILOCK(mp); 3896 mp->mnt_upper_pending++; 3897 KASSERT(mp->mnt_upper_pending > 0, 3898 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 3899 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 3900 MNT_IUNLOCK(mp); 3901 switch (event) { 3902 case VFS_NOTIFY_UPPER_RECLAIM: 3903 VFS_RECLAIM_LOWERVP(ump->mp, vp); 3904 break; 3905 case VFS_NOTIFY_UPPER_UNLINK: 3906 VFS_UNLINK_LOWERVP(ump->mp, vp); 3907 break; 3908 default: 3909 KASSERT(0, ("invalid event %d", event)); 3910 break; 3911 } 3912 MNT_ILOCK(mp); 3913 } 3914 mp->mnt_upper_pending--; 3915 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 3916 mp->mnt_upper_pending == 0) { 3917 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 3918 wakeup(&mp->mnt_uppers); 3919 } 3920 MNT_IUNLOCK(mp); 3921 } 3922 3923 /* 3924 * vgone, with the vp interlock held. 3925 */ 3926 static void 3927 vgonel(struct vnode *vp) 3928 { 3929 struct thread *td; 3930 struct mount *mp; 3931 vm_object_t object; 3932 bool active, doinginact, oweinact; 3933 3934 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3935 ASSERT_VI_LOCKED(vp, "vgonel"); 3936 VNASSERT(vp->v_holdcnt, vp, 3937 ("vgonel: vp %p has no reference.", vp)); 3938 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3939 td = curthread; 3940 3941 /* 3942 * Don't vgonel if we're already doomed. 3943 */ 3944 if (VN_IS_DOOMED(vp)) 3945 return; 3946 /* 3947 * Paired with freevnode. 3948 */ 3949 vn_seqc_write_begin_locked(vp); 3950 vunlazy_gone(vp); 3951 vn_irflag_set_locked(vp, VIRF_DOOMED); 3952 3953 /* 3954 * Check to see if the vnode is in use. If so, we have to 3955 * call VOP_CLOSE() and VOP_INACTIVE(). 3956 * 3957 * It could be that VOP_INACTIVE() requested reclamation, in 3958 * which case we should avoid recursion, so check 3959 * VI_DOINGINACT. This is not precise but good enough. 3960 */ 3961 active = vp->v_usecount > 0; 3962 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3963 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 3964 3965 /* 3966 * If we need to do inactive VI_OWEINACT will be set. 3967 */ 3968 if (vp->v_iflag & VI_DEFINACT) { 3969 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3970 vp->v_iflag &= ~VI_DEFINACT; 3971 vdropl(vp); 3972 } else { 3973 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3974 VI_UNLOCK(vp); 3975 } 3976 cache_purge_vgone(vp); 3977 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3978 3979 /* 3980 * If purging an active vnode, it must be closed and 3981 * deactivated before being reclaimed. 3982 */ 3983 if (active) 3984 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3985 if (!doinginact) { 3986 do { 3987 if (oweinact || active) { 3988 VI_LOCK(vp); 3989 vinactivef(vp); 3990 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3991 VI_UNLOCK(vp); 3992 } 3993 } while (oweinact); 3994 } 3995 if (vp->v_type == VSOCK) 3996 vfs_unp_reclaim(vp); 3997 3998 /* 3999 * Clean out any buffers associated with the vnode. 4000 * If the flush fails, just toss the buffers. 
4001 */ 4002 mp = NULL; 4003 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4004 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4005 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4006 while (vinvalbuf(vp, 0, 0, 0) != 0) 4007 ; 4008 } 4009 4010 BO_LOCK(&vp->v_bufobj); 4011 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4012 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4013 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4014 vp->v_bufobj.bo_clean.bv_cnt == 0, 4015 ("vp %p bufobj not invalidated", vp)); 4016 4017 /* 4018 * For VMIO bufobj, BO_DEAD is set later, or in 4019 * vm_object_terminate() after the object's page queue is 4020 * flushed. 4021 */ 4022 object = vp->v_bufobj.bo_object; 4023 if (object == NULL) 4024 vp->v_bufobj.bo_flag |= BO_DEAD; 4025 BO_UNLOCK(&vp->v_bufobj); 4026 4027 /* 4028 * Handle the VM part. Tmpfs handles v_object on its own (the 4029 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4030 * should not touch the object borrowed from the lower vnode 4031 * (the handle check). 4032 */ 4033 if (object != NULL && object->type == OBJT_VNODE && 4034 object->handle == vp) 4035 vnode_destroy_vobject(vp); 4036 4037 /* 4038 * Reclaim the vnode. 4039 */ 4040 if (VOP_RECLAIM(vp)) 4041 panic("vgone: cannot reclaim"); 4042 if (mp != NULL) 4043 vn_finished_secondary_write(mp); 4044 VNASSERT(vp->v_object == NULL, vp, 4045 ("vop_reclaim left v_object vp=%p", vp)); 4046 /* 4047 * Clear the advisory locks and wake up waiting threads. 4048 */ 4049 (void)VOP_ADVLOCKPURGE(vp); 4050 vp->v_lockf = NULL; 4051 /* 4052 * Delete from old mount point vnode list. 4053 */ 4054 delmntque(vp); 4055 /* 4056 * Done with purge, reset to the standard lock and invalidate 4057 * the vnode. 4058 */ 4059 VI_LOCK(vp); 4060 vp->v_vnlock = &vp->v_lock; 4061 vp->v_op = &dead_vnodeops; 4062 vp->v_type = VBAD; 4063 } 4064 4065 /* 4066 * Print out a description of a vnode. 4067 */ 4068 static const char * const typename[] = 4069 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4070 "VMARKER"}; 4071 4072 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4073 "new hold count flag not added to vn_printf"); 4074 4075 void 4076 vn_printf(struct vnode *vp, const char *fmt, ...) 
4077 { 4078 va_list ap; 4079 char buf[256], buf2[16]; 4080 u_long flags; 4081 u_int holdcnt; 4082 short irflag; 4083 4084 va_start(ap, fmt); 4085 vprintf(fmt, ap); 4086 va_end(ap); 4087 printf("%p: ", (void *)vp); 4088 printf("type %s\n", typename[vp->v_type]); 4089 holdcnt = atomic_load_int(&vp->v_holdcnt); 4090 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4091 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4092 vp->v_seqc_users); 4093 switch (vp->v_type) { 4094 case VDIR: 4095 printf(" mountedhere %p\n", vp->v_mountedhere); 4096 break; 4097 case VCHR: 4098 printf(" rdev %p\n", vp->v_rdev); 4099 break; 4100 case VSOCK: 4101 printf(" socket %p\n", vp->v_unpcb); 4102 break; 4103 case VFIFO: 4104 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4105 break; 4106 default: 4107 printf("\n"); 4108 break; 4109 } 4110 buf[0] = '\0'; 4111 buf[1] = '\0'; 4112 if (holdcnt & VHOLD_NO_SMR) 4113 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4114 printf(" hold count flags (%s)\n", buf + 1); 4115 4116 buf[0] = '\0'; 4117 buf[1] = '\0'; 4118 irflag = vn_irflag_read(vp); 4119 if (irflag & VIRF_DOOMED) 4120 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4121 if (irflag & VIRF_PGREAD) 4122 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4123 if (irflag & VIRF_MOUNTPOINT) 4124 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4125 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT); 4126 if (flags != 0) { 4127 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4128 strlcat(buf, buf2, sizeof(buf)); 4129 } 4130 if (vp->v_vflag & VV_ROOT) 4131 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4132 if (vp->v_vflag & VV_ISTTY) 4133 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4134 if (vp->v_vflag & VV_NOSYNC) 4135 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4136 if (vp->v_vflag & VV_ETERNALDEV) 4137 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4138 if (vp->v_vflag & VV_CACHEDLABEL) 4139 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4140 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4141 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4142 if (vp->v_vflag & VV_COPYONWRITE) 4143 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4144 if (vp->v_vflag & VV_SYSTEM) 4145 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4146 if (vp->v_vflag & VV_PROCDEP) 4147 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4148 if (vp->v_vflag & VV_NOKNOTE) 4149 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4150 if (vp->v_vflag & VV_DELETED) 4151 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4152 if (vp->v_vflag & VV_MD) 4153 strlcat(buf, "|VV_MD", sizeof(buf)); 4154 if (vp->v_vflag & VV_FORCEINSMQ) 4155 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4156 if (vp->v_vflag & VV_READLINK) 4157 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4158 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4159 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4160 VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | 4161 VV_READLINK); 4162 if (flags != 0) { 4163 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4164 strlcat(buf, buf2, sizeof(buf)); 4165 } 4166 if (vp->v_iflag & VI_TEXT_REF) 4167 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4168 if (vp->v_iflag & VI_MOUNT) 4169 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4170 if (vp->v_iflag & VI_DOINGINACT) 4171 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4172 if (vp->v_iflag & VI_OWEINACT) 4173 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4174 if (vp->v_iflag & VI_DEFINACT) 4175 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4176 if (vp->v_iflag & VI_FOPENING) 4177 strlcat(buf, "|VI_FOPENING", 
sizeof(buf)); 4178 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4179 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4180 if (flags != 0) { 4181 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4182 strlcat(buf, buf2, sizeof(buf)); 4183 } 4184 if (vp->v_mflag & VMP_LAZYLIST) 4185 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4186 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4187 if (flags != 0) { 4188 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4189 strlcat(buf, buf2, sizeof(buf)); 4190 } 4191 printf(" flags (%s)", buf + 1); 4192 if (mtx_owned(VI_MTX(vp))) 4193 printf(" VI_LOCKed"); 4194 printf("\n"); 4195 if (vp->v_object != NULL) 4196 printf(" v_object %p ref %d pages %d " 4197 "cleanbuf %d dirtybuf %d\n", 4198 vp->v_object, vp->v_object->ref_count, 4199 vp->v_object->resident_page_count, 4200 vp->v_bufobj.bo_clean.bv_cnt, 4201 vp->v_bufobj.bo_dirty.bv_cnt); 4202 printf(" "); 4203 lockmgr_printinfo(vp->v_vnlock); 4204 if (vp->v_data != NULL) 4205 VOP_PRINT(vp); 4206 } 4207 4208 #ifdef DDB 4209 /* 4210 * List all of the locked vnodes in the system. 4211 * Called when debugging the kernel. 4212 */ 4213 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4214 { 4215 struct mount *mp; 4216 struct vnode *vp; 4217 4218 /* 4219 * Note: because this is DDB, we can't obey the locking semantics 4220 * for these structures, which means we could catch an inconsistent 4221 * state and dereference a nasty pointer. Not much to be done 4222 * about that. 4223 */ 4224 db_printf("Locked vnodes\n"); 4225 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4226 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4227 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4228 vn_printf(vp, "vnode "); 4229 } 4230 } 4231 } 4232 4233 /* 4234 * Show details about the given vnode. 4235 */ 4236 DB_SHOW_COMMAND(vnode, db_show_vnode) 4237 { 4238 struct vnode *vp; 4239 4240 if (!have_addr) 4241 return; 4242 vp = (struct vnode *)addr; 4243 vn_printf(vp, "vnode "); 4244 } 4245 4246 /* 4247 * Show details about the given mount point. 4248 */ 4249 DB_SHOW_COMMAND(mount, db_show_mount) 4250 { 4251 struct mount *mp; 4252 struct vfsopt *opt; 4253 struct statfs *sp; 4254 struct vnode *vp; 4255 char buf[512]; 4256 uint64_t mflags; 4257 u_int flags; 4258 4259 if (!have_addr) { 4260 /* No address given, print short info about all mount points. 
*/ 4261 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4262 db_printf("%p %s on %s (%s)\n", mp, 4263 mp->mnt_stat.f_mntfromname, 4264 mp->mnt_stat.f_mntonname, 4265 mp->mnt_stat.f_fstypename); 4266 if (db_pager_quit) 4267 break; 4268 } 4269 db_printf("\nMore info: show mount <addr>\n"); 4270 return; 4271 } 4272 4273 mp = (struct mount *)addr; 4274 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4275 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4276 4277 buf[0] = '\0'; 4278 mflags = mp->mnt_flag; 4279 #define MNT_FLAG(flag) do { \ 4280 if (mflags & (flag)) { \ 4281 if (buf[0] != '\0') \ 4282 strlcat(buf, ", ", sizeof(buf)); \ 4283 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4284 mflags &= ~(flag); \ 4285 } \ 4286 } while (0) 4287 MNT_FLAG(MNT_RDONLY); 4288 MNT_FLAG(MNT_SYNCHRONOUS); 4289 MNT_FLAG(MNT_NOEXEC); 4290 MNT_FLAG(MNT_NOSUID); 4291 MNT_FLAG(MNT_NFS4ACLS); 4292 MNT_FLAG(MNT_UNION); 4293 MNT_FLAG(MNT_ASYNC); 4294 MNT_FLAG(MNT_SUIDDIR); 4295 MNT_FLAG(MNT_SOFTDEP); 4296 MNT_FLAG(MNT_NOSYMFOLLOW); 4297 MNT_FLAG(MNT_GJOURNAL); 4298 MNT_FLAG(MNT_MULTILABEL); 4299 MNT_FLAG(MNT_ACLS); 4300 MNT_FLAG(MNT_NOATIME); 4301 MNT_FLAG(MNT_NOCLUSTERR); 4302 MNT_FLAG(MNT_NOCLUSTERW); 4303 MNT_FLAG(MNT_SUJ); 4304 MNT_FLAG(MNT_EXRDONLY); 4305 MNT_FLAG(MNT_EXPORTED); 4306 MNT_FLAG(MNT_DEFEXPORTED); 4307 MNT_FLAG(MNT_EXPORTANON); 4308 MNT_FLAG(MNT_EXKERB); 4309 MNT_FLAG(MNT_EXPUBLIC); 4310 MNT_FLAG(MNT_LOCAL); 4311 MNT_FLAG(MNT_QUOTA); 4312 MNT_FLAG(MNT_ROOTFS); 4313 MNT_FLAG(MNT_USER); 4314 MNT_FLAG(MNT_IGNORE); 4315 MNT_FLAG(MNT_UPDATE); 4316 MNT_FLAG(MNT_DELEXPORT); 4317 MNT_FLAG(MNT_RELOAD); 4318 MNT_FLAG(MNT_FORCE); 4319 MNT_FLAG(MNT_SNAPSHOT); 4320 MNT_FLAG(MNT_BYFSID); 4321 #undef MNT_FLAG 4322 if (mflags != 0) { 4323 if (buf[0] != '\0') 4324 strlcat(buf, ", ", sizeof(buf)); 4325 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4326 "0x%016jx", mflags); 4327 } 4328 db_printf(" mnt_flag = %s\n", buf); 4329 4330 buf[0] = '\0'; 4331 flags = mp->mnt_kern_flag; 4332 #define MNT_KERN_FLAG(flag) do { \ 4333 if (flags & (flag)) { \ 4334 if (buf[0] != '\0') \ 4335 strlcat(buf, ", ", sizeof(buf)); \ 4336 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4337 flags &= ~(flag); \ 4338 } \ 4339 } while (0) 4340 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4341 MNT_KERN_FLAG(MNTK_ASYNC); 4342 MNT_KERN_FLAG(MNTK_SOFTDEP); 4343 MNT_KERN_FLAG(MNTK_DRAINING); 4344 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4345 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4346 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4347 MNT_KERN_FLAG(MNTK_NO_IOPF); 4348 MNT_KERN_FLAG(MNTK_RECURSE); 4349 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4350 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4351 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4352 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4353 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4354 MNT_KERN_FLAG(MNTK_NOASYNC); 4355 MNT_KERN_FLAG(MNTK_UNMOUNT); 4356 MNT_KERN_FLAG(MNTK_MWAIT); 4357 MNT_KERN_FLAG(MNTK_SUSPEND); 4358 MNT_KERN_FLAG(MNTK_SUSPEND2); 4359 MNT_KERN_FLAG(MNTK_SUSPENDED); 4360 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4361 MNT_KERN_FLAG(MNTK_NOKNOTE); 4362 #undef MNT_KERN_FLAG 4363 if (flags != 0) { 4364 if (buf[0] != '\0') 4365 strlcat(buf, ", ", sizeof(buf)); 4366 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4367 "0x%08x", flags); 4368 } 4369 db_printf(" mnt_kern_flag = %s\n", buf); 4370 4371 db_printf(" mnt_opt = "); 4372 opt = TAILQ_FIRST(mp->mnt_opt); 4373 if (opt != NULL) { 4374 db_printf("%s", opt->name); 4375 opt = TAILQ_NEXT(opt, link); 4376 while (opt != NULL) { 4377 db_printf(", %s", opt->name); 4378 opt = TAILQ_NEXT(opt, link); 4379 
} 4380 } 4381 db_printf("\n"); 4382 4383 sp = &mp->mnt_stat; 4384 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4385 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4386 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4387 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4388 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4389 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4390 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4391 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4392 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4393 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4394 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4395 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4396 4397 db_printf(" mnt_cred = { uid=%u ruid=%u", 4398 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4399 if (jailed(mp->mnt_cred)) 4400 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4401 db_printf(" }\n"); 4402 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4403 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4404 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4405 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4406 db_printf(" mnt_lazyvnodelistsize = %d\n", 4407 mp->mnt_lazyvnodelistsize); 4408 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4409 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4410 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4411 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4412 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4413 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4414 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4415 db_printf(" mnt_secondary_accwrites = %d\n", 4416 mp->mnt_secondary_accwrites); 4417 db_printf(" mnt_gjprovider = %s\n", 4418 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4419 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4420 4421 db_printf("\n\nList of active vnodes\n"); 4422 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4423 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4424 vn_printf(vp, "vnode "); 4425 if (db_pager_quit) 4426 break; 4427 } 4428 } 4429 db_printf("\n\nList of inactive vnodes\n"); 4430 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4431 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4432 vn_printf(vp, "vnode "); 4433 if (db_pager_quit) 4434 break; 4435 } 4436 } 4437 } 4438 #endif /* DDB */ 4439 4440 /* 4441 * Fill in a struct xvfsconf based on a struct vfsconf. 4442 */ 4443 static int 4444 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4445 { 4446 struct xvfsconf xvfsp; 4447 4448 bzero(&xvfsp, sizeof(xvfsp)); 4449 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4450 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4451 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4452 xvfsp.vfc_flags = vfsp->vfc_flags; 4453 /* 4454 * These are unused in userland, we keep them 4455 * to not break binary compatibility. 
4456 */ 4457 xvfsp.vfc_vfsops = NULL; 4458 xvfsp.vfc_next = NULL; 4459 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4460 } 4461 4462 #ifdef COMPAT_FREEBSD32 4463 struct xvfsconf32 { 4464 uint32_t vfc_vfsops; 4465 char vfc_name[MFSNAMELEN]; 4466 int32_t vfc_typenum; 4467 int32_t vfc_refcount; 4468 int32_t vfc_flags; 4469 uint32_t vfc_next; 4470 }; 4471 4472 static int 4473 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4474 { 4475 struct xvfsconf32 xvfsp; 4476 4477 bzero(&xvfsp, sizeof(xvfsp)); 4478 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4479 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4480 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4481 xvfsp.vfc_flags = vfsp->vfc_flags; 4482 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4483 } 4484 #endif 4485 4486 /* 4487 * Top level filesystem related information gathering. 4488 */ 4489 static int 4490 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4491 { 4492 struct vfsconf *vfsp; 4493 int error; 4494 4495 error = 0; 4496 vfsconf_slock(); 4497 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4498 #ifdef COMPAT_FREEBSD32 4499 if (req->flags & SCTL_MASK32) 4500 error = vfsconf2x32(req, vfsp); 4501 else 4502 #endif 4503 error = vfsconf2x(req, vfsp); 4504 if (error) 4505 break; 4506 } 4507 vfsconf_sunlock(); 4508 return (error); 4509 } 4510 4511 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4512 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4513 "S,xvfsconf", "List of all configured filesystems"); 4514 4515 #ifndef BURN_BRIDGES 4516 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4517 4518 static int 4519 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4520 { 4521 int *name = (int *)arg1 - 1; /* XXX */ 4522 u_int namelen = arg2 + 1; /* XXX */ 4523 struct vfsconf *vfsp; 4524 4525 log(LOG_WARNING, "userland calling deprecated sysctl, " 4526 "please rebuild world\n"); 4527 4528 #if 1 || defined(COMPAT_PRELITE2) 4529 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4530 if (namelen == 1) 4531 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4532 #endif 4533 4534 switch (name[1]) { 4535 case VFS_MAXTYPENUM: 4536 if (namelen != 2) 4537 return (ENOTDIR); 4538 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4539 case VFS_CONF: 4540 if (namelen != 3) 4541 return (ENOTDIR); /* overloaded */ 4542 vfsconf_slock(); 4543 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4544 if (vfsp->vfc_typenum == name[2]) 4545 break; 4546 } 4547 vfsconf_sunlock(); 4548 if (vfsp == NULL) 4549 return (EOPNOTSUPP); 4550 #ifdef COMPAT_FREEBSD32 4551 if (req->flags & SCTL_MASK32) 4552 return (vfsconf2x32(req, vfsp)); 4553 else 4554 #endif 4555 return (vfsconf2x(req, vfsp)); 4556 } 4557 return (EOPNOTSUPP); 4558 } 4559 4560 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4561 CTLFLAG_MPSAFE, vfs_sysctl, 4562 "Generic filesystem"); 4563 4564 #if 1 || defined(COMPAT_PRELITE2) 4565 4566 static int 4567 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4568 { 4569 int error; 4570 struct vfsconf *vfsp; 4571 struct ovfsconf ovfs; 4572 4573 vfsconf_slock(); 4574 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4575 bzero(&ovfs, sizeof(ovfs)); 4576 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4577 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4578 ovfs.vfc_index = vfsp->vfc_typenum; 4579 ovfs.vfc_refcount = vfsp->vfc_refcount; 4580 ovfs.vfc_flags = vfsp->vfc_flags; 4581 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4582 if (error != 0) { 4583 vfsconf_sunlock(); 4584 return (error); 4585 } 4586 } 4587 vfsconf_sunlock(); 4588 return (0); 4589 } 4590 4591 #endif /* 1 || COMPAT_PRELITE2 */ 4592 #endif /* !BURN_BRIDGES */ 4593 4594 #define KINFO_VNODESLOP 10 4595 #ifdef notyet 4596 /* 4597 * Dump vnode list (via sysctl). 4598 */ 4599 /* ARGSUSED */ 4600 static int 4601 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4602 { 4603 struct xvnode *xvn; 4604 struct mount *mp; 4605 struct vnode *vp; 4606 int error, len, n; 4607 4608 /* 4609 * Stale numvnodes access is not fatal here. 4610 */ 4611 req->lock = 0; 4612 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4613 if (!req->oldptr) 4614 /* Make an estimate */ 4615 return (SYSCTL_OUT(req, 0, len)); 4616 4617 error = sysctl_wire_old_buffer(req, 0); 4618 if (error != 0) 4619 return (error); 4620 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4621 n = 0; 4622 mtx_lock(&mountlist_mtx); 4623 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4624 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4625 continue; 4626 MNT_ILOCK(mp); 4627 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4628 if (n == len) 4629 break; 4630 vref(vp); 4631 xvn[n].xv_size = sizeof *xvn; 4632 xvn[n].xv_vnode = vp; 4633 xvn[n].xv_id = 0; /* XXX compat */ 4634 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4635 XV_COPY(usecount); 4636 XV_COPY(writecount); 4637 XV_COPY(holdcnt); 4638 XV_COPY(mount); 4639 XV_COPY(numoutput); 4640 XV_COPY(type); 4641 #undef XV_COPY 4642 xvn[n].xv_flag = vp->v_vflag; 4643 4644 switch (vp->v_type) { 4645 case VREG: 4646 case VDIR: 4647 case VLNK: 4648 break; 4649 case VBLK: 4650 case VCHR: 4651 if (vp->v_rdev == NULL) { 4652 vrele(vp); 4653 continue; 4654 } 4655 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4656 break; 4657 case VSOCK: 4658 xvn[n].xv_socket = vp->v_socket; 4659 break; 4660 case VFIFO: 4661 xvn[n].xv_fifo = vp->v_fifoinfo; 4662 break; 4663 case VNON: 4664 case VBAD: 4665 default: 4666 /* shouldn't happen? 
*/ 4667 vrele(vp); 4668 continue; 4669 } 4670 vrele(vp); 4671 ++n; 4672 } 4673 MNT_IUNLOCK(mp); 4674 mtx_lock(&mountlist_mtx); 4675 vfs_unbusy(mp); 4676 if (n == len) 4677 break; 4678 } 4679 mtx_unlock(&mountlist_mtx); 4680 4681 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4682 free(xvn, M_TEMP); 4683 return (error); 4684 } 4685 4686 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4687 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4688 ""); 4689 #endif 4690 4691 static void 4692 unmount_or_warn(struct mount *mp) 4693 { 4694 int error; 4695 4696 error = dounmount(mp, MNT_FORCE, curthread); 4697 if (error != 0) { 4698 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4699 if (error == EBUSY) 4700 printf("BUSY)\n"); 4701 else 4702 printf("%d)\n", error); 4703 } 4704 } 4705 4706 /* 4707 * Unmount all filesystems. The list is traversed in reverse order 4708 * of mounting to avoid dependencies. 4709 */ 4710 void 4711 vfs_unmountall(void) 4712 { 4713 struct mount *mp, *tmp; 4714 4715 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4716 4717 /* 4718 * Since this only runs when rebooting, it is not interlocked. 4719 */ 4720 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4721 vfs_ref(mp); 4722 4723 /* 4724 * Forcibly unmounting "/dev" before "/" would prevent clean 4725 * unmount of the latter. 4726 */ 4727 if (mp == rootdevmp) 4728 continue; 4729 4730 unmount_or_warn(mp); 4731 } 4732 4733 if (rootdevmp != NULL) 4734 unmount_or_warn(rootdevmp); 4735 } 4736 4737 static void 4738 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4739 { 4740 4741 ASSERT_VI_LOCKED(vp, __func__); 4742 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4743 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4744 vdropl(vp); 4745 return; 4746 } 4747 if (vn_lock(vp, lkflags) == 0) { 4748 VI_LOCK(vp); 4749 vinactive(vp); 4750 VOP_UNLOCK(vp); 4751 vdropl(vp); 4752 return; 4753 } 4754 vdefer_inactive_unlocked(vp); 4755 } 4756 4757 static int 4758 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4759 { 4760 4761 return (vp->v_iflag & VI_DEFINACT); 4762 } 4763 4764 static void __noinline 4765 vfs_periodic_inactive(struct mount *mp, int flags) 4766 { 4767 struct vnode *vp, *mvp; 4768 int lkflags; 4769 4770 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4771 if (flags != MNT_WAIT) 4772 lkflags |= LK_NOWAIT; 4773 4774 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4775 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4776 VI_UNLOCK(vp); 4777 continue; 4778 } 4779 vp->v_iflag &= ~VI_DEFINACT; 4780 vfs_deferred_inactive(vp, lkflags); 4781 } 4782 } 4783 4784 static inline bool 4785 vfs_want_msync(struct vnode *vp) 4786 { 4787 struct vm_object *obj; 4788 4789 /* 4790 * This test may be performed without any locks held. 4791 * We rely on vm_object's type stability. 
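 * A vnode wants an msync when it is not marked VV_NOSYNC and has a
 * VM object that may contain dirty pages.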
4792 */ 4793 if (vp->v_vflag & VV_NOSYNC) 4794 return (false); 4795 obj = vp->v_object; 4796 return (obj != NULL && vm_object_mightbedirty(obj)); 4797 } 4798 4799 static int 4800 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4801 { 4802 4803 if (vp->v_vflag & VV_NOSYNC) 4804 return (false); 4805 if (vp->v_iflag & VI_DEFINACT) 4806 return (true); 4807 return (vfs_want_msync(vp)); 4808 } 4809 4810 static void __noinline 4811 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4812 { 4813 struct vnode *vp, *mvp; 4814 struct vm_object *obj; 4815 int lkflags, objflags; 4816 bool seen_defer; 4817 4818 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4819 if (flags != MNT_WAIT) { 4820 lkflags |= LK_NOWAIT; 4821 objflags = OBJPC_NOSYNC; 4822 } else { 4823 objflags = OBJPC_SYNC; 4824 } 4825 4826 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4827 seen_defer = false; 4828 if (vp->v_iflag & VI_DEFINACT) { 4829 vp->v_iflag &= ~VI_DEFINACT; 4830 seen_defer = true; 4831 } 4832 if (!vfs_want_msync(vp)) { 4833 if (seen_defer) 4834 vfs_deferred_inactive(vp, lkflags); 4835 else 4836 VI_UNLOCK(vp); 4837 continue; 4838 } 4839 if (vget(vp, lkflags) == 0) { 4840 obj = vp->v_object; 4841 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4842 VM_OBJECT_WLOCK(obj); 4843 vm_object_page_clean(obj, 0, 0, objflags); 4844 VM_OBJECT_WUNLOCK(obj); 4845 } 4846 vput(vp); 4847 if (seen_defer) 4848 vdrop(vp); 4849 } else { 4850 if (seen_defer) 4851 vdefer_inactive_unlocked(vp); 4852 } 4853 } 4854 } 4855 4856 void 4857 vfs_periodic(struct mount *mp, int flags) 4858 { 4859 4860 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4861 4862 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4863 vfs_periodic_inactive(mp, flags); 4864 else 4865 vfs_periodic_msync_inactive(mp, flags); 4866 } 4867 4868 static void 4869 destroy_vpollinfo_free(struct vpollinfo *vi) 4870 { 4871 4872 knlist_destroy(&vi->vpi_selinfo.si_note); 4873 mtx_destroy(&vi->vpi_lock); 4874 free(vi, M_VNODEPOLL); 4875 } 4876 4877 static void 4878 destroy_vpollinfo(struct vpollinfo *vi) 4879 { 4880 4881 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4882 seldrain(&vi->vpi_selinfo); 4883 destroy_vpollinfo_free(vi); 4884 } 4885 4886 /* 4887 * Initialize per-vnode helper structure to hold poll-related state. 4888 */ 4889 void 4890 v_addpollinfo(struct vnode *vp) 4891 { 4892 struct vpollinfo *vi; 4893 4894 if (vp->v_pollinfo != NULL) 4895 return; 4896 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4897 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4898 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4899 vfs_knlunlock, vfs_knl_assert_lock); 4900 VI_LOCK(vp); 4901 if (vp->v_pollinfo != NULL) { 4902 VI_UNLOCK(vp); 4903 destroy_vpollinfo_free(vi); 4904 return; 4905 } 4906 vp->v_pollinfo = vi; 4907 VI_UNLOCK(vp); 4908 } 4909 4910 /* 4911 * Record a process's interest in events which might happen to 4912 * a vnode. Because poll uses the historic select-style interface 4913 * internally, this routine serves as both the ``check for any 4914 * pending events'' and the ``record my interest in future events'' 4915 * functions. (These are done together, while the lock is held, 4916 * to avoid race conditions.) 
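 *
 * If any of the requested events are already pending they are
 * returned (and cleared) right away; otherwise the interest is
 * recorded with selrecord() and 0 is returned.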
4917 */ 4918 int 4919 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4920 { 4921 4922 v_addpollinfo(vp); 4923 mtx_lock(&vp->v_pollinfo->vpi_lock); 4924 if (vp->v_pollinfo->vpi_revents & events) { 4925 /* 4926 * This leaves events we are not interested 4927 * in available for the other process which 4928 * presumably had requested them 4929 * (otherwise they would never have been 4930 * recorded). 4931 */ 4932 events &= vp->v_pollinfo->vpi_revents; 4933 vp->v_pollinfo->vpi_revents &= ~events; 4934 4935 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4936 return (events); 4937 } 4938 vp->v_pollinfo->vpi_events |= events; 4939 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4940 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4941 return (0); 4942 } 4943 4944 /* 4945 * Routine to create and manage a filesystem syncer vnode. 4946 */ 4947 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4948 static int sync_fsync(struct vop_fsync_args *); 4949 static int sync_inactive(struct vop_inactive_args *); 4950 static int sync_reclaim(struct vop_reclaim_args *); 4951 4952 static struct vop_vector sync_vnodeops = { 4953 .vop_bypass = VOP_EOPNOTSUPP, 4954 .vop_close = sync_close, /* close */ 4955 .vop_fsync = sync_fsync, /* fsync */ 4956 .vop_inactive = sync_inactive, /* inactive */ 4957 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4958 .vop_reclaim = sync_reclaim, /* reclaim */ 4959 .vop_lock1 = vop_stdlock, /* lock */ 4960 .vop_unlock = vop_stdunlock, /* unlock */ 4961 .vop_islocked = vop_stdislocked, /* islocked */ 4962 }; 4963 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4964 4965 /* 4966 * Create a new filesystem syncer vnode for the specified mount point. 4967 */ 4968 void 4969 vfs_allocate_syncvnode(struct mount *mp) 4970 { 4971 struct vnode *vp; 4972 struct bufobj *bo; 4973 static long start, incr, next; 4974 int error; 4975 4976 /* Allocate a new vnode */ 4977 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4978 if (error != 0) 4979 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4980 vp->v_type = VNON; 4981 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4982 vp->v_vflag |= VV_FORCEINSMQ; 4983 error = insmntque(vp, mp); 4984 if (error != 0) 4985 panic("vfs_allocate_syncvnode: insmntque() failed"); 4986 vp->v_vflag &= ~VV_FORCEINSMQ; 4987 VOP_UNLOCK(vp); 4988 /* 4989 * Place the vnode onto the syncer worklist. We attempt to 4990 * scatter them about on the list so that they will go off 4991 * at evenly distributed times even if all the filesystems 4992 * are mounted at once. 4993 */ 4994 next += incr; 4995 if (next == 0 || next > syncer_maxdelay) { 4996 start /= 2; 4997 incr /= 2; 4998 if (start == 0) { 4999 start = syncer_maxdelay / 2; 5000 incr = syncer_maxdelay; 5001 } 5002 next = start; 5003 } 5004 bo = &vp->v_bufobj; 5005 BO_LOCK(bo); 5006 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5007 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
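 * It is re-acquired here to bump sync_vnode_count and to install this
 * vnode as mp->mnt_syncer if the mount does not have a syncer yet.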
*/ 5008 mtx_lock(&sync_mtx); 5009 sync_vnode_count++; 5010 if (mp->mnt_syncer == NULL) { 5011 mp->mnt_syncer = vp; 5012 vp = NULL; 5013 } 5014 mtx_unlock(&sync_mtx); 5015 BO_UNLOCK(bo); 5016 if (vp != NULL) { 5017 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5018 vgone(vp); 5019 vput(vp); 5020 } 5021 } 5022 5023 void 5024 vfs_deallocate_syncvnode(struct mount *mp) 5025 { 5026 struct vnode *vp; 5027 5028 mtx_lock(&sync_mtx); 5029 vp = mp->mnt_syncer; 5030 if (vp != NULL) 5031 mp->mnt_syncer = NULL; 5032 mtx_unlock(&sync_mtx); 5033 if (vp != NULL) 5034 vrele(vp); 5035 } 5036 5037 /* 5038 * Do a lazy sync of the filesystem. 5039 */ 5040 static int 5041 sync_fsync(struct vop_fsync_args *ap) 5042 { 5043 struct vnode *syncvp = ap->a_vp; 5044 struct mount *mp = syncvp->v_mount; 5045 int error, save; 5046 struct bufobj *bo; 5047 5048 /* 5049 * We only need to do something if this is a lazy evaluation. 5050 */ 5051 if (ap->a_waitfor != MNT_LAZY) 5052 return (0); 5053 5054 /* 5055 * Move ourselves to the back of the sync list. 5056 */ 5057 bo = &syncvp->v_bufobj; 5058 BO_LOCK(bo); 5059 vn_syncer_add_to_worklist(bo, syncdelay); 5060 BO_UNLOCK(bo); 5061 5062 /* 5063 * Walk the list of vnodes pushing all that are dirty and 5064 * not already on the sync list. 5065 */ 5066 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5067 return (0); 5068 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 5069 vfs_unbusy(mp); 5070 return (0); 5071 } 5072 save = curthread_pflags_set(TDP_SYNCIO); 5073 /* 5074 * The filesystem at hand may be idle with free vnodes stored in the 5075 * batch. Return them instead of letting them stay there indefinitely. 5076 */ 5077 vfs_periodic(mp, MNT_NOWAIT); 5078 error = VFS_SYNC(mp, MNT_LAZY); 5079 curthread_pflags_restore(save); 5080 vn_finished_write(mp); 5081 vfs_unbusy(mp); 5082 return (error); 5083 } 5084 5085 /* 5086 * The syncer vnode is no longer referenced. 5087 */ 5088 static int 5089 sync_inactive(struct vop_inactive_args *ap) 5090 { 5091 5092 vgone(ap->a_vp); 5093 return (0); 5094 } 5095 5096 /* 5097 * The syncer vnode is no longer needed and is being decommissioned. 5098 * 5099 * Modifications to the worklist must be protected by sync_mtx.
5100 */ 5101 static int 5102 sync_reclaim(struct vop_reclaim_args *ap) 5103 { 5104 struct vnode *vp = ap->a_vp; 5105 struct bufobj *bo; 5106 5107 bo = &vp->v_bufobj; 5108 BO_LOCK(bo); 5109 mtx_lock(&sync_mtx); 5110 if (vp->v_mount->mnt_syncer == vp) 5111 vp->v_mount->mnt_syncer = NULL; 5112 if (bo->bo_flag & BO_ONWORKLST) { 5113 LIST_REMOVE(bo, bo_synclist); 5114 syncer_worklist_len--; 5115 sync_vnode_count--; 5116 bo->bo_flag &= ~BO_ONWORKLST; 5117 } 5118 mtx_unlock(&sync_mtx); 5119 BO_UNLOCK(bo); 5120 5121 return (0); 5122 } 5123 5124 int 5125 vn_need_pageq_flush(struct vnode *vp) 5126 { 5127 struct vm_object *obj; 5128 5129 obj = vp->v_object; 5130 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5131 vm_object_mightbedirty(obj)); 5132 } 5133 5134 /* 5135 * Check if vnode represents a disk device 5136 */ 5137 bool 5138 vn_isdisk_error(struct vnode *vp, int *errp) 5139 { 5140 int error; 5141 5142 if (vp->v_type != VCHR) { 5143 error = ENOTBLK; 5144 goto out; 5145 } 5146 error = 0; 5147 dev_lock(); 5148 if (vp->v_rdev == NULL) 5149 error = ENXIO; 5150 else if (vp->v_rdev->si_devsw == NULL) 5151 error = ENXIO; 5152 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5153 error = ENOTBLK; 5154 dev_unlock(); 5155 out: 5156 *errp = error; 5157 return (error == 0); 5158 } 5159 5160 bool 5161 vn_isdisk(struct vnode *vp) 5162 { 5163 int error; 5164 5165 return (vn_isdisk_error(vp, &error)); 5166 } 5167 5168 /* 5169 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5170 * the comment above cache_fplookup for details. 5171 */ 5172 int 5173 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5174 { 5175 int error; 5176 5177 VFS_SMR_ASSERT_ENTERED(); 5178 5179 /* Check the owner. */ 5180 if (cred->cr_uid == file_uid) { 5181 if (file_mode & S_IXUSR) 5182 return (0); 5183 goto out_error; 5184 } 5185 5186 /* Otherwise, check the groups (first match) */ 5187 if (groupmember(file_gid, cred)) { 5188 if (file_mode & S_IXGRP) 5189 return (0); 5190 goto out_error; 5191 } 5192 5193 /* Otherwise, check everyone else. */ 5194 if (file_mode & S_IXOTH) 5195 return (0); 5196 out_error: 5197 /* 5198 * Permission check failed, but it is possible the denial will get overridden 5199 * (e.g., when root is traversing through a 700 directory owned by someone 5200 * else). 5201 * 5202 * vaccess() calls priv_check_cred which in turn can descend into MAC 5203 * modules overriding this result. It is quite unclear what semantics 5204 * they are allowed to operate with, thus for safety we don't call them 5205 * from within the SMR section. This also means if any such modules 5206 * are present, we have to let the regular lookup decide. 5207 */ 5208 error = priv_check_cred_vfs_lookup_nomac(cred); 5209 switch (error) { 5210 case 0: 5211 return (0); 5212 case EAGAIN: 5213 /* 5214 * MAC modules present. 5215 */ 5216 return (EAGAIN); 5217 case EPERM: 5218 return (EACCES); 5219 default: 5220 return (error); 5221 } 5222 } 5223 5224 /* 5225 * Common filesystem object access control check routine. Accepts a 5226 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5227 * Returns 0 on success, or an errno on failure.
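 *
 * The check is made in two passes: first the discretionary
 * owner/group/other mode bits, then, if those do not suffice, the
 * relevant PRIV_VFS_* privileges.  A denied request that includes
 * VADMIN returns EPERM, anything else EACCES.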
5228 */ 5229 int 5230 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5231 accmode_t accmode, struct ucred *cred) 5232 { 5233 accmode_t dac_granted; 5234 accmode_t priv_granted; 5235 5236 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5237 ("invalid bit in accmode")); 5238 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5239 ("VAPPEND without VWRITE")); 5240 5241 /* 5242 * Look for a normal, non-privileged way to access the file/directory 5243 * as requested. If it exists, go with that. 5244 */ 5245 5246 dac_granted = 0; 5247 5248 /* Check the owner. */ 5249 if (cred->cr_uid == file_uid) { 5250 dac_granted |= VADMIN; 5251 if (file_mode & S_IXUSR) 5252 dac_granted |= VEXEC; 5253 if (file_mode & S_IRUSR) 5254 dac_granted |= VREAD; 5255 if (file_mode & S_IWUSR) 5256 dac_granted |= (VWRITE | VAPPEND); 5257 5258 if ((accmode & dac_granted) == accmode) 5259 return (0); 5260 5261 goto privcheck; 5262 } 5263 5264 /* Otherwise, check the groups (first match) */ 5265 if (groupmember(file_gid, cred)) { 5266 if (file_mode & S_IXGRP) 5267 dac_granted |= VEXEC; 5268 if (file_mode & S_IRGRP) 5269 dac_granted |= VREAD; 5270 if (file_mode & S_IWGRP) 5271 dac_granted |= (VWRITE | VAPPEND); 5272 5273 if ((accmode & dac_granted) == accmode) 5274 return (0); 5275 5276 goto privcheck; 5277 } 5278 5279 /* Otherwise, check everyone else. */ 5280 if (file_mode & S_IXOTH) 5281 dac_granted |= VEXEC; 5282 if (file_mode & S_IROTH) 5283 dac_granted |= VREAD; 5284 if (file_mode & S_IWOTH) 5285 dac_granted |= (VWRITE | VAPPEND); 5286 if ((accmode & dac_granted) == accmode) 5287 return (0); 5288 5289 privcheck: 5290 /* 5291 * Build a privilege mask to determine if the set of privileges 5292 * satisfies the requirements when combined with the granted mask 5293 * from above. For each privilege, if the privilege is required, 5294 * bitwise or the request type onto the priv_granted mask. 5295 */ 5296 priv_granted = 0; 5297 5298 if (type == VDIR) { 5299 /* 5300 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5301 * requests, instead of PRIV_VFS_EXEC. 5302 */ 5303 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5304 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5305 priv_granted |= VEXEC; 5306 } else { 5307 /* 5308 * Ensure that at least one execute bit is on. Otherwise, 5309 * a privileged user will always succeed, and we don't want 5310 * this to happen unless the file really is executable. 5311 */ 5312 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5313 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5314 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5315 priv_granted |= VEXEC; 5316 } 5317 5318 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5319 !priv_check_cred(cred, PRIV_VFS_READ)) 5320 priv_granted |= VREAD; 5321 5322 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5323 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5324 priv_granted |= (VWRITE | VAPPEND); 5325 5326 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5327 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5328 priv_granted |= VADMIN; 5329 5330 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5331 return (0); 5332 } 5333 5334 return ((accmode & VADMIN) ? EPERM : EACCES); 5335 } 5336 5337 /* 5338 * Credential check based on process requesting service, and per-attribute 5339 * permissions. 
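 *
 * Kernel-internal requests (NOCRED) always succeed, system-namespace
 * attributes require PRIV_VFS_EXTATTR_SYSTEM, and user-namespace
 * attributes fall back to a regular VOP_ACCESS() check.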
5340 */ 5341 int 5342 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5343 struct thread *td, accmode_t accmode) 5344 { 5345 5346 /* 5347 * Kernel-invoked always succeeds. 5348 */ 5349 if (cred == NOCRED) 5350 return (0); 5351 5352 /* 5353 * Do not allow privileged processes in jail to directly manipulate 5354 * system attributes. 5355 */ 5356 switch (attrnamespace) { 5357 case EXTATTR_NAMESPACE_SYSTEM: 5358 /* Potentially should be: return (EPERM); */ 5359 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5360 case EXTATTR_NAMESPACE_USER: 5361 return (VOP_ACCESS(vp, accmode, cred, td)); 5362 default: 5363 return (EPERM); 5364 } 5365 } 5366 5367 #ifdef DEBUG_VFS_LOCKS 5368 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5369 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5370 "Drop into debugger on lock violation"); 5371 5372 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5373 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5374 0, "Check for interlock across VOPs"); 5375 5376 int vfs_badlock_print = 1; /* Print lock violations. */ 5377 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5378 0, "Print lock violations"); 5379 5380 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5381 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5382 0, "Print vnode details on lock violations"); 5383 5384 #ifdef KDB 5385 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5386 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5387 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5388 #endif 5389 5390 static void 5391 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5392 { 5393 5394 #ifdef KDB 5395 if (vfs_badlock_backtrace) 5396 kdb_backtrace(); 5397 #endif 5398 if (vfs_badlock_vnode) 5399 vn_printf(vp, "vnode "); 5400 if (vfs_badlock_print) 5401 printf("%s: %p %s\n", str, (void *)vp, msg); 5402 if (vfs_badlock_ddb) 5403 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5404 } 5405 5406 void 5407 assert_vi_locked(struct vnode *vp, const char *str) 5408 { 5409 5410 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5411 vfs_badlock("interlock is not locked but should be", str, vp); 5412 } 5413 5414 void 5415 assert_vi_unlocked(struct vnode *vp, const char *str) 5416 { 5417 5418 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5419 vfs_badlock("interlock is locked but should not be", str, vp); 5420 } 5421 5422 void 5423 assert_vop_locked(struct vnode *vp, const char *str) 5424 { 5425 int locked; 5426 5427 if (KERNEL_PANICKED() || vp == NULL) 5428 return; 5429 5430 locked = VOP_ISLOCKED(vp); 5431 if (locked == 0 || locked == LK_EXCLOTHER) 5432 vfs_badlock("is not locked but should be", str, vp); 5433 } 5434 5435 void 5436 assert_vop_unlocked(struct vnode *vp, const char *str) 5437 { 5438 if (KERNEL_PANICKED() || vp == NULL) 5439 return; 5440 5441 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5442 vfs_badlock("is locked but should not be", str, vp); 5443 } 5444 5445 void 5446 assert_vop_elocked(struct vnode *vp, const char *str) 5447 { 5448 if (KERNEL_PANICKED() || vp == NULL) 5449 return; 5450 5451 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5452 vfs_badlock("is not exclusive locked but should be", str, vp); 5453 } 5454 #endif /* DEBUG_VFS_LOCKS */ 5455 5456 void 5457 vop_rename_fail(struct vop_rename_args *ap) 5458 { 5459 5460 if (ap->a_tvp != 
NULL) 5461 vput(ap->a_tvp); 5462 if (ap->a_tdvp == ap->a_tvp) 5463 vrele(ap->a_tdvp); 5464 else 5465 vput(ap->a_tdvp); 5466 vrele(ap->a_fdvp); 5467 vrele(ap->a_fvp); 5468 } 5469 5470 void 5471 vop_rename_pre(void *ap) 5472 { 5473 struct vop_rename_args *a = ap; 5474 5475 #ifdef DEBUG_VFS_LOCKS 5476 if (a->a_tvp) 5477 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5478 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5479 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5480 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5481 5482 /* Check the source (from). */ 5483 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5484 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5485 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5486 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5487 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5488 5489 /* Check the target. */ 5490 if (a->a_tvp) 5491 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5492 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5493 #endif 5494 /* 5495 * It may be tempting to add vn_seqc_write_begin/end calls here and 5496 * in vop_rename_post but that's not going to work out since some 5497 * filesystems relookup vnodes mid-rename. This is probably a bug. 5498 * 5499 * For now filesystems are expected to do the relevant calls after they 5500 * decide what vnodes to operate on. 5501 */ 5502 if (a->a_tdvp != a->a_fdvp) 5503 vhold(a->a_fdvp); 5504 if (a->a_tvp != a->a_fvp) 5505 vhold(a->a_fvp); 5506 vhold(a->a_tdvp); 5507 if (a->a_tvp) 5508 vhold(a->a_tvp); 5509 } 5510 5511 #ifdef DEBUG_VFS_LOCKS 5512 void 5513 vop_fplookup_vexec_debugpre(void *ap __unused) 5514 { 5515 5516 VFS_SMR_ASSERT_ENTERED(); 5517 } 5518 5519 void 5520 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5521 { 5522 5523 VFS_SMR_ASSERT_ENTERED(); 5524 } 5525 5526 void 5527 vop_fplookup_symlink_debugpre(void *ap __unused) 5528 { 5529 5530 VFS_SMR_ASSERT_ENTERED(); 5531 } 5532 5533 void 5534 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5535 { 5536 5537 VFS_SMR_ASSERT_ENTERED(); 5538 } 5539 5540 static void 5541 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5542 { 5543 if (vp->v_type == VCHR) 5544 ; 5545 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5546 ASSERT_VOP_LOCKED(vp, name); 5547 else 5548 ASSERT_VOP_ELOCKED(vp, name); 5549 } 5550 5551 void 5552 vop_fsync_debugpre(void *a) 5553 { 5554 struct vop_fsync_args *ap; 5555 5556 ap = a; 5557 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5558 } 5559 5560 void 5561 vop_fsync_debugpost(void *a, int rc __unused) 5562 { 5563 struct vop_fsync_args *ap; 5564 5565 ap = a; 5566 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5567 } 5568 5569 void 5570 vop_fdatasync_debugpre(void *a) 5571 { 5572 struct vop_fdatasync_args *ap; 5573 5574 ap = a; 5575 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5576 } 5577 5578 void 5579 vop_fdatasync_debugpost(void *a, int rc __unused) 5580 { 5581 struct vop_fdatasync_args *ap; 5582 5583 ap = a; 5584 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5585 } 5586 5587 void 5588 vop_strategy_debugpre(void *ap) 5589 { 5590 struct vop_strategy_args *a; 5591 struct buf *bp; 5592 5593 a = ap; 5594 bp = a->a_bp; 5595 5596 /* 5597 * Cluster ops lock their component buffers but not the IO container. 
5598 */ 5599 if ((bp->b_flags & B_CLUSTER) != 0) 5600 return; 5601 5602 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5603 if (vfs_badlock_print) 5604 printf( 5605 "VOP_STRATEGY: bp is not locked but should be\n"); 5606 if (vfs_badlock_ddb) 5607 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5608 } 5609 } 5610 5611 void 5612 vop_lock_debugpre(void *ap) 5613 { 5614 struct vop_lock1_args *a = ap; 5615 5616 if ((a->a_flags & LK_INTERLOCK) == 0) 5617 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5618 else 5619 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5620 } 5621 5622 void 5623 vop_lock_debugpost(void *ap, int rc) 5624 { 5625 struct vop_lock1_args *a = ap; 5626 5627 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5628 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5629 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5630 } 5631 5632 void 5633 vop_unlock_debugpre(void *ap) 5634 { 5635 struct vop_unlock_args *a = ap; 5636 5637 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5638 } 5639 5640 void 5641 vop_need_inactive_debugpre(void *ap) 5642 { 5643 struct vop_need_inactive_args *a = ap; 5644 5645 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5646 } 5647 5648 void 5649 vop_need_inactive_debugpost(void *ap, int rc) 5650 { 5651 struct vop_need_inactive_args *a = ap; 5652 5653 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5654 } 5655 #endif 5656 5657 void 5658 vop_create_pre(void *ap) 5659 { 5660 struct vop_create_args *a; 5661 struct vnode *dvp; 5662 5663 a = ap; 5664 dvp = a->a_dvp; 5665 vn_seqc_write_begin(dvp); 5666 } 5667 5668 void 5669 vop_create_post(void *ap, int rc) 5670 { 5671 struct vop_create_args *a; 5672 struct vnode *dvp; 5673 5674 a = ap; 5675 dvp = a->a_dvp; 5676 vn_seqc_write_end(dvp); 5677 if (!rc) 5678 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5679 } 5680 5681 void 5682 vop_whiteout_pre(void *ap) 5683 { 5684 struct vop_whiteout_args *a; 5685 struct vnode *dvp; 5686 5687 a = ap; 5688 dvp = a->a_dvp; 5689 vn_seqc_write_begin(dvp); 5690 } 5691 5692 void 5693 vop_whiteout_post(void *ap, int rc) 5694 { 5695 struct vop_whiteout_args *a; 5696 struct vnode *dvp; 5697 5698 a = ap; 5699 dvp = a->a_dvp; 5700 vn_seqc_write_end(dvp); 5701 } 5702 5703 void 5704 vop_deleteextattr_pre(void *ap) 5705 { 5706 struct vop_deleteextattr_args *a; 5707 struct vnode *vp; 5708 5709 a = ap; 5710 vp = a->a_vp; 5711 vn_seqc_write_begin(vp); 5712 } 5713 5714 void 5715 vop_deleteextattr_post(void *ap, int rc) 5716 { 5717 struct vop_deleteextattr_args *a; 5718 struct vnode *vp; 5719 5720 a = ap; 5721 vp = a->a_vp; 5722 vn_seqc_write_end(vp); 5723 if (!rc) 5724 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5725 } 5726 5727 void 5728 vop_link_pre(void *ap) 5729 { 5730 struct vop_link_args *a; 5731 struct vnode *vp, *tdvp; 5732 5733 a = ap; 5734 vp = a->a_vp; 5735 tdvp = a->a_tdvp; 5736 vn_seqc_write_begin(vp); 5737 vn_seqc_write_begin(tdvp); 5738 } 5739 5740 void 5741 vop_link_post(void *ap, int rc) 5742 { 5743 struct vop_link_args *a; 5744 struct vnode *vp, *tdvp; 5745 5746 a = ap; 5747 vp = a->a_vp; 5748 tdvp = a->a_tdvp; 5749 vn_seqc_write_end(vp); 5750 vn_seqc_write_end(tdvp); 5751 if (!rc) { 5752 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5753 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5754 } 5755 } 5756 5757 void 5758 vop_mkdir_pre(void *ap) 5759 { 5760 struct vop_mkdir_args *a; 5761 struct vnode *dvp; 5762 5763 a = ap; 5764 dvp = a->a_dvp; 5765 vn_seqc_write_begin(dvp); 5766 } 5767 5768 void 5769 vop_mkdir_post(void *ap, int rc) 5770 { 5771 struct vop_mkdir_args *a; 5772 struct vnode *dvp; 5773 5774 a = ap; 5775 dvp = a->a_dvp; 5776 vn_seqc_write_end(dvp); 
5777 if (!rc) 5778 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5779 } 5780 5781 #ifdef DEBUG_VFS_LOCKS 5782 void 5783 vop_mkdir_debugpost(void *ap, int rc) 5784 { 5785 struct vop_mkdir_args *a; 5786 5787 a = ap; 5788 if (!rc) 5789 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5790 } 5791 #endif 5792 5793 void 5794 vop_mknod_pre(void *ap) 5795 { 5796 struct vop_mknod_args *a; 5797 struct vnode *dvp; 5798 5799 a = ap; 5800 dvp = a->a_dvp; 5801 vn_seqc_write_begin(dvp); 5802 } 5803 5804 void 5805 vop_mknod_post(void *ap, int rc) 5806 { 5807 struct vop_mknod_args *a; 5808 struct vnode *dvp; 5809 5810 a = ap; 5811 dvp = a->a_dvp; 5812 vn_seqc_write_end(dvp); 5813 if (!rc) 5814 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5815 } 5816 5817 void 5818 vop_reclaim_post(void *ap, int rc) 5819 { 5820 struct vop_reclaim_args *a; 5821 struct vnode *vp; 5822 5823 a = ap; 5824 vp = a->a_vp; 5825 ASSERT_VOP_IN_SEQC(vp); 5826 if (!rc) 5827 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5828 } 5829 5830 void 5831 vop_remove_pre(void *ap) 5832 { 5833 struct vop_remove_args *a; 5834 struct vnode *dvp, *vp; 5835 5836 a = ap; 5837 dvp = a->a_dvp; 5838 vp = a->a_vp; 5839 vn_seqc_write_begin(dvp); 5840 vn_seqc_write_begin(vp); 5841 } 5842 5843 void 5844 vop_remove_post(void *ap, int rc) 5845 { 5846 struct vop_remove_args *a; 5847 struct vnode *dvp, *vp; 5848 5849 a = ap; 5850 dvp = a->a_dvp; 5851 vp = a->a_vp; 5852 vn_seqc_write_end(dvp); 5853 vn_seqc_write_end(vp); 5854 if (!rc) { 5855 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5856 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5857 } 5858 } 5859 5860 void 5861 vop_rename_post(void *ap, int rc) 5862 { 5863 struct vop_rename_args *a = ap; 5864 long hint; 5865 5866 if (!rc) { 5867 hint = NOTE_WRITE; 5868 if (a->a_fdvp == a->a_tdvp) { 5869 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5870 hint |= NOTE_LINK; 5871 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5872 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5873 } else { 5874 hint |= NOTE_EXTEND; 5875 if (a->a_fvp->v_type == VDIR) 5876 hint |= NOTE_LINK; 5877 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5878 5879 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5880 a->a_tvp->v_type == VDIR) 5881 hint &= ~NOTE_LINK; 5882 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5883 } 5884 5885 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5886 if (a->a_tvp) 5887 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5888 } 5889 if (a->a_tdvp != a->a_fdvp) 5890 vdrop(a->a_fdvp); 5891 if (a->a_tvp != a->a_fvp) 5892 vdrop(a->a_fvp); 5893 vdrop(a->a_tdvp); 5894 if (a->a_tvp) 5895 vdrop(a->a_tvp); 5896 } 5897 5898 void 5899 vop_rmdir_pre(void *ap) 5900 { 5901 struct vop_rmdir_args *a; 5902 struct vnode *dvp, *vp; 5903 5904 a = ap; 5905 dvp = a->a_dvp; 5906 vp = a->a_vp; 5907 vn_seqc_write_begin(dvp); 5908 vn_seqc_write_begin(vp); 5909 } 5910 5911 void 5912 vop_rmdir_post(void *ap, int rc) 5913 { 5914 struct vop_rmdir_args *a; 5915 struct vnode *dvp, *vp; 5916 5917 a = ap; 5918 dvp = a->a_dvp; 5919 vp = a->a_vp; 5920 vn_seqc_write_end(dvp); 5921 vn_seqc_write_end(vp); 5922 if (!rc) { 5923 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5924 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5925 } 5926 } 5927 5928 void 5929 vop_setattr_pre(void *ap) 5930 { 5931 struct vop_setattr_args *a; 5932 struct vnode *vp; 5933 5934 a = ap; 5935 vp = a->a_vp; 5936 vn_seqc_write_begin(vp); 5937 } 5938 5939 void 5940 vop_setattr_post(void *ap, int rc) 5941 { 5942 struct vop_setattr_args *a; 5943 struct vnode *vp; 5944 5945 a = ap; 5946 vp = a->a_vp; 5947 vn_seqc_write_end(vp); 5948 if (!rc) 5949 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5950 } 
5951 5952 void 5953 vop_setacl_pre(void *ap) 5954 { 5955 struct vop_setacl_args *a; 5956 struct vnode *vp; 5957 5958 a = ap; 5959 vp = a->a_vp; 5960 vn_seqc_write_begin(vp); 5961 } 5962 5963 void 5964 vop_setacl_post(void *ap, int rc __unused) 5965 { 5966 struct vop_setacl_args *a; 5967 struct vnode *vp; 5968 5969 a = ap; 5970 vp = a->a_vp; 5971 vn_seqc_write_end(vp); 5972 } 5973 5974 void 5975 vop_setextattr_pre(void *ap) 5976 { 5977 struct vop_setextattr_args *a; 5978 struct vnode *vp; 5979 5980 a = ap; 5981 vp = a->a_vp; 5982 vn_seqc_write_begin(vp); 5983 } 5984 5985 void 5986 vop_setextattr_post(void *ap, int rc) 5987 { 5988 struct vop_setextattr_args *a; 5989 struct vnode *vp; 5990 5991 a = ap; 5992 vp = a->a_vp; 5993 vn_seqc_write_end(vp); 5994 if (!rc) 5995 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5996 } 5997 5998 void 5999 vop_symlink_pre(void *ap) 6000 { 6001 struct vop_symlink_args *a; 6002 struct vnode *dvp; 6003 6004 a = ap; 6005 dvp = a->a_dvp; 6006 vn_seqc_write_begin(dvp); 6007 } 6008 6009 void 6010 vop_symlink_post(void *ap, int rc) 6011 { 6012 struct vop_symlink_args *a; 6013 struct vnode *dvp; 6014 6015 a = ap; 6016 dvp = a->a_dvp; 6017 vn_seqc_write_end(dvp); 6018 if (!rc) 6019 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6020 } 6021 6022 void 6023 vop_open_post(void *ap, int rc) 6024 { 6025 struct vop_open_args *a = ap; 6026 6027 if (!rc) 6028 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6029 } 6030 6031 void 6032 vop_close_post(void *ap, int rc) 6033 { 6034 struct vop_close_args *a = ap; 6035 6036 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6037 !VN_IS_DOOMED(a->a_vp))) { 6038 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6039 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6040 } 6041 } 6042 6043 void 6044 vop_read_post(void *ap, int rc) 6045 { 6046 struct vop_read_args *a = ap; 6047 6048 if (!rc) 6049 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6050 } 6051 6052 void 6053 vop_read_pgcache_post(void *ap, int rc) 6054 { 6055 struct vop_read_pgcache_args *a = ap; 6056 6057 if (!rc) 6058 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6059 } 6060 6061 void 6062 vop_readdir_post(void *ap, int rc) 6063 { 6064 struct vop_readdir_args *a = ap; 6065 6066 if (!rc) 6067 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6068 } 6069 6070 static struct knlist fs_knlist; 6071 6072 static void 6073 vfs_event_init(void *arg) 6074 { 6075 knlist_init_mtx(&fs_knlist, NULL); 6076 } 6077 /* XXX - correct order? 
*/ 6078 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6079 6080 void 6081 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6082 { 6083 6084 KNOTE_UNLOCKED(&fs_knlist, event); 6085 } 6086 6087 static int filt_fsattach(struct knote *kn); 6088 static void filt_fsdetach(struct knote *kn); 6089 static int filt_fsevent(struct knote *kn, long hint); 6090 6091 struct filterops fs_filtops = { 6092 .f_isfd = 0, 6093 .f_attach = filt_fsattach, 6094 .f_detach = filt_fsdetach, 6095 .f_event = filt_fsevent 6096 }; 6097 6098 static int 6099 filt_fsattach(struct knote *kn) 6100 { 6101 6102 kn->kn_flags |= EV_CLEAR; 6103 knlist_add(&fs_knlist, kn, 0); 6104 return (0); 6105 } 6106 6107 static void 6108 filt_fsdetach(struct knote *kn) 6109 { 6110 6111 knlist_remove(&fs_knlist, kn, 0); 6112 } 6113 6114 static int 6115 filt_fsevent(struct knote *kn, long hint) 6116 { 6117 6118 kn->kn_fflags |= kn->kn_sfflags & hint; 6119 6120 return (kn->kn_fflags != 0); 6121 } 6122 6123 static int 6124 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6125 { 6126 struct vfsidctl vc; 6127 int error; 6128 struct mount *mp; 6129 6130 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6131 if (error) 6132 return (error); 6133 if (vc.vc_vers != VFS_CTL_VERS1) 6134 return (EINVAL); 6135 mp = vfs_getvfs(&vc.vc_fsid); 6136 if (mp == NULL) 6137 return (ENOENT); 6138 /* ensure that a specific sysctl goes to the right filesystem. */ 6139 if (strcmp(vc.vc_fstypename, "*") != 0 && 6140 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6141 vfs_rel(mp); 6142 return (EINVAL); 6143 } 6144 VCTLTOREQ(&vc, req); 6145 error = VFS_SYSCTL(mp, vc.vc_op, req); 6146 vfs_rel(mp); 6147 return (error); 6148 } 6149 6150 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6151 NULL, 0, sysctl_vfs_ctl, "", 6152 "Sysctl by fsid"); 6153 6154 /* 6155 * Function to initialize a va_filerev field sensibly. 6156 * XXX: Wouldn't a random number make a lot more sense ?? 
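 * (The value packs the seconds of boot uptime into the upper 32 bits
 * and the top of the bintime fraction into the lower 32, so it is
 * monotonically increasing within a single boot.)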
6157 */ 6158 u_quad_t 6159 init_va_filerev(void) 6160 { 6161 struct bintime bt; 6162 6163 getbinuptime(&bt); 6164 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6165 } 6166 6167 static int filt_vfsread(struct knote *kn, long hint); 6168 static int filt_vfswrite(struct knote *kn, long hint); 6169 static int filt_vfsvnode(struct knote *kn, long hint); 6170 static void filt_vfsdetach(struct knote *kn); 6171 static struct filterops vfsread_filtops = { 6172 .f_isfd = 1, 6173 .f_detach = filt_vfsdetach, 6174 .f_event = filt_vfsread 6175 }; 6176 static struct filterops vfswrite_filtops = { 6177 .f_isfd = 1, 6178 .f_detach = filt_vfsdetach, 6179 .f_event = filt_vfswrite 6180 }; 6181 static struct filterops vfsvnode_filtops = { 6182 .f_isfd = 1, 6183 .f_detach = filt_vfsdetach, 6184 .f_event = filt_vfsvnode 6185 }; 6186 6187 static void 6188 vfs_knllock(void *arg) 6189 { 6190 struct vnode *vp = arg; 6191 6192 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6193 } 6194 6195 static void 6196 vfs_knlunlock(void *arg) 6197 { 6198 struct vnode *vp = arg; 6199 6200 VOP_UNLOCK(vp); 6201 } 6202 6203 static void 6204 vfs_knl_assert_lock(void *arg, int what) 6205 { 6206 #ifdef DEBUG_VFS_LOCKS 6207 struct vnode *vp = arg; 6208 6209 if (what == LA_LOCKED) 6210 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6211 else 6212 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6213 #endif 6214 } 6215 6216 int 6217 vfs_kqfilter(struct vop_kqfilter_args *ap) 6218 { 6219 struct vnode *vp = ap->a_vp; 6220 struct knote *kn = ap->a_kn; 6221 struct knlist *knl; 6222 6223 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6224 kn->kn_filter != EVFILT_WRITE), 6225 ("READ/WRITE filter on a FIFO leaked through")); 6226 switch (kn->kn_filter) { 6227 case EVFILT_READ: 6228 kn->kn_fop = &vfsread_filtops; 6229 break; 6230 case EVFILT_WRITE: 6231 kn->kn_fop = &vfswrite_filtops; 6232 break; 6233 case EVFILT_VNODE: 6234 kn->kn_fop = &vfsvnode_filtops; 6235 break; 6236 default: 6237 return (EINVAL); 6238 } 6239 6240 kn->kn_hook = (caddr_t)vp; 6241 6242 v_addpollinfo(vp); 6243 if (vp->v_pollinfo == NULL) 6244 return (ENOMEM); 6245 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6246 vhold(vp); 6247 knlist_add(knl, kn, 0); 6248 6249 return (0); 6250 } 6251 6252 /* 6253 * Detach knote from vnode 6254 */ 6255 static void 6256 filt_vfsdetach(struct knote *kn) 6257 { 6258 struct vnode *vp = (struct vnode *)kn->kn_hook; 6259 6260 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6261 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6262 vdrop(vp); 6263 } 6264 6265 /*ARGSUSED*/ 6266 static int 6267 filt_vfsread(struct knote *kn, long hint) 6268 { 6269 struct vnode *vp = (struct vnode *)kn->kn_hook; 6270 struct vattr va; 6271 int res; 6272 6273 /* 6274 * filesystem is gone, so set the EOF flag and schedule 6275 * the knote for deletion. 
6276 */ 6277 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6278 VI_LOCK(vp); 6279 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6280 VI_UNLOCK(vp); 6281 return (1); 6282 } 6283 6284 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6285 return (0); 6286 6287 VI_LOCK(vp); 6288 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6289 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6290 VI_UNLOCK(vp); 6291 return (res); 6292 } 6293 6294 /*ARGSUSED*/ 6295 static int 6296 filt_vfswrite(struct knote *kn, long hint) 6297 { 6298 struct vnode *vp = (struct vnode *)kn->kn_hook; 6299 6300 VI_LOCK(vp); 6301 6302 /* 6303 * filesystem is gone, so set the EOF flag and schedule 6304 * the knote for deletion. 6305 */ 6306 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6307 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6308 6309 kn->kn_data = 0; 6310 VI_UNLOCK(vp); 6311 return (1); 6312 } 6313 6314 static int 6315 filt_vfsvnode(struct knote *kn, long hint) 6316 { 6317 struct vnode *vp = (struct vnode *)kn->kn_hook; 6318 int res; 6319 6320 VI_LOCK(vp); 6321 if (kn->kn_sfflags & hint) 6322 kn->kn_fflags |= hint; 6323 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6324 kn->kn_flags |= EV_EOF; 6325 VI_UNLOCK(vp); 6326 return (1); 6327 } 6328 res = (kn->kn_fflags != 0); 6329 VI_UNLOCK(vp); 6330 return (res); 6331 } 6332 6333 /* 6334 * Returns whether the directory is empty or not. 6335 * If it is empty, the return value is 0; otherwise 6336 * the return value is an error value (which may 6337 * be ENOTEMPTY). 6338 */ 6339 int 6340 vfs_emptydir(struct vnode *vp) 6341 { 6342 struct uio uio; 6343 struct iovec iov; 6344 struct dirent *dirent, *dp, *endp; 6345 int error, eof; 6346 6347 error = 0; 6348 eof = 0; 6349 6350 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6351 VNASSERT(vp->v_type == VDIR, vp, ("vp is not a directory")); 6352 6353 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6354 iov.iov_base = dirent; 6355 iov.iov_len = sizeof(struct dirent); 6356 6357 uio.uio_iov = &iov; 6358 uio.uio_iovcnt = 1; 6359 uio.uio_offset = 0; 6360 uio.uio_resid = sizeof(struct dirent); 6361 uio.uio_segflg = UIO_SYSSPACE; 6362 uio.uio_rw = UIO_READ; 6363 uio.uio_td = curthread; 6364 6365 while (eof == 0 && error == 0) { 6366 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6367 NULL, NULL); 6368 if (error != 0) 6369 break; 6370 endp = (void *)((uint8_t *)dirent + 6371 sizeof(struct dirent) - uio.uio_resid); 6372 for (dp = dirent; dp < endp; 6373 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6374 if (dp->d_type == DT_WHT) 6375 continue; 6376 if (dp->d_namlen == 0) 6377 continue; 6378 if (dp->d_type != DT_DIR && 6379 dp->d_type != DT_UNKNOWN) { 6380 error = ENOTEMPTY; 6381 break; 6382 } 6383 if (dp->d_namlen > 2) { 6384 error = ENOTEMPTY; 6385 break; 6386 } 6387 if (dp->d_namlen == 1 && 6388 dp->d_name[0] != '.') { 6389 error = ENOTEMPTY; 6390 break; 6391 } 6392 if (dp->d_namlen == 2 && 6393 dp->d_name[1] != '.') { 6394 error = ENOTEMPTY; 6395 break; 6396 } 6397 uio.uio_resid = sizeof(struct dirent); 6398 } 6399 } 6400 free(dirent, M_TEMP); 6401 return (error); 6402 } 6403 6404 int 6405 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6406 { 6407 int error; 6408 6409 if (dp->d_reclen > ap->a_uio->uio_resid) 6410 return (ENAMETOOLONG); 6411 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6412 if (error) { 6413 if (ap->a_ncookies != NULL) { 6414 if (ap->a_cookies != NULL) 6415 free(ap->a_cookies, M_TEMP); 6416 ap->a_cookies = NULL; 6417 
*ap->a_ncookies = 0; 6418 } 6419 return (error); 6420 } 6421 if (ap->a_ncookies == NULL) 6422 return (0); 6423 6424 KASSERT(ap->a_cookies, 6425 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6426 6427 *ap->a_cookies = realloc(*ap->a_cookies, 6428 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 6429 (*ap->a_cookies)[*ap->a_ncookies] = off; 6430 *ap->a_ncookies += 1; 6431 return (0); 6432 } 6433 6434 /* 6435 * The purpose of this routine is to remove granularity from accmode_t, 6436 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6437 * VADMIN and VAPPEND. 6438 * 6439 * If it returns 0, the caller is supposed to continue with the usual 6440 * access checks using 'accmode' as modified by this routine. If it 6441 * returns nonzero value, the caller is supposed to return that value 6442 * as errno. 6443 * 6444 * Note that after this routine runs, accmode may be zero. 6445 */ 6446 int 6447 vfs_unixify_accmode(accmode_t *accmode) 6448 { 6449 /* 6450 * There is no way to specify explicit "deny" rule using 6451 * file mode or POSIX.1e ACLs. 6452 */ 6453 if (*accmode & VEXPLICIT_DENY) { 6454 *accmode = 0; 6455 return (0); 6456 } 6457 6458 /* 6459 * None of these can be translated into usual access bits. 6460 * Also, the common case for NFSv4 ACLs is to not contain 6461 * either of these bits. Caller should check for VWRITE 6462 * on the containing directory instead. 6463 */ 6464 if (*accmode & (VDELETE_CHILD | VDELETE)) 6465 return (EPERM); 6466 6467 if (*accmode & VADMIN_PERMS) { 6468 *accmode &= ~VADMIN_PERMS; 6469 *accmode |= VADMIN; 6470 } 6471 6472 /* 6473 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6474 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6475 */ 6476 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6477 6478 return (0); 6479 } 6480 6481 /* 6482 * Clear out a doomed vnode (if any) and replace it with a new one as long 6483 * as the fs is not being unmounted. Return the root vnode to the caller. 6484 */ 6485 static int __noinline 6486 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6487 { 6488 struct vnode *vp; 6489 int error; 6490 6491 restart: 6492 if (mp->mnt_rootvnode != NULL) { 6493 MNT_ILOCK(mp); 6494 vp = mp->mnt_rootvnode; 6495 if (vp != NULL) { 6496 if (!VN_IS_DOOMED(vp)) { 6497 vrefact(vp); 6498 MNT_IUNLOCK(mp); 6499 error = vn_lock(vp, flags); 6500 if (error == 0) { 6501 *vpp = vp; 6502 return (0); 6503 } 6504 vrele(vp); 6505 goto restart; 6506 } 6507 /* 6508 * Clear the old one. 
6509 */ 6510 mp->mnt_rootvnode = NULL; 6511 } 6512 MNT_IUNLOCK(mp); 6513 if (vp != NULL) { 6514 vfs_op_barrier_wait(mp); 6515 vrele(vp); 6516 } 6517 } 6518 error = VFS_CACHEDROOT(mp, flags, vpp); 6519 if (error != 0) 6520 return (error); 6521 if (mp->mnt_vfs_ops == 0) { 6522 MNT_ILOCK(mp); 6523 if (mp->mnt_vfs_ops != 0) { 6524 MNT_IUNLOCK(mp); 6525 return (0); 6526 } 6527 if (mp->mnt_rootvnode == NULL) { 6528 vrefact(*vpp); 6529 mp->mnt_rootvnode = *vpp; 6530 } else { 6531 if (mp->mnt_rootvnode != *vpp) { 6532 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6533 panic("%s: mismatch between vnode returned " 6534 "by VFS_CACHEDROOT and the one cached " 6535 "(%p != %p)", 6536 __func__, *vpp, mp->mnt_rootvnode); 6537 } 6538 } 6539 } 6540 MNT_IUNLOCK(mp); 6541 } 6542 return (0); 6543 } 6544 6545 int 6546 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6547 { 6548 struct mount_pcpu *mpcpu; 6549 struct vnode *vp; 6550 int error; 6551 6552 if (!vfs_op_thread_enter(mp, mpcpu)) 6553 return (vfs_cache_root_fallback(mp, flags, vpp)); 6554 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6555 if (vp == NULL || VN_IS_DOOMED(vp)) { 6556 vfs_op_thread_exit(mp, mpcpu); 6557 return (vfs_cache_root_fallback(mp, flags, vpp)); 6558 } 6559 vrefact(vp); 6560 vfs_op_thread_exit(mp, mpcpu); 6561 error = vn_lock(vp, flags); 6562 if (error != 0) { 6563 vrele(vp); 6564 return (vfs_cache_root_fallback(mp, flags, vpp)); 6565 } 6566 *vpp = vp; 6567 return (0); 6568 } 6569 6570 struct vnode * 6571 vfs_cache_root_clear(struct mount *mp) 6572 { 6573 struct vnode *vp; 6574 6575 /* 6576 * ops > 0 guarantees there is nobody who can see this vnode 6577 */ 6578 MPASS(mp->mnt_vfs_ops > 0); 6579 vp = mp->mnt_rootvnode; 6580 if (vp != NULL) 6581 vn_seqc_write_begin(vp); 6582 mp->mnt_rootvnode = NULL; 6583 return (vp); 6584 } 6585 6586 void 6587 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6588 { 6589 6590 MPASS(mp->mnt_vfs_ops > 0); 6591 vrefact(vp); 6592 mp->mnt_rootvnode = vp; 6593 } 6594 6595 /* 6596 * These are helper functions for filesystems to traverse all 6597 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6598 * 6599 * This interface replaces MNT_VNODE_FOREACH. 6600 */ 6601 6602 struct vnode * 6603 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6604 { 6605 struct vnode *vp; 6606 6607 if (should_yield()) 6608 kern_yield(PRI_USER); 6609 MNT_ILOCK(mp); 6610 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6611 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6612 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6613 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6614 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6615 continue; 6616 VI_LOCK(vp); 6617 if (VN_IS_DOOMED(vp)) { 6618 VI_UNLOCK(vp); 6619 continue; 6620 } 6621 break; 6622 } 6623 if (vp == NULL) { 6624 __mnt_vnode_markerfree_all(mvp, mp); 6625 /* MNT_IUNLOCK(mp); -- done in above function */ 6626 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6627 return (NULL); 6628 } 6629 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6630 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6631 MNT_IUNLOCK(mp); 6632 return (vp); 6633 } 6634 6635 struct vnode * 6636 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6637 { 6638 struct vnode *vp; 6639 6640 *mvp = vn_alloc_marker(mp); 6641 MNT_ILOCK(mp); 6642 MNT_REF(mp); 6643 6644 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6645 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition.
*/ 6646 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6647 continue; 6648 VI_LOCK(vp); 6649 if (VN_IS_DOOMED(vp)) { 6650 VI_UNLOCK(vp); 6651 continue; 6652 } 6653 break; 6654 } 6655 if (vp == NULL) { 6656 MNT_REL(mp); 6657 MNT_IUNLOCK(mp); 6658 vn_free_marker(*mvp); 6659 *mvp = NULL; 6660 return (NULL); 6661 } 6662 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6663 MNT_IUNLOCK(mp); 6664 return (vp); 6665 } 6666 6667 void 6668 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6669 { 6670 6671 if (*mvp == NULL) { 6672 MNT_IUNLOCK(mp); 6673 return; 6674 } 6675 6676 mtx_assert(MNT_MTX(mp), MA_OWNED); 6677 6678 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6679 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6680 MNT_REL(mp); 6681 MNT_IUNLOCK(mp); 6682 vn_free_marker(*mvp); 6683 *mvp = NULL; 6684 } 6685 6686 /* 6687 * These are helper functions for filesystems to traverse their 6688 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6689 */ 6690 static void 6691 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6692 { 6693 6694 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6695 6696 MNT_ILOCK(mp); 6697 MNT_REL(mp); 6698 MNT_IUNLOCK(mp); 6699 vn_free_marker(*mvp); 6700 *mvp = NULL; 6701 } 6702 6703 /* 6704 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6705 * conventional lock order during mnt_vnode_next_lazy iteration. 6706 * 6707 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6708 * The list lock is dropped and reacquired. On success, both locks are held. 6709 * On failure, the mount vnode list lock is held but the vnode interlock is 6710 * not, and the procedure may have yielded. 6711 */ 6712 static bool 6713 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6714 struct vnode *vp) 6715 { 6716 6717 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6718 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6719 ("%s: bad marker", __func__)); 6720 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6721 ("%s: inappropriate vnode", __func__)); 6722 ASSERT_VI_UNLOCKED(vp, __func__); 6723 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6724 6725 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6726 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6727 6728 /* 6729 * Note we may be racing against vdrop which transitioned the hold 6730 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6731 * if we are the only user after we get the interlock we will just 6732 * vdrop. 6733 */ 6734 vhold(vp); 6735 mtx_unlock(&mp->mnt_listmtx); 6736 VI_LOCK(vp); 6737 if (VN_IS_DOOMED(vp)) { 6738 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6739 goto out_lost; 6740 } 6741 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6742 /* 6743 * There is nothing to do if we are the last user. 
6744 */ 6745 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6746 goto out_lost; 6747 mtx_lock(&mp->mnt_listmtx); 6748 return (true); 6749 out_lost: 6750 vdropl(vp); 6751 maybe_yield(); 6752 mtx_lock(&mp->mnt_listmtx); 6753 return (false); 6754 } 6755 6756 static struct vnode * 6757 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6758 void *cbarg) 6759 { 6760 struct vnode *vp; 6761 6762 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6763 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6764 restart: 6765 vp = TAILQ_NEXT(*mvp, v_lazylist); 6766 while (vp != NULL) { 6767 if (vp->v_type == VMARKER) { 6768 vp = TAILQ_NEXT(vp, v_lazylist); 6769 continue; 6770 } 6771 /* 6772 * See if we want to process the vnode. Note we may encounter a 6773 * long string of vnodes we don't care about and hog the list 6774 * as a result. Check for it and requeue the marker. 6775 */ 6776 VNPASS(!VN_IS_DOOMED(vp), vp); 6777 if (!cb(vp, cbarg)) { 6778 if (!should_yield()) { 6779 vp = TAILQ_NEXT(vp, v_lazylist); 6780 continue; 6781 } 6782 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6783 v_lazylist); 6784 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6785 v_lazylist); 6786 mtx_unlock(&mp->mnt_listmtx); 6787 kern_yield(PRI_USER); 6788 mtx_lock(&mp->mnt_listmtx); 6789 goto restart; 6790 } 6791 /* 6792 * Try-lock because this is the wrong lock order. 6793 */ 6794 if (!VI_TRYLOCK(vp) && 6795 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6796 goto restart; 6797 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6798 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6799 ("alien vnode on the lazy list %p %p", vp, mp)); 6800 VNPASS(vp->v_mount == mp, vp); 6801 VNPASS(!VN_IS_DOOMED(vp), vp); 6802 break; 6803 } 6804 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6805 6806 /* Check if we are done */ 6807 if (vp == NULL) { 6808 mtx_unlock(&mp->mnt_listmtx); 6809 mnt_vnode_markerfree_lazy(mvp, mp); 6810 return (NULL); 6811 } 6812 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6813 mtx_unlock(&mp->mnt_listmtx); 6814 ASSERT_VI_LOCKED(vp, "lazy iter"); 6815 return (vp); 6816 } 6817 6818 struct vnode * 6819 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6820 void *cbarg) 6821 { 6822 6823 if (should_yield()) 6824 kern_yield(PRI_USER); 6825 mtx_lock(&mp->mnt_listmtx); 6826 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6827 } 6828 6829 struct vnode * 6830 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6831 void *cbarg) 6832 { 6833 struct vnode *vp; 6834 6835 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6836 return (NULL); 6837 6838 *mvp = vn_alloc_marker(mp); 6839 MNT_ILOCK(mp); 6840 MNT_REF(mp); 6841 MNT_IUNLOCK(mp); 6842 6843 mtx_lock(&mp->mnt_listmtx); 6844 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6845 if (vp == NULL) { 6846 mtx_unlock(&mp->mnt_listmtx); 6847 mnt_vnode_markerfree_lazy(mvp, mp); 6848 return (NULL); 6849 } 6850 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6851 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6852 } 6853 6854 void 6855 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6856 { 6857 6858 if (*mvp == NULL) 6859 return; 6860 6861 mtx_lock(&mp->mnt_listmtx); 6862 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6863 mtx_unlock(&mp->mnt_listmtx); 6864 mnt_vnode_markerfree_lazy(mvp, mp); 6865 } 6866 6867 int 6868 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 6869 { 6870 6871 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 6872 
cnp->cn_flags &= ~NOEXECCHECK; 6873 return (0); 6874 } 6875 6876 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 6877 } 6878 6879 /* 6880 * Do not use this variant unless you have means other than the hold count 6881 * to prevent the vnode from getting freed. 6882 */ 6883 void 6884 vn_seqc_write_begin_locked(struct vnode *vp) 6885 { 6886 6887 ASSERT_VI_LOCKED(vp, __func__); 6888 VNPASS(vp->v_holdcnt > 0, vp); 6889 VNPASS(vp->v_seqc_users >= 0, vp); 6890 vp->v_seqc_users++; 6891 if (vp->v_seqc_users == 1) 6892 seqc_sleepable_write_begin(&vp->v_seqc); 6893 } 6894 6895 void 6896 vn_seqc_write_begin(struct vnode *vp) 6897 { 6898 6899 VI_LOCK(vp); 6900 vn_seqc_write_begin_locked(vp); 6901 VI_UNLOCK(vp); 6902 } 6903 6904 void 6905 vn_seqc_write_end_locked(struct vnode *vp) 6906 { 6907 6908 ASSERT_VI_LOCKED(vp, __func__); 6909 VNPASS(vp->v_seqc_users > 0, vp); 6910 vp->v_seqc_users--; 6911 if (vp->v_seqc_users == 0) 6912 seqc_sleepable_write_end(&vp->v_seqc); 6913 } 6914 6915 void 6916 vn_seqc_write_end(struct vnode *vp) 6917 { 6918 6919 VI_LOCK(vp); 6920 vn_seqc_write_end_locked(vp); 6921 VI_UNLOCK(vp); 6922 } 6923 6924 /* 6925 * Special case handling for allocating and freeing vnodes. 6926 * 6927 * The counter remains unchanged on free so that a doomed vnode will 6928 * keep testing as in modify as long as it is accessible with SMR. 6929 */ 6930 static void 6931 vn_seqc_init(struct vnode *vp) 6932 { 6933 6934 vp->v_seqc = 0; 6935 vp->v_seqc_users = 0; 6936 } 6937 6938 static void 6939 vn_seqc_write_end_free(struct vnode *vp) 6940 { 6941 6942 VNPASS(seqc_in_modify(vp->v_seqc), vp); 6943 VNPASS(vp->v_seqc_users == 1, vp); 6944 } 6945 6946 void 6947 vn_irflag_set_locked(struct vnode *vp, short toset) 6948 { 6949 short flags; 6950 6951 ASSERT_VI_LOCKED(vp, __func__); 6952 flags = vn_irflag_read(vp); 6953 VNASSERT((flags & toset) == 0, vp, 6954 ("%s: some of the passed flags already set (have %d, passed %d)\n", 6955 __func__, flags, toset)); 6956 atomic_store_short(&vp->v_irflag, flags | toset); 6957 } 6958 6959 void 6960 vn_irflag_set(struct vnode *vp, short toset) 6961 { 6962 6963 VI_LOCK(vp); 6964 vn_irflag_set_locked(vp, toset); 6965 VI_UNLOCK(vp); 6966 } 6967 6968 void 6969 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 6970 { 6971 short flags; 6972 6973 ASSERT_VI_LOCKED(vp, __func__); 6974 flags = vn_irflag_read(vp); 6975 atomic_store_short(&vp->v_irflag, flags | toset); 6976 } 6977 6978 void 6979 vn_irflag_set_cond(struct vnode *vp, short toset) 6980 { 6981 6982 VI_LOCK(vp); 6983 vn_irflag_set_cond_locked(vp, toset); 6984 VI_UNLOCK(vp); 6985 } 6986 6987 void 6988 vn_irflag_unset_locked(struct vnode *vp, short tounset) 6989 { 6990 short flags; 6991 6992 ASSERT_VI_LOCKED(vp, __func__); 6993 flags = vn_irflag_read(vp); 6994 VNASSERT((flags & tounset) == tounset, vp, 6995 ("%s: some of the passed flags not set (have %d, passed %d)\n", 6996 __func__, flags, tounset)); 6997 atomic_store_short(&vp->v_irflag, flags & ~tounset); 6998 } 6999 7000 void 7001 vn_irflag_unset(struct vnode *vp, short tounset) 7002 { 7003 7004 VI_LOCK(vp); 7005 vn_irflag_unset_locked(vp, tounset); 7006 VI_UNLOCK(vp); 7007 } 7008
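/*
 * A minimal sketch of how the vn_seqc_write_* helpers above are intended to
 * be used by a filesystem.  Changes to vnode state that the lockless
 * (SMR-protected) lookup may observe are bracketed by a write section, so
 * that concurrent readers see the sequence counter in modify and fall back
 * to the locked path.  The caller is assumed to have a hold count on the
 * vnode (vn_seqc_write_begin_locked() asserts v_holdcnt > 0); holding the
 * vnode lock as shown reflects the typical caller but is part of the
 * hypothetical example's own contract.  The function below is illustrative
 * only and therefore kept under #if 0.
 */
#if 0
static void
example_vnode_update(struct vnode *vp)
{

	ASSERT_VOP_ELOCKED(vp, __func__);
	vn_seqc_write_begin(vp);	/* enter the modify window */
	/* ... modify fields consulted by lockless lookup ... */
	vn_seqc_write_end(vp);		/* leave the modify window */
}
#endif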