1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/capsicum.h> 54 #include <sys/condvar.h> 55 #include <sys/conf.h> 56 #include <sys/counter.h> 57 #include <sys/dirent.h> 58 #include <sys/event.h> 59 #include <sys/eventhandler.h> 60 #include <sys/extattr.h> 61 #include <sys/file.h> 62 #include <sys/fcntl.h> 63 #include <sys/jail.h> 64 #include <sys/kdb.h> 65 #include <sys/kernel.h> 66 #include <sys/kthread.h> 67 #include <sys/ktr.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smr.h> 80 #include <sys/smp.h> 81 #include <sys/stat.h> 82 #include <sys/sysctl.h> 83 #include <sys/syslog.h> 84 #include <sys/vmmeter.h> 85 #include <sys/vnode.h> 86 #include <sys/watchdog.h> 87 88 #include <machine/stdarg.h> 89 90 #include <security/mac/mac_framework.h> 91 92 #include <vm/vm.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_extern.h> 95 #include <vm/pmap.h> 96 #include <vm/vm_map.h> 97 #include <vm/vm_page.h> 98 #include <vm/vm_kern.h> 99 #include <vm/uma.h> 100 101 #ifdef DDB 102 #include <ddb/ddb.h> 103 #endif 104 105 static void delmntque(struct vnode *vp); 106 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 107 int slpflag, int slptimeo); 108 static void syncer_shutdown(void *arg, int howto); 109 static int vtryrecycle(struct vnode *vp); 110 static void v_init_counters(struct vnode *); 111 static void vgonel(struct vnode *); 112 static bool vhold_recycle_free(struct vnode *); 113 static void vfs_knllock(void *arg); 114 static void vfs_knlunlock(void *arg); 115 static void vfs_knl_assert_locked(void *arg); 116 static void vfs_knl_assert_unlocked(void *arg); 117 static void destroy_vpollinfo(struct vpollinfo *vi); 118 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 119 daddr_t startlbn, daddr_t endlbn); 120 static void vnlru_recalc(void); 121 122 /* 123 * These fences are intended for cases where some synchronization is 124 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt 125 * and v_usecount) updates. Access to v_iflags is generally synchronized 126 * by the interlock, but we have some internal assertions that check vnode 127 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only 128 * for now. 129 */ 130 #ifdef INVARIANTS 131 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq() 132 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() 133 #else 134 #define VNODE_REFCOUNT_FENCE_ACQ() 135 #define VNODE_REFCOUNT_FENCE_REL() 136 #endif 137 138 /* 139 * Number of vnodes in existence. Increased whenever getnewvnode() 140 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
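 *
 * To illustrate the arithmetic with purely hypothetical numbers: with
 * desiredvnodes = 1,000,000 the default wantfreevnodes is 250,000, so
 * gapvnodes = 750,000, vhiwat = gapvnodes / 11 (about 68,000, just under
 * 9%) and vlowat = vhiwat / 2 (about 34,000); see vnlru_recalc() below.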
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are only delayed
 * about a third of the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
250 * Items that need to be processed soon are placed in this queue: 251 * 252 * syncer_workitem_pending[syncer_delayno] 253 * 254 * A delay of fifteen seconds is done by placing the request fifteen 255 * entries later in the queue: 256 * 257 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 258 * 259 */ 260 static int syncer_delayno; 261 static long syncer_mask; 262 LIST_HEAD(synclist, bufobj); 263 static struct synclist *syncer_workitem_pending; 264 /* 265 * The sync_mtx protects: 266 * bo->bo_synclist 267 * sync_vnode_count 268 * syncer_delayno 269 * syncer_state 270 * syncer_workitem_pending 271 * syncer_worklist_len 272 * rushjob 273 */ 274 static struct mtx sync_mtx; 275 static struct cv sync_wakeup; 276 277 #define SYNCER_MAXDELAY 32 278 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 279 static int syncdelay = 30; /* max time to delay syncing data */ 280 static int filedelay = 30; /* time to delay syncing files */ 281 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 282 "Time to delay syncing files (in seconds)"); 283 static int dirdelay = 29; /* time to delay syncing directories */ 284 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 285 "Time to delay syncing directories (in seconds)"); 286 static int metadelay = 28; /* time to delay syncing metadata */ 287 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 288 "Time to delay syncing metadata (in seconds)"); 289 static int rushjob; /* number of slots to run ASAP */ 290 static int stat_rush_requests; /* number of times I/O speeded up */ 291 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 292 "Number of times I/O speeded up (rush requests)"); 293 294 #define VDBATCH_SIZE 8 295 struct vdbatch { 296 u_int index; 297 long freevnodes; 298 struct mtx lock; 299 struct vnode *tab[VDBATCH_SIZE]; 300 }; 301 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 302 303 static void vdbatch_dequeue(struct vnode *vp); 304 305 /* 306 * When shutting down the syncer, run it at four times normal speed. 307 */ 308 #define SYNCER_SHUTDOWN_SPEEDUP 4 309 static int sync_vnode_count; 310 static int syncer_worklist_len; 311 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 312 syncer_state; 313 314 /* Target for maximum number of vnodes. */ 315 u_long desiredvnodes; 316 static u_long gapvnodes; /* gap between wanted and desired */ 317 static u_long vhiwat; /* enough extras after expansion */ 318 static u_long vlowat; /* minimal extras before expansion */ 319 static u_long vstir; /* nonzero to stir non-free vnodes */ 320 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 321 322 static u_long vnlru_read_freevnodes(void); 323 324 /* 325 * Note that no attempt is made to sanitize these parameters. 326 */ 327 static int 328 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 329 { 330 u_long val; 331 int error; 332 333 val = desiredvnodes; 334 error = sysctl_handle_long(oidp, &val, 0, req); 335 if (error != 0 || req->newptr == NULL) 336 return (error); 337 338 if (val == desiredvnodes) 339 return (0); 340 mtx_lock(&vnode_list_mtx); 341 desiredvnodes = val; 342 wantfreevnodes = desiredvnodes / 4; 343 vnlru_recalc(); 344 mtx_unlock(&vnode_list_mtx); 345 /* 346 * XXX There is no protection against multiple threads changing 347 * desiredvnodes at the same time. Locking above only helps vnlru and 348 * getnewvnode. 
349 */ 350 vfs_hash_changesize(desiredvnodes); 351 cache_changesize(desiredvnodes); 352 return (0); 353 } 354 355 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 356 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 357 "LU", "Target for maximum number of vnodes"); 358 359 static int 360 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 361 { 362 u_long val; 363 int error; 364 365 val = wantfreevnodes; 366 error = sysctl_handle_long(oidp, &val, 0, req); 367 if (error != 0 || req->newptr == NULL) 368 return (error); 369 370 if (val == wantfreevnodes) 371 return (0); 372 mtx_lock(&vnode_list_mtx); 373 wantfreevnodes = val; 374 vnlru_recalc(); 375 mtx_unlock(&vnode_list_mtx); 376 return (0); 377 } 378 379 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 380 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 381 "LU", "Target for minimum number of \"free\" vnodes"); 382 383 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 384 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 385 static int vnlru_nowhere; 386 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 387 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 388 389 static int 390 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 391 { 392 struct vnode *vp; 393 struct nameidata nd; 394 char *buf; 395 unsigned long ndflags; 396 int error; 397 398 if (req->newptr == NULL) 399 return (EINVAL); 400 if (req->newlen >= PATH_MAX) 401 return (E2BIG); 402 403 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 404 error = SYSCTL_IN(req, buf, req->newlen); 405 if (error != 0) 406 goto out; 407 408 buf[req->newlen] = '\0'; 409 410 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME; 411 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 412 if ((error = namei(&nd)) != 0) 413 goto out; 414 vp = nd.ni_vp; 415 416 if (VN_IS_DOOMED(vp)) { 417 /* 418 * This vnode is being recycled. Return != 0 to let the caller 419 * know that the sysctl had no effect. Return EAGAIN because a 420 * subsequent call will likely succeed (since namei will create 421 * a new vnode if necessary) 422 */ 423 error = EAGAIN; 424 goto putvnode; 425 } 426 427 counter_u64_add(recycles_count, 1); 428 vgone(vp); 429 putvnode: 430 NDFREE(&nd, 0); 431 out: 432 free(buf, M_TEMP); 433 return (error); 434 } 435 436 static int 437 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 438 { 439 struct thread *td = curthread; 440 struct vnode *vp; 441 struct file *fp; 442 int error; 443 int fd; 444 445 if (req->newptr == NULL) 446 return (EBADF); 447 448 error = sysctl_handle_int(oidp, &fd, 0, req); 449 if (error != 0) 450 return (error); 451 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 452 if (error != 0) 453 return (error); 454 vp = fp->f_vnode; 455 456 error = vn_lock(vp, LK_EXCLUSIVE); 457 if (error != 0) 458 goto drop; 459 460 counter_u64_add(recycles_count, 1); 461 vgone(vp); 462 VOP_UNLOCK(vp); 463 drop: 464 fdrop(fp, td); 465 return (error); 466 } 467 468 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 469 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 470 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 471 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 472 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 473 sysctl_ftry_reclaim_vnode, "I", 474 "Try to reclaim a vnode by its file descriptor"); 475 476 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 477 static int vnsz2log; 478 479 /* 480 * Support for the bufobj clean & dirty pctrie. 
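 * The tries are indexed by logical block number (b_lblkno) and their nodes
 * are allocated from an SMR-enabled UMA zone (buf_trie_zone, set up in
 * vntblinit() below).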
481 */ 482 static void * 483 buf_trie_alloc(struct pctrie *ptree) 484 { 485 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 486 } 487 488 static void 489 buf_trie_free(struct pctrie *ptree, void *node) 490 { 491 uma_zfree_smr(buf_trie_zone, node); 492 } 493 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 494 buf_trie_smr); 495 496 /* 497 * Initialize the vnode management data structures. 498 * 499 * Reevaluate the following cap on the number of vnodes after the physical 500 * memory size exceeds 512GB. In the limit, as the physical memory size 501 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 502 */ 503 #ifndef MAXVNODES_MAX 504 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 505 #endif 506 507 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 508 509 static struct vnode * 510 vn_alloc_marker(struct mount *mp) 511 { 512 struct vnode *vp; 513 514 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 515 vp->v_type = VMARKER; 516 vp->v_mount = mp; 517 518 return (vp); 519 } 520 521 static void 522 vn_free_marker(struct vnode *vp) 523 { 524 525 MPASS(vp->v_type == VMARKER); 526 free(vp, M_VNODE_MARKER); 527 } 528 529 /* 530 * Initialize a vnode as it first enters the zone. 531 */ 532 static int 533 vnode_init(void *mem, int size, int flags) 534 { 535 struct vnode *vp; 536 537 vp = mem; 538 bzero(vp, size); 539 /* 540 * Setup locks. 541 */ 542 vp->v_vnlock = &vp->v_lock; 543 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 544 /* 545 * By default, don't allow shared locks unless filesystems opt-in. 546 */ 547 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 548 LK_NOSHARE | LK_IS_VNODE); 549 /* 550 * Initialize bufobj. 551 */ 552 bufobj_init(&vp->v_bufobj, vp); 553 /* 554 * Initialize namecache. 555 */ 556 cache_vnode_init(vp); 557 /* 558 * Initialize rangelocks. 559 */ 560 rangelock_init(&vp->v_rl); 561 562 vp->v_dbatchcpu = NOCPU; 563 564 /* 565 * Check vhold_recycle_free for an explanation. 566 */ 567 vp->v_holdcnt = VHOLD_NO_SMR; 568 vp->v_type = VNON; 569 mtx_lock(&vnode_list_mtx); 570 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 571 mtx_unlock(&vnode_list_mtx); 572 return (0); 573 } 574 575 /* 576 * Free a vnode when it is cleared from the zone. 577 */ 578 static void 579 vnode_fini(void *mem, int size) 580 { 581 struct vnode *vp; 582 struct bufobj *bo; 583 584 vp = mem; 585 vdbatch_dequeue(vp); 586 mtx_lock(&vnode_list_mtx); 587 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 588 mtx_unlock(&vnode_list_mtx); 589 rangelock_destroy(&vp->v_rl); 590 lockdestroy(vp->v_vnlock); 591 mtx_destroy(&vp->v_interlock); 592 bo = &vp->v_bufobj; 593 rw_destroy(BO_LOCKPTR(bo)); 594 } 595 596 /* 597 * Provide the size of NFS nclnode and NFS fh for calculation of the 598 * vnode memory consumption. The size is specified directly to 599 * eliminate dependency on NFS-private header. 600 * 601 * Other filesystems may use bigger or smaller (like UFS and ZFS) 602 * private inode data, but the NFS-based estimation is ample enough. 603 * Still, we care about differences in the size between 64- and 32-bit 604 * platforms. 605 * 606 * Namecache structure size is heuristically 607 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 
608 */ 609 #ifdef _LP64 610 #define NFS_NCLNODE_SZ (528 + 64) 611 #define NC_SZ 148 612 #else 613 #define NFS_NCLNODE_SZ (360 + 32) 614 #define NC_SZ 92 615 #endif 616 617 static void 618 vntblinit(void *dummy __unused) 619 { 620 struct vdbatch *vd; 621 int cpu, physvnodes, virtvnodes; 622 u_int i; 623 624 /* 625 * Desiredvnodes is a function of the physical memory size and the 626 * kernel's heap size. Generally speaking, it scales with the 627 * physical memory size. The ratio of desiredvnodes to the physical 628 * memory size is 1:16 until desiredvnodes exceeds 98,304. 629 * Thereafter, the 630 * marginal ratio of desiredvnodes to the physical memory size is 631 * 1:64. However, desiredvnodes is limited by the kernel's heap 632 * size. The memory required by desiredvnodes vnodes and vm objects 633 * must not exceed 1/10th of the kernel's heap size. 634 */ 635 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 636 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 637 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 638 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 639 desiredvnodes = min(physvnodes, virtvnodes); 640 if (desiredvnodes > MAXVNODES_MAX) { 641 if (bootverbose) 642 printf("Reducing kern.maxvnodes %lu -> %lu\n", 643 desiredvnodes, MAXVNODES_MAX); 644 desiredvnodes = MAXVNODES_MAX; 645 } 646 wantfreevnodes = desiredvnodes / 4; 647 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 648 TAILQ_INIT(&vnode_list); 649 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 650 /* 651 * The lock is taken to appease WITNESS. 652 */ 653 mtx_lock(&vnode_list_mtx); 654 vnlru_recalc(); 655 mtx_unlock(&vnode_list_mtx); 656 vnode_list_free_marker = vn_alloc_marker(NULL); 657 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 658 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 659 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 660 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 661 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 662 uma_zone_set_smr(vnode_zone, vfs_smr); 663 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 664 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 665 /* 666 * Preallocate enough nodes to support one-per buf so that 667 * we can not fail an insert. reassignbuf() callers can not 668 * tolerate the insertion failure. 669 */ 670 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 671 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 672 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 673 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 674 uma_prealloc(buf_trie_zone, nbuf); 675 676 vnodes_created = counter_u64_alloc(M_WAITOK); 677 recycles_count = counter_u64_alloc(M_WAITOK); 678 recycles_free_count = counter_u64_alloc(M_WAITOK); 679 deferred_inact = counter_u64_alloc(M_WAITOK); 680 681 /* 682 * Initialize the filesystem syncer. 683 */ 684 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 685 &syncer_mask); 686 syncer_maxdelay = syncer_mask + 1; 687 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 688 cv_init(&sync_wakeup, "syncer"); 689 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 690 vnsz2log++; 691 vnsz2log--; 692 693 CPU_FOREACH(cpu) { 694 vd = DPCPU_ID_PTR((cpu), vd); 695 bzero(vd, sizeof(*vd)); 696 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 697 } 698 } 699 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 700 701 /* 702 * Mark a mount point as busy. Used to synchronize access and to delay 703 * unmounting. 
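 *
 * A typical caller pairs the busy/unbusy operations around its use of the
 * mount point, e.g. (an illustrative sketch, not lifted from an actual
 * caller):
 *
 *	error = vfs_busy(mp, 0);
 *	if (error != 0)
 *		return (error);
 *	... operate on vnodes belonging to mp ...
 *	vfs_unbusy(mp);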
 *
 * Note: if the MBF_MNTLSTLOCK flag is specified, mountlist_mtx is released
 * on success but is not released on failure.
 *
 * vfs_busy() is a custom lock; it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, the lock enforced by vfs_busy() is ordered before
 * the lock of any vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	       |
 *	       +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *	VOP_LOOKUP() obtains B while A is held
 *	vfs_busy() obtains a shared lock on F while A and B are held
 *	vput() releases lock on B
 *	vput() releases lock on A
 *	VFS_ROOT() obtains lock on D while shared lock on F is held
 *	vfs_unbusy() releases shared lock on F
 *	vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *	    Attempting to lock A (instead of vp_crossmp) while D is held
 *	    would violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mp, ref, 1);
		vfs_mp_count_add_pcpu(mp, lockref, 1);
		vfs_op_thread_exit(mp);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If the thread doing the unmounting
	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
	 * indicating that this mount point has survived the unmount attempt
	 * and vfs_busy should retry.  Otherwise the unmounter thread will
	 * set the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating
	 * that the mount point is about to be really destroyed.  vfs_busy
	 * needs to release its reference on the mount point in this case and
	 * return with ENOENT, telling the caller that the mount it tried to
	 * busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
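 * This drops both the lock reference and the mount reference taken by
 * vfs_busy().  Releasing the final lock reference wakes up any thread
 * waiting for the busy count to drain (see the MNTK_DRAINING handling
 * below).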
796 */ 797 void 798 vfs_unbusy(struct mount *mp) 799 { 800 int c; 801 802 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 803 804 if (vfs_op_thread_enter(mp)) { 805 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 806 vfs_mp_count_sub_pcpu(mp, lockref, 1); 807 vfs_mp_count_sub_pcpu(mp, ref, 1); 808 vfs_op_thread_exit(mp); 809 return; 810 } 811 812 MNT_ILOCK(mp); 813 vfs_assert_mount_counters(mp); 814 MNT_REL(mp); 815 c = --mp->mnt_lockref; 816 if (mp->mnt_vfs_ops == 0) { 817 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 818 MNT_IUNLOCK(mp); 819 return; 820 } 821 if (c < 0) 822 vfs_dump_mount_counters(mp); 823 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 824 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 825 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 826 mp->mnt_kern_flag &= ~MNTK_DRAINING; 827 wakeup(&mp->mnt_lockref); 828 } 829 MNT_IUNLOCK(mp); 830 } 831 832 /* 833 * Lookup a mount point by filesystem identifier. 834 */ 835 struct mount * 836 vfs_getvfs(fsid_t *fsid) 837 { 838 struct mount *mp; 839 840 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 841 mtx_lock(&mountlist_mtx); 842 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 843 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 844 vfs_ref(mp); 845 mtx_unlock(&mountlist_mtx); 846 return (mp); 847 } 848 } 849 mtx_unlock(&mountlist_mtx); 850 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 851 return ((struct mount *) 0); 852 } 853 854 /* 855 * Lookup a mount point by filesystem identifier, busying it before 856 * returning. 857 * 858 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 859 * cache for popular filesystem identifiers. The cache is lockess, using 860 * the fact that struct mount's are never freed. In worst case we may 861 * get pointer to unmounted or even different filesystem, so we have to 862 * check what we got, and go slow way if so. 863 */ 864 struct mount * 865 vfs_busyfs(fsid_t *fsid) 866 { 867 #define FSID_CACHE_SIZE 256 868 typedef struct mount * volatile vmp_t; 869 static vmp_t cache[FSID_CACHE_SIZE]; 870 struct mount *mp; 871 int error; 872 uint32_t hash; 873 874 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 875 hash = fsid->val[0] ^ fsid->val[1]; 876 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 877 mp = cache[hash]; 878 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 879 goto slow; 880 if (vfs_busy(mp, 0) != 0) { 881 cache[hash] = NULL; 882 goto slow; 883 } 884 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 885 return (mp); 886 else 887 vfs_unbusy(mp); 888 889 slow: 890 mtx_lock(&mountlist_mtx); 891 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 892 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 893 error = vfs_busy(mp, MBF_MNTLSTLOCK); 894 if (error) { 895 cache[hash] = NULL; 896 mtx_unlock(&mountlist_mtx); 897 return (NULL); 898 } 899 cache[hash] = mp; 900 return (mp); 901 } 902 } 903 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 904 mtx_unlock(&mountlist_mtx); 905 return ((struct mount *) 0); 906 } 907 908 /* 909 * Check if a user can access privileged mount options. 910 */ 911 int 912 vfs_suser(struct mount *mp, struct thread *td) 913 { 914 int error; 915 916 if (jailed(td->td_ucred)) { 917 /* 918 * If the jail of the calling thread lacks permission for 919 * this type of file system, deny immediately. 920 */ 921 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 922 return (EPERM); 923 924 /* 925 * If the file system was mounted outside the jail of the 926 * calling thread, deny immediately. 
927 */ 928 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 929 return (EPERM); 930 } 931 932 /* 933 * If file system supports delegated administration, we don't check 934 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 935 * by the file system itself. 936 * If this is not the user that did original mount, we check for 937 * the PRIV_VFS_MOUNT_OWNER privilege. 938 */ 939 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 940 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 941 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 942 return (error); 943 } 944 return (0); 945 } 946 947 /* 948 * Get a new unique fsid. Try to make its val[0] unique, since this value 949 * will be used to create fake device numbers for stat(). Also try (but 950 * not so hard) make its val[0] unique mod 2^16, since some emulators only 951 * support 16-bit device numbers. We end up with unique val[0]'s for the 952 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 953 * 954 * Keep in mind that several mounts may be running in parallel. Starting 955 * the search one past where the previous search terminated is both a 956 * micro-optimization and a defense against returning the same fsid to 957 * different mounts. 958 */ 959 void 960 vfs_getnewfsid(struct mount *mp) 961 { 962 static uint16_t mntid_base; 963 struct mount *nmp; 964 fsid_t tfsid; 965 int mtype; 966 967 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 968 mtx_lock(&mntid_mtx); 969 mtype = mp->mnt_vfc->vfc_typenum; 970 tfsid.val[1] = mtype; 971 mtype = (mtype & 0xFF) << 24; 972 for (;;) { 973 tfsid.val[0] = makedev(255, 974 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 975 mntid_base++; 976 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 977 break; 978 vfs_rel(nmp); 979 } 980 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 981 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 982 mtx_unlock(&mntid_mtx); 983 } 984 985 /* 986 * Knob to control the precision of file timestamps: 987 * 988 * 0 = seconds only; nanoseconds zeroed. 989 * 1 = seconds and nanoseconds, accurate within 1/HZ. 990 * 2 = seconds and nanoseconds, truncated to microseconds. 991 * >=3 = seconds and nanoseconds, maximum precision. 992 */ 993 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 994 995 static int timestamp_precision = TSP_USEC; 996 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 997 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 998 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 999 "3+: sec + ns (max. precision))"); 1000 1001 /* 1002 * Get a current timestamp. 
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it, making the pick broken
 * - since ZFS has its own aging policy, it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
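 *
 * The scan starts at vnode_list_reclaim_marker and the marker is requeued
 * after each examined vnode, so the position in vnode_list is preserved
 * across drops of vnode_list_mtx.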
1091 */ 1092 static int 1093 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1094 { 1095 struct vnode *vp, *mvp; 1096 struct mount *mp; 1097 struct vm_object *object; 1098 u_long done; 1099 bool retried; 1100 1101 mtx_assert(&vnode_list_mtx, MA_OWNED); 1102 1103 retried = false; 1104 done = 0; 1105 1106 mvp = vnode_list_reclaim_marker; 1107 restart: 1108 vp = mvp; 1109 while (done < target) { 1110 vp = TAILQ_NEXT(vp, v_vnodelist); 1111 if (__predict_false(vp == NULL)) 1112 break; 1113 1114 if (__predict_false(vp->v_type == VMARKER)) 1115 continue; 1116 1117 /* 1118 * If it's been deconstructed already, it's still 1119 * referenced, or it exceeds the trigger, skip it. 1120 * Also skip free vnodes. We are trying to make space 1121 * to expand the free list, not reduce it. 1122 */ 1123 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1124 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1125 goto next_iter; 1126 1127 if (vp->v_type == VBAD || vp->v_type == VNON) 1128 goto next_iter; 1129 1130 object = atomic_load_ptr(&vp->v_object); 1131 if (object == NULL || object->resident_page_count > trigger) { 1132 goto next_iter; 1133 } 1134 1135 /* 1136 * Handle races against vnode allocation. Filesystems lock the 1137 * vnode some time after it gets returned from getnewvnode, 1138 * despite type and hold count being manipulated earlier. 1139 * Resorting to checking v_mount restores guarantees present 1140 * before the global list was reworked to contain all vnodes. 1141 */ 1142 if (!VI_TRYLOCK(vp)) 1143 goto next_iter; 1144 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1145 VI_UNLOCK(vp); 1146 goto next_iter; 1147 } 1148 if (vp->v_mount == NULL) { 1149 VI_UNLOCK(vp); 1150 goto next_iter; 1151 } 1152 vholdl(vp); 1153 VI_UNLOCK(vp); 1154 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1155 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1156 mtx_unlock(&vnode_list_mtx); 1157 1158 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1159 vdrop(vp); 1160 goto next_iter_unlocked; 1161 } 1162 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1163 vdrop(vp); 1164 vn_finished_write(mp); 1165 goto next_iter_unlocked; 1166 } 1167 1168 VI_LOCK(vp); 1169 if (vp->v_usecount > 0 || 1170 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1171 (vp->v_object != NULL && 1172 vp->v_object->resident_page_count > trigger)) { 1173 VOP_UNLOCK(vp); 1174 vdropl(vp); 1175 vn_finished_write(mp); 1176 goto next_iter_unlocked; 1177 } 1178 counter_u64_add(recycles_count, 1); 1179 vgonel(vp); 1180 VOP_UNLOCK(vp); 1181 vdropl(vp); 1182 vn_finished_write(mp); 1183 done++; 1184 next_iter_unlocked: 1185 if (should_yield()) 1186 kern_yield(PRI_USER); 1187 mtx_lock(&vnode_list_mtx); 1188 goto restart; 1189 next_iter: 1190 MPASS(vp->v_type != VMARKER); 1191 if (!should_yield()) 1192 continue; 1193 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1194 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1195 mtx_unlock(&vnode_list_mtx); 1196 kern_yield(PRI_USER); 1197 mtx_lock(&vnode_list_mtx); 1198 goto restart; 1199 } 1200 if (done == 0 && !retried) { 1201 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1202 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1203 retried = true; 1204 goto restart; 1205 } 1206 return (done); 1207 } 1208 1209 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1210 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1211 0, 1212 "limit on vnode free requests per call to the vnlru_free routine"); 1213 1214 /* 1215 * Attempt to 
reduce the free list by the requested amount. 1216 */ 1217 static int 1218 vnlru_free_locked(int count, struct vfsops *mnt_op) 1219 { 1220 struct vnode *vp, *mvp; 1221 struct mount *mp; 1222 int ocount; 1223 1224 mtx_assert(&vnode_list_mtx, MA_OWNED); 1225 if (count > max_vnlru_free) 1226 count = max_vnlru_free; 1227 ocount = count; 1228 mvp = vnode_list_free_marker; 1229 vp = mvp; 1230 for (;;) { 1231 if (count == 0) { 1232 break; 1233 } 1234 vp = TAILQ_NEXT(vp, v_vnodelist); 1235 if (__predict_false(vp == NULL)) { 1236 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1237 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1238 break; 1239 } 1240 if (__predict_false(vp->v_type == VMARKER)) 1241 continue; 1242 if (vp->v_holdcnt > 0) 1243 continue; 1244 /* 1245 * Don't recycle if our vnode is from different type 1246 * of mount point. Note that mp is type-safe, the 1247 * check does not reach unmapped address even if 1248 * vnode is reclaimed. 1249 */ 1250 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1251 mp->mnt_op != mnt_op) { 1252 continue; 1253 } 1254 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1255 continue; 1256 } 1257 if (!vhold_recycle_free(vp)) 1258 continue; 1259 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1260 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1261 mtx_unlock(&vnode_list_mtx); 1262 if (vtryrecycle(vp) == 0) 1263 count--; 1264 mtx_lock(&vnode_list_mtx); 1265 vp = mvp; 1266 } 1267 return (ocount - count); 1268 } 1269 1270 void 1271 vnlru_free(int count, struct vfsops *mnt_op) 1272 { 1273 1274 mtx_lock(&vnode_list_mtx); 1275 vnlru_free_locked(count, mnt_op); 1276 mtx_unlock(&vnode_list_mtx); 1277 } 1278 1279 static void 1280 vnlru_recalc(void) 1281 { 1282 1283 mtx_assert(&vnode_list_mtx, MA_OWNED); 1284 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1285 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1286 vlowat = vhiwat / 2; 1287 } 1288 1289 /* 1290 * Attempt to recycle vnodes in a context that is always safe to block. 1291 * Calling vlrurecycle() from the bowels of filesystem code has some 1292 * interesting deadlock problems. 1293 */ 1294 static struct proc *vnlruproc; 1295 static int vnlruproc_sig; 1296 1297 /* 1298 * The main freevnodes counter is only updated when threads requeue their vnode 1299 * batches. CPUs are conditionally walked to compute a more accurate total. 1300 * 1301 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1302 * at any given moment can still exceed slop, but it should not be by significant 1303 * margin in practice. 1304 */ 1305 #define VNLRU_FREEVNODES_SLOP 128 1306 1307 static __inline void 1308 vn_freevnodes_inc(void) 1309 { 1310 struct vdbatch *vd; 1311 1312 critical_enter(); 1313 vd = DPCPU_PTR(vd); 1314 vd->freevnodes++; 1315 critical_exit(); 1316 } 1317 1318 static __inline void 1319 vn_freevnodes_dec(void) 1320 { 1321 struct vdbatch *vd; 1322 1323 critical_enter(); 1324 vd = DPCPU_PTR(vd); 1325 vd->freevnodes--; 1326 critical_exit(); 1327 } 1328 1329 static u_long 1330 vnlru_read_freevnodes(void) 1331 { 1332 struct vdbatch *vd; 1333 long slop; 1334 int cpu; 1335 1336 mtx_assert(&vnode_list_mtx, MA_OWNED); 1337 if (freevnodes > freevnodes_old) 1338 slop = freevnodes - freevnodes_old; 1339 else 1340 slop = freevnodes_old - freevnodes; 1341 if (slop < VNLRU_FREEVNODES_SLOP) 1342 return (freevnodes >= 0 ? 
freevnodes : 0); 1343 freevnodes_old = freevnodes; 1344 CPU_FOREACH(cpu) { 1345 vd = DPCPU_ID_PTR((cpu), vd); 1346 freevnodes_old += vd->freevnodes; 1347 } 1348 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1349 } 1350 1351 static bool 1352 vnlru_under(u_long rnumvnodes, u_long limit) 1353 { 1354 u_long rfreevnodes, space; 1355 1356 if (__predict_false(rnumvnodes > desiredvnodes)) 1357 return (true); 1358 1359 space = desiredvnodes - rnumvnodes; 1360 if (space < limit) { 1361 rfreevnodes = vnlru_read_freevnodes(); 1362 if (rfreevnodes > wantfreevnodes) 1363 space += rfreevnodes - wantfreevnodes; 1364 } 1365 return (space < limit); 1366 } 1367 1368 static bool 1369 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1370 { 1371 long rfreevnodes, space; 1372 1373 if (__predict_false(rnumvnodes > desiredvnodes)) 1374 return (true); 1375 1376 space = desiredvnodes - rnumvnodes; 1377 if (space < limit) { 1378 rfreevnodes = atomic_load_long(&freevnodes); 1379 if (rfreevnodes > wantfreevnodes) 1380 space += rfreevnodes - wantfreevnodes; 1381 } 1382 return (space < limit); 1383 } 1384 1385 static void 1386 vnlru_kick(void) 1387 { 1388 1389 mtx_assert(&vnode_list_mtx, MA_OWNED); 1390 if (vnlruproc_sig == 0) { 1391 vnlruproc_sig = 1; 1392 wakeup(vnlruproc); 1393 } 1394 } 1395 1396 static void 1397 vnlru_proc(void) 1398 { 1399 u_long rnumvnodes, rfreevnodes, target; 1400 unsigned long onumvnodes; 1401 int done, force, trigger, usevnodes; 1402 bool reclaim_nc_src, want_reread; 1403 1404 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1405 SHUTDOWN_PRI_FIRST); 1406 1407 force = 0; 1408 want_reread = false; 1409 for (;;) { 1410 kproc_suspend_check(vnlruproc); 1411 mtx_lock(&vnode_list_mtx); 1412 rnumvnodes = atomic_load_long(&numvnodes); 1413 1414 if (want_reread) { 1415 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1416 want_reread = false; 1417 } 1418 1419 /* 1420 * If numvnodes is too large (due to desiredvnodes being 1421 * adjusted using its sysctl, or emergency growth), first 1422 * try to reduce it by discarding from the free list. 1423 */ 1424 if (rnumvnodes > desiredvnodes) { 1425 vnlru_free_locked(rnumvnodes - desiredvnodes, NULL); 1426 rnumvnodes = atomic_load_long(&numvnodes); 1427 } 1428 /* 1429 * Sleep if the vnode cache is in a good state. This is 1430 * when it is not over-full and has space for about a 4% 1431 * or 9% expansion (by growing its size or inexcessively 1432 * reducing its free list). Otherwise, try to reclaim 1433 * space for a 10% expansion. 1434 */ 1435 if (vstir && force == 0) { 1436 force = 1; 1437 vstir = 0; 1438 } 1439 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1440 vnlruproc_sig = 0; 1441 wakeup(&vnlruproc_sig); 1442 msleep(vnlruproc, &vnode_list_mtx, 1443 PVFS|PDROP, "vlruwt", hz); 1444 continue; 1445 } 1446 rfreevnodes = vnlru_read_freevnodes(); 1447 1448 onumvnodes = rnumvnodes; 1449 /* 1450 * Calculate parameters for recycling. These are the same 1451 * throughout the loop to give some semblance of fairness. 1452 * The trigger point is to avoid recycling vnodes with lots 1453 * of resident pages. We aren't trying to free memory; we 1454 * are trying to recycle or at least free vnodes. 1455 */ 1456 if (rnumvnodes <= desiredvnodes) 1457 usevnodes = rnumvnodes - rfreevnodes; 1458 else 1459 usevnodes = rnumvnodes; 1460 if (usevnodes <= 0) 1461 usevnodes = 1; 1462 /* 1463 * The trigger value is is chosen to give a conservatively 1464 * large value to ensure that it alone doesn't prevent 1465 * making progress. 
The value can easily be so large that 1466 * it is effectively infinite in some congested and 1467 * misconfigured cases, and this is necessary. Normally 1468 * it is about 8 to 100 (pages), which is quite large. 1469 */ 1470 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1471 if (force < 2) 1472 trigger = vsmalltrigger; 1473 reclaim_nc_src = force >= 3; 1474 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1475 target = target / 10 + 1; 1476 done = vlrureclaim(reclaim_nc_src, trigger, target); 1477 mtx_unlock(&vnode_list_mtx); 1478 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1479 uma_reclaim(UMA_RECLAIM_DRAIN); 1480 if (done == 0) { 1481 if (force == 0 || force == 1) { 1482 force = 2; 1483 continue; 1484 } 1485 if (force == 2) { 1486 force = 3; 1487 continue; 1488 } 1489 want_reread = true; 1490 force = 0; 1491 vnlru_nowhere++; 1492 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1493 } else { 1494 want_reread = true; 1495 kern_yield(PRI_USER); 1496 } 1497 } 1498 } 1499 1500 static struct kproc_desc vnlru_kp = { 1501 "vnlru", 1502 vnlru_proc, 1503 &vnlruproc 1504 }; 1505 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1506 &vnlru_kp); 1507 1508 /* 1509 * Routines having to do with the management of the vnode table. 1510 */ 1511 1512 /* 1513 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1514 * before we actually vgone(). This function must be called with the vnode 1515 * held to prevent the vnode from being returned to the free list midway 1516 * through vgone(). 1517 */ 1518 static int 1519 vtryrecycle(struct vnode *vp) 1520 { 1521 struct mount *vnmp; 1522 1523 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1524 VNASSERT(vp->v_holdcnt, vp, 1525 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1526 /* 1527 * This vnode may found and locked via some other list, if so we 1528 * can't recycle it yet. 1529 */ 1530 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1531 CTR2(KTR_VFS, 1532 "%s: impossible to recycle, vp %p lock is already held", 1533 __func__, vp); 1534 vdrop(vp); 1535 return (EWOULDBLOCK); 1536 } 1537 /* 1538 * Don't recycle if its filesystem is being suspended. 1539 */ 1540 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1541 VOP_UNLOCK(vp); 1542 CTR2(KTR_VFS, 1543 "%s: impossible to recycle, cannot start the write for %p", 1544 __func__, vp); 1545 vdrop(vp); 1546 return (EBUSY); 1547 } 1548 /* 1549 * If we got this far, we need to acquire the interlock and see if 1550 * anyone picked up this vnode from another list. If not, we will 1551 * mark it with DOOMED via vgonel() so that anyone who does find it 1552 * will skip over it. 1553 */ 1554 VI_LOCK(vp); 1555 if (vp->v_usecount) { 1556 VOP_UNLOCK(vp); 1557 vdropl(vp); 1558 vn_finished_write(vnmp); 1559 CTR2(KTR_VFS, 1560 "%s: impossible to recycle, %p is already referenced", 1561 __func__, vp); 1562 return (EBUSY); 1563 } 1564 if (!VN_IS_DOOMED(vp)) { 1565 counter_u64_add(recycles_free_count, 1); 1566 vgonel(vp); 1567 } 1568 VOP_UNLOCK(vp); 1569 vdropl(vp); 1570 vn_finished_write(vnmp); 1571 return (0); 1572 } 1573 1574 /* 1575 * Allocate a new vnode. 1576 * 1577 * The operation never returns an error. Returning an error was disabled 1578 * in r145385 (dated 2005) with the following comment: 1579 * 1580 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1581 * 1582 * Given the age of this commit (almost 15 years at the time of writing this 1583 * comment) restoring the ability to fail requires a significant audit of 1584 * all codepaths. 
1585 * 1586 * The routine can try to free a vnode or stall for up to 1 second waiting for 1587 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1588 */ 1589 static u_long vn_alloc_cyclecount; 1590 1591 static struct vnode * __noinline 1592 vn_alloc_hard(struct mount *mp) 1593 { 1594 u_long rnumvnodes, rfreevnodes; 1595 1596 mtx_lock(&vnode_list_mtx); 1597 rnumvnodes = atomic_load_long(&numvnodes); 1598 if (rnumvnodes + 1 < desiredvnodes) { 1599 vn_alloc_cyclecount = 0; 1600 goto alloc; 1601 } 1602 rfreevnodes = vnlru_read_freevnodes(); 1603 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1604 vn_alloc_cyclecount = 0; 1605 vstir = 1; 1606 } 1607 /* 1608 * Grow the vnode cache if it will not be above its target max 1609 * after growing. Otherwise, if the free list is nonempty, try 1610 * to reclaim 1 item from it before growing the cache (possibly 1611 * above its target max if the reclamation failed or is delayed). 1612 * Otherwise, wait for some space. In all cases, schedule 1613 * vnlru_proc() if we are getting short of space. The watermarks 1614 * should be chosen so that we never wait or even reclaim from 1615 * the free list to below its target minimum. 1616 */ 1617 if (vnlru_free_locked(1, NULL) > 0) 1618 goto alloc; 1619 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1620 /* 1621 * Wait for space for a new vnode. 1622 */ 1623 vnlru_kick(); 1624 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1625 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1626 vnlru_read_freevnodes() > 1) 1627 vnlru_free_locked(1, NULL); 1628 } 1629 alloc: 1630 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1631 if (vnlru_under(rnumvnodes, vlowat)) 1632 vnlru_kick(); 1633 mtx_unlock(&vnode_list_mtx); 1634 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1635 } 1636 1637 static struct vnode * 1638 vn_alloc(struct mount *mp) 1639 { 1640 u_long rnumvnodes; 1641 1642 if (__predict_false(vn_alloc_cyclecount != 0)) 1643 return (vn_alloc_hard(mp)); 1644 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1645 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1646 atomic_subtract_long(&numvnodes, 1); 1647 return (vn_alloc_hard(mp)); 1648 } 1649 1650 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1651 } 1652 1653 static void 1654 vn_free(struct vnode *vp) 1655 { 1656 1657 atomic_subtract_long(&numvnodes, 1); 1658 uma_zfree_smr(vnode_zone, vp); 1659 } 1660 1661 /* 1662 * Return the next vnode from the free list. 1663 */ 1664 int 1665 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1666 struct vnode **vpp) 1667 { 1668 struct vnode *vp; 1669 struct thread *td; 1670 struct lock_object *lo; 1671 1672 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1673 1674 KASSERT(vops->registered, 1675 ("%s: not registered vector op %p\n", __func__, vops)); 1676 1677 td = curthread; 1678 if (td->td_vp_reserved != NULL) { 1679 vp = td->td_vp_reserved; 1680 td->td_vp_reserved = NULL; 1681 } else { 1682 vp = vn_alloc(mp); 1683 } 1684 counter_u64_add(vnodes_created, 1); 1685 /* 1686 * Locks are given the generic name "vnode" when created. 1687 * Follow the historic practice of using the filesystem 1688 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1689 * 1690 * Locks live in a witness group keyed on their name. Thus, 1691 * when a lock is renamed, it must also move from the witness 1692 * group of its old name to the witness group of its new name. 
1693 * 1694 * The change only needs to be made when the vnode moves 1695 * from one filesystem type to another. We ensure that each 1696 * filesystem use a single static name pointer for its tag so 1697 * that we can compare pointers rather than doing a strcmp(). 1698 */ 1699 lo = &vp->v_vnlock->lock_object; 1700 #ifdef WITNESS 1701 if (lo->lo_name != tag) { 1702 #endif 1703 lo->lo_name = tag; 1704 #ifdef WITNESS 1705 WITNESS_DESTROY(lo); 1706 WITNESS_INIT(lo, tag); 1707 } 1708 #endif 1709 /* 1710 * By default, don't allow shared locks unless filesystems opt-in. 1711 */ 1712 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1713 /* 1714 * Finalize various vnode identity bits. 1715 */ 1716 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1717 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1718 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1719 vp->v_type = VNON; 1720 vp->v_op = vops; 1721 v_init_counters(vp); 1722 vp->v_bufobj.bo_ops = &buf_ops_bio; 1723 #ifdef DIAGNOSTIC 1724 if (mp == NULL && vops != &dead_vnodeops) 1725 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1726 #endif 1727 #ifdef MAC 1728 mac_vnode_init(vp); 1729 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1730 mac_vnode_associate_singlelabel(mp, vp); 1731 #endif 1732 if (mp != NULL) { 1733 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1734 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1735 vp->v_vflag |= VV_NOKNOTE; 1736 } 1737 1738 /* 1739 * For the filesystems which do not use vfs_hash_insert(), 1740 * still initialize v_hash to have vfs_hash_index() useful. 1741 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1742 * its own hashing. 1743 */ 1744 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1745 1746 *vpp = vp; 1747 return (0); 1748 } 1749 1750 void 1751 getnewvnode_reserve(void) 1752 { 1753 struct thread *td; 1754 1755 td = curthread; 1756 MPASS(td->td_vp_reserved == NULL); 1757 td->td_vp_reserved = vn_alloc(NULL); 1758 } 1759 1760 void 1761 getnewvnode_drop_reserve(void) 1762 { 1763 struct thread *td; 1764 1765 td = curthread; 1766 if (td->td_vp_reserved != NULL) { 1767 vn_free(td->td_vp_reserved); 1768 td->td_vp_reserved = NULL; 1769 } 1770 } 1771 1772 static void __noinline 1773 freevnode(struct vnode *vp) 1774 { 1775 struct bufobj *bo; 1776 1777 /* 1778 * The vnode has been marked for destruction, so free it. 1779 * 1780 * The vnode will be returned to the zone where it will 1781 * normally remain until it is needed for another vnode. We 1782 * need to cleanup (or verify that the cleanup has already 1783 * been done) any residual data left from its current use 1784 * so as not to contaminate the freshly allocated vnode. 1785 */ 1786 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1787 /* 1788 * Paired with vgone. 
1789 */ 1790 vn_seqc_write_end_locked(vp); 1791 VNPASS(vp->v_seqc_users == 0, vp); 1792 1793 bo = &vp->v_bufobj; 1794 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1795 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1796 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1797 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1798 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1799 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1800 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1801 ("clean blk trie not empty")); 1802 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1803 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1804 ("dirty blk trie not empty")); 1805 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1806 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1807 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1808 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1809 ("Dangling rangelock waiters")); 1810 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1811 ("Leaked inactivation")); 1812 VI_UNLOCK(vp); 1813 #ifdef MAC 1814 mac_vnode_destroy(vp); 1815 #endif 1816 if (vp->v_pollinfo != NULL) { 1817 destroy_vpollinfo(vp->v_pollinfo); 1818 vp->v_pollinfo = NULL; 1819 } 1820 #ifdef INVARIANTS 1821 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 1822 vp->v_op = NULL; 1823 #endif 1824 vp->v_mountedhere = NULL; 1825 vp->v_unpcb = NULL; 1826 vp->v_rdev = NULL; 1827 vp->v_fifoinfo = NULL; 1828 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 1829 vp->v_irflag = 0; 1830 vp->v_iflag = 0; 1831 vp->v_vflag = 0; 1832 bo->bo_flag = 0; 1833 vn_free(vp); 1834 } 1835 1836 /* 1837 * Delete from old mount point vnode list, if on one. 1838 */ 1839 static void 1840 delmntque(struct vnode *vp) 1841 { 1842 struct mount *mp; 1843 1844 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1845 1846 mp = vp->v_mount; 1847 if (mp == NULL) 1848 return; 1849 MNT_ILOCK(mp); 1850 VI_LOCK(vp); 1851 vp->v_mount = NULL; 1852 VI_UNLOCK(vp); 1853 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1854 ("bad mount point vnode list size")); 1855 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1856 mp->mnt_nvnodelistsize--; 1857 MNT_REL(mp); 1858 MNT_IUNLOCK(mp); 1859 } 1860 1861 static void 1862 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1863 { 1864 1865 vp->v_data = NULL; 1866 vp->v_op = &dead_vnodeops; 1867 vgone(vp); 1868 vput(vp); 1869 } 1870 1871 /* 1872 * Insert into list of vnodes for the new mount point, if available. 1873 */ 1874 int 1875 insmntque1(struct vnode *vp, struct mount *mp, 1876 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1877 { 1878 1879 KASSERT(vp->v_mount == NULL, 1880 ("insmntque: vnode already on per mount vnode list")); 1881 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1882 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1883 1884 /* 1885 * We acquire the vnode interlock early to ensure that the 1886 * vnode cannot be recycled by another process releasing a 1887 * holdcnt on it before we get it on both the vnode list 1888 * and the active vnode list. The mount mutex protects only 1889 * manipulation of the vnode list and the vnode freelist 1890 * mutex protects only manipulation of the active vnode list. 1891 * Hence the need to hold the vnode interlock throughout. 
1892 */ 1893 MNT_ILOCK(mp); 1894 VI_LOCK(vp); 1895 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1896 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1897 mp->mnt_nvnodelistsize == 0)) && 1898 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1899 VI_UNLOCK(vp); 1900 MNT_IUNLOCK(mp); 1901 if (dtr != NULL) 1902 dtr(vp, dtr_arg); 1903 return (EBUSY); 1904 } 1905 vp->v_mount = mp; 1906 MNT_REF(mp); 1907 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1908 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1909 ("neg mount point vnode list size")); 1910 mp->mnt_nvnodelistsize++; 1911 VI_UNLOCK(vp); 1912 MNT_IUNLOCK(mp); 1913 return (0); 1914 } 1915 1916 int 1917 insmntque(struct vnode *vp, struct mount *mp) 1918 { 1919 1920 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1921 } 1922 1923 /* 1924 * Flush out and invalidate all buffers associated with a bufobj 1925 * Called with the underlying object locked. 1926 */ 1927 int 1928 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1929 { 1930 int error; 1931 1932 BO_LOCK(bo); 1933 if (flags & V_SAVE) { 1934 error = bufobj_wwait(bo, slpflag, slptimeo); 1935 if (error) { 1936 BO_UNLOCK(bo); 1937 return (error); 1938 } 1939 if (bo->bo_dirty.bv_cnt > 0) { 1940 BO_UNLOCK(bo); 1941 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1942 return (error); 1943 /* 1944 * XXX We could save a lock/unlock if this was only 1945 * enabled under INVARIANTS 1946 */ 1947 BO_LOCK(bo); 1948 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1949 panic("vinvalbuf: dirty bufs"); 1950 } 1951 } 1952 /* 1953 * If you alter this loop please notice that interlock is dropped and 1954 * reacquired in flushbuflist. Special care is needed to ensure that 1955 * no race conditions occur from this. 1956 */ 1957 do { 1958 error = flushbuflist(&bo->bo_clean, 1959 flags, bo, slpflag, slptimeo); 1960 if (error == 0 && !(flags & V_CLEANONLY)) 1961 error = flushbuflist(&bo->bo_dirty, 1962 flags, bo, slpflag, slptimeo); 1963 if (error != 0 && error != EAGAIN) { 1964 BO_UNLOCK(bo); 1965 return (error); 1966 } 1967 } while (error != 0); 1968 1969 /* 1970 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1971 * have write I/O in-progress but if there is a VM object then the 1972 * VM object can also have read-I/O in-progress. 1973 */ 1974 do { 1975 bufobj_wwait(bo, 0, 0); 1976 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1977 BO_UNLOCK(bo); 1978 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1979 BO_LOCK(bo); 1980 } 1981 } while (bo->bo_numoutput > 0); 1982 BO_UNLOCK(bo); 1983 1984 /* 1985 * Destroy the copy in the VM cache, too. 1986 */ 1987 if (bo->bo_object != NULL && 1988 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1989 VM_OBJECT_WLOCK(bo->bo_object); 1990 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1991 OBJPR_CLEANONLY : 0); 1992 VM_OBJECT_WUNLOCK(bo->bo_object); 1993 } 1994 1995 #ifdef INVARIANTS 1996 BO_LOCK(bo); 1997 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1998 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1999 bo->bo_clean.bv_cnt > 0)) 2000 panic("vinvalbuf: flush failed"); 2001 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2002 bo->bo_dirty.bv_cnt > 0) 2003 panic("vinvalbuf: flush dirty failed"); 2004 BO_UNLOCK(bo); 2005 #endif 2006 return (0); 2007 } 2008 2009 /* 2010 * Flush out and invalidate all buffers associated with a vnode. 2011 * Called with the underlying object locked. 
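 *
 * Reclamation code typically tries to preserve dirty data first and only
 * then throws the buffers away, roughly (sketch mirroring vgonel() below):
 *
 *	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
 *		while (vinvalbuf(vp, 0, 0, 0) != 0)
 *			;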
2012 */ 2013 int 2014 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2015 { 2016 2017 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2018 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2019 if (vp->v_object != NULL && vp->v_object->handle != vp) 2020 return (0); 2021 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2022 } 2023 2024 /* 2025 * Flush out buffers on the specified list. 2026 * 2027 */ 2028 static int 2029 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2030 int slptimeo) 2031 { 2032 struct buf *bp, *nbp; 2033 int retval, error; 2034 daddr_t lblkno; 2035 b_xflags_t xflags; 2036 2037 ASSERT_BO_WLOCKED(bo); 2038 2039 retval = 0; 2040 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2041 /* 2042 * If we are flushing both V_NORMAL and V_ALT buffers then 2043 * do not skip any buffers. If we are flushing only V_NORMAL 2044 * buffers then skip buffers marked as BX_ALTDATA. If we are 2045 * flushing only V_ALT buffers then skip buffers not marked 2046 * as BX_ALTDATA. 2047 */ 2048 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2049 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2050 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2051 continue; 2052 } 2053 if (nbp != NULL) { 2054 lblkno = nbp->b_lblkno; 2055 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2056 } 2057 retval = EAGAIN; 2058 error = BUF_TIMELOCK(bp, 2059 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2060 "flushbuf", slpflag, slptimeo); 2061 if (error) { 2062 BO_LOCK(bo); 2063 return (error != ENOLCK ? error : EAGAIN); 2064 } 2065 KASSERT(bp->b_bufobj == bo, 2066 ("bp %p wrong b_bufobj %p should be %p", 2067 bp, bp->b_bufobj, bo)); 2068 /* 2069 * XXX Since there are no node locks for NFS, I 2070 * believe there is a slight chance that a delayed 2071 * write will occur while sleeping just above, so 2072 * check for it. 2073 */ 2074 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2075 (flags & V_SAVE)) { 2076 bremfree(bp); 2077 bp->b_flags |= B_ASYNC; 2078 bwrite(bp); 2079 BO_LOCK(bo); 2080 return (EAGAIN); /* XXX: why not loop ? */ 2081 } 2082 bremfree(bp); 2083 bp->b_flags |= (B_INVAL | B_RELBUF); 2084 bp->b_flags &= ~B_ASYNC; 2085 brelse(bp); 2086 BO_LOCK(bo); 2087 if (nbp == NULL) 2088 break; 2089 nbp = gbincore(bo, lblkno); 2090 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2091 != xflags) 2092 break; /* nbp invalid */ 2093 } 2094 return (retval); 2095 } 2096 2097 int 2098 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2099 { 2100 struct buf *bp; 2101 int error; 2102 daddr_t lblkno; 2103 2104 ASSERT_BO_LOCKED(bo); 2105 2106 for (lblkno = startn;;) { 2107 again: 2108 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2109 if (bp == NULL || bp->b_lblkno >= endn || 2110 bp->b_lblkno < startn) 2111 break; 2112 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2113 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2114 if (error != 0) { 2115 BO_RLOCK(bo); 2116 if (error == ENOLCK) 2117 goto again; 2118 return (error); 2119 } 2120 KASSERT(bp->b_bufobj == bo, 2121 ("bp %p wrong b_bufobj %p should be %p", 2122 bp, bp->b_bufobj, bo)); 2123 lblkno = bp->b_lblkno + 1; 2124 if ((bp->b_flags & B_MANAGED) == 0) 2125 bremfree(bp); 2126 bp->b_flags |= B_RELBUF; 2127 /* 2128 * In the VMIO case, use the B_NOREUSE flag to hint that the 2129 * pages backing each buffer in the range are unlikely to be 2130 * reused. 
Dirty buffers will have the hint applied once 2131 * they've been written. 2132 */ 2133 if ((bp->b_flags & B_VMIO) != 0) 2134 bp->b_flags |= B_NOREUSE; 2135 brelse(bp); 2136 BO_RLOCK(bo); 2137 } 2138 return (0); 2139 } 2140 2141 /* 2142 * Truncate a file's buffer and pages to a specified length. This 2143 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2144 * sync activity. 2145 */ 2146 int 2147 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2148 { 2149 struct buf *bp, *nbp; 2150 struct bufobj *bo; 2151 daddr_t startlbn; 2152 2153 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2154 vp, blksize, (uintmax_t)length); 2155 2156 /* 2157 * Round up to the *next* lbn. 2158 */ 2159 startlbn = howmany(length, blksize); 2160 2161 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2162 2163 bo = &vp->v_bufobj; 2164 restart_unlocked: 2165 BO_LOCK(bo); 2166 2167 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2168 ; 2169 2170 if (length > 0) { 2171 restartsync: 2172 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2173 if (bp->b_lblkno > 0) 2174 continue; 2175 /* 2176 * Since we hold the vnode lock this should only 2177 * fail if we're racing with the buf daemon. 2178 */ 2179 if (BUF_LOCK(bp, 2180 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2181 BO_LOCKPTR(bo)) == ENOLCK) 2182 goto restart_unlocked; 2183 2184 VNASSERT((bp->b_flags & B_DELWRI), vp, 2185 ("buf(%p) on dirty queue without DELWRI", bp)); 2186 2187 bremfree(bp); 2188 bawrite(bp); 2189 BO_LOCK(bo); 2190 goto restartsync; 2191 } 2192 } 2193 2194 bufobj_wwait(bo, 0, 0); 2195 BO_UNLOCK(bo); 2196 vnode_pager_setsize(vp, length); 2197 2198 return (0); 2199 } 2200 2201 /* 2202 * Invalidate the cached pages of a file's buffer within the range of block 2203 * numbers [startlbn, endlbn). 
2204 */ 2205 void 2206 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2207 int blksize) 2208 { 2209 struct bufobj *bo; 2210 off_t start, end; 2211 2212 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2213 2214 start = blksize * startlbn; 2215 end = blksize * endlbn; 2216 2217 bo = &vp->v_bufobj; 2218 BO_LOCK(bo); 2219 MPASS(blksize == bo->bo_bsize); 2220 2221 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2222 ; 2223 2224 BO_UNLOCK(bo); 2225 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2226 } 2227 2228 static int 2229 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2230 daddr_t startlbn, daddr_t endlbn) 2231 { 2232 struct buf *bp, *nbp; 2233 bool anyfreed; 2234 2235 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2236 ASSERT_BO_LOCKED(bo); 2237 2238 do { 2239 anyfreed = false; 2240 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2241 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2242 continue; 2243 if (BUF_LOCK(bp, 2244 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2245 BO_LOCKPTR(bo)) == ENOLCK) { 2246 BO_LOCK(bo); 2247 return (EAGAIN); 2248 } 2249 2250 bremfree(bp); 2251 bp->b_flags |= B_INVAL | B_RELBUF; 2252 bp->b_flags &= ~B_ASYNC; 2253 brelse(bp); 2254 anyfreed = true; 2255 2256 BO_LOCK(bo); 2257 if (nbp != NULL && 2258 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2259 nbp->b_vp != vp || 2260 (nbp->b_flags & B_DELWRI) != 0)) 2261 return (EAGAIN); 2262 } 2263 2264 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2265 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2266 continue; 2267 if (BUF_LOCK(bp, 2268 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2269 BO_LOCKPTR(bo)) == ENOLCK) { 2270 BO_LOCK(bo); 2271 return (EAGAIN); 2272 } 2273 bremfree(bp); 2274 bp->b_flags |= B_INVAL | B_RELBUF; 2275 bp->b_flags &= ~B_ASYNC; 2276 brelse(bp); 2277 anyfreed = true; 2278 2279 BO_LOCK(bo); 2280 if (nbp != NULL && 2281 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2282 (nbp->b_vp != vp) || 2283 (nbp->b_flags & B_DELWRI) == 0)) 2284 return (EAGAIN); 2285 } 2286 } while (anyfreed); 2287 return (0); 2288 } 2289 2290 static void 2291 buf_vlist_remove(struct buf *bp) 2292 { 2293 struct bufv *bv; 2294 b_xflags_t flags; 2295 2296 flags = bp->b_xflags; 2297 2298 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2299 ASSERT_BO_WLOCKED(bp->b_bufobj); 2300 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2301 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2302 ("%s: buffer %p has invalid queue state", __func__, bp)); 2303 2304 if ((flags & BX_VNDIRTY) != 0) 2305 bv = &bp->b_bufobj->bo_dirty; 2306 else 2307 bv = &bp->b_bufobj->bo_clean; 2308 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2309 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2310 bv->bv_cnt--; 2311 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2312 } 2313 2314 /* 2315 * Add the buffer to the sorted clean or dirty block list. 2316 * 2317 * NOTE: xflags is passed as a constant, optimizing this inline function! 
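 *
 * Within this file the constant is BX_VNCLEAN when called from bgetvp()
 * and either BX_VNDIRTY or BX_VNCLEAN when called from reassignbuf(),
 * depending on whether the buffer carries B_DELWRI.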
2318 */ 2319 static void 2320 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2321 { 2322 struct bufv *bv; 2323 struct buf *n; 2324 int error; 2325 2326 ASSERT_BO_WLOCKED(bo); 2327 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2328 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2329 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2330 ("dead bo %p", bo)); 2331 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2332 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2333 bp->b_xflags |= xflags; 2334 if (xflags & BX_VNDIRTY) 2335 bv = &bo->bo_dirty; 2336 else 2337 bv = &bo->bo_clean; 2338 2339 /* 2340 * Keep the list ordered. Optimize empty list insertion. Assume 2341 * we tend to grow at the tail so lookup_le should usually be cheaper 2342 * than _ge. 2343 */ 2344 if (bv->bv_cnt == 0 || 2345 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2346 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2347 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2348 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2349 else 2350 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2351 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2352 if (error) 2353 panic("buf_vlist_add: Preallocated nodes insufficient."); 2354 bv->bv_cnt++; 2355 } 2356 2357 /* 2358 * Look up a buffer using the buffer tries. 2359 */ 2360 struct buf * 2361 gbincore(struct bufobj *bo, daddr_t lblkno) 2362 { 2363 struct buf *bp; 2364 2365 ASSERT_BO_LOCKED(bo); 2366 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2367 if (bp != NULL) 2368 return (bp); 2369 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2370 } 2371 2372 /* 2373 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2374 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2375 * stability of the result. Like other lockless lookups, the found buf may 2376 * already be invalid by the time this function returns. 2377 */ 2378 struct buf * 2379 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2380 { 2381 struct buf *bp; 2382 2383 ASSERT_BO_UNLOCKED(bo); 2384 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2385 if (bp != NULL) 2386 return (bp); 2387 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2388 } 2389 2390 /* 2391 * Associate a buffer with a vnode. 2392 */ 2393 void 2394 bgetvp(struct vnode *vp, struct buf *bp) 2395 { 2396 struct bufobj *bo; 2397 2398 bo = &vp->v_bufobj; 2399 ASSERT_BO_WLOCKED(bo); 2400 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2401 2402 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2403 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2404 ("bgetvp: bp already attached! %p", bp)); 2405 2406 vhold(vp); 2407 bp->b_vp = vp; 2408 bp->b_bufobj = bo; 2409 /* 2410 * Insert onto list for new vnode. 2411 */ 2412 buf_vlist_add(bp, bo, BX_VNCLEAN); 2413 } 2414 2415 /* 2416 * Disassociate a buffer from a vnode. 2417 */ 2418 void 2419 brelvp(struct buf *bp) 2420 { 2421 struct bufobj *bo; 2422 struct vnode *vp; 2423 2424 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2425 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2426 2427 /* 2428 * Delete from old vnode list, if on one. 
2429 */ 2430 vp = bp->b_vp; /* XXX */ 2431 bo = bp->b_bufobj; 2432 BO_LOCK(bo); 2433 buf_vlist_remove(bp); 2434 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2435 bo->bo_flag &= ~BO_ONWORKLST; 2436 mtx_lock(&sync_mtx); 2437 LIST_REMOVE(bo, bo_synclist); 2438 syncer_worklist_len--; 2439 mtx_unlock(&sync_mtx); 2440 } 2441 bp->b_vp = NULL; 2442 bp->b_bufobj = NULL; 2443 BO_UNLOCK(bo); 2444 vdrop(vp); 2445 } 2446 2447 /* 2448 * Add an item to the syncer work queue. 2449 */ 2450 static void 2451 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2452 { 2453 int slot; 2454 2455 ASSERT_BO_WLOCKED(bo); 2456 2457 mtx_lock(&sync_mtx); 2458 if (bo->bo_flag & BO_ONWORKLST) 2459 LIST_REMOVE(bo, bo_synclist); 2460 else { 2461 bo->bo_flag |= BO_ONWORKLST; 2462 syncer_worklist_len++; 2463 } 2464 2465 if (delay > syncer_maxdelay - 2) 2466 delay = syncer_maxdelay - 2; 2467 slot = (syncer_delayno + delay) & syncer_mask; 2468 2469 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2470 mtx_unlock(&sync_mtx); 2471 } 2472 2473 static int 2474 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2475 { 2476 int error, len; 2477 2478 mtx_lock(&sync_mtx); 2479 len = syncer_worklist_len - sync_vnode_count; 2480 mtx_unlock(&sync_mtx); 2481 error = SYSCTL_OUT(req, &len, sizeof(len)); 2482 return (error); 2483 } 2484 2485 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2486 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2487 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2488 2489 static struct proc *updateproc; 2490 static void sched_sync(void); 2491 static struct kproc_desc up_kp = { 2492 "syncer", 2493 sched_sync, 2494 &updateproc 2495 }; 2496 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2497 2498 static int 2499 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2500 { 2501 struct vnode *vp; 2502 struct mount *mp; 2503 2504 *bo = LIST_FIRST(slp); 2505 if (*bo == NULL) 2506 return (0); 2507 vp = bo2vnode(*bo); 2508 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2509 return (1); 2510 /* 2511 * We use vhold in case the vnode does not 2512 * successfully sync. vhold prevents the vnode from 2513 * going away when we unlock the sync_mtx so that 2514 * we can acquire the vnode interlock. 2515 */ 2516 vholdl(vp); 2517 mtx_unlock(&sync_mtx); 2518 VI_UNLOCK(vp); 2519 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2520 vdrop(vp); 2521 mtx_lock(&sync_mtx); 2522 return (*bo == LIST_FIRST(slp)); 2523 } 2524 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2525 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2526 VOP_UNLOCK(vp); 2527 vn_finished_write(mp); 2528 BO_LOCK(*bo); 2529 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2530 /* 2531 * Put us back on the worklist. The worklist 2532 * routine will remove us from our current 2533 * position and then add us back in at a later 2534 * position. 2535 */ 2536 vn_syncer_add_to_worklist(*bo, syncdelay); 2537 } 2538 BO_UNLOCK(*bo); 2539 vdrop(vp); 2540 mtx_lock(&sync_mtx); 2541 return (0); 2542 } 2543 2544 static int first_printf = 1; 2545 2546 /* 2547 * System filesystem synchronizer daemon. 
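 *
 * Once a second the daemon advances syncer_delayno and flushes every
 * bufobj found in that bucket of syncer_workitem_pending[]; dirty
 * bufobjs are placed "delay" seconds into the future by
 * vn_syncer_add_to_worklist() above.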
2548 */ 2549 static void 2550 sched_sync(void) 2551 { 2552 struct synclist *next, *slp; 2553 struct bufobj *bo; 2554 long starttime; 2555 struct thread *td = curthread; 2556 int last_work_seen; 2557 int net_worklist_len; 2558 int syncer_final_iter; 2559 int error; 2560 2561 last_work_seen = 0; 2562 syncer_final_iter = 0; 2563 syncer_state = SYNCER_RUNNING; 2564 starttime = time_uptime; 2565 td->td_pflags |= TDP_NORUNNINGBUF; 2566 2567 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2568 SHUTDOWN_PRI_LAST); 2569 2570 mtx_lock(&sync_mtx); 2571 for (;;) { 2572 if (syncer_state == SYNCER_FINAL_DELAY && 2573 syncer_final_iter == 0) { 2574 mtx_unlock(&sync_mtx); 2575 kproc_suspend_check(td->td_proc); 2576 mtx_lock(&sync_mtx); 2577 } 2578 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2579 if (syncer_state != SYNCER_RUNNING && 2580 starttime != time_uptime) { 2581 if (first_printf) { 2582 printf("\nSyncing disks, vnodes remaining... "); 2583 first_printf = 0; 2584 } 2585 printf("%d ", net_worklist_len); 2586 } 2587 starttime = time_uptime; 2588 2589 /* 2590 * Push files whose dirty time has expired. Be careful 2591 * of interrupt race on slp queue. 2592 * 2593 * Skip over empty worklist slots when shutting down. 2594 */ 2595 do { 2596 slp = &syncer_workitem_pending[syncer_delayno]; 2597 syncer_delayno += 1; 2598 if (syncer_delayno == syncer_maxdelay) 2599 syncer_delayno = 0; 2600 next = &syncer_workitem_pending[syncer_delayno]; 2601 /* 2602 * If the worklist has wrapped since the 2603 * it was emptied of all but syncer vnodes, 2604 * switch to the FINAL_DELAY state and run 2605 * for one more second. 2606 */ 2607 if (syncer_state == SYNCER_SHUTTING_DOWN && 2608 net_worklist_len == 0 && 2609 last_work_seen == syncer_delayno) { 2610 syncer_state = SYNCER_FINAL_DELAY; 2611 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2612 } 2613 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2614 syncer_worklist_len > 0); 2615 2616 /* 2617 * Keep track of the last time there was anything 2618 * on the worklist other than syncer vnodes. 2619 * Return to the SHUTTING_DOWN state if any 2620 * new work appears. 2621 */ 2622 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2623 last_work_seen = syncer_delayno; 2624 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2625 syncer_state = SYNCER_SHUTTING_DOWN; 2626 while (!LIST_EMPTY(slp)) { 2627 error = sync_vnode(slp, &bo, td); 2628 if (error == 1) { 2629 LIST_REMOVE(bo, bo_synclist); 2630 LIST_INSERT_HEAD(next, bo, bo_synclist); 2631 continue; 2632 } 2633 2634 if (first_printf == 0) { 2635 /* 2636 * Drop the sync mutex, because some watchdog 2637 * drivers need to sleep while patting 2638 */ 2639 mtx_unlock(&sync_mtx); 2640 wdog_kern_pat(WD_LASTVAL); 2641 mtx_lock(&sync_mtx); 2642 } 2643 } 2644 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2645 syncer_final_iter--; 2646 /* 2647 * The variable rushjob allows the kernel to speed up the 2648 * processing of the filesystem syncer process. A rushjob 2649 * value of N tells the filesystem syncer to process the next 2650 * N seconds worth of work on its queue ASAP. Currently rushjob 2651 * is used by the soft update code to speed up the filesystem 2652 * syncer process when the incore state is getting so far 2653 * ahead of the disk that the kernel memory pool is being 2654 * threatened with exhaustion. 
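		 *
		 * Each unit of rushjob consumed below skips one sleep, so a
		 * value of N lets the syncer run through N extra buckets
		 * back to back before pacing itself again.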
2655 */ 2656 if (rushjob > 0) { 2657 rushjob -= 1; 2658 continue; 2659 } 2660 /* 2661 * Just sleep for a short period of time between 2662 * iterations when shutting down to allow some I/O 2663 * to happen. 2664 * 2665 * If it has taken us less than a second to process the 2666 * current work, then wait. Otherwise start right over 2667 * again. We can still lose time if any single round 2668 * takes more than two seconds, but it does not really 2669 * matter as we are just trying to generally pace the 2670 * filesystem activity. 2671 */ 2672 if (syncer_state != SYNCER_RUNNING || 2673 time_uptime == starttime) { 2674 thread_lock(td); 2675 sched_prio(td, PPAUSE); 2676 thread_unlock(td); 2677 } 2678 if (syncer_state != SYNCER_RUNNING) 2679 cv_timedwait(&sync_wakeup, &sync_mtx, 2680 hz / SYNCER_SHUTDOWN_SPEEDUP); 2681 else if (time_uptime == starttime) 2682 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2683 } 2684 } 2685 2686 /* 2687 * Request the syncer daemon to speed up its work. 2688 * We never push it to speed up more than half of its 2689 * normal turn time, otherwise it could take over the cpu. 2690 */ 2691 int 2692 speedup_syncer(void) 2693 { 2694 int ret = 0; 2695 2696 mtx_lock(&sync_mtx); 2697 if (rushjob < syncdelay / 2) { 2698 rushjob += 1; 2699 stat_rush_requests += 1; 2700 ret = 1; 2701 } 2702 mtx_unlock(&sync_mtx); 2703 cv_broadcast(&sync_wakeup); 2704 return (ret); 2705 } 2706 2707 /* 2708 * Tell the syncer to speed up its work and run though its work 2709 * list several times, then tell it to shut down. 2710 */ 2711 static void 2712 syncer_shutdown(void *arg, int howto) 2713 { 2714 2715 if (howto & RB_NOSYNC) 2716 return; 2717 mtx_lock(&sync_mtx); 2718 syncer_state = SYNCER_SHUTTING_DOWN; 2719 rushjob = 0; 2720 mtx_unlock(&sync_mtx); 2721 cv_broadcast(&sync_wakeup); 2722 kproc_shutdown(arg, howto); 2723 } 2724 2725 void 2726 syncer_suspend(void) 2727 { 2728 2729 syncer_shutdown(updateproc, 0); 2730 } 2731 2732 void 2733 syncer_resume(void) 2734 { 2735 2736 mtx_lock(&sync_mtx); 2737 first_printf = 1; 2738 syncer_state = SYNCER_RUNNING; 2739 mtx_unlock(&sync_mtx); 2740 cv_broadcast(&sync_wakeup); 2741 kproc_resume(updateproc); 2742 } 2743 2744 /* 2745 * Move the buffer between the clean and dirty lists of its vnode. 2746 */ 2747 void 2748 reassignbuf(struct buf *bp) 2749 { 2750 struct vnode *vp; 2751 struct bufobj *bo; 2752 int delay; 2753 #ifdef INVARIANTS 2754 struct bufv *bv; 2755 #endif 2756 2757 vp = bp->b_vp; 2758 bo = bp->b_bufobj; 2759 2760 KASSERT((bp->b_flags & B_PAGING) == 0, 2761 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2762 2763 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2764 bp, bp->b_vp, bp->b_flags); 2765 2766 BO_LOCK(bo); 2767 buf_vlist_remove(bp); 2768 2769 /* 2770 * If dirty, put on list of dirty buffers; otherwise insert onto list 2771 * of clean buffers. 
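	 *
	 * This is normally driven by the delayed-write machinery in
	 * vfs_bio.c: bdirty() sets B_DELWRI and calls here so the buffer
	 * moves to the dirty list, and bundirty() clears the flag and calls
	 * here to move it back.  Roughly (sketch of the bdirty() side):
	 *
	 *	bp->b_flags |= B_DELWRI;
	 *	reassignbuf(bp);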
2772 */ 2773 if (bp->b_flags & B_DELWRI) { 2774 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2775 switch (vp->v_type) { 2776 case VDIR: 2777 delay = dirdelay; 2778 break; 2779 case VCHR: 2780 delay = metadelay; 2781 break; 2782 default: 2783 delay = filedelay; 2784 } 2785 vn_syncer_add_to_worklist(bo, delay); 2786 } 2787 buf_vlist_add(bp, bo, BX_VNDIRTY); 2788 } else { 2789 buf_vlist_add(bp, bo, BX_VNCLEAN); 2790 2791 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2792 mtx_lock(&sync_mtx); 2793 LIST_REMOVE(bo, bo_synclist); 2794 syncer_worklist_len--; 2795 mtx_unlock(&sync_mtx); 2796 bo->bo_flag &= ~BO_ONWORKLST; 2797 } 2798 } 2799 #ifdef INVARIANTS 2800 bv = &bo->bo_clean; 2801 bp = TAILQ_FIRST(&bv->bv_hd); 2802 KASSERT(bp == NULL || bp->b_bufobj == bo, 2803 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2804 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2805 KASSERT(bp == NULL || bp->b_bufobj == bo, 2806 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2807 bv = &bo->bo_dirty; 2808 bp = TAILQ_FIRST(&bv->bv_hd); 2809 KASSERT(bp == NULL || bp->b_bufobj == bo, 2810 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2811 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2812 KASSERT(bp == NULL || bp->b_bufobj == bo, 2813 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2814 #endif 2815 BO_UNLOCK(bo); 2816 } 2817 2818 static void 2819 v_init_counters(struct vnode *vp) 2820 { 2821 2822 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2823 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2824 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2825 2826 refcount_init(&vp->v_holdcnt, 1); 2827 refcount_init(&vp->v_usecount, 1); 2828 } 2829 2830 /* 2831 * Grab a particular vnode from the free list, increment its 2832 * reference count and lock it. VIRF_DOOMED is set if the vnode 2833 * is being destroyed. Only callers who specify LK_RETRY will 2834 * see doomed vnodes. If inactive processing was delayed in 2835 * vput try to do it here. 2836 * 2837 * usecount is manipulated using atomics without holding any locks. 2838 * 2839 * holdcnt can be manipulated using atomics without holding any locks, 2840 * except when transitioning 1<->0, in which case the interlock is held. 2841 * 2842 * Consumers which don't guarantee liveness of the vnode can use SMR to 2843 * try to get a reference. Note this operation can fail since the vnode 2844 * may be awaiting getting freed by the time they get to it. 
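 *
 * A lockless consumer is expected to do something along these lines
 * (illustrative sketch; lookup() stands for whatever lockless lookup
 * produced vp and the retry policy is the caller's):
 *
 *	vfs_smr_enter();
 *	vp = lookup();
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		goto retry;
 *	error = vget_finish(vp, LK_SHARED, vs);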
2845 */ 2846 enum vgetstate 2847 vget_prep_smr(struct vnode *vp) 2848 { 2849 enum vgetstate vs; 2850 2851 VFS_SMR_ASSERT_ENTERED(); 2852 2853 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2854 vs = VGET_USECOUNT; 2855 } else { 2856 if (vhold_smr(vp)) 2857 vs = VGET_HOLDCNT; 2858 else 2859 vs = VGET_NONE; 2860 } 2861 return (vs); 2862 } 2863 2864 enum vgetstate 2865 vget_prep(struct vnode *vp) 2866 { 2867 enum vgetstate vs; 2868 2869 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2870 vs = VGET_USECOUNT; 2871 } else { 2872 vhold(vp); 2873 vs = VGET_HOLDCNT; 2874 } 2875 return (vs); 2876 } 2877 2878 void 2879 vget_abort(struct vnode *vp, enum vgetstate vs) 2880 { 2881 2882 switch (vs) { 2883 case VGET_USECOUNT: 2884 vrele(vp); 2885 break; 2886 case VGET_HOLDCNT: 2887 vdrop(vp); 2888 break; 2889 default: 2890 __assert_unreachable(); 2891 } 2892 } 2893 2894 int 2895 vget(struct vnode *vp, int flags) 2896 { 2897 enum vgetstate vs; 2898 2899 vs = vget_prep(vp); 2900 return (vget_finish(vp, flags, vs)); 2901 } 2902 2903 int 2904 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2905 { 2906 int error; 2907 2908 if ((flags & LK_INTERLOCK) != 0) 2909 ASSERT_VI_LOCKED(vp, __func__); 2910 else 2911 ASSERT_VI_UNLOCKED(vp, __func__); 2912 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2913 VNPASS(vp->v_holdcnt > 0, vp); 2914 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2915 2916 error = vn_lock(vp, flags); 2917 if (__predict_false(error != 0)) { 2918 vget_abort(vp, vs); 2919 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2920 vp); 2921 return (error); 2922 } 2923 2924 vget_finish_ref(vp, vs); 2925 return (0); 2926 } 2927 2928 void 2929 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 2930 { 2931 int old; 2932 2933 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2934 VNPASS(vp->v_holdcnt > 0, vp); 2935 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2936 2937 if (vs == VGET_USECOUNT) 2938 return; 2939 2940 /* 2941 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2942 * the vnode around. Otherwise someone else lended their hold count and 2943 * we have to drop ours. 2944 */ 2945 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2946 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 2947 if (old != 0) { 2948 #ifdef INVARIANTS 2949 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2950 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2951 #else 2952 refcount_release(&vp->v_holdcnt); 2953 #endif 2954 } 2955 } 2956 2957 void 2958 vref(struct vnode *vp) 2959 { 2960 enum vgetstate vs; 2961 2962 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2963 vs = vget_prep(vp); 2964 vget_finish_ref(vp, vs); 2965 } 2966 2967 void 2968 vrefact(struct vnode *vp) 2969 { 2970 2971 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2972 #ifdef INVARIANTS 2973 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2974 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 2975 #else 2976 refcount_acquire(&vp->v_usecount); 2977 #endif 2978 } 2979 2980 void 2981 vlazy(struct vnode *vp) 2982 { 2983 struct mount *mp; 2984 2985 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2986 2987 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 2988 return; 2989 /* 2990 * We may get here for inactive routines after the vnode got doomed. 
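	 * Such a vnode is on its way to being reclaimed and freed, so there
	 * is no point in queueing it for deferred processing; it is simply
	 * left off the lazy list.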
2991 */ 2992 if (VN_IS_DOOMED(vp)) 2993 return; 2994 mp = vp->v_mount; 2995 mtx_lock(&mp->mnt_listmtx); 2996 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 2997 vp->v_mflag |= VMP_LAZYLIST; 2998 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 2999 mp->mnt_lazyvnodelistsize++; 3000 } 3001 mtx_unlock(&mp->mnt_listmtx); 3002 } 3003 3004 /* 3005 * This routine is only meant to be called from vgonel prior to dooming 3006 * the vnode. 3007 */ 3008 static void 3009 vunlazy_gone(struct vnode *vp) 3010 { 3011 struct mount *mp; 3012 3013 ASSERT_VOP_ELOCKED(vp, __func__); 3014 ASSERT_VI_LOCKED(vp, __func__); 3015 VNPASS(!VN_IS_DOOMED(vp), vp); 3016 3017 if (vp->v_mflag & VMP_LAZYLIST) { 3018 mp = vp->v_mount; 3019 mtx_lock(&mp->mnt_listmtx); 3020 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3021 vp->v_mflag &= ~VMP_LAZYLIST; 3022 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3023 mp->mnt_lazyvnodelistsize--; 3024 mtx_unlock(&mp->mnt_listmtx); 3025 } 3026 } 3027 3028 static void 3029 vdefer_inactive(struct vnode *vp) 3030 { 3031 3032 ASSERT_VI_LOCKED(vp, __func__); 3033 VNASSERT(vp->v_holdcnt > 0, vp, 3034 ("%s: vnode without hold count", __func__)); 3035 if (VN_IS_DOOMED(vp)) { 3036 vdropl(vp); 3037 return; 3038 } 3039 if (vp->v_iflag & VI_DEFINACT) { 3040 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3041 vdropl(vp); 3042 return; 3043 } 3044 if (vp->v_usecount > 0) { 3045 vp->v_iflag &= ~VI_OWEINACT; 3046 vdropl(vp); 3047 return; 3048 } 3049 vlazy(vp); 3050 vp->v_iflag |= VI_DEFINACT; 3051 VI_UNLOCK(vp); 3052 counter_u64_add(deferred_inact, 1); 3053 } 3054 3055 static void 3056 vdefer_inactive_unlocked(struct vnode *vp) 3057 { 3058 3059 VI_LOCK(vp); 3060 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3061 vdropl(vp); 3062 return; 3063 } 3064 vdefer_inactive(vp); 3065 } 3066 3067 enum vput_op { VRELE, VPUT, VUNREF }; 3068 3069 /* 3070 * Handle ->v_usecount transitioning to 0. 3071 * 3072 * By releasing the last usecount we take ownership of the hold count which 3073 * provides liveness of the vnode, meaning we have to vdrop. 3074 * 3075 * For all vnodes we may need to perform inactive processing. It requires an 3076 * exclusive lock on the vnode, while it is legal to call here with only a 3077 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3078 * inactive processing gets deferred to the syncer. 3079 * 3080 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3081 * on the lock being held all the way until VOP_INACTIVE. This in particular 3082 * happens with UFS which adds half-constructed vnodes to the hash, where they 3083 * can be found by other code. 3084 */ 3085 static void 3086 vput_final(struct vnode *vp, enum vput_op func) 3087 { 3088 int error; 3089 bool want_unlock; 3090 3091 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3092 VNPASS(vp->v_holdcnt > 0, vp); 3093 3094 VI_LOCK(vp); 3095 3096 /* 3097 * By the time we got here someone else might have transitioned 3098 * the count back to > 0. 3099 */ 3100 if (vp->v_usecount > 0) 3101 goto out; 3102 3103 /* 3104 * If the vnode is doomed vgone already performed inactive processing 3105 * (if needed). 3106 */ 3107 if (VN_IS_DOOMED(vp)) 3108 goto out; 3109 3110 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3111 goto out; 3112 3113 if (vp->v_iflag & VI_DOINGINACT) 3114 goto out; 3115 3116 /* 3117 * Locking operations here will drop the interlock and possibly the 3118 * vnode lock, opening a window where the vnode can get doomed all the 3119 * while ->v_usecount is 0. 
Set VI_OWEINACT to let vgone know to 3120 * perform inactive. 3121 */ 3122 vp->v_iflag |= VI_OWEINACT; 3123 want_unlock = false; 3124 error = 0; 3125 switch (func) { 3126 case VRELE: 3127 switch (VOP_ISLOCKED(vp)) { 3128 case LK_EXCLUSIVE: 3129 break; 3130 case LK_EXCLOTHER: 3131 case 0: 3132 want_unlock = true; 3133 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3134 VI_LOCK(vp); 3135 break; 3136 default: 3137 /* 3138 * The lock has at least one sharer, but we have no way 3139 * to conclude whether this is us. Play it safe and 3140 * defer processing. 3141 */ 3142 error = EAGAIN; 3143 break; 3144 } 3145 break; 3146 case VPUT: 3147 want_unlock = true; 3148 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3149 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3150 LK_NOWAIT); 3151 VI_LOCK(vp); 3152 } 3153 break; 3154 case VUNREF: 3155 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3156 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3157 VI_LOCK(vp); 3158 } 3159 break; 3160 } 3161 if (error == 0) { 3162 vinactive(vp); 3163 if (want_unlock) 3164 VOP_UNLOCK(vp); 3165 vdropl(vp); 3166 } else { 3167 vdefer_inactive(vp); 3168 } 3169 return; 3170 out: 3171 if (func == VPUT) 3172 VOP_UNLOCK(vp); 3173 vdropl(vp); 3174 } 3175 3176 /* 3177 * Decrement ->v_usecount for a vnode. 3178 * 3179 * Releasing the last use count requires additional processing, see vput_final 3180 * above for details. 3181 * 3182 * Comment above each variant denotes lock state on entry and exit. 3183 */ 3184 3185 /* 3186 * in: any 3187 * out: same as passed in 3188 */ 3189 void 3190 vrele(struct vnode *vp) 3191 { 3192 3193 ASSERT_VI_UNLOCKED(vp, __func__); 3194 if (!refcount_release(&vp->v_usecount)) 3195 return; 3196 vput_final(vp, VRELE); 3197 } 3198 3199 /* 3200 * in: locked 3201 * out: unlocked 3202 */ 3203 void 3204 vput(struct vnode *vp) 3205 { 3206 3207 ASSERT_VOP_LOCKED(vp, __func__); 3208 ASSERT_VI_UNLOCKED(vp, __func__); 3209 if (!refcount_release(&vp->v_usecount)) { 3210 VOP_UNLOCK(vp); 3211 return; 3212 } 3213 vput_final(vp, VPUT); 3214 } 3215 3216 /* 3217 * in: locked 3218 * out: locked 3219 */ 3220 void 3221 vunref(struct vnode *vp) 3222 { 3223 3224 ASSERT_VOP_LOCKED(vp, __func__); 3225 ASSERT_VI_UNLOCKED(vp, __func__); 3226 if (!refcount_release(&vp->v_usecount)) 3227 return; 3228 vput_final(vp, VUNREF); 3229 } 3230 3231 void 3232 vhold(struct vnode *vp) 3233 { 3234 int old; 3235 3236 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3237 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3238 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3239 ("%s: wrong hold count %d", __func__, old)); 3240 if (old == 0) 3241 vn_freevnodes_dec(); 3242 } 3243 3244 void 3245 vholdnz(struct vnode *vp) 3246 { 3247 3248 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3249 #ifdef INVARIANTS 3250 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3251 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3252 ("%s: wrong hold count %d", __func__, old)); 3253 #else 3254 atomic_add_int(&vp->v_holdcnt, 1); 3255 #endif 3256 } 3257 3258 /* 3259 * Grab a hold count unless the vnode is freed. 3260 * 3261 * Only use this routine if vfs smr is the only protection you have against 3262 * freeing the vnode. 3263 * 3264 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3265 * is not set. After the flag is set the vnode becomes immutable to anyone but 3266 * the thread which managed to set the flag. 
3267 * 3268 * It may be tempting to replace the loop with: 3269 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3270 * if (count & VHOLD_NO_SMR) { 3271 * backpedal and error out; 3272 * } 3273 * 3274 * However, while this is more performant, it hinders debugging by eliminating 3275 * the previously mentioned invariant. 3276 */ 3277 bool 3278 vhold_smr(struct vnode *vp) 3279 { 3280 int count; 3281 3282 VFS_SMR_ASSERT_ENTERED(); 3283 3284 count = atomic_load_int(&vp->v_holdcnt); 3285 for (;;) { 3286 if (count & VHOLD_NO_SMR) { 3287 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3288 ("non-zero hold count with flags %d\n", count)); 3289 return (false); 3290 } 3291 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3292 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3293 if (count == 0) 3294 vn_freevnodes_dec(); 3295 return (true); 3296 } 3297 } 3298 } 3299 3300 /* 3301 * Hold a free vnode for recycling. 3302 * 3303 * Note: vnode_init references this comment. 3304 * 3305 * Attempts to recycle only need the global vnode list lock and have no use for 3306 * SMR. 3307 * 3308 * However, vnodes get inserted into the global list before they get fully 3309 * initialized and stay there until UMA decides to free the memory. This in 3310 * particular means the target can be found before it becomes usable and after 3311 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3312 * VHOLD_NO_SMR. 3313 * 3314 * Note: the vnode may gain more references after we transition the count 0->1. 3315 */ 3316 static bool 3317 vhold_recycle_free(struct vnode *vp) 3318 { 3319 int count; 3320 3321 mtx_assert(&vnode_list_mtx, MA_OWNED); 3322 3323 count = atomic_load_int(&vp->v_holdcnt); 3324 for (;;) { 3325 if (count & VHOLD_NO_SMR) { 3326 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3327 ("non-zero hold count with flags %d\n", count)); 3328 return (false); 3329 } 3330 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3331 if (count > 0) { 3332 return (false); 3333 } 3334 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3335 vn_freevnodes_dec(); 3336 return (true); 3337 } 3338 } 3339 } 3340 3341 static void __noinline 3342 vdbatch_process(struct vdbatch *vd) 3343 { 3344 struct vnode *vp; 3345 int i; 3346 3347 mtx_assert(&vd->lock, MA_OWNED); 3348 MPASS(curthread->td_pinned > 0); 3349 MPASS(vd->index == VDBATCH_SIZE); 3350 3351 mtx_lock(&vnode_list_mtx); 3352 critical_enter(); 3353 freevnodes += vd->freevnodes; 3354 for (i = 0; i < VDBATCH_SIZE; i++) { 3355 vp = vd->tab[i]; 3356 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3357 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3358 MPASS(vp->v_dbatchcpu != NOCPU); 3359 vp->v_dbatchcpu = NOCPU; 3360 } 3361 mtx_unlock(&vnode_list_mtx); 3362 vd->freevnodes = 0; 3363 bzero(vd->tab, sizeof(vd->tab)); 3364 vd->index = 0; 3365 critical_exit(); 3366 } 3367 3368 static void 3369 vdbatch_enqueue(struct vnode *vp) 3370 { 3371 struct vdbatch *vd; 3372 3373 ASSERT_VI_LOCKED(vp, __func__); 3374 VNASSERT(!VN_IS_DOOMED(vp), vp, 3375 ("%s: deferring requeue of a doomed vnode", __func__)); 3376 3377 if (vp->v_dbatchcpu != NOCPU) { 3378 VI_UNLOCK(vp); 3379 return; 3380 } 3381 3382 sched_pin(); 3383 vd = DPCPU_PTR(vd); 3384 mtx_lock(&vd->lock); 3385 MPASS(vd->index < VDBATCH_SIZE); 3386 MPASS(vd->tab[vd->index] == NULL); 3387 /* 3388 * A hack: we depend on being pinned so that we know what to put in 3389 * ->v_dbatchcpu. 
3390 */ 3391 vp->v_dbatchcpu = curcpu; 3392 vd->tab[vd->index] = vp; 3393 vd->index++; 3394 VI_UNLOCK(vp); 3395 if (vd->index == VDBATCH_SIZE) 3396 vdbatch_process(vd); 3397 mtx_unlock(&vd->lock); 3398 sched_unpin(); 3399 } 3400 3401 /* 3402 * This routine must only be called for vnodes which are about to be 3403 * deallocated. Supporting dequeue for arbitrary vndoes would require 3404 * validating that the locked batch matches. 3405 */ 3406 static void 3407 vdbatch_dequeue(struct vnode *vp) 3408 { 3409 struct vdbatch *vd; 3410 int i; 3411 short cpu; 3412 3413 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3414 ("%s: called for a used vnode\n", __func__)); 3415 3416 cpu = vp->v_dbatchcpu; 3417 if (cpu == NOCPU) 3418 return; 3419 3420 vd = DPCPU_ID_PTR(cpu, vd); 3421 mtx_lock(&vd->lock); 3422 for (i = 0; i < vd->index; i++) { 3423 if (vd->tab[i] != vp) 3424 continue; 3425 vp->v_dbatchcpu = NOCPU; 3426 vd->index--; 3427 vd->tab[i] = vd->tab[vd->index]; 3428 vd->tab[vd->index] = NULL; 3429 break; 3430 } 3431 mtx_unlock(&vd->lock); 3432 /* 3433 * Either we dequeued the vnode above or the target CPU beat us to it. 3434 */ 3435 MPASS(vp->v_dbatchcpu == NOCPU); 3436 } 3437 3438 /* 3439 * Drop the hold count of the vnode. If this is the last reference to 3440 * the vnode we place it on the free list unless it has been vgone'd 3441 * (marked VIRF_DOOMED) in which case we will free it. 3442 * 3443 * Because the vnode vm object keeps a hold reference on the vnode if 3444 * there is at least one resident non-cached page, the vnode cannot 3445 * leave the active list without the page cleanup done. 3446 */ 3447 static void 3448 vdrop_deactivate(struct vnode *vp) 3449 { 3450 struct mount *mp; 3451 3452 ASSERT_VI_LOCKED(vp, __func__); 3453 /* 3454 * Mark a vnode as free: remove it from its active list 3455 * and put it up for recycling on the freelist. 3456 */ 3457 VNASSERT(!VN_IS_DOOMED(vp), vp, 3458 ("vdrop: returning doomed vnode")); 3459 VNASSERT(vp->v_op != NULL, vp, 3460 ("vdrop: vnode already reclaimed.")); 3461 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3462 ("vnode with VI_OWEINACT set")); 3463 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3464 ("vnode with VI_DEFINACT set")); 3465 if (vp->v_mflag & VMP_LAZYLIST) { 3466 mp = vp->v_mount; 3467 mtx_lock(&mp->mnt_listmtx); 3468 VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); 3469 /* 3470 * Don't remove the vnode from the lazy list if another thread 3471 * has increased the hold count. It may have re-enqueued the 3472 * vnode to the lazy list and is now responsible for its 3473 * removal. 3474 */ 3475 if (vp->v_holdcnt == 0) { 3476 vp->v_mflag &= ~VMP_LAZYLIST; 3477 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3478 mp->mnt_lazyvnodelistsize--; 3479 } 3480 mtx_unlock(&mp->mnt_listmtx); 3481 } 3482 vdbatch_enqueue(vp); 3483 } 3484 3485 static void __noinline 3486 vdropl_final(struct vnode *vp) 3487 { 3488 3489 ASSERT_VI_LOCKED(vp, __func__); 3490 VNPASS(VN_IS_DOOMED(vp), vp); 3491 /* 3492 * Set the VHOLD_NO_SMR flag. 3493 * 3494 * We may be racing against vhold_smr. If they win we can just pretend 3495 * we never got this far, they will vdrop later. 3496 */ 3497 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3498 vn_freevnodes_inc(); 3499 VI_UNLOCK(vp); 3500 /* 3501 * We lost the aforementioned race. Any subsequent access is 3502 * invalid as they might have managed to vdropl on their own. 3503 */ 3504 return; 3505 } 3506 /* 3507 * Don't bump freevnodes as this one is going away. 
3508 */ 3509 freevnode(vp); 3510 } 3511 3512 void 3513 vdrop(struct vnode *vp) 3514 { 3515 3516 ASSERT_VI_UNLOCKED(vp, __func__); 3517 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3518 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3519 return; 3520 VI_LOCK(vp); 3521 vdropl(vp); 3522 } 3523 3524 void 3525 vdropl(struct vnode *vp) 3526 { 3527 3528 ASSERT_VI_LOCKED(vp, __func__); 3529 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3530 if (!refcount_release(&vp->v_holdcnt)) { 3531 VI_UNLOCK(vp); 3532 return; 3533 } 3534 if (!VN_IS_DOOMED(vp)) { 3535 vn_freevnodes_inc(); 3536 vdrop_deactivate(vp); 3537 /* 3538 * Also unlocks the interlock. We can't assert on it as we 3539 * released our hold and by now the vnode might have been 3540 * freed. 3541 */ 3542 return; 3543 } 3544 vdropl_final(vp); 3545 } 3546 3547 /* 3548 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3549 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3550 */ 3551 static void 3552 vinactivef(struct vnode *vp) 3553 { 3554 struct vm_object *obj; 3555 3556 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3557 ASSERT_VI_LOCKED(vp, "vinactive"); 3558 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3559 ("vinactive: recursed on VI_DOINGINACT")); 3560 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3561 vp->v_iflag |= VI_DOINGINACT; 3562 vp->v_iflag &= ~VI_OWEINACT; 3563 VI_UNLOCK(vp); 3564 /* 3565 * Before moving off the active list, we must be sure that any 3566 * modified pages are converted into the vnode's dirty 3567 * buffers, since these will no longer be checked once the 3568 * vnode is on the inactive list. 3569 * 3570 * The write-out of the dirty pages is asynchronous. At the 3571 * point that VOP_INACTIVE() is called, there could still be 3572 * pending I/O and dirty pages in the object. 3573 */ 3574 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3575 vm_object_mightbedirty(obj)) { 3576 VM_OBJECT_WLOCK(obj); 3577 vm_object_page_clean(obj, 0, 0, 0); 3578 VM_OBJECT_WUNLOCK(obj); 3579 } 3580 VOP_INACTIVE(vp); 3581 VI_LOCK(vp); 3582 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3583 ("vinactive: lost VI_DOINGINACT")); 3584 vp->v_iflag &= ~VI_DOINGINACT; 3585 } 3586 3587 void 3588 vinactive(struct vnode *vp) 3589 { 3590 3591 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3592 ASSERT_VI_LOCKED(vp, "vinactive"); 3593 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3594 3595 if ((vp->v_iflag & VI_OWEINACT) == 0) 3596 return; 3597 if (vp->v_iflag & VI_DOINGINACT) 3598 return; 3599 if (vp->v_usecount > 0) { 3600 vp->v_iflag &= ~VI_OWEINACT; 3601 return; 3602 } 3603 vinactivef(vp); 3604 } 3605 3606 /* 3607 * Remove any vnodes in the vnode table belonging to mount point mp. 3608 * 3609 * If FORCECLOSE is not specified, there should not be any active ones, 3610 * return error if any are found (nb: this is a user error, not a 3611 * system error). If FORCECLOSE is specified, detach any active vnodes 3612 * that are found. 3613 * 3614 * If WRITECLOSE is set, only flush out regular file vnodes open for 3615 * writing. 3616 * 3617 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3618 * 3619 * `rootrefs' specifies the base reference count for the root vnode 3620 * of this filesystem. The root vnode is considered busy if its 3621 * v_usecount exceeds this value. On a successful return, vflush(, td) 3622 * will call vrele() on the root vnode exactly rootrefs times. 3623 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3624 * be zero. 
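 *
 * A typical unmount path invokes this as (sketch; whether to pass
 * FORCECLOSE is the filesystem's decision based on MNT_FORCE):
 *
 *	error = vflush(mp, 0, (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0, td);
 *	if (error != 0)
 *		return (error);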
3625 */ 3626 #ifdef DIAGNOSTIC 3627 static int busyprt = 0; /* print out busy vnodes */ 3628 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3629 #endif 3630 3631 int 3632 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3633 { 3634 struct vnode *vp, *mvp, *rootvp = NULL; 3635 struct vattr vattr; 3636 int busy = 0, error; 3637 3638 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3639 rootrefs, flags); 3640 if (rootrefs > 0) { 3641 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3642 ("vflush: bad args")); 3643 /* 3644 * Get the filesystem root vnode. We can vput() it 3645 * immediately, since with rootrefs > 0, it won't go away. 3646 */ 3647 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3648 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3649 __func__, error); 3650 return (error); 3651 } 3652 vput(rootvp); 3653 } 3654 loop: 3655 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3656 vholdl(vp); 3657 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3658 if (error) { 3659 vdrop(vp); 3660 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3661 goto loop; 3662 } 3663 /* 3664 * Skip over a vnodes marked VV_SYSTEM. 3665 */ 3666 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3667 VOP_UNLOCK(vp); 3668 vdrop(vp); 3669 continue; 3670 } 3671 /* 3672 * If WRITECLOSE is set, flush out unlinked but still open 3673 * files (even if open only for reading) and regular file 3674 * vnodes open for writing. 3675 */ 3676 if (flags & WRITECLOSE) { 3677 if (vp->v_object != NULL) { 3678 VM_OBJECT_WLOCK(vp->v_object); 3679 vm_object_page_clean(vp->v_object, 0, 0, 0); 3680 VM_OBJECT_WUNLOCK(vp->v_object); 3681 } 3682 error = VOP_FSYNC(vp, MNT_WAIT, td); 3683 if (error != 0) { 3684 VOP_UNLOCK(vp); 3685 vdrop(vp); 3686 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3687 return (error); 3688 } 3689 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3690 VI_LOCK(vp); 3691 3692 if ((vp->v_type == VNON || 3693 (error == 0 && vattr.va_nlink > 0)) && 3694 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3695 VOP_UNLOCK(vp); 3696 vdropl(vp); 3697 continue; 3698 } 3699 } else 3700 VI_LOCK(vp); 3701 /* 3702 * With v_usecount == 0, all we need to do is clear out the 3703 * vnode data structures and we are done. 3704 * 3705 * If FORCECLOSE is set, forcibly close the vnode. 3706 */ 3707 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3708 vgonel(vp); 3709 } else { 3710 busy++; 3711 #ifdef DIAGNOSTIC 3712 if (busyprt) 3713 vn_printf(vp, "vflush: busy vnode "); 3714 #endif 3715 } 3716 VOP_UNLOCK(vp); 3717 vdropl(vp); 3718 } 3719 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3720 /* 3721 * If just the root vnode is busy, and if its refcount 3722 * is equal to `rootrefs', then go ahead and kill it. 3723 */ 3724 VI_LOCK(rootvp); 3725 KASSERT(busy > 0, ("vflush: not busy")); 3726 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3727 ("vflush: usecount %d < rootrefs %d", 3728 rootvp->v_usecount, rootrefs)); 3729 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3730 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3731 vgone(rootvp); 3732 VOP_UNLOCK(rootvp); 3733 busy = 0; 3734 } else 3735 VI_UNLOCK(rootvp); 3736 } 3737 if (busy) { 3738 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3739 busy); 3740 return (EBUSY); 3741 } 3742 for (; rootrefs > 0; rootrefs--) 3743 vrele(rootvp); 3744 return (0); 3745 } 3746 3747 /* 3748 * Recycle an unused vnode to the front of the free list. 
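 *
 * Filesystems typically call this from VOP_INACTIVE() once the file is
 * known to be unreferenced on disk, e.g. (sketch; the link-count check
 * and the ip naming are filesystem specific):
 *
 *	if (ip->i_nlink <= 0)
 *		vrecycle(vp);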
3749 */ 3750 int 3751 vrecycle(struct vnode *vp) 3752 { 3753 int recycled; 3754 3755 VI_LOCK(vp); 3756 recycled = vrecyclel(vp); 3757 VI_UNLOCK(vp); 3758 return (recycled); 3759 } 3760 3761 /* 3762 * vrecycle, with the vp interlock held. 3763 */ 3764 int 3765 vrecyclel(struct vnode *vp) 3766 { 3767 int recycled; 3768 3769 ASSERT_VOP_ELOCKED(vp, __func__); 3770 ASSERT_VI_LOCKED(vp, __func__); 3771 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3772 recycled = 0; 3773 if (vp->v_usecount == 0) { 3774 recycled = 1; 3775 vgonel(vp); 3776 } 3777 return (recycled); 3778 } 3779 3780 /* 3781 * Eliminate all activity associated with a vnode 3782 * in preparation for reuse. 3783 */ 3784 void 3785 vgone(struct vnode *vp) 3786 { 3787 VI_LOCK(vp); 3788 vgonel(vp); 3789 VI_UNLOCK(vp); 3790 } 3791 3792 static void 3793 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3794 struct vnode *lowervp __unused) 3795 { 3796 } 3797 3798 /* 3799 * Notify upper mounts about reclaimed or unlinked vnode. 3800 */ 3801 void 3802 vfs_notify_upper(struct vnode *vp, int event) 3803 { 3804 static struct vfsops vgonel_vfsops = { 3805 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3806 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3807 }; 3808 struct mount *mp, *ump, *mmp; 3809 3810 mp = vp->v_mount; 3811 if (mp == NULL) 3812 return; 3813 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3814 return; 3815 3816 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3817 mmp->mnt_op = &vgonel_vfsops; 3818 mmp->mnt_kern_flag |= MNTK_MARKER; 3819 MNT_ILOCK(mp); 3820 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3821 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3822 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3823 ump = TAILQ_NEXT(ump, mnt_upper_link); 3824 continue; 3825 } 3826 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3827 MNT_IUNLOCK(mp); 3828 switch (event) { 3829 case VFS_NOTIFY_UPPER_RECLAIM: 3830 VFS_RECLAIM_LOWERVP(ump, vp); 3831 break; 3832 case VFS_NOTIFY_UPPER_UNLINK: 3833 VFS_UNLINK_LOWERVP(ump, vp); 3834 break; 3835 default: 3836 KASSERT(0, ("invalid event %d", event)); 3837 break; 3838 } 3839 MNT_ILOCK(mp); 3840 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3841 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3842 } 3843 free(mmp, M_TEMP); 3844 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3845 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3846 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3847 wakeup(&mp->mnt_uppers); 3848 } 3849 MNT_IUNLOCK(mp); 3850 } 3851 3852 /* 3853 * vgone, with the vp interlock held. 3854 */ 3855 static void 3856 vgonel(struct vnode *vp) 3857 { 3858 struct thread *td; 3859 struct mount *mp; 3860 vm_object_t object; 3861 bool active, doinginact, oweinact; 3862 3863 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3864 ASSERT_VI_LOCKED(vp, "vgonel"); 3865 VNASSERT(vp->v_holdcnt, vp, 3866 ("vgonel: vp %p has no reference.", vp)); 3867 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3868 td = curthread; 3869 3870 /* 3871 * Don't vgonel if we're already doomed. 3872 */ 3873 if (vp->v_irflag & VIRF_DOOMED) 3874 return; 3875 /* 3876 * Paired with freevnode. 3877 */ 3878 vn_seqc_write_begin_locked(vp); 3879 vunlazy_gone(vp); 3880 vp->v_irflag |= VIRF_DOOMED; 3881 3882 /* 3883 * Check to see if the vnode is in use. If so, we have to 3884 * call VOP_CLOSE() and VOP_INACTIVE(). 3885 * 3886 * It could be that VOP_INACTIVE() requested reclamation, in 3887 * which case we should avoid recursion, so check 3888 * VI_DOINGINACT. This is not precise but good enough. 
3889 */ 3890 active = vp->v_usecount > 0; 3891 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3892 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 3893 3894 /* 3895 * If we need to do inactive VI_OWEINACT will be set. 3896 */ 3897 if (vp->v_iflag & VI_DEFINACT) { 3898 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3899 vp->v_iflag &= ~VI_DEFINACT; 3900 vdropl(vp); 3901 } else { 3902 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3903 VI_UNLOCK(vp); 3904 } 3905 cache_purge_vgone(vp); 3906 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3907 3908 /* 3909 * If purging an active vnode, it must be closed and 3910 * deactivated before being reclaimed. 3911 */ 3912 if (active) 3913 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3914 if ((oweinact || active) && !doinginact) { 3915 VI_LOCK(vp); 3916 vinactivef(vp); 3917 VI_UNLOCK(vp); 3918 } 3919 if (vp->v_type == VSOCK) 3920 vfs_unp_reclaim(vp); 3921 3922 /* 3923 * Clean out any buffers associated with the vnode. 3924 * If the flush fails, just toss the buffers. 3925 */ 3926 mp = NULL; 3927 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3928 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3929 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3930 while (vinvalbuf(vp, 0, 0, 0) != 0) 3931 ; 3932 } 3933 3934 BO_LOCK(&vp->v_bufobj); 3935 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3936 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3937 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3938 vp->v_bufobj.bo_clean.bv_cnt == 0, 3939 ("vp %p bufobj not invalidated", vp)); 3940 3941 /* 3942 * For VMIO bufobj, BO_DEAD is set later, or in 3943 * vm_object_terminate() after the object's page queue is 3944 * flushed. 3945 */ 3946 object = vp->v_bufobj.bo_object; 3947 if (object == NULL) 3948 vp->v_bufobj.bo_flag |= BO_DEAD; 3949 BO_UNLOCK(&vp->v_bufobj); 3950 3951 /* 3952 * Handle the VM part. Tmpfs handles v_object on its own (the 3953 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3954 * should not touch the object borrowed from the lower vnode 3955 * (the handle check). 3956 */ 3957 if (object != NULL && object->type == OBJT_VNODE && 3958 object->handle == vp) 3959 vnode_destroy_vobject(vp); 3960 3961 /* 3962 * Reclaim the vnode. 3963 */ 3964 if (VOP_RECLAIM(vp)) 3965 panic("vgone: cannot reclaim"); 3966 if (mp != NULL) 3967 vn_finished_secondary_write(mp); 3968 VNASSERT(vp->v_object == NULL, vp, 3969 ("vop_reclaim left v_object vp=%p", vp)); 3970 /* 3971 * Clear the advisory locks and wake up waiting threads. 3972 */ 3973 (void)VOP_ADVLOCKPURGE(vp); 3974 vp->v_lockf = NULL; 3975 /* 3976 * Delete from old mount point vnode list. 3977 */ 3978 delmntque(vp); 3979 /* 3980 * Done with purge, reset to the standard lock and invalidate 3981 * the vnode. 3982 */ 3983 VI_LOCK(vp); 3984 vp->v_vnlock = &vp->v_lock; 3985 vp->v_op = &dead_vnodeops; 3986 vp->v_type = VBAD; 3987 } 3988 3989 /* 3990 * Print out a description of a vnode. 3991 */ 3992 static const char * const typename[] = 3993 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3994 "VMARKER"}; 3995 3996 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 3997 "new hold count flag not added to vn_printf"); 3998 3999 void 4000 vn_printf(struct vnode *vp, const char *fmt, ...) 
4001 { 4002 va_list ap; 4003 char buf[256], buf2[16]; 4004 u_long flags; 4005 u_int holdcnt; 4006 4007 va_start(ap, fmt); 4008 vprintf(fmt, ap); 4009 va_end(ap); 4010 printf("%p: ", (void *)vp); 4011 printf("type %s\n", typename[vp->v_type]);
4012 holdcnt = atomic_load_int(&vp->v_holdcnt); 4013 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4014 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4015 vp->v_seqc_users);
4016 switch (vp->v_type) { 4017 case VDIR: 4018 printf(" mountedhere %p\n", vp->v_mountedhere); 4019 break; 4020 case VCHR: 4021 printf(" rdev %p\n", vp->v_rdev); 4022 break; 4023 case VSOCK: 4024 printf(" socket %p\n", vp->v_unpcb); 4025 break; 4026 case VFIFO: 4027 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4028 break; 4029 default: 4030 printf("\n"); 4031 break; 4032 }
4033 buf[0] = '\0'; 4034 buf[1] = '\0'; 4035 if (holdcnt & VHOLD_NO_SMR) 4036 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4037 printf(" hold count flags (%s)\n", buf + 1); 4038
4039 buf[0] = '\0'; 4040 buf[1] = '\0'; 4041 if (vp->v_irflag & VIRF_DOOMED) 4042 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4043 if (vp->v_irflag & VIRF_PGREAD) 4044 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4045 flags = vp->v_irflag & ~(VIRF_DOOMED | VIRF_PGREAD); 4046 if (flags != 0) { 4047 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4048 strlcat(buf, buf2, sizeof(buf)); 4049 }
4050 if (vp->v_vflag & VV_ROOT) 4051 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4052 if (vp->v_vflag & VV_ISTTY) 4053 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4054 if (vp->v_vflag & VV_NOSYNC) 4055 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4056 if (vp->v_vflag & VV_ETERNALDEV) 4057 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4058 if (vp->v_vflag & VV_CACHEDLABEL) 4059 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4060 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4061 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4062 if (vp->v_vflag & VV_COPYONWRITE) 4063 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4064 if (vp->v_vflag & VV_SYSTEM) 4065 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4066 if (vp->v_vflag & VV_PROCDEP) 4067 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4068 if (vp->v_vflag & VV_NOKNOTE) 4069 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4070 if (vp->v_vflag & VV_DELETED) 4071 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4072 if (vp->v_vflag & VV_MD) 4073 strlcat(buf, "|VV_MD", sizeof(buf)); 4074 if (vp->v_vflag & VV_FORCEINSMQ) 4075 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4076 if (vp->v_vflag & VV_READLINK) 4077 strlcat(buf, "|VV_READLINK", sizeof(buf));
4078 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4079 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 4080 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4081 if (flags != 0) { 4082 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4083 strlcat(buf, buf2, sizeof(buf)); 4084 }
4085 if (vp->v_iflag & VI_TEXT_REF) 4086 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4087 if (vp->v_iflag & VI_MOUNT) 4088 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4089 if (vp->v_iflag & VI_DOINGINACT) 4090 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4091 if (vp->v_iflag & VI_OWEINACT) 4092 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4093 if (vp->v_iflag & VI_DEFINACT) 4094 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4095 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4096 VI_OWEINACT | VI_DEFINACT); 4097 if (flags != 0) { 4098 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4099 strlcat(buf, buf2, sizeof(buf)); 4100 } 4101 if (vp->v_mflag &
VMP_LAZYLIST) 4102 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4103 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4104 if (flags != 0) { 4105 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4106 strlcat(buf, buf2, sizeof(buf)); 4107 } 4108 printf(" flags (%s)\n", buf + 1); 4109 if (mtx_owned(VI_MTX(vp))) 4110 printf(" VI_LOCKed"); 4111 if (vp->v_object != NULL) 4112 printf(" v_object %p ref %d pages %d " 4113 "cleanbuf %d dirtybuf %d\n", 4114 vp->v_object, vp->v_object->ref_count, 4115 vp->v_object->resident_page_count, 4116 vp->v_bufobj.bo_clean.bv_cnt, 4117 vp->v_bufobj.bo_dirty.bv_cnt); 4118 printf(" "); 4119 lockmgr_printinfo(vp->v_vnlock); 4120 if (vp->v_data != NULL) 4121 VOP_PRINT(vp); 4122 } 4123 4124 #ifdef DDB 4125 /* 4126 * List all of the locked vnodes in the system. 4127 * Called when debugging the kernel. 4128 */ 4129 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4130 { 4131 struct mount *mp; 4132 struct vnode *vp; 4133 4134 /* 4135 * Note: because this is DDB, we can't obey the locking semantics 4136 * for these structures, which means we could catch an inconsistent 4137 * state and dereference a nasty pointer. Not much to be done 4138 * about that. 4139 */ 4140 db_printf("Locked vnodes\n"); 4141 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4142 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4143 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4144 vn_printf(vp, "vnode "); 4145 } 4146 } 4147 } 4148 4149 /* 4150 * Show details about the given vnode. 4151 */ 4152 DB_SHOW_COMMAND(vnode, db_show_vnode) 4153 { 4154 struct vnode *vp; 4155 4156 if (!have_addr) 4157 return; 4158 vp = (struct vnode *)addr; 4159 vn_printf(vp, "vnode "); 4160 } 4161 4162 /* 4163 * Show details about the given mount point. 4164 */ 4165 DB_SHOW_COMMAND(mount, db_show_mount) 4166 { 4167 struct mount *mp; 4168 struct vfsopt *opt; 4169 struct statfs *sp; 4170 struct vnode *vp; 4171 char buf[512]; 4172 uint64_t mflags; 4173 u_int flags; 4174 4175 if (!have_addr) { 4176 /* No address given, print short info about all mount points. 
*/ 4177 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4178 db_printf("%p %s on %s (%s)\n", mp, 4179 mp->mnt_stat.f_mntfromname, 4180 mp->mnt_stat.f_mntonname, 4181 mp->mnt_stat.f_fstypename); 4182 if (db_pager_quit) 4183 break; 4184 } 4185 db_printf("\nMore info: show mount <addr>\n"); 4186 return; 4187 } 4188 4189 mp = (struct mount *)addr; 4190 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4191 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4192 4193 buf[0] = '\0'; 4194 mflags = mp->mnt_flag; 4195 #define MNT_FLAG(flag) do { \ 4196 if (mflags & (flag)) { \ 4197 if (buf[0] != '\0') \ 4198 strlcat(buf, ", ", sizeof(buf)); \ 4199 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4200 mflags &= ~(flag); \ 4201 } \ 4202 } while (0) 4203 MNT_FLAG(MNT_RDONLY); 4204 MNT_FLAG(MNT_SYNCHRONOUS); 4205 MNT_FLAG(MNT_NOEXEC); 4206 MNT_FLAG(MNT_NOSUID); 4207 MNT_FLAG(MNT_NFS4ACLS); 4208 MNT_FLAG(MNT_UNION); 4209 MNT_FLAG(MNT_ASYNC); 4210 MNT_FLAG(MNT_SUIDDIR); 4211 MNT_FLAG(MNT_SOFTDEP); 4212 MNT_FLAG(MNT_NOSYMFOLLOW); 4213 MNT_FLAG(MNT_GJOURNAL); 4214 MNT_FLAG(MNT_MULTILABEL); 4215 MNT_FLAG(MNT_ACLS); 4216 MNT_FLAG(MNT_NOATIME); 4217 MNT_FLAG(MNT_NOCLUSTERR); 4218 MNT_FLAG(MNT_NOCLUSTERW); 4219 MNT_FLAG(MNT_SUJ); 4220 MNT_FLAG(MNT_EXRDONLY); 4221 MNT_FLAG(MNT_EXPORTED); 4222 MNT_FLAG(MNT_DEFEXPORTED); 4223 MNT_FLAG(MNT_EXPORTANON); 4224 MNT_FLAG(MNT_EXKERB); 4225 MNT_FLAG(MNT_EXPUBLIC); 4226 MNT_FLAG(MNT_LOCAL); 4227 MNT_FLAG(MNT_QUOTA); 4228 MNT_FLAG(MNT_ROOTFS); 4229 MNT_FLAG(MNT_USER); 4230 MNT_FLAG(MNT_IGNORE); 4231 MNT_FLAG(MNT_UPDATE); 4232 MNT_FLAG(MNT_DELEXPORT); 4233 MNT_FLAG(MNT_RELOAD); 4234 MNT_FLAG(MNT_FORCE); 4235 MNT_FLAG(MNT_SNAPSHOT); 4236 MNT_FLAG(MNT_BYFSID); 4237 #undef MNT_FLAG 4238 if (mflags != 0) { 4239 if (buf[0] != '\0') 4240 strlcat(buf, ", ", sizeof(buf)); 4241 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4242 "0x%016jx", mflags); 4243 } 4244 db_printf(" mnt_flag = %s\n", buf); 4245 4246 buf[0] = '\0'; 4247 flags = mp->mnt_kern_flag; 4248 #define MNT_KERN_FLAG(flag) do { \ 4249 if (flags & (flag)) { \ 4250 if (buf[0] != '\0') \ 4251 strlcat(buf, ", ", sizeof(buf)); \ 4252 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4253 flags &= ~(flag); \ 4254 } \ 4255 } while (0) 4256 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4257 MNT_KERN_FLAG(MNTK_ASYNC); 4258 MNT_KERN_FLAG(MNTK_SOFTDEP); 4259 MNT_KERN_FLAG(MNTK_DRAINING); 4260 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4261 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4262 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4263 MNT_KERN_FLAG(MNTK_NO_IOPF); 4264 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4265 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4266 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4267 MNT_KERN_FLAG(MNTK_MARKER); 4268 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4269 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4270 MNT_KERN_FLAG(MNTK_NOASYNC); 4271 MNT_KERN_FLAG(MNTK_UNMOUNT); 4272 MNT_KERN_FLAG(MNTK_MWAIT); 4273 MNT_KERN_FLAG(MNTK_SUSPEND); 4274 MNT_KERN_FLAG(MNTK_SUSPEND2); 4275 MNT_KERN_FLAG(MNTK_SUSPENDED); 4276 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4277 MNT_KERN_FLAG(MNTK_NOKNOTE); 4278 #undef MNT_KERN_FLAG 4279 if (flags != 0) { 4280 if (buf[0] != '\0') 4281 strlcat(buf, ", ", sizeof(buf)); 4282 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4283 "0x%08x", flags); 4284 } 4285 db_printf(" mnt_kern_flag = %s\n", buf); 4286 4287 db_printf(" mnt_opt = "); 4288 opt = TAILQ_FIRST(mp->mnt_opt); 4289 if (opt != NULL) { 4290 db_printf("%s", opt->name); 4291 opt = TAILQ_NEXT(opt, link); 4292 while (opt != NULL) { 4293 db_printf(", %s", opt->name); 4294 opt = TAILQ_NEXT(opt, link); 4295 } 4296 
} 4297 db_printf("\n"); 4298 4299 sp = &mp->mnt_stat; 4300 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4301 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4302 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4303 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4304 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4305 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4306 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4307 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4308 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4309 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4310 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4311 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4312 4313 db_printf(" mnt_cred = { uid=%u ruid=%u", 4314 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4315 if (jailed(mp->mnt_cred)) 4316 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4317 db_printf(" }\n"); 4318 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4319 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4320 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4321 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4322 db_printf(" mnt_lazyvnodelistsize = %d\n", 4323 mp->mnt_lazyvnodelistsize); 4324 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4325 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4326 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4327 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4328 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4329 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4330 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4331 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4332 db_printf(" mnt_secondary_accwrites = %d\n", 4333 mp->mnt_secondary_accwrites); 4334 db_printf(" mnt_gjprovider = %s\n", 4335 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4336 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4337 4338 db_printf("\n\nList of active vnodes\n"); 4339 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4340 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4341 vn_printf(vp, "vnode "); 4342 if (db_pager_quit) 4343 break; 4344 } 4345 } 4346 db_printf("\n\nList of inactive vnodes\n"); 4347 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4348 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4349 vn_printf(vp, "vnode "); 4350 if (db_pager_quit) 4351 break; 4352 } 4353 } 4354 } 4355 #endif /* DDB */ 4356 4357 /* 4358 * Fill in a struct xvfsconf based on a struct vfsconf. 4359 */ 4360 static int 4361 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4362 { 4363 struct xvfsconf xvfsp; 4364 4365 bzero(&xvfsp, sizeof(xvfsp)); 4366 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4367 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4368 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4369 xvfsp.vfc_flags = vfsp->vfc_flags; 4370 /* 4371 * These are unused in userland, we keep them 4372 * to not break binary compatibility. 
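 * Both pointers are explicitly set to NULL below, so no kernel addresses are copied out to userland.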
4373 */ 4374 xvfsp.vfc_vfsops = NULL; 4375 xvfsp.vfc_next = NULL; 4376 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4377 } 4378 4379 #ifdef COMPAT_FREEBSD32 4380 struct xvfsconf32 { 4381 uint32_t vfc_vfsops; 4382 char vfc_name[MFSNAMELEN]; 4383 int32_t vfc_typenum; 4384 int32_t vfc_refcount; 4385 int32_t vfc_flags; 4386 uint32_t vfc_next; 4387 }; 4388 4389 static int 4390 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4391 { 4392 struct xvfsconf32 xvfsp; 4393 4394 bzero(&xvfsp, sizeof(xvfsp)); 4395 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4396 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4397 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4398 xvfsp.vfc_flags = vfsp->vfc_flags; 4399 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4400 } 4401 #endif 4402 4403 /* 4404 * Top level filesystem related information gathering. 4405 */ 4406 static int 4407 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4408 { 4409 struct vfsconf *vfsp; 4410 int error; 4411 4412 error = 0; 4413 vfsconf_slock(); 4414 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4415 #ifdef COMPAT_FREEBSD32 4416 if (req->flags & SCTL_MASK32) 4417 error = vfsconf2x32(req, vfsp); 4418 else 4419 #endif 4420 error = vfsconf2x(req, vfsp); 4421 if (error) 4422 break; 4423 } 4424 vfsconf_sunlock(); 4425 return (error); 4426 } 4427 4428 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4429 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4430 "S,xvfsconf", "List of all configured filesystems"); 4431 4432 #ifndef BURN_BRIDGES 4433 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4434 4435 static int 4436 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4437 { 4438 int *name = (int *)arg1 - 1; /* XXX */ 4439 u_int namelen = arg2 + 1; /* XXX */ 4440 struct vfsconf *vfsp; 4441 4442 log(LOG_WARNING, "userland calling deprecated sysctl, " 4443 "please rebuild world\n"); 4444 4445 #if 1 || defined(COMPAT_PRELITE2) 4446 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4447 if (namelen == 1) 4448 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4449 #endif 4450 4451 switch (name[1]) { 4452 case VFS_MAXTYPENUM: 4453 if (namelen != 2) 4454 return (ENOTDIR); 4455 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4456 case VFS_CONF: 4457 if (namelen != 3) 4458 return (ENOTDIR); /* overloaded */ 4459 vfsconf_slock(); 4460 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4461 if (vfsp->vfc_typenum == name[2]) 4462 break; 4463 } 4464 vfsconf_sunlock(); 4465 if (vfsp == NULL) 4466 return (EOPNOTSUPP); 4467 #ifdef COMPAT_FREEBSD32 4468 if (req->flags & SCTL_MASK32) 4469 return (vfsconf2x32(req, vfsp)); 4470 else 4471 #endif 4472 return (vfsconf2x(req, vfsp)); 4473 } 4474 return (EOPNOTSUPP); 4475 } 4476 4477 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4478 CTLFLAG_MPSAFE, vfs_sysctl, 4479 "Generic filesystem"); 4480 4481 #if 1 || defined(COMPAT_PRELITE2) 4482 4483 static int 4484 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4485 { 4486 int error; 4487 struct vfsconf *vfsp; 4488 struct ovfsconf ovfs; 4489 4490 vfsconf_slock(); 4491 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4492 bzero(&ovfs, sizeof(ovfs)); 4493 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4494 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4495 ovfs.vfc_index = vfsp->vfc_typenum; 4496 ovfs.vfc_refcount = vfsp->vfc_refcount; 4497 ovfs.vfc_flags = vfsp->vfc_flags; 4498 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4499 if (error != 0) { 4500 vfsconf_sunlock(); 4501 return (error); 4502 } 4503 } 4504 vfsconf_sunlock(); 4505 return (0); 4506 } 4507 4508 #endif /* 1 || COMPAT_PRELITE2 */ 4509 #endif /* !BURN_BRIDGES */ 4510 4511 #define KINFO_VNODESLOP 10 4512 #ifdef notyet 4513 /* 4514 * Dump vnode list (via sysctl). 4515 */ 4516 /* ARGSUSED */ 4517 static int 4518 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4519 { 4520 struct xvnode *xvn; 4521 struct mount *mp; 4522 struct vnode *vp; 4523 int error, len, n; 4524 4525 /* 4526 * Stale numvnodes access is not fatal here. 4527 */ 4528 req->lock = 0; 4529 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4530 if (!req->oldptr) 4531 /* Make an estimate */ 4532 return (SYSCTL_OUT(req, 0, len)); 4533 4534 error = sysctl_wire_old_buffer(req, 0); 4535 if (error != 0) 4536 return (error); 4537 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4538 n = 0; 4539 mtx_lock(&mountlist_mtx); 4540 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4541 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4542 continue; 4543 MNT_ILOCK(mp); 4544 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4545 if (n == len) 4546 break; 4547 vref(vp); 4548 xvn[n].xv_size = sizeof *xvn; 4549 xvn[n].xv_vnode = vp; 4550 xvn[n].xv_id = 0; /* XXX compat */ 4551 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4552 XV_COPY(usecount); 4553 XV_COPY(writecount); 4554 XV_COPY(holdcnt); 4555 XV_COPY(mount); 4556 XV_COPY(numoutput); 4557 XV_COPY(type); 4558 #undef XV_COPY 4559 xvn[n].xv_flag = vp->v_vflag; 4560 4561 switch (vp->v_type) { 4562 case VREG: 4563 case VDIR: 4564 case VLNK: 4565 break; 4566 case VBLK: 4567 case VCHR: 4568 if (vp->v_rdev == NULL) { 4569 vrele(vp); 4570 continue; 4571 } 4572 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4573 break; 4574 case VSOCK: 4575 xvn[n].xv_socket = vp->v_socket; 4576 break; 4577 case VFIFO: 4578 xvn[n].xv_fifo = vp->v_fifoinfo; 4579 break; 4580 case VNON: 4581 case VBAD: 4582 default: 4583 /* shouldn't happen? 
*/ 4584 vrele(vp); 4585 continue; 4586 } 4587 vrele(vp); 4588 ++n; 4589 } 4590 MNT_IUNLOCK(mp); 4591 mtx_lock(&mountlist_mtx); 4592 vfs_unbusy(mp); 4593 if (n == len) 4594 break; 4595 } 4596 mtx_unlock(&mountlist_mtx); 4597 4598 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4599 free(xvn, M_TEMP); 4600 return (error); 4601 } 4602 4603 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4604 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4605 ""); 4606 #endif 4607 4608 static void 4609 unmount_or_warn(struct mount *mp) 4610 { 4611 int error; 4612 4613 error = dounmount(mp, MNT_FORCE, curthread); 4614 if (error != 0) { 4615 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4616 if (error == EBUSY) 4617 printf("BUSY)\n"); 4618 else 4619 printf("%d)\n", error); 4620 } 4621 } 4622 4623 /* 4624 * Unmount all filesystems. The list is traversed in reverse order 4625 * of mounting to avoid dependencies. 4626 */ 4627 void 4628 vfs_unmountall(void) 4629 { 4630 struct mount *mp, *tmp; 4631 4632 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4633 4634 /* 4635 * Since this only runs when rebooting, it is not interlocked. 4636 */ 4637 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4638 vfs_ref(mp); 4639 4640 /* 4641 * Forcibly unmounting "/dev" before "/" would prevent clean 4642 * unmount of the latter. 4643 */ 4644 if (mp == rootdevmp) 4645 continue; 4646 4647 unmount_or_warn(mp); 4648 } 4649 4650 if (rootdevmp != NULL) 4651 unmount_or_warn(rootdevmp); 4652 } 4653 4654 static void 4655 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4656 { 4657 4658 ASSERT_VI_LOCKED(vp, __func__); 4659 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4660 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4661 vdropl(vp); 4662 return; 4663 } 4664 if (vn_lock(vp, lkflags) == 0) { 4665 VI_LOCK(vp); 4666 vinactive(vp); 4667 VOP_UNLOCK(vp); 4668 vdropl(vp); 4669 return; 4670 } 4671 vdefer_inactive_unlocked(vp); 4672 } 4673 4674 static int 4675 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4676 { 4677 4678 return (vp->v_iflag & VI_DEFINACT); 4679 } 4680 4681 static void __noinline 4682 vfs_periodic_inactive(struct mount *mp, int flags) 4683 { 4684 struct vnode *vp, *mvp; 4685 int lkflags; 4686 4687 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4688 if (flags != MNT_WAIT) 4689 lkflags |= LK_NOWAIT; 4690 4691 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4692 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4693 VI_UNLOCK(vp); 4694 continue; 4695 } 4696 vp->v_iflag &= ~VI_DEFINACT; 4697 vfs_deferred_inactive(vp, lkflags); 4698 } 4699 } 4700 4701 static inline bool 4702 vfs_want_msync(struct vnode *vp) 4703 { 4704 struct vm_object *obj; 4705 4706 /* 4707 * This test may be performed without any locks held. 4708 * We rely on vm_object's type stability. 
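 * A stale answer here is tolerable: the caller re-checks the object and VV_NOSYNC under the
 * vnode lock before calling vm_object_page_clean().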
4709 */ 4710 if (vp->v_vflag & VV_NOSYNC) 4711 return (false); 4712 obj = vp->v_object; 4713 return (obj != NULL && vm_object_mightbedirty(obj)); 4714 } 4715 4716 static int 4717 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4718 { 4719 4720 if (vp->v_vflag & VV_NOSYNC) 4721 return (false); 4722 if (vp->v_iflag & VI_DEFINACT) 4723 return (true); 4724 return (vfs_want_msync(vp)); 4725 } 4726 4727 static void __noinline 4728 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4729 { 4730 struct vnode *vp, *mvp; 4731 struct vm_object *obj; 4732 int lkflags, objflags; 4733 bool seen_defer; 4734 4735 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4736 if (flags != MNT_WAIT) { 4737 lkflags |= LK_NOWAIT; 4738 objflags = OBJPC_NOSYNC; 4739 } else { 4740 objflags = OBJPC_SYNC; 4741 } 4742 4743 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4744 seen_defer = false; 4745 if (vp->v_iflag & VI_DEFINACT) { 4746 vp->v_iflag &= ~VI_DEFINACT; 4747 seen_defer = true; 4748 } 4749 if (!vfs_want_msync(vp)) { 4750 if (seen_defer) 4751 vfs_deferred_inactive(vp, lkflags); 4752 else 4753 VI_UNLOCK(vp); 4754 continue; 4755 } 4756 if (vget(vp, lkflags) == 0) { 4757 obj = vp->v_object; 4758 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4759 VM_OBJECT_WLOCK(obj); 4760 vm_object_page_clean(obj, 0, 0, objflags); 4761 VM_OBJECT_WUNLOCK(obj); 4762 } 4763 vput(vp); 4764 if (seen_defer) 4765 vdrop(vp); 4766 } else { 4767 if (seen_defer) 4768 vdefer_inactive_unlocked(vp); 4769 } 4770 } 4771 } 4772 4773 void 4774 vfs_periodic(struct mount *mp, int flags) 4775 { 4776 4777 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4778 4779 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4780 vfs_periodic_inactive(mp, flags); 4781 else 4782 vfs_periodic_msync_inactive(mp, flags); 4783 } 4784 4785 static void 4786 destroy_vpollinfo_free(struct vpollinfo *vi) 4787 { 4788 4789 knlist_destroy(&vi->vpi_selinfo.si_note); 4790 mtx_destroy(&vi->vpi_lock); 4791 uma_zfree(vnodepoll_zone, vi); 4792 } 4793 4794 static void 4795 destroy_vpollinfo(struct vpollinfo *vi) 4796 { 4797 4798 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4799 seldrain(&vi->vpi_selinfo); 4800 destroy_vpollinfo_free(vi); 4801 } 4802 4803 /* 4804 * Initialize per-vnode helper structure to hold poll-related state. 4805 */ 4806 void 4807 v_addpollinfo(struct vnode *vp) 4808 { 4809 struct vpollinfo *vi; 4810 4811 if (vp->v_pollinfo != NULL) 4812 return; 4813 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4814 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4815 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4816 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4817 VI_LOCK(vp); 4818 if (vp->v_pollinfo != NULL) { 4819 VI_UNLOCK(vp); 4820 destroy_vpollinfo_free(vi); 4821 return; 4822 } 4823 vp->v_pollinfo = vi; 4824 VI_UNLOCK(vp); 4825 } 4826 4827 /* 4828 * Record a process's interest in events which might happen to 4829 * a vnode. Because poll uses the historic select-style interface 4830 * internally, this routine serves as both the ``check for any 4831 * pending events'' and the ``record my interest in future events'' 4832 * functions. (These are done together, while the lock is held, 4833 * to avoid race conditions.) 
4834 */ 4835 int 4836 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4837 { 4838 4839 v_addpollinfo(vp); 4840 mtx_lock(&vp->v_pollinfo->vpi_lock); 4841 if (vp->v_pollinfo->vpi_revents & events) { 4842 /* 4843 * This leaves events we are not interested 4844 * in available for the other process which 4845 * which presumably had requested them 4846 * (otherwise they would never have been 4847 * recorded). 4848 */ 4849 events &= vp->v_pollinfo->vpi_revents; 4850 vp->v_pollinfo->vpi_revents &= ~events; 4851 4852 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4853 return (events); 4854 } 4855 vp->v_pollinfo->vpi_events |= events; 4856 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4857 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4858 return (0); 4859 } 4860 4861 /* 4862 * Routine to create and manage a filesystem syncer vnode. 4863 */ 4864 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4865 static int sync_fsync(struct vop_fsync_args *); 4866 static int sync_inactive(struct vop_inactive_args *); 4867 static int sync_reclaim(struct vop_reclaim_args *); 4868 4869 static struct vop_vector sync_vnodeops = { 4870 .vop_bypass = VOP_EOPNOTSUPP, 4871 .vop_close = sync_close, /* close */ 4872 .vop_fsync = sync_fsync, /* fsync */ 4873 .vop_inactive = sync_inactive, /* inactive */ 4874 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4875 .vop_reclaim = sync_reclaim, /* reclaim */ 4876 .vop_lock1 = vop_stdlock, /* lock */ 4877 .vop_unlock = vop_stdunlock, /* unlock */ 4878 .vop_islocked = vop_stdislocked, /* islocked */ 4879 }; 4880 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4881 4882 /* 4883 * Create a new filesystem syncer vnode for the specified mount point. 4884 */ 4885 void 4886 vfs_allocate_syncvnode(struct mount *mp) 4887 { 4888 struct vnode *vp; 4889 struct bufobj *bo; 4890 static long start, incr, next; 4891 int error; 4892 4893 /* Allocate a new vnode */ 4894 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4895 if (error != 0) 4896 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4897 vp->v_type = VNON; 4898 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4899 vp->v_vflag |= VV_FORCEINSMQ; 4900 error = insmntque(vp, mp); 4901 if (error != 0) 4902 panic("vfs_allocate_syncvnode: insmntque() failed"); 4903 vp->v_vflag &= ~VV_FORCEINSMQ; 4904 VOP_UNLOCK(vp); 4905 /* 4906 * Place the vnode onto the syncer worklist. We attempt to 4907 * scatter them about on the list so that they will go off 4908 * at evenly distributed times even if all the filesystems 4909 * are mounted at once. 4910 */ 4911 next += incr; 4912 if (next == 0 || next > syncer_maxdelay) { 4913 start /= 2; 4914 incr /= 2; 4915 if (start == 0) { 4916 start = syncer_maxdelay / 2; 4917 incr = syncer_maxdelay; 4918 } 4919 next = start; 4920 } 4921 bo = &vp->v_bufobj; 4922 BO_LOCK(bo); 4923 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4924 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. 
*/ 4925 mtx_lock(&sync_mtx); 4926 sync_vnode_count++; 4927 if (mp->mnt_syncer == NULL) { 4928 mp->mnt_syncer = vp; 4929 vp = NULL; 4930 } 4931 mtx_unlock(&sync_mtx); 4932 BO_UNLOCK(bo);
4933 if (vp != NULL) { 4934 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4935 vgone(vp); 4936 vput(vp); 4937 } 4938 } 4939
4940 void 4941 vfs_deallocate_syncvnode(struct mount *mp) 4942 { 4943 struct vnode *vp; 4944 4945 mtx_lock(&sync_mtx); 4946 vp = mp->mnt_syncer; 4947 if (vp != NULL) 4948 mp->mnt_syncer = NULL; 4949 mtx_unlock(&sync_mtx); 4950 if (vp != NULL) 4951 vrele(vp); 4952 } 4953
4954 /* 4955 * Do a lazy sync of the filesystem. 4956 */ 4957 static int 4958 sync_fsync(struct vop_fsync_args *ap) 4959 { 4960 struct vnode *syncvp = ap->a_vp; 4961 struct mount *mp = syncvp->v_mount; 4962 int error, save; 4963 struct bufobj *bo; 4964
4965 /* 4966 * We only need to do something if this is a lazy evaluation. 4967 */ 4968 if (ap->a_waitfor != MNT_LAZY) 4969 return (0); 4970
4971 /* 4972 * Move ourselves to the back of the sync list. 4973 */ 4974 bo = &syncvp->v_bufobj; 4975 BO_LOCK(bo); 4976 vn_syncer_add_to_worklist(bo, syncdelay); 4977 BO_UNLOCK(bo); 4978
4979 /* 4980 * Walk the list of vnodes pushing all that are dirty and 4981 * not already on the sync list. 4982 */ 4983 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4984 return (0); 4985 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4986 vfs_unbusy(mp); 4987 return (0); 4988 } 4989 save = curthread_pflags_set(TDP_SYNCIO);
4990 /* 4991 * The filesystem at hand may be idle with free vnodes stored in the 4992 * batch. Return them instead of letting them stay there indefinitely. 4993 */ 4994 vfs_periodic(mp, MNT_NOWAIT); 4995 error = VFS_SYNC(mp, MNT_LAZY); 4996 curthread_pflags_restore(save); 4997 vn_finished_write(mp); 4998 vfs_unbusy(mp); 4999 return (error); 5000 } 5001
5002 /* 5003 * The syncer vnode is no longer referenced. 5004 */ 5005 static int 5006 sync_inactive(struct vop_inactive_args *ap) 5007 { 5008 5009 vgone(ap->a_vp); 5010 return (0); 5011 } 5012
5013 /* 5014 * The syncer vnode is no longer needed and is being decommissioned. 5015 * 5016 * Modifications to the worklist must be protected by sync_mtx.
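 * Accordingly, the code below takes both the bufobj lock and sync_mtx before clearing
 * mnt_syncer and unlinking the bufobj from the worklist.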
5017 */ 5018 static int 5019 sync_reclaim(struct vop_reclaim_args *ap) 5020 { 5021 struct vnode *vp = ap->a_vp; 5022 struct bufobj *bo; 5023
5024 bo = &vp->v_bufobj; 5025 BO_LOCK(bo); 5026 mtx_lock(&sync_mtx); 5027 if (vp->v_mount->mnt_syncer == vp) 5028 vp->v_mount->mnt_syncer = NULL; 5029 if (bo->bo_flag & BO_ONWORKLST) { 5030 LIST_REMOVE(bo, bo_synclist); 5031 syncer_worklist_len--; 5032 sync_vnode_count--; 5033 bo->bo_flag &= ~BO_ONWORKLST; 5034 } 5035 mtx_unlock(&sync_mtx); 5036 BO_UNLOCK(bo); 5037 5038 return (0); 5039 } 5040
5041 int 5042 vn_need_pageq_flush(struct vnode *vp) 5043 { 5044 struct vm_object *obj; 5045 int need; 5046 5047 MPASS(mtx_owned(VI_MTX(vp))); 5048 need = 0; 5049 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5050 vm_object_mightbedirty(obj)) 5051 need = 1; 5052 return (need); 5053 } 5054
5055 /* 5056 * Check if vnode represents a disk device 5057 */ 5058 bool 5059 vn_isdisk_error(struct vnode *vp, int *errp) 5060 { 5061 int error; 5062 5063 if (vp->v_type != VCHR) { 5064 error = ENOTBLK; 5065 goto out; 5066 } 5067 error = 0; 5068 dev_lock(); 5069 if (vp->v_rdev == NULL) 5070 error = ENXIO; 5071 else if (vp->v_rdev->si_devsw == NULL) 5072 error = ENXIO; 5073 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5074 error = ENOTBLK; 5075 dev_unlock(); 5076 out: 5077 *errp = error; 5078 return (error == 0); 5079 } 5080
5081 bool 5082 vn_isdisk(struct vnode *vp) 5083 { 5084 int error; 5085 5086 return (vn_isdisk_error(vp, &error)); 5087 } 5088
5089 /* 5090 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5091 * the comment above cache_fplookup for details. 5092 */ 5093 int 5094 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5095 { 5096 int error; 5097 5098 VFS_SMR_ASSERT_ENTERED(); 5099
5100 /* Check the owner. */ 5101 if (cred->cr_uid == file_uid) { 5102 if (file_mode & S_IXUSR) 5103 return (0); 5104 goto out_error; 5105 } 5106
5107 /* Otherwise, check the groups (first match) */ 5108 if (groupmember(file_gid, cred)) { 5109 if (file_mode & S_IXGRP) 5110 return (0); 5111 goto out_error; 5112 } 5113
5114 /* Otherwise, check everyone else. */ 5115 if (file_mode & S_IXOTH) 5116 return (0); 5117 out_error:
5118 /* 5119 * Permission check failed, but it is possible denial will get overwritten 5120 * (e.g., when root is traversing through a 700 directory owned by someone 5121 * else). 5122 * 5123 * vaccess() calls priv_check_cred which in turn can descend into MAC 5124 * modules overriding this result. It's quite unclear what semantics 5125 * are allowed for them to operate, thus for safety we don't call them 5126 * from within the SMR section. This also means if any such modules 5127 * are present, we have to let the regular lookup decide. 5128 */
5129 error = priv_check_cred_vfs_lookup_nomac(cred); 5130 switch (error) { 5131 case 0: 5132 return (0); 5133 case EAGAIN: 5134 /* 5135 * MAC modules present. 5136 */ 5137 return (EAGAIN); 5138 case EPERM: 5139 return (EACCES); 5140 default: 5141 return (error); 5142 } 5143 } 5144
5145 /* 5146 * Common filesystem object access control check routine. Accepts a 5147 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5148 * Returns 0 on success, or an errno on failure.
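 *
 * As a sketch only (not lifted from any particular filesystem), a caller with a
 * hypothetical per-filesystem inode pointer 'ip' would map its fields directly
 * onto the arguments:
 *
 *	error = vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    accmode, cred);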
5149 */ 5150 int 5151 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5152 accmode_t accmode, struct ucred *cred) 5153 { 5154 accmode_t dac_granted; 5155 accmode_t priv_granted; 5156 5157 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5158 ("invalid bit in accmode")); 5159 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5160 ("VAPPEND without VWRITE")); 5161 5162 /* 5163 * Look for a normal, non-privileged way to access the file/directory 5164 * as requested. If it exists, go with that. 5165 */ 5166 5167 dac_granted = 0; 5168 5169 /* Check the owner. */ 5170 if (cred->cr_uid == file_uid) { 5171 dac_granted |= VADMIN; 5172 if (file_mode & S_IXUSR) 5173 dac_granted |= VEXEC; 5174 if (file_mode & S_IRUSR) 5175 dac_granted |= VREAD; 5176 if (file_mode & S_IWUSR) 5177 dac_granted |= (VWRITE | VAPPEND); 5178 5179 if ((accmode & dac_granted) == accmode) 5180 return (0); 5181 5182 goto privcheck; 5183 } 5184 5185 /* Otherwise, check the groups (first match) */ 5186 if (groupmember(file_gid, cred)) { 5187 if (file_mode & S_IXGRP) 5188 dac_granted |= VEXEC; 5189 if (file_mode & S_IRGRP) 5190 dac_granted |= VREAD; 5191 if (file_mode & S_IWGRP) 5192 dac_granted |= (VWRITE | VAPPEND); 5193 5194 if ((accmode & dac_granted) == accmode) 5195 return (0); 5196 5197 goto privcheck; 5198 } 5199 5200 /* Otherwise, check everyone else. */ 5201 if (file_mode & S_IXOTH) 5202 dac_granted |= VEXEC; 5203 if (file_mode & S_IROTH) 5204 dac_granted |= VREAD; 5205 if (file_mode & S_IWOTH) 5206 dac_granted |= (VWRITE | VAPPEND); 5207 if ((accmode & dac_granted) == accmode) 5208 return (0); 5209 5210 privcheck: 5211 /* 5212 * Build a privilege mask to determine if the set of privileges 5213 * satisfies the requirements when combined with the granted mask 5214 * from above. For each privilege, if the privilege is required, 5215 * bitwise or the request type onto the priv_granted mask. 5216 */ 5217 priv_granted = 0; 5218 5219 if (type == VDIR) { 5220 /* 5221 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5222 * requests, instead of PRIV_VFS_EXEC. 5223 */ 5224 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5225 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5226 priv_granted |= VEXEC; 5227 } else { 5228 /* 5229 * Ensure that at least one execute bit is on. Otherwise, 5230 * a privileged user will always succeed, and we don't want 5231 * this to happen unless the file really is executable. 5232 */ 5233 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5234 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5235 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5236 priv_granted |= VEXEC; 5237 } 5238 5239 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5240 !priv_check_cred(cred, PRIV_VFS_READ)) 5241 priv_granted |= VREAD; 5242 5243 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5244 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5245 priv_granted |= (VWRITE | VAPPEND); 5246 5247 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5248 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5249 priv_granted |= VADMIN; 5250 5251 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5252 return (0); 5253 } 5254 5255 return ((accmode & VADMIN) ? EPERM : EACCES); 5256 } 5257 5258 /* 5259 * Credential check based on process requesting service, and per-attribute 5260 * permissions. 
5261 */ 5262 int 5263 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5264 struct thread *td, accmode_t accmode) 5265 { 5266 5267 /* 5268 * Kernel-invoked always succeeds. 5269 */ 5270 if (cred == NOCRED) 5271 return (0); 5272 5273 /* 5274 * Do not allow privileged processes in jail to directly manipulate 5275 * system attributes. 5276 */ 5277 switch (attrnamespace) { 5278 case EXTATTR_NAMESPACE_SYSTEM: 5279 /* Potentially should be: return (EPERM); */ 5280 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5281 case EXTATTR_NAMESPACE_USER: 5282 return (VOP_ACCESS(vp, accmode, cred, td)); 5283 default: 5284 return (EPERM); 5285 } 5286 } 5287 5288 #ifdef DEBUG_VFS_LOCKS 5289 /* 5290 * This only exists to suppress warnings from unlocked specfs accesses. It is 5291 * no longer ok to have an unlocked VFS. 5292 */ 5293 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5294 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5295 5296 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5297 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5298 "Drop into debugger on lock violation"); 5299 5300 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5301 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5302 0, "Check for interlock across VOPs"); 5303 5304 int vfs_badlock_print = 1; /* Print lock violations. */ 5305 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5306 0, "Print lock violations"); 5307 5308 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5309 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5310 0, "Print vnode details on lock violations"); 5311 5312 #ifdef KDB 5313 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 5314 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5315 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5316 #endif 5317 5318 static void 5319 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5320 { 5321 5322 #ifdef KDB 5323 if (vfs_badlock_backtrace) 5324 kdb_backtrace(); 5325 #endif 5326 if (vfs_badlock_vnode) 5327 vn_printf(vp, "vnode "); 5328 if (vfs_badlock_print) 5329 printf("%s: %p %s\n", str, (void *)vp, msg); 5330 if (vfs_badlock_ddb) 5331 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5332 } 5333 5334 void 5335 assert_vi_locked(struct vnode *vp, const char *str) 5336 { 5337 5338 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5339 vfs_badlock("interlock is not locked but should be", str, vp); 5340 } 5341 5342 void 5343 assert_vi_unlocked(struct vnode *vp, const char *str) 5344 { 5345 5346 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5347 vfs_badlock("interlock is locked but should not be", str, vp); 5348 } 5349 5350 void 5351 assert_vop_locked(struct vnode *vp, const char *str) 5352 { 5353 int locked; 5354 5355 if (!IGNORE_LOCK(vp)) { 5356 locked = VOP_ISLOCKED(vp); 5357 if (locked == 0 || locked == LK_EXCLOTHER) 5358 vfs_badlock("is not locked but should be", str, vp); 5359 } 5360 } 5361 5362 void 5363 assert_vop_unlocked(struct vnode *vp, const char *str) 5364 { 5365 5366 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5367 vfs_badlock("is locked but should not be", str, vp); 5368 } 5369 5370 void 5371 assert_vop_elocked(struct vnode *vp, const char *str) 5372 { 5373 5374 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5375 vfs_badlock("is not exclusive locked but should be", str, vp); 5376 } 5377 #endif /* DEBUG_VFS_LOCKS */ 5378 5379 void 5380 vop_rename_fail(struct vop_rename_args *ap) 5381 { 5382 5383 if (ap->a_tvp != NULL) 5384 vput(ap->a_tvp); 5385 if (ap->a_tdvp == ap->a_tvp) 5386 vrele(ap->a_tdvp); 5387 else 5388 vput(ap->a_tdvp); 5389 vrele(ap->a_fdvp); 5390 vrele(ap->a_fvp); 5391 } 5392 5393 void 5394 vop_rename_pre(void *ap) 5395 { 5396 struct vop_rename_args *a = ap; 5397 5398 #ifdef DEBUG_VFS_LOCKS 5399 if (a->a_tvp) 5400 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5401 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5402 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5403 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5404 5405 /* Check the source (from). */ 5406 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5407 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5408 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5409 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5410 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5411 5412 /* Check the target. */ 5413 if (a->a_tvp) 5414 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5415 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5416 #endif 5417 /* 5418 * It may be tempting to add vn_seqc_write_begin/end calls here and 5419 * in vop_rename_post but that's not going to work out since some 5420 * filesystems relookup vnodes mid-rename. This is probably a bug. 5421 * 5422 * For now filesystems are expected to do the relevant calls after they 5423 * decide what vnodes to operate on. 
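 * The vhold() calls below pin the participating vnodes so they cannot be recycled while
 * the rename is in flight; vop_rename_post() drops the matching references with vdrop().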
5424 */ 5425 if (a->a_tdvp != a->a_fdvp) 5426 vhold(a->a_fdvp); 5427 if (a->a_tvp != a->a_fvp) 5428 vhold(a->a_fvp); 5429 vhold(a->a_tdvp); 5430 if (a->a_tvp) 5431 vhold(a->a_tvp); 5432 } 5433 5434 #ifdef DEBUG_VFS_LOCKS 5435 void 5436 vop_fplookup_vexec_debugpre(void *ap __unused) 5437 { 5438 5439 VFS_SMR_ASSERT_ENTERED(); 5440 } 5441 5442 void 5443 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5444 { 5445 5446 VFS_SMR_ASSERT_ENTERED(); 5447 } 5448 5449 void 5450 vop_strategy_debugpre(void *ap) 5451 { 5452 struct vop_strategy_args *a; 5453 struct buf *bp; 5454 5455 a = ap; 5456 bp = a->a_bp; 5457 5458 /* 5459 * Cluster ops lock their component buffers but not the IO container. 5460 */ 5461 if ((bp->b_flags & B_CLUSTER) != 0) 5462 return; 5463 5464 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5465 if (vfs_badlock_print) 5466 printf( 5467 "VOP_STRATEGY: bp is not locked but should be\n"); 5468 if (vfs_badlock_ddb) 5469 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5470 } 5471 } 5472 5473 void 5474 vop_lock_debugpre(void *ap) 5475 { 5476 struct vop_lock1_args *a = ap; 5477 5478 if ((a->a_flags & LK_INTERLOCK) == 0) 5479 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5480 else 5481 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5482 } 5483 5484 void 5485 vop_lock_debugpost(void *ap, int rc) 5486 { 5487 struct vop_lock1_args *a = ap; 5488 5489 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5490 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5491 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5492 } 5493 5494 void 5495 vop_unlock_debugpre(void *ap) 5496 { 5497 struct vop_unlock_args *a = ap; 5498 5499 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5500 } 5501 5502 void 5503 vop_need_inactive_debugpre(void *ap) 5504 { 5505 struct vop_need_inactive_args *a = ap; 5506 5507 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5508 } 5509 5510 void 5511 vop_need_inactive_debugpost(void *ap, int rc) 5512 { 5513 struct vop_need_inactive_args *a = ap; 5514 5515 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5516 } 5517 #endif 5518 5519 void 5520 vop_create_pre(void *ap) 5521 { 5522 struct vop_create_args *a; 5523 struct vnode *dvp; 5524 5525 a = ap; 5526 dvp = a->a_dvp; 5527 vn_seqc_write_begin(dvp); 5528 } 5529 5530 void 5531 vop_create_post(void *ap, int rc) 5532 { 5533 struct vop_create_args *a; 5534 struct vnode *dvp; 5535 5536 a = ap; 5537 dvp = a->a_dvp; 5538 vn_seqc_write_end(dvp); 5539 if (!rc) 5540 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5541 } 5542 5543 void 5544 vop_whiteout_pre(void *ap) 5545 { 5546 struct vop_whiteout_args *a; 5547 struct vnode *dvp; 5548 5549 a = ap; 5550 dvp = a->a_dvp; 5551 vn_seqc_write_begin(dvp); 5552 } 5553 5554 void 5555 vop_whiteout_post(void *ap, int rc) 5556 { 5557 struct vop_whiteout_args *a; 5558 struct vnode *dvp; 5559 5560 a = ap; 5561 dvp = a->a_dvp; 5562 vn_seqc_write_end(dvp); 5563 } 5564 5565 void 5566 vop_deleteextattr_pre(void *ap) 5567 { 5568 struct vop_deleteextattr_args *a; 5569 struct vnode *vp; 5570 5571 a = ap; 5572 vp = a->a_vp; 5573 vn_seqc_write_begin(vp); 5574 } 5575 5576 void 5577 vop_deleteextattr_post(void *ap, int rc) 5578 { 5579 struct vop_deleteextattr_args *a; 5580 struct vnode *vp; 5581 5582 a = ap; 5583 vp = a->a_vp; 5584 vn_seqc_write_end(vp); 5585 if (!rc) 5586 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5587 } 5588 5589 void 5590 vop_link_pre(void *ap) 5591 { 5592 struct vop_link_args *a; 5593 struct vnode *vp, *tdvp; 5594 5595 a = ap; 5596 vp = a->a_vp; 5597 tdvp = a->a_tdvp; 5598 vn_seqc_write_begin(vp); 5599 vn_seqc_write_begin(tdvp); 5600 } 5601 
5602 void 5603 vop_link_post(void *ap, int rc) 5604 { 5605 struct vop_link_args *a; 5606 struct vnode *vp, *tdvp; 5607 5608 a = ap; 5609 vp = a->a_vp; 5610 tdvp = a->a_tdvp; 5611 vn_seqc_write_end(vp); 5612 vn_seqc_write_end(tdvp); 5613 if (!rc) { 5614 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5615 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5616 } 5617 } 5618 5619 void 5620 vop_mkdir_pre(void *ap) 5621 { 5622 struct vop_mkdir_args *a; 5623 struct vnode *dvp; 5624 5625 a = ap; 5626 dvp = a->a_dvp; 5627 vn_seqc_write_begin(dvp); 5628 } 5629 5630 void 5631 vop_mkdir_post(void *ap, int rc) 5632 { 5633 struct vop_mkdir_args *a; 5634 struct vnode *dvp; 5635 5636 a = ap; 5637 dvp = a->a_dvp; 5638 vn_seqc_write_end(dvp); 5639 if (!rc) 5640 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5641 } 5642 5643 #ifdef DEBUG_VFS_LOCKS 5644 void 5645 vop_mkdir_debugpost(void *ap, int rc) 5646 { 5647 struct vop_mkdir_args *a; 5648 5649 a = ap; 5650 if (!rc) 5651 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5652 } 5653 #endif 5654 5655 void 5656 vop_mknod_pre(void *ap) 5657 { 5658 struct vop_mknod_args *a; 5659 struct vnode *dvp; 5660 5661 a = ap; 5662 dvp = a->a_dvp; 5663 vn_seqc_write_begin(dvp); 5664 } 5665 5666 void 5667 vop_mknod_post(void *ap, int rc) 5668 { 5669 struct vop_mknod_args *a; 5670 struct vnode *dvp; 5671 5672 a = ap; 5673 dvp = a->a_dvp; 5674 vn_seqc_write_end(dvp); 5675 if (!rc) 5676 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5677 } 5678 5679 void 5680 vop_reclaim_post(void *ap, int rc) 5681 { 5682 struct vop_reclaim_args *a; 5683 struct vnode *vp; 5684 5685 a = ap; 5686 vp = a->a_vp; 5687 ASSERT_VOP_IN_SEQC(vp); 5688 if (!rc) 5689 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5690 } 5691 5692 void 5693 vop_remove_pre(void *ap) 5694 { 5695 struct vop_remove_args *a; 5696 struct vnode *dvp, *vp; 5697 5698 a = ap; 5699 dvp = a->a_dvp; 5700 vp = a->a_vp; 5701 vn_seqc_write_begin(dvp); 5702 vn_seqc_write_begin(vp); 5703 } 5704 5705 void 5706 vop_remove_post(void *ap, int rc) 5707 { 5708 struct vop_remove_args *a; 5709 struct vnode *dvp, *vp; 5710 5711 a = ap; 5712 dvp = a->a_dvp; 5713 vp = a->a_vp; 5714 vn_seqc_write_end(dvp); 5715 vn_seqc_write_end(vp); 5716 if (!rc) { 5717 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5718 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5719 } 5720 } 5721 5722 void 5723 vop_rename_post(void *ap, int rc) 5724 { 5725 struct vop_rename_args *a = ap; 5726 long hint; 5727 5728 if (!rc) { 5729 hint = NOTE_WRITE; 5730 if (a->a_fdvp == a->a_tdvp) { 5731 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5732 hint |= NOTE_LINK; 5733 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5734 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5735 } else { 5736 hint |= NOTE_EXTEND; 5737 if (a->a_fvp->v_type == VDIR) 5738 hint |= NOTE_LINK; 5739 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5740 5741 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5742 a->a_tvp->v_type == VDIR) 5743 hint &= ~NOTE_LINK; 5744 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5745 } 5746 5747 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5748 if (a->a_tvp) 5749 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5750 } 5751 if (a->a_tdvp != a->a_fdvp) 5752 vdrop(a->a_fdvp); 5753 if (a->a_tvp != a->a_fvp) 5754 vdrop(a->a_fvp); 5755 vdrop(a->a_tdvp); 5756 if (a->a_tvp) 5757 vdrop(a->a_tvp); 5758 } 5759 5760 void 5761 vop_rmdir_pre(void *ap) 5762 { 5763 struct vop_rmdir_args *a; 5764 struct vnode *dvp, *vp; 5765 5766 a = ap; 5767 dvp = a->a_dvp; 5768 vp = a->a_vp; 5769 vn_seqc_write_begin(dvp); 5770 vn_seqc_write_begin(vp); 5771 } 5772 5773 void 5774 vop_rmdir_post(void *ap, int rc) 5775 { 5776 struct 
vop_rmdir_args *a; 5777 struct vnode *dvp, *vp; 5778 5779 a = ap; 5780 dvp = a->a_dvp; 5781 vp = a->a_vp; 5782 vn_seqc_write_end(dvp); 5783 vn_seqc_write_end(vp); 5784 if (!rc) { 5785 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5786 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5787 } 5788 } 5789 5790 void 5791 vop_setattr_pre(void *ap) 5792 { 5793 struct vop_setattr_args *a; 5794 struct vnode *vp; 5795 5796 a = ap; 5797 vp = a->a_vp; 5798 vn_seqc_write_begin(vp); 5799 } 5800 5801 void 5802 vop_setattr_post(void *ap, int rc) 5803 { 5804 struct vop_setattr_args *a; 5805 struct vnode *vp; 5806 5807 a = ap; 5808 vp = a->a_vp; 5809 vn_seqc_write_end(vp); 5810 if (!rc) 5811 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5812 } 5813 5814 void 5815 vop_setacl_pre(void *ap) 5816 { 5817 struct vop_setacl_args *a; 5818 struct vnode *vp; 5819 5820 a = ap; 5821 vp = a->a_vp; 5822 vn_seqc_write_begin(vp); 5823 } 5824 5825 void 5826 vop_setacl_post(void *ap, int rc __unused) 5827 { 5828 struct vop_setacl_args *a; 5829 struct vnode *vp; 5830 5831 a = ap; 5832 vp = a->a_vp; 5833 vn_seqc_write_end(vp); 5834 } 5835 5836 void 5837 vop_setextattr_pre(void *ap) 5838 { 5839 struct vop_setextattr_args *a; 5840 struct vnode *vp; 5841 5842 a = ap; 5843 vp = a->a_vp; 5844 vn_seqc_write_begin(vp); 5845 } 5846 5847 void 5848 vop_setextattr_post(void *ap, int rc) 5849 { 5850 struct vop_setextattr_args *a; 5851 struct vnode *vp; 5852 5853 a = ap; 5854 vp = a->a_vp; 5855 vn_seqc_write_end(vp); 5856 if (!rc) 5857 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5858 } 5859 5860 void 5861 vop_symlink_pre(void *ap) 5862 { 5863 struct vop_symlink_args *a; 5864 struct vnode *dvp; 5865 5866 a = ap; 5867 dvp = a->a_dvp; 5868 vn_seqc_write_begin(dvp); 5869 } 5870 5871 void 5872 vop_symlink_post(void *ap, int rc) 5873 { 5874 struct vop_symlink_args *a; 5875 struct vnode *dvp; 5876 5877 a = ap; 5878 dvp = a->a_dvp; 5879 vn_seqc_write_end(dvp); 5880 if (!rc) 5881 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5882 } 5883 5884 void 5885 vop_open_post(void *ap, int rc) 5886 { 5887 struct vop_open_args *a = ap; 5888 5889 if (!rc) 5890 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5891 } 5892 5893 void 5894 vop_close_post(void *ap, int rc) 5895 { 5896 struct vop_close_args *a = ap; 5897 5898 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5899 !VN_IS_DOOMED(a->a_vp))) { 5900 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5901 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5902 } 5903 } 5904 5905 void 5906 vop_read_post(void *ap, int rc) 5907 { 5908 struct vop_read_args *a = ap; 5909 5910 if (!rc) 5911 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5912 } 5913 5914 void 5915 vop_read_pgcache_post(void *ap, int rc) 5916 { 5917 struct vop_read_pgcache_args *a = ap; 5918 5919 if (!rc) 5920 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 5921 } 5922 5923 void 5924 vop_readdir_post(void *ap, int rc) 5925 { 5926 struct vop_readdir_args *a = ap; 5927 5928 if (!rc) 5929 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5930 } 5931 5932 static struct knlist fs_knlist; 5933 5934 static void 5935 vfs_event_init(void *arg) 5936 { 5937 knlist_init_mtx(&fs_knlist, NULL); 5938 } 5939 /* XXX - correct order? 
*/ 5940 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5941 5942 void 5943 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5944 { 5945 5946 KNOTE_UNLOCKED(&fs_knlist, event); 5947 } 5948 5949 static int filt_fsattach(struct knote *kn); 5950 static void filt_fsdetach(struct knote *kn); 5951 static int filt_fsevent(struct knote *kn, long hint); 5952 5953 struct filterops fs_filtops = { 5954 .f_isfd = 0, 5955 .f_attach = filt_fsattach, 5956 .f_detach = filt_fsdetach, 5957 .f_event = filt_fsevent 5958 }; 5959 5960 static int 5961 filt_fsattach(struct knote *kn) 5962 { 5963 5964 kn->kn_flags |= EV_CLEAR; 5965 knlist_add(&fs_knlist, kn, 0); 5966 return (0); 5967 } 5968 5969 static void 5970 filt_fsdetach(struct knote *kn) 5971 { 5972 5973 knlist_remove(&fs_knlist, kn, 0); 5974 } 5975 5976 static int 5977 filt_fsevent(struct knote *kn, long hint) 5978 { 5979 5980 kn->kn_fflags |= hint; 5981 return (kn->kn_fflags != 0); 5982 } 5983 5984 static int 5985 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5986 { 5987 struct vfsidctl vc; 5988 int error; 5989 struct mount *mp; 5990 5991 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5992 if (error) 5993 return (error); 5994 if (vc.vc_vers != VFS_CTL_VERS1) 5995 return (EINVAL); 5996 mp = vfs_getvfs(&vc.vc_fsid); 5997 if (mp == NULL) 5998 return (ENOENT); 5999 /* ensure that a specific sysctl goes to the right filesystem. */ 6000 if (strcmp(vc.vc_fstypename, "*") != 0 && 6001 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6002 vfs_rel(mp); 6003 return (EINVAL); 6004 } 6005 VCTLTOREQ(&vc, req); 6006 error = VFS_SYSCTL(mp, vc.vc_op, req); 6007 vfs_rel(mp); 6008 return (error); 6009 } 6010 6011 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6012 NULL, 0, sysctl_vfs_ctl, "", 6013 "Sysctl by fsid"); 6014 6015 /* 6016 * Function to initialize a va_filerev field sensibly. 6017 * XXX: Wouldn't a random number make a lot more sense ?? 
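 * The value computed below packs bt.sec into the upper 32 bits and the top half of
 * bt.frac into the lower 32 bits, so successive calls within a single boot yield
 * non-decreasing values.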
6018 */ 6019 u_quad_t 6020 init_va_filerev(void) 6021 { 6022 struct bintime bt; 6023 6024 getbinuptime(&bt); 6025 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6026 } 6027 6028 static int filt_vfsread(struct knote *kn, long hint); 6029 static int filt_vfswrite(struct knote *kn, long hint); 6030 static int filt_vfsvnode(struct knote *kn, long hint); 6031 static void filt_vfsdetach(struct knote *kn); 6032 static struct filterops vfsread_filtops = { 6033 .f_isfd = 1, 6034 .f_detach = filt_vfsdetach, 6035 .f_event = filt_vfsread 6036 }; 6037 static struct filterops vfswrite_filtops = { 6038 .f_isfd = 1, 6039 .f_detach = filt_vfsdetach, 6040 .f_event = filt_vfswrite 6041 }; 6042 static struct filterops vfsvnode_filtops = { 6043 .f_isfd = 1, 6044 .f_detach = filt_vfsdetach, 6045 .f_event = filt_vfsvnode 6046 }; 6047 6048 static void 6049 vfs_knllock(void *arg) 6050 { 6051 struct vnode *vp = arg; 6052 6053 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6054 } 6055 6056 static void 6057 vfs_knlunlock(void *arg) 6058 { 6059 struct vnode *vp = arg; 6060 6061 VOP_UNLOCK(vp); 6062 } 6063 6064 static void 6065 vfs_knl_assert_locked(void *arg) 6066 { 6067 #ifdef DEBUG_VFS_LOCKS 6068 struct vnode *vp = arg; 6069 6070 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6071 #endif 6072 } 6073 6074 static void 6075 vfs_knl_assert_unlocked(void *arg) 6076 { 6077 #ifdef DEBUG_VFS_LOCKS 6078 struct vnode *vp = arg; 6079 6080 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6081 #endif 6082 } 6083 6084 int 6085 vfs_kqfilter(struct vop_kqfilter_args *ap) 6086 { 6087 struct vnode *vp = ap->a_vp; 6088 struct knote *kn = ap->a_kn; 6089 struct knlist *knl; 6090 6091 switch (kn->kn_filter) { 6092 case EVFILT_READ: 6093 kn->kn_fop = &vfsread_filtops; 6094 break; 6095 case EVFILT_WRITE: 6096 kn->kn_fop = &vfswrite_filtops; 6097 break; 6098 case EVFILT_VNODE: 6099 kn->kn_fop = &vfsvnode_filtops; 6100 break; 6101 default: 6102 return (EINVAL); 6103 } 6104 6105 kn->kn_hook = (caddr_t)vp; 6106 6107 v_addpollinfo(vp); 6108 if (vp->v_pollinfo == NULL) 6109 return (ENOMEM); 6110 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6111 vhold(vp); 6112 knlist_add(knl, kn, 0); 6113 6114 return (0); 6115 } 6116 6117 /* 6118 * Detach knote from vnode 6119 */ 6120 static void 6121 filt_vfsdetach(struct knote *kn) 6122 { 6123 struct vnode *vp = (struct vnode *)kn->kn_hook; 6124 6125 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6126 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6127 vdrop(vp); 6128 } 6129 6130 /*ARGSUSED*/ 6131 static int 6132 filt_vfsread(struct knote *kn, long hint) 6133 { 6134 struct vnode *vp = (struct vnode *)kn->kn_hook; 6135 struct vattr va; 6136 int res; 6137 6138 /* 6139 * filesystem is gone, so set the EOF flag and schedule 6140 * the knote for deletion. 6141 */ 6142 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6143 VI_LOCK(vp); 6144 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6145 VI_UNLOCK(vp); 6146 return (1); 6147 } 6148 6149 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 6150 return (0); 6151 6152 VI_LOCK(vp); 6153 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 6154 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6155 VI_UNLOCK(vp); 6156 return (res); 6157 } 6158 6159 /*ARGSUSED*/ 6160 static int 6161 filt_vfswrite(struct knote *kn, long hint) 6162 { 6163 struct vnode *vp = (struct vnode *)kn->kn_hook; 6164 6165 VI_LOCK(vp); 6166 6167 /* 6168 * filesystem is gone, so set the EOF flag and schedule 6169 * the knote for deletion. 
6170 */ 6171 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6172 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6173 6174 kn->kn_data = 0; 6175 VI_UNLOCK(vp); 6176 return (1); 6177 } 6178 6179 static int 6180 filt_vfsvnode(struct knote *kn, long hint) 6181 { 6182 struct vnode *vp = (struct vnode *)kn->kn_hook; 6183 int res; 6184 6185 VI_LOCK(vp); 6186 if (kn->kn_sfflags & hint) 6187 kn->kn_fflags |= hint; 6188 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6189 kn->kn_flags |= EV_EOF; 6190 VI_UNLOCK(vp); 6191 return (1); 6192 } 6193 res = (kn->kn_fflags != 0); 6194 VI_UNLOCK(vp); 6195 return (res); 6196 } 6197 6198 /* 6199 * Returns whether the directory is empty or not. 6200 * If it is empty, the return value is 0; otherwise 6201 * the return value is an error value (which may 6202 * be ENOTEMPTY). 6203 */ 6204 int 6205 vfs_emptydir(struct vnode *vp) 6206 { 6207 struct uio uio; 6208 struct iovec iov; 6209 struct dirent *dirent, *dp, *endp; 6210 int error, eof; 6211 6212 error = 0; 6213 eof = 0; 6214 6215 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6216 6217 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6218 iov.iov_base = dirent; 6219 iov.iov_len = sizeof(struct dirent); 6220 6221 uio.uio_iov = &iov; 6222 uio.uio_iovcnt = 1; 6223 uio.uio_offset = 0; 6224 uio.uio_resid = sizeof(struct dirent); 6225 uio.uio_segflg = UIO_SYSSPACE; 6226 uio.uio_rw = UIO_READ; 6227 uio.uio_td = curthread; 6228 6229 while (eof == 0 && error == 0) { 6230 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6231 NULL, NULL); 6232 if (error != 0) 6233 break; 6234 endp = (void *)((uint8_t *)dirent + 6235 sizeof(struct dirent) - uio.uio_resid); 6236 for (dp = dirent; dp < endp; 6237 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6238 if (dp->d_type == DT_WHT) 6239 continue; 6240 if (dp->d_namlen == 0) 6241 continue; 6242 if (dp->d_type != DT_DIR && 6243 dp->d_type != DT_UNKNOWN) { 6244 error = ENOTEMPTY; 6245 break; 6246 } 6247 if (dp->d_namlen > 2) { 6248 error = ENOTEMPTY; 6249 break; 6250 } 6251 if (dp->d_namlen == 1 && 6252 dp->d_name[0] != '.') { 6253 error = ENOTEMPTY; 6254 break; 6255 } 6256 if (dp->d_namlen == 2 && 6257 dp->d_name[1] != '.') { 6258 error = ENOTEMPTY; 6259 break; 6260 } 6261 uio.uio_resid = sizeof(struct dirent); 6262 } 6263 } 6264 free(dirent, M_TEMP); 6265 return (error); 6266 } 6267 6268 int 6269 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6270 { 6271 int error; 6272 6273 if (dp->d_reclen > ap->a_uio->uio_resid) 6274 return (ENAMETOOLONG); 6275 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6276 if (error) { 6277 if (ap->a_ncookies != NULL) { 6278 if (ap->a_cookies != NULL) 6279 free(ap->a_cookies, M_TEMP); 6280 ap->a_cookies = NULL; 6281 *ap->a_ncookies = 0; 6282 } 6283 return (error); 6284 } 6285 if (ap->a_ncookies == NULL) 6286 return (0); 6287 6288 KASSERT(ap->a_cookies, 6289 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6290 6291 *ap->a_cookies = realloc(*ap->a_cookies, 6292 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 6293 (*ap->a_cookies)[*ap->a_ncookies] = off; 6294 *ap->a_ncookies += 1; 6295 return (0); 6296 } 6297 6298 /* 6299 * The purpose of this routine is to remove granularity from accmode_t, 6300 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6301 * VADMIN and VAPPEND. 6302 * 6303 * If it returns 0, the caller is supposed to continue with the usual 6304 * access checks using 'accmode' as modified by this routine. 
If it 6305 * returns nonzero value, the caller is supposed to return that value 6306 * as errno. 6307 * 6308 * Note that after this routine runs, accmode may be zero. 6309 */ 6310 int 6311 vfs_unixify_accmode(accmode_t *accmode) 6312 { 6313 /* 6314 * There is no way to specify explicit "deny" rule using 6315 * file mode or POSIX.1e ACLs. 6316 */ 6317 if (*accmode & VEXPLICIT_DENY) { 6318 *accmode = 0; 6319 return (0); 6320 } 6321 6322 /* 6323 * None of these can be translated into usual access bits. 6324 * Also, the common case for NFSv4 ACLs is to not contain 6325 * either of these bits. Caller should check for VWRITE 6326 * on the containing directory instead. 6327 */ 6328 if (*accmode & (VDELETE_CHILD | VDELETE)) 6329 return (EPERM); 6330 6331 if (*accmode & VADMIN_PERMS) { 6332 *accmode &= ~VADMIN_PERMS; 6333 *accmode |= VADMIN; 6334 } 6335 6336 /* 6337 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6338 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6339 */ 6340 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6341 6342 return (0); 6343 } 6344 6345 /* 6346 * Clear out a doomed vnode (if any) and replace it with a new one as long 6347 * as the fs is not being unmounted. Return the root vnode to the caller. 6348 */ 6349 static int __noinline 6350 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6351 { 6352 struct vnode *vp; 6353 int error; 6354 6355 restart: 6356 if (mp->mnt_rootvnode != NULL) { 6357 MNT_ILOCK(mp); 6358 vp = mp->mnt_rootvnode; 6359 if (vp != NULL) { 6360 if (!VN_IS_DOOMED(vp)) { 6361 vrefact(vp); 6362 MNT_IUNLOCK(mp); 6363 error = vn_lock(vp, flags); 6364 if (error == 0) { 6365 *vpp = vp; 6366 return (0); 6367 } 6368 vrele(vp); 6369 goto restart; 6370 } 6371 /* 6372 * Clear the old one. 6373 */ 6374 mp->mnt_rootvnode = NULL; 6375 } 6376 MNT_IUNLOCK(mp); 6377 if (vp != NULL) { 6378 vfs_op_barrier_wait(mp); 6379 vrele(vp); 6380 } 6381 } 6382 error = VFS_CACHEDROOT(mp, flags, vpp); 6383 if (error != 0) 6384 return (error); 6385 if (mp->mnt_vfs_ops == 0) { 6386 MNT_ILOCK(mp); 6387 if (mp->mnt_vfs_ops != 0) { 6388 MNT_IUNLOCK(mp); 6389 return (0); 6390 } 6391 if (mp->mnt_rootvnode == NULL) { 6392 vrefact(*vpp); 6393 mp->mnt_rootvnode = *vpp; 6394 } else { 6395 if (mp->mnt_rootvnode != *vpp) { 6396 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6397 panic("%s: mismatch between vnode returned " 6398 " by VFS_CACHEDROOT and the one cached " 6399 " (%p != %p)", 6400 __func__, *vpp, mp->mnt_rootvnode); 6401 } 6402 } 6403 } 6404 MNT_IUNLOCK(mp); 6405 } 6406 return (0); 6407 } 6408 6409 int 6410 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6411 { 6412 struct vnode *vp; 6413 int error; 6414 6415 if (!vfs_op_thread_enter(mp)) 6416 return (vfs_cache_root_fallback(mp, flags, vpp)); 6417 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6418 if (vp == NULL || VN_IS_DOOMED(vp)) { 6419 vfs_op_thread_exit(mp); 6420 return (vfs_cache_root_fallback(mp, flags, vpp)); 6421 } 6422 vrefact(vp); 6423 vfs_op_thread_exit(mp); 6424 error = vn_lock(vp, flags); 6425 if (error != 0) { 6426 vrele(vp); 6427 return (vfs_cache_root_fallback(mp, flags, vpp)); 6428 } 6429 *vpp = vp; 6430 return (0); 6431 } 6432 6433 struct vnode * 6434 vfs_cache_root_clear(struct mount *mp) 6435 { 6436 struct vnode *vp; 6437 6438 /* 6439 * ops > 0 guarantees there is nobody who can see this vnode 6440 */ 6441 MPASS(mp->mnt_vfs_ops > 0); 6442 vp = mp->mnt_rootvnode; 6443 if (vp != NULL) 6444 vn_seqc_write_begin(vp); 6445 mp->mnt_rootvnode = NULL; 6446 return (vp); 6447 } 6448 6449 void 6450 
vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6451 { 6452 6453 MPASS(mp->mnt_vfs_ops > 0); 6454 vrefact(vp); 6455 mp->mnt_rootvnode = vp; 6456 } 6457 6458 /* 6459 * These are helper functions for filesystems to traverse all 6460 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6461 * 6462 * This interface replaces MNT_VNODE_FOREACH. 6463 */ 6464 6465 struct vnode * 6466 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6467 { 6468 struct vnode *vp; 6469 6470 if (should_yield()) 6471 kern_yield(PRI_USER); 6472 MNT_ILOCK(mp); 6473 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6474 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6475 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6476 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6477 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6478 continue; 6479 VI_LOCK(vp); 6480 if (VN_IS_DOOMED(vp)) { 6481 VI_UNLOCK(vp); 6482 continue; 6483 } 6484 break; 6485 } 6486 if (vp == NULL) { 6487 __mnt_vnode_markerfree_all(mvp, mp); 6488 /* MNT_IUNLOCK(mp); -- done in above function */ 6489 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6490 return (NULL); 6491 } 6492 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6493 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6494 MNT_IUNLOCK(mp); 6495 return (vp); 6496 } 6497 6498 struct vnode * 6499 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6500 { 6501 struct vnode *vp; 6502 6503 *mvp = vn_alloc_marker(mp); 6504 MNT_ILOCK(mp); 6505 MNT_REF(mp); 6506 6507 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6508 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6509 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6510 continue; 6511 VI_LOCK(vp); 6512 if (VN_IS_DOOMED(vp)) { 6513 VI_UNLOCK(vp); 6514 continue; 6515 } 6516 break; 6517 } 6518 if (vp == NULL) { 6519 MNT_REL(mp); 6520 MNT_IUNLOCK(mp); 6521 vn_free_marker(*mvp); 6522 *mvp = NULL; 6523 return (NULL); 6524 } 6525 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6526 MNT_IUNLOCK(mp); 6527 return (vp); 6528 } 6529 6530 void 6531 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6532 { 6533 6534 if (*mvp == NULL) { 6535 MNT_IUNLOCK(mp); 6536 return; 6537 } 6538 6539 mtx_assert(MNT_MTX(mp), MA_OWNED); 6540 6541 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6542 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6543 MNT_REL(mp); 6544 MNT_IUNLOCK(mp); 6545 vn_free_marker(*mvp); 6546 *mvp = NULL; 6547 } 6548 6549 /* 6550 * These are helper functions for filesystems to traverse their 6551 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6552 */ 6553 static void 6554 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6555 { 6556 6557 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6558 6559 MNT_ILOCK(mp); 6560 MNT_REL(mp); 6561 MNT_IUNLOCK(mp); 6562 vn_free_marker(*mvp); 6563 *mvp = NULL; 6564 } 6565 6566 /* 6567 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6568 * conventional lock order during mnt_vnode_next_lazy iteration. 6569 * 6570 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6571 * The list lock is dropped and reacquired. On success, both locks are held. 6572 * On failure, the mount vnode list lock is held but the vnode interlock is 6573 * not, and the procedure may have yielded. 
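 *
 * The conventional order takes the vnode interlock before the mount vnode
 * list lock; because the caller already holds the list lock, the list lock
 * has to be dropped before the interlock can be taken, and is then
 * reacquired.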
6574 */ 6575 static bool 6576 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6577 struct vnode *vp) 6578 { 6579 6580 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6581 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6582 ("%s: bad marker", __func__)); 6583 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6584 ("%s: inappropriate vnode", __func__)); 6585 ASSERT_VI_UNLOCKED(vp, __func__); 6586 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6587 6588 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6589 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6590 6591 /* 6592 * Note we may be racing against vdrop which transitioned the hold 6593 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6594 * if we are the only user after we get the interlock we will just 6595 * vdrop. 6596 */ 6597 vhold(vp); 6598 mtx_unlock(&mp->mnt_listmtx); 6599 VI_LOCK(vp); 6600 if (VN_IS_DOOMED(vp)) { 6601 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6602 goto out_lost; 6603 } 6604 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6605 /* 6606 * There is nothing to do if we are the last user. 6607 */ 6608 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6609 goto out_lost; 6610 mtx_lock(&mp->mnt_listmtx); 6611 return (true); 6612 out_lost: 6613 vdropl(vp); 6614 maybe_yield(); 6615 mtx_lock(&mp->mnt_listmtx); 6616 return (false); 6617 } 6618 6619 static struct vnode * 6620 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6621 void *cbarg) 6622 { 6623 struct vnode *vp; 6624 6625 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6626 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6627 restart: 6628 vp = TAILQ_NEXT(*mvp, v_lazylist); 6629 while (vp != NULL) { 6630 if (vp->v_type == VMARKER) { 6631 vp = TAILQ_NEXT(vp, v_lazylist); 6632 continue; 6633 } 6634 /* 6635 * See if we want to process the vnode. Note we may encounter a 6636 * long string of vnodes we don't care about and hog the list 6637 * as a result. Check for it and requeue the marker. 6638 */ 6639 VNPASS(!VN_IS_DOOMED(vp), vp); 6640 if (!cb(vp, cbarg)) { 6641 if (!should_yield()) { 6642 vp = TAILQ_NEXT(vp, v_lazylist); 6643 continue; 6644 } 6645 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6646 v_lazylist); 6647 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6648 v_lazylist); 6649 mtx_unlock(&mp->mnt_listmtx); 6650 kern_yield(PRI_USER); 6651 mtx_lock(&mp->mnt_listmtx); 6652 goto restart; 6653 } 6654 /* 6655 * Try-lock because this is the wrong lock order. 
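 * We hold the vnode list lock and want the vnode interlock, the reverse
 * of the conventional order, so a failed try-lock falls back to
 * mnt_vnode_next_lazy_relock() to take both locks in the proper order.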
6656 */ 6657 if (!VI_TRYLOCK(vp) && 6658 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6659 goto restart; 6660 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6661 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6662 ("alien vnode on the lazy list %p %p", vp, mp)); 6663 VNPASS(vp->v_mount == mp, vp); 6664 VNPASS(!VN_IS_DOOMED(vp), vp); 6665 break; 6666 } 6667 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6668 6669 /* Check if we are done */ 6670 if (vp == NULL) { 6671 mtx_unlock(&mp->mnt_listmtx); 6672 mnt_vnode_markerfree_lazy(mvp, mp); 6673 return (NULL); 6674 } 6675 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6676 mtx_unlock(&mp->mnt_listmtx); 6677 ASSERT_VI_LOCKED(vp, "lazy iter"); 6678 return (vp); 6679 } 6680 6681 struct vnode * 6682 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6683 void *cbarg) 6684 { 6685 6686 if (should_yield()) 6687 kern_yield(PRI_USER); 6688 mtx_lock(&mp->mnt_listmtx); 6689 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6690 } 6691 6692 struct vnode * 6693 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6694 void *cbarg) 6695 { 6696 struct vnode *vp; 6697 6698 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6699 return (NULL); 6700 6701 *mvp = vn_alloc_marker(mp); 6702 MNT_ILOCK(mp); 6703 MNT_REF(mp); 6704 MNT_IUNLOCK(mp); 6705 6706 mtx_lock(&mp->mnt_listmtx); 6707 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6708 if (vp == NULL) { 6709 mtx_unlock(&mp->mnt_listmtx); 6710 mnt_vnode_markerfree_lazy(mvp, mp); 6711 return (NULL); 6712 } 6713 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6714 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6715 } 6716 6717 void 6718 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6719 { 6720 6721 if (*mvp == NULL) 6722 return; 6723 6724 mtx_lock(&mp->mnt_listmtx); 6725 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6726 mtx_unlock(&mp->mnt_listmtx); 6727 mnt_vnode_markerfree_lazy(mvp, mp); 6728 } 6729 6730 int 6731 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 6732 { 6733 6734 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 6735 cnp->cn_flags &= ~NOEXECCHECK; 6736 return (0); 6737 } 6738 6739 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread)); 6740 } 6741 6742 /* 6743 * Do not use this variant unless you have means other than the hold count 6744 * to prevent the vnode from getting freed. 
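 * Unlike vn_seqc_write_begin_locked(), the _unheld variants do not assert
 * that v_holdcnt is greater than zero.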
6745 */ 6746 void 6747 vn_seqc_write_begin_unheld_locked(struct vnode *vp) 6748 { 6749 6750 ASSERT_VI_LOCKED(vp, __func__); 6751 VNPASS(vp->v_seqc_users >= 0, vp); 6752 vp->v_seqc_users++; 6753 if (vp->v_seqc_users == 1) 6754 seqc_sleepable_write_begin(&vp->v_seqc); 6755 } 6756 6757 void 6758 vn_seqc_write_begin_locked(struct vnode *vp) 6759 { 6760 6761 ASSERT_VI_LOCKED(vp, __func__); 6762 VNPASS(vp->v_holdcnt > 0, vp); 6763 vn_seqc_write_begin_unheld_locked(vp); 6764 } 6765 6766 void 6767 vn_seqc_write_begin(struct vnode *vp) 6768 { 6769 6770 VI_LOCK(vp); 6771 vn_seqc_write_begin_locked(vp); 6772 VI_UNLOCK(vp); 6773 } 6774 6775 void 6776 vn_seqc_write_begin_unheld(struct vnode *vp) 6777 { 6778 6779 VI_LOCK(vp); 6780 vn_seqc_write_begin_unheld_locked(vp); 6781 VI_UNLOCK(vp); 6782 } 6783 6784 void 6785 vn_seqc_write_end_locked(struct vnode *vp) 6786 { 6787 6788 ASSERT_VI_LOCKED(vp, __func__); 6789 VNPASS(vp->v_seqc_users > 0, vp); 6790 vp->v_seqc_users--; 6791 if (vp->v_seqc_users == 0) 6792 seqc_sleepable_write_end(&vp->v_seqc); 6793 } 6794 6795 void 6796 vn_seqc_write_end(struct vnode *vp) 6797 { 6798 6799 VI_LOCK(vp); 6800 vn_seqc_write_end_locked(vp); 6801 VI_UNLOCK(vp); 6802 } 6803
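#if 0
/*
 * Minimal usage sketches for interfaces defined above.  These are
 * illustrative only and are not part of this file: the function names,
 * the VREG filter and the elided per-vnode work are assumptions made
 * for the sake of the example.
 */

/*
 * Walking every vnode of a mount with MNT_VNODE_FOREACH_ALL().  The
 * iterator returns each vnode with its interlock held; vget() with
 * LK_INTERLOCK consumes that interlock and acquires a use reference
 * plus the vnode lock.
 */
static void
example_walk_vnodes(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		/* ... per-vnode work goes here ... */
		vput(vp);
	}
}

/*
 * Bracketing a vnode state change with the vn_seqc write markers so that
 * lockless readers can detect the modification and fall back to a locked
 * path.  vn_seqc_write_begin() requires the vnode to be held.
 */
static void
example_update_vnode(struct vnode *vp, u_int flag)
{

	vn_seqc_write_begin(vp);
	VI_LOCK(vp);
	vp->v_iflag |= flag;		/* the change being published */
	VI_UNLOCK(vp);
	vn_seqc_write_end(vp);
}
#endif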