/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
static u_long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS,
    &reassignbufcalls, 0, "Number of calls to reassignbuf");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static u_long vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

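/*
 * Illustrative numbers only (not computed anywhere in this file): with
 * desiredvnodes = 100000, wantfreevnodes defaults to desiredvnodes / 4
 * = 25000, and vnlru_recalc() below derives
 * gapvnodes = imax(desiredvnodes - wantfreevnodes, 100) = 75000,
 * vhiwat = gapvnodes / 11 ~= 6818 and vlowat = vhiwat / 2 ~= 3409,
 * i.e. roughly the "9%" and "4%" watermarks described above.
 */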

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru and
	 * getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "UL", "Target for maximum number of vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "UL", "Target for minimum number of \"free\" vnodes");

SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return uma_zalloc(buf_trie_zone, M_NOWAIT);
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	int cpu, physvnodes, virtvnodes;
	u_int i;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	deferred_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

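/*
 * A rough illustration of the sizing done in vntblinit() above, assuming
 * a machine with about 4GB of RAM (pgtok(vm_cnt.v_page_count) ~= 4194304)
 * and ignoring the maxproc term:
 *	physvnodes ~= 4194304 / 64 + 3 * min(98304 * 16, 4194304) / 64
 *		   ~= 65536 + 73728 ~= 139264
 * kern.maxvnodes then becomes the smaller of this value and the
 * kmem-based virtvnodes limit, capped at MAXVNODES_MAX.
 */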

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	    |
 *	    +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mp, ref, 1);
		vfs_mp_count_add_pcpu(mp, lockref, 1);
		vfs_op_thread_exit(mp);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount it tried to busy is no longer
	 * valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mp, lockref, 1);
		vfs_mp_count_sub_pcpu(mp, ref, 1);
		vfs_op_thread_exit(mp);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In worst case we may
 * get pointer to unmounted or even different filesystem, so we have to
 * check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param mp		 Try to reclaim vnodes from this mountpoint
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it doing anything.)  The
		 * order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    vp->v_holdcnt == 0 ||
		    VN_IS_DOOMED(vp) || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VIRF_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl(vp);
			goto next_iter_mntunlocked;
		}
		KASSERT(!VN_IS_DOOMED(vp),
		    ("VIRF_DOOMED unexpectedly detected in vlrureclaim()"));
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_USER);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static void
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp, *mvp;
	struct mount *mp;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	mvp = vnode_list_free_marker;
restart:
	vp = mvp;
	while (count > 0) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if (vp->v_holdcnt > 0 || (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			continue;
		}
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);
		count--;
		mtx_unlock(&vnode_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		vdrop(vp);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/* XXX some names and initialization are bad for limits and watermarks. */
static int
vspace(void)
{
	u_long rnumvnodes, rfreevnodes;
	int space;

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = atomic_load_long(&freevnodes);
	if (rnumvnodes > desiredvnodes)
		return (0);
	space = desiredvnodes - rnumvnodes;
	if (rfreevnodes > wantfreevnodes)
		space += rfreevnodes - wantfreevnodes;
	return (space);
}

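/*
 * Illustrative example of the calculation above: with desiredvnodes =
 * 100000, wantfreevnodes = 25000, numvnodes = 90000 and freevnodes =
 * 30000, vspace() returns (100000 - 90000) + (30000 - 25000) = 15000,
 * i.e. the cache still has room before vnlru needs to become active.
 */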

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes;
	struct mount *mp, *nmp;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes, vsp;
	bool reclaim_nc_src;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);
		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes)
			vnlru_free_locked(rnumvnodes - desiredvnodes, NULL);
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		vsp = vspace();
		if (vsp >= vlowat && force == 0) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_list_mtx);
		done = 0;
		rnumvnodes = atomic_load_long(&numvnodes);
		rfreevnodes = atomic_load_long(&freevnodes);

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, reclaim_nc_src, trigger);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
		/*
		 * After becoming active to expand above low water, keep
		 * active until above high water.
		 */
		vsp = vspace();
		force = vsp < vhiwat;
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list; if so,
	 * we can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp);
		VI_UNLOCK(vp);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if (!VN_IS_DOOMED(vp)) {
		counter_u64_add(recycles_free_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp);
	VI_UNLOCK(vp);
	vn_finished_write(vnmp);
	return (0);
}

static void
vcheckspace(void)
{
	int vsp;

	vsp = vspace();
	if (vsp < vlowat && vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

/*
 * Wait if necessary for space for a new vnode.
 */
static int
vn_alloc_wait(int suspended)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (numvnodes >= desiredvnodes) {
		if (suspended) {
			/*
			 * The file system is being suspended.  We cannot
			 * risk a deadlock here, so allow allocation of
			 * another vnode even if this would give too many.
			 */
			return (0);
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS,
		    "vlruwk", hz);
	}
	/* Post-adjust like the pre-adjust in getnewvnode(). */
	if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
		vnlru_free_locked(1, NULL);
	return (numvnodes >= desiredvnodes ? ENFILE : 0);
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	static int cyclecount;
	int error __unused;

	mtx_lock(&vnode_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = vn_alloc_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

void
getnewvnode_reserve(void)
{
	struct thread *td;

	td = curthread;
	MPASS(td->td_vp_reserved == NULL);
	td->td_vp_reserved = vn_alloc(NULL);
}

void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vn_free(td->td_vp_reserved);
		td->td_vp_reserved = NULL;
	}
}

static void
freevnode(struct vnode *vp)
{
	struct bufobj *bo;

	/*
	 * The vnode has been marked for destruction, so free it.
	 *
	 * The vnode will be returned to the zone where it will
	 * normally remain until it is needed for another vnode.  We
	 * need to cleanup (or verify that the cleanup has already
	 * been done) any residual data left from its current use
	 * so as not to contaminate the freshly allocated vnode.
	 */
	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
	bo = &vp->v_bufobj;
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
	    ("clean blk trie not empty"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
	    ("dirty blk trie not empty"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
	    ("Dangling rangelock waiters"));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_vnode_destroy(vp);
#endif
	if (vp->v_pollinfo != NULL) {
		destroy_vpollinfo(vp->v_pollinfo);
		vp->v_pollinfo = NULL;
	}
#ifdef INVARIANTS
	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	vp->v_mountedhere = NULL;
	vp->v_unpcb = NULL;
	vp->v_rdev = NULL;
	vp->v_fifoinfo = NULL;
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	vp->v_irflag = 0;
	vp->v_iflag = 0;
	vp->v_vflag = 0;
	bo->bo_flag = 0;
	vn_free(vp);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (vp->v_mflag & VMP_LAZYLIST) {
		mtx_lock(&mp->mnt_listmtx);
		if (vp->v_mflag & VMP_LAZYLIST) {
			vp->v_mflag &= ~VMP_LAZYLIST;
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
			mp->mnt_lazyvnodelistsize--;
		}
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
	 * Hence the need to hold the vnode interlock throughout.
	 */
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) &&
	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		if (dtr != NULL)
			dtr(vp, dtr_arg);
		return (EBUSY);
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	VI_UNLOCK(vp);
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
Special care is needed to ensure that 1856 * no race conditions occur from this. 1857 */ 1858 do { 1859 error = flushbuflist(&bo->bo_clean, 1860 flags, bo, slpflag, slptimeo); 1861 if (error == 0 && !(flags & V_CLEANONLY)) 1862 error = flushbuflist(&bo->bo_dirty, 1863 flags, bo, slpflag, slptimeo); 1864 if (error != 0 && error != EAGAIN) { 1865 BO_UNLOCK(bo); 1866 return (error); 1867 } 1868 } while (error != 0); 1869 1870 /* 1871 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1872 * have write I/O in-progress but if there is a VM object then the 1873 * VM object can also have read-I/O in-progress. 1874 */ 1875 do { 1876 bufobj_wwait(bo, 0, 0); 1877 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1878 BO_UNLOCK(bo); 1879 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1880 BO_LOCK(bo); 1881 } 1882 } while (bo->bo_numoutput > 0); 1883 BO_UNLOCK(bo); 1884 1885 /* 1886 * Destroy the copy in the VM cache, too. 1887 */ 1888 if (bo->bo_object != NULL && 1889 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1890 VM_OBJECT_WLOCK(bo->bo_object); 1891 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1892 OBJPR_CLEANONLY : 0); 1893 VM_OBJECT_WUNLOCK(bo->bo_object); 1894 } 1895 1896 #ifdef INVARIANTS 1897 BO_LOCK(bo); 1898 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1899 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1900 bo->bo_clean.bv_cnt > 0)) 1901 panic("vinvalbuf: flush failed"); 1902 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1903 bo->bo_dirty.bv_cnt > 0) 1904 panic("vinvalbuf: flush dirty failed"); 1905 BO_UNLOCK(bo); 1906 #endif 1907 return (0); 1908 } 1909 1910 /* 1911 * Flush out and invalidate all buffers associated with a vnode. 1912 * Called with the underlying object locked. 1913 */ 1914 int 1915 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1916 { 1917 1918 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1919 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1920 if (vp->v_object != NULL && vp->v_object->handle != vp) 1921 return (0); 1922 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1923 } 1924 1925 /* 1926 * Flush out buffers on the specified list. 1927 * 1928 */ 1929 static int 1930 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1931 int slptimeo) 1932 { 1933 struct buf *bp, *nbp; 1934 int retval, error; 1935 daddr_t lblkno; 1936 b_xflags_t xflags; 1937 1938 ASSERT_BO_WLOCKED(bo); 1939 1940 retval = 0; 1941 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1942 /* 1943 * If we are flushing both V_NORMAL and V_ALT buffers then 1944 * do not skip any buffers. If we are flushing only V_NORMAL 1945 * buffers then skip buffers marked as BX_ALTDATA. If we are 1946 * flushing only V_ALT buffers then skip buffers not marked 1947 * as BX_ALTDATA. 1948 */ 1949 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 1950 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 1951 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 1952 continue; 1953 } 1954 if (nbp != NULL) { 1955 lblkno = nbp->b_lblkno; 1956 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1957 } 1958 retval = EAGAIN; 1959 error = BUF_TIMELOCK(bp, 1960 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1961 "flushbuf", slpflag, slptimeo); 1962 if (error) { 1963 BO_LOCK(bo); 1964 return (error != ENOLCK ? 
error : EAGAIN); 1965 } 1966 KASSERT(bp->b_bufobj == bo, 1967 ("bp %p wrong b_bufobj %p should be %p", 1968 bp, bp->b_bufobj, bo)); 1969 /* 1970 * XXX Since there are no node locks for NFS, I 1971 * believe there is a slight chance that a delayed 1972 * write will occur while sleeping just above, so 1973 * check for it. 1974 */ 1975 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1976 (flags & V_SAVE)) { 1977 bremfree(bp); 1978 bp->b_flags |= B_ASYNC; 1979 bwrite(bp); 1980 BO_LOCK(bo); 1981 return (EAGAIN); /* XXX: why not loop ? */ 1982 } 1983 bremfree(bp); 1984 bp->b_flags |= (B_INVAL | B_RELBUF); 1985 bp->b_flags &= ~B_ASYNC; 1986 brelse(bp); 1987 BO_LOCK(bo); 1988 if (nbp == NULL) 1989 break; 1990 nbp = gbincore(bo, lblkno); 1991 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1992 != xflags) 1993 break; /* nbp invalid */ 1994 } 1995 return (retval); 1996 } 1997 1998 int 1999 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2000 { 2001 struct buf *bp; 2002 int error; 2003 daddr_t lblkno; 2004 2005 ASSERT_BO_LOCKED(bo); 2006 2007 for (lblkno = startn;;) { 2008 again: 2009 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2010 if (bp == NULL || bp->b_lblkno >= endn || 2011 bp->b_lblkno < startn) 2012 break; 2013 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2014 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2015 if (error != 0) { 2016 BO_RLOCK(bo); 2017 if (error == ENOLCK) 2018 goto again; 2019 return (error); 2020 } 2021 KASSERT(bp->b_bufobj == bo, 2022 ("bp %p wrong b_bufobj %p should be %p", 2023 bp, bp->b_bufobj, bo)); 2024 lblkno = bp->b_lblkno + 1; 2025 if ((bp->b_flags & B_MANAGED) == 0) 2026 bremfree(bp); 2027 bp->b_flags |= B_RELBUF; 2028 /* 2029 * In the VMIO case, use the B_NOREUSE flag to hint that the 2030 * pages backing each buffer in the range are unlikely to be 2031 * reused. Dirty buffers will have the hint applied once 2032 * they've been written. 2033 */ 2034 if ((bp->b_flags & B_VMIO) != 0) 2035 bp->b_flags |= B_NOREUSE; 2036 brelse(bp); 2037 BO_RLOCK(bo); 2038 } 2039 return (0); 2040 } 2041 2042 /* 2043 * Truncate a file's buffer and pages to a specified length. This 2044 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2045 * sync activity. 2046 */ 2047 int 2048 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2049 { 2050 struct buf *bp, *nbp; 2051 struct bufobj *bo; 2052 daddr_t startlbn; 2053 2054 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2055 vp, blksize, (uintmax_t)length); 2056 2057 /* 2058 * Round up to the *next* lbn. 2059 */ 2060 startlbn = howmany(length, blksize); 2061 2062 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2063 2064 bo = &vp->v_bufobj; 2065 restart_unlocked: 2066 BO_LOCK(bo); 2067 2068 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2069 ; 2070 2071 if (length > 0) { 2072 restartsync: 2073 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2074 if (bp->b_lblkno > 0) 2075 continue; 2076 /* 2077 * Since we hold the vnode lock this should only 2078 * fail if we're racing with the buf daemon. 
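 * If the buffer lock attempt does fail with ENOLCK, BUF_LOCK() has
 * already dropped the bufobj lock we passed as the interlock, so we
 * restart from restart_unlocked above, retake the lock, and rescan
 * the lists from scratch.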
2079 */ 2080 if (BUF_LOCK(bp, 2081 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2082 BO_LOCKPTR(bo)) == ENOLCK) 2083 goto restart_unlocked; 2084 2085 VNASSERT((bp->b_flags & B_DELWRI), vp, 2086 ("buf(%p) on dirty queue without DELWRI", bp)); 2087 2088 bremfree(bp); 2089 bawrite(bp); 2090 BO_LOCK(bo); 2091 goto restartsync; 2092 } 2093 } 2094 2095 bufobj_wwait(bo, 0, 0); 2096 BO_UNLOCK(bo); 2097 vnode_pager_setsize(vp, length); 2098 2099 return (0); 2100 } 2101 2102 /* 2103 * Invalidate the cached pages of a file's buffer within the range of block 2104 * numbers [startlbn, endlbn). 2105 */ 2106 void 2107 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2108 int blksize) 2109 { 2110 struct bufobj *bo; 2111 off_t start, end; 2112 2113 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2114 2115 start = blksize * startlbn; 2116 end = blksize * endlbn; 2117 2118 bo = &vp->v_bufobj; 2119 BO_LOCK(bo); 2120 MPASS(blksize == bo->bo_bsize); 2121 2122 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2123 ; 2124 2125 BO_UNLOCK(bo); 2126 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2127 } 2128 2129 static int 2130 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2131 daddr_t startlbn, daddr_t endlbn) 2132 { 2133 struct buf *bp, *nbp; 2134 bool anyfreed; 2135 2136 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2137 ASSERT_BO_LOCKED(bo); 2138 2139 do { 2140 anyfreed = false; 2141 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2142 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2143 continue; 2144 if (BUF_LOCK(bp, 2145 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2146 BO_LOCKPTR(bo)) == ENOLCK) { 2147 BO_LOCK(bo); 2148 return (EAGAIN); 2149 } 2150 2151 bremfree(bp); 2152 bp->b_flags |= B_INVAL | B_RELBUF; 2153 bp->b_flags &= ~B_ASYNC; 2154 brelse(bp); 2155 anyfreed = true; 2156 2157 BO_LOCK(bo); 2158 if (nbp != NULL && 2159 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2160 nbp->b_vp != vp || 2161 (nbp->b_flags & B_DELWRI) != 0)) 2162 return (EAGAIN); 2163 } 2164 2165 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2166 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2167 continue; 2168 if (BUF_LOCK(bp, 2169 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2170 BO_LOCKPTR(bo)) == ENOLCK) { 2171 BO_LOCK(bo); 2172 return (EAGAIN); 2173 } 2174 bremfree(bp); 2175 bp->b_flags |= B_INVAL | B_RELBUF; 2176 bp->b_flags &= ~B_ASYNC; 2177 brelse(bp); 2178 anyfreed = true; 2179 2180 BO_LOCK(bo); 2181 if (nbp != NULL && 2182 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2183 (nbp->b_vp != vp) || 2184 (nbp->b_flags & B_DELWRI) == 0)) 2185 return (EAGAIN); 2186 } 2187 } while (anyfreed); 2188 return (0); 2189 } 2190 2191 static void 2192 buf_vlist_remove(struct buf *bp) 2193 { 2194 struct bufv *bv; 2195 2196 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2197 ASSERT_BO_WLOCKED(bp->b_bufobj); 2198 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2199 (BX_VNDIRTY|BX_VNCLEAN), 2200 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2201 if (bp->b_xflags & BX_VNDIRTY) 2202 bv = &bp->b_bufobj->bo_dirty; 2203 else 2204 bv = &bp->b_bufobj->bo_clean; 2205 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2206 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2207 bv->bv_cnt--; 2208 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2209 } 2210 2211 /* 2212 * Add the buffer to the sorted clean or dirty block list. 2213 * 2214 * NOTE: xflags is passed as a constant, optimizing this inline function! 
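 * For example, bgetvp() below always inserts a newly associated buffer
 * with buf_vlist_add(bp, bo, BX_VNCLEAN), while reassignbuf() passes
 * BX_VNDIRTY or BX_VNCLEAN depending on B_DELWRI; in both callers the
 * flag is a compile-time constant.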
2215 */ 2216 static void 2217 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2218 { 2219 struct bufv *bv; 2220 struct buf *n; 2221 int error; 2222 2223 ASSERT_BO_WLOCKED(bo); 2224 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2225 ("dead bo %p", bo)); 2226 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2227 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2228 bp->b_xflags |= xflags; 2229 if (xflags & BX_VNDIRTY) 2230 bv = &bo->bo_dirty; 2231 else 2232 bv = &bo->bo_clean; 2233 2234 /* 2235 * Keep the list ordered. Optimize empty list insertion. Assume 2236 * we tend to grow at the tail so lookup_le should usually be cheaper 2237 * than _ge. 2238 */ 2239 if (bv->bv_cnt == 0 || 2240 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2241 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2242 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2243 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2244 else 2245 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2246 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2247 if (error) 2248 panic("buf_vlist_add: Preallocated nodes insufficient."); 2249 bv->bv_cnt++; 2250 } 2251 2252 /* 2253 * Look up a buffer using the buffer tries. 2254 */ 2255 struct buf * 2256 gbincore(struct bufobj *bo, daddr_t lblkno) 2257 { 2258 struct buf *bp; 2259 2260 ASSERT_BO_LOCKED(bo); 2261 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2262 if (bp != NULL) 2263 return (bp); 2264 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2265 } 2266 2267 /* 2268 * Associate a buffer with a vnode. 2269 */ 2270 void 2271 bgetvp(struct vnode *vp, struct buf *bp) 2272 { 2273 struct bufobj *bo; 2274 2275 bo = &vp->v_bufobj; 2276 ASSERT_BO_WLOCKED(bo); 2277 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2278 2279 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2280 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2281 ("bgetvp: bp already attached! %p", bp)); 2282 2283 vhold(vp); 2284 bp->b_vp = vp; 2285 bp->b_bufobj = bo; 2286 /* 2287 * Insert onto list for new vnode. 2288 */ 2289 buf_vlist_add(bp, bo, BX_VNCLEAN); 2290 } 2291 2292 /* 2293 * Disassociate a buffer from a vnode. 2294 */ 2295 void 2296 brelvp(struct buf *bp) 2297 { 2298 struct bufobj *bo; 2299 struct vnode *vp; 2300 2301 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2302 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2303 2304 /* 2305 * Delete from old vnode list, if on one. 2306 */ 2307 vp = bp->b_vp; /* XXX */ 2308 bo = bp->b_bufobj; 2309 BO_LOCK(bo); 2310 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2311 buf_vlist_remove(bp); 2312 else 2313 panic("brelvp: Buffer %p not on queue.", bp); 2314 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2315 bo->bo_flag &= ~BO_ONWORKLST; 2316 mtx_lock(&sync_mtx); 2317 LIST_REMOVE(bo, bo_synclist); 2318 syncer_worklist_len--; 2319 mtx_unlock(&sync_mtx); 2320 } 2321 bp->b_vp = NULL; 2322 bp->b_bufobj = NULL; 2323 BO_UNLOCK(bo); 2324 vdrop(vp); 2325 } 2326 2327 /* 2328 * Add an item to the syncer work queue. 
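 * The delay selects a slot in the syncer_workitem_pending ring relative
 * to the slot currently being drained.  As a rough example, assuming the
 * ring has 32 slots (so syncer_mask is 31) and syncer_delayno currently
 * sits at slot 5, a delay of 10 seconds files the bufobj in slot
 * (5 + 10) & 31 == 15, i.e. roughly ten syncer passes from now; the
 * clamp to syncer_maxdelay - 2 keeps a large request from wrapping all
 * the way around the ring.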
2329 */ 2330 static void 2331 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2332 { 2333 int slot; 2334 2335 ASSERT_BO_WLOCKED(bo); 2336 2337 mtx_lock(&sync_mtx); 2338 if (bo->bo_flag & BO_ONWORKLST) 2339 LIST_REMOVE(bo, bo_synclist); 2340 else { 2341 bo->bo_flag |= BO_ONWORKLST; 2342 syncer_worklist_len++; 2343 } 2344 2345 if (delay > syncer_maxdelay - 2) 2346 delay = syncer_maxdelay - 2; 2347 slot = (syncer_delayno + delay) & syncer_mask; 2348 2349 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2350 mtx_unlock(&sync_mtx); 2351 } 2352 2353 static int 2354 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2355 { 2356 int error, len; 2357 2358 mtx_lock(&sync_mtx); 2359 len = syncer_worklist_len - sync_vnode_count; 2360 mtx_unlock(&sync_mtx); 2361 error = SYSCTL_OUT(req, &len, sizeof(len)); 2362 return (error); 2363 } 2364 2365 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2366 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2367 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2368 2369 static struct proc *updateproc; 2370 static void sched_sync(void); 2371 static struct kproc_desc up_kp = { 2372 "syncer", 2373 sched_sync, 2374 &updateproc 2375 }; 2376 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2377 2378 static int 2379 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2380 { 2381 struct vnode *vp; 2382 struct mount *mp; 2383 2384 *bo = LIST_FIRST(slp); 2385 if (*bo == NULL) 2386 return (0); 2387 vp = bo2vnode(*bo); 2388 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2389 return (1); 2390 /* 2391 * We use vhold in case the vnode does not 2392 * successfully sync. vhold prevents the vnode from 2393 * going away when we unlock the sync_mtx so that 2394 * we can acquire the vnode interlock. 2395 */ 2396 vholdl(vp); 2397 mtx_unlock(&sync_mtx); 2398 VI_UNLOCK(vp); 2399 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2400 vdrop(vp); 2401 mtx_lock(&sync_mtx); 2402 return (*bo == LIST_FIRST(slp)); 2403 } 2404 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2405 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2406 VOP_UNLOCK(vp); 2407 vn_finished_write(mp); 2408 BO_LOCK(*bo); 2409 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2410 /* 2411 * Put us back on the worklist. The worklist 2412 * routine will remove us from our current 2413 * position and then add us back in at a later 2414 * position. 2415 */ 2416 vn_syncer_add_to_worklist(*bo, syncdelay); 2417 } 2418 BO_UNLOCK(*bo); 2419 vdrop(vp); 2420 mtx_lock(&sync_mtx); 2421 return (0); 2422 } 2423 2424 static int first_printf = 1; 2425 2426 /* 2427 * System filesystem synchronizer daemon. 
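 * In outline, each pass of the loop below advances syncer_delayno to the
 * next slot of the worklist ring, pushes every bufobj found in that slot
 * through sync_vnode(), and then sleeps until the next second.  When
 * syncer_state is no longer SYNCER_RUNNING it instead walks the ring as
 * quickly as it can and enters SYNCER_FINAL_DELAY once only the syncer
 * vnodes themselves remain on the worklist.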
2428 */
2429 static void
2430 sched_sync(void)
2431 {
2432 struct synclist *next, *slp;
2433 struct bufobj *bo;
2434 long starttime;
2435 struct thread *td = curthread;
2436 int last_work_seen;
2437 int net_worklist_len;
2438 int syncer_final_iter;
2439 int error;
2440
2441 last_work_seen = 0;
2442 syncer_final_iter = 0;
2443 syncer_state = SYNCER_RUNNING;
2444 starttime = time_uptime;
2445 td->td_pflags |= TDP_NORUNNINGBUF;
2446
2447 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2448 SHUTDOWN_PRI_LAST);
2449
2450 mtx_lock(&sync_mtx);
2451 for (;;) {
2452 if (syncer_state == SYNCER_FINAL_DELAY &&
2453 syncer_final_iter == 0) {
2454 mtx_unlock(&sync_mtx);
2455 kproc_suspend_check(td->td_proc);
2456 mtx_lock(&sync_mtx);
2457 }
2458 net_worklist_len = syncer_worklist_len - sync_vnode_count;
2459 if (syncer_state != SYNCER_RUNNING &&
2460 starttime != time_uptime) {
2461 if (first_printf) {
2462 printf("\nSyncing disks, vnodes remaining... ");
2463 first_printf = 0;
2464 }
2465 printf("%d ", net_worklist_len);
2466 }
2467 starttime = time_uptime;
2468
2469 /*
2470 * Push files whose dirty time has expired. Be careful
2471 * of interrupt race on slp queue.
2472 *
2473 * Skip over empty worklist slots when shutting down.
2474 */
2475 do {
2476 slp = &syncer_workitem_pending[syncer_delayno];
2477 syncer_delayno += 1;
2478 if (syncer_delayno == syncer_maxdelay)
2479 syncer_delayno = 0;
2480 next = &syncer_workitem_pending[syncer_delayno];
2481 /*
2482 * If the worklist has wrapped since
2483 * it was emptied of all but syncer vnodes,
2484 * switch to the FINAL_DELAY state and run
2485 * for one more second.
2486 */
2487 if (syncer_state == SYNCER_SHUTTING_DOWN &&
2488 net_worklist_len == 0 &&
2489 last_work_seen == syncer_delayno) {
2490 syncer_state = SYNCER_FINAL_DELAY;
2491 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2492 }
2493 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2494 syncer_worklist_len > 0);
2495
2496 /*
2497 * Keep track of the last time there was anything
2498 * on the worklist other than syncer vnodes.
2499 * Return to the SHUTTING_DOWN state if any
2500 * new work appears.
2501 */
2502 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2503 last_work_seen = syncer_delayno;
2504 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2505 syncer_state = SYNCER_SHUTTING_DOWN;
2506 while (!LIST_EMPTY(slp)) {
2507 error = sync_vnode(slp, &bo, td);
2508 if (error == 1) {
2509 LIST_REMOVE(bo, bo_synclist);
2510 LIST_INSERT_HEAD(next, bo, bo_synclist);
2511 continue;
2512 }
2513
2514 if (first_printf == 0) {
2515 /*
2516 * Drop the sync mutex, because some watchdog
2517 * drivers need to sleep while patting the watchdog.
2518 */
2519 mtx_unlock(&sync_mtx);
2520 wdog_kern_pat(WD_LASTVAL);
2521 mtx_lock(&sync_mtx);
2522 }
2523
2524 }
2525 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2526 syncer_final_iter--;
2527 /*
2528 * The variable rushjob allows the kernel to speed up the
2529 * processing of the filesystem syncer process. A rushjob
2530 * value of N tells the filesystem syncer to process the next
2531 * N seconds worth of work on its queue ASAP. Currently rushjob
2532 * is used by the soft update code to speed up the filesystem
2533 * syncer process when the incore state is getting so far
2534 * ahead of the disk that the kernel memory pool is being
2535 * threatened with exhaustion.
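 * For example, three back-to-back speedup_syncer() calls leave rushjob
 * at 3 (it is capped at syncdelay / 2); the check below then lets the
 * loop run three consecutive passes without sleeping, decrementing
 * rushjob once per pass, before the syncer falls back to its usual
 * one-second cadence.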
2536 */
2537 if (rushjob > 0) {
2538 rushjob -= 1;
2539 continue;
2540 }
2541 /*
2542 * Just sleep for a short period of time between
2543 * iterations when shutting down to allow some I/O
2544 * to happen.
2545 *
2546 * If it has taken us less than a second to process the
2547 * current work, then wait. Otherwise start right over
2548 * again. We can still lose time if any single round
2549 * takes more than two seconds, but it does not really
2550 * matter as we are just trying to generally pace the
2551 * filesystem activity.
2552 */
2553 if (syncer_state != SYNCER_RUNNING ||
2554 time_uptime == starttime) {
2555 thread_lock(td);
2556 sched_prio(td, PPAUSE);
2557 thread_unlock(td);
2558 }
2559 if (syncer_state != SYNCER_RUNNING)
2560 cv_timedwait(&sync_wakeup, &sync_mtx,
2561 hz / SYNCER_SHUTDOWN_SPEEDUP);
2562 else if (time_uptime == starttime)
2563 cv_timedwait(&sync_wakeup, &sync_mtx, hz);
2564 }
2565 }
2566
2567 /*
2568 * Request the syncer daemon to speed up its work.
2569 * We never push it to speed up more than half of its
2570 * normal turn time, otherwise it could take over the cpu.
2571 */
2572 int
2573 speedup_syncer(void)
2574 {
2575 int ret = 0;
2576
2577 mtx_lock(&sync_mtx);
2578 if (rushjob < syncdelay / 2) {
2579 rushjob += 1;
2580 stat_rush_requests += 1;
2581 ret = 1;
2582 }
2583 mtx_unlock(&sync_mtx);
2584 cv_broadcast(&sync_wakeup);
2585 return (ret);
2586 }
2587
2588 /*
2589 * Tell the syncer to speed up its work and run through its work
2590 * list several times, then tell it to shut down.
2591 */
2592 static void
2593 syncer_shutdown(void *arg, int howto)
2594 {
2595
2596 if (howto & RB_NOSYNC)
2597 return;
2598 mtx_lock(&sync_mtx);
2599 syncer_state = SYNCER_SHUTTING_DOWN;
2600 rushjob = 0;
2601 mtx_unlock(&sync_mtx);
2602 cv_broadcast(&sync_wakeup);
2603 kproc_shutdown(arg, howto);
2604 }
2605
2606 void
2607 syncer_suspend(void)
2608 {
2609
2610 syncer_shutdown(updateproc, 0);
2611 }
2612
2613 void
2614 syncer_resume(void)
2615 {
2616
2617 mtx_lock(&sync_mtx);
2618 first_printf = 1;
2619 syncer_state = SYNCER_RUNNING;
2620 mtx_unlock(&sync_mtx);
2621 cv_broadcast(&sync_wakeup);
2622 kproc_resume(updateproc);
2623 }
2624
2625 /*
2626 * Reassign a buffer from one vnode to another.
2627 * Used to assign file specific control information
2628 * (indirect blocks) to the vnode to which they belong.
2629 */
2630 void
2631 reassignbuf(struct buf *bp)
2632 {
2633 struct vnode *vp;
2634 struct bufobj *bo;
2635 int delay;
2636 #ifdef INVARIANTS
2637 struct bufv *bv;
2638 #endif
2639
2640 vp = bp->b_vp;
2641 bo = bp->b_bufobj;
2642 ++reassignbufcalls;
2643
2644 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2645 bp, bp->b_vp, bp->b_flags);
2646 /*
2647 * B_PAGING flagged buffers cannot be reassigned because their vp
2648 * is not fully linked in.
2649 */
2650 if (bp->b_flags & B_PAGING)
2651 panic("cannot reassign paging buffer");
2652
2653 /*
2654 * Delete from old vnode list, if on one.
2655 */
2656 BO_LOCK(bo);
2657 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2658 buf_vlist_remove(bp);
2659 else
2660 panic("reassignbuf: Buffer %p not on queue.", bp);
2661 /*
2662 * If dirty, put on list of dirty buffers; otherwise insert onto list
2663 * of clean buffers.
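 * This is typically reached from the buffer cache when B_DELWRI changes,
 * e.g. from bdirty() and bundirty(); the delay chosen below also
 * (re)schedules the bufobj with the syncer, so directories and metadata
 * tend to be written back a little sooner than regular file data.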
2664 */ 2665 if (bp->b_flags & B_DELWRI) { 2666 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2667 switch (vp->v_type) { 2668 case VDIR: 2669 delay = dirdelay; 2670 break; 2671 case VCHR: 2672 delay = metadelay; 2673 break; 2674 default: 2675 delay = filedelay; 2676 } 2677 vn_syncer_add_to_worklist(bo, delay); 2678 } 2679 buf_vlist_add(bp, bo, BX_VNDIRTY); 2680 } else { 2681 buf_vlist_add(bp, bo, BX_VNCLEAN); 2682 2683 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2684 mtx_lock(&sync_mtx); 2685 LIST_REMOVE(bo, bo_synclist); 2686 syncer_worklist_len--; 2687 mtx_unlock(&sync_mtx); 2688 bo->bo_flag &= ~BO_ONWORKLST; 2689 } 2690 } 2691 #ifdef INVARIANTS 2692 bv = &bo->bo_clean; 2693 bp = TAILQ_FIRST(&bv->bv_hd); 2694 KASSERT(bp == NULL || bp->b_bufobj == bo, 2695 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2696 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2697 KASSERT(bp == NULL || bp->b_bufobj == bo, 2698 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2699 bv = &bo->bo_dirty; 2700 bp = TAILQ_FIRST(&bv->bv_hd); 2701 KASSERT(bp == NULL || bp->b_bufobj == bo, 2702 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2703 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2704 KASSERT(bp == NULL || bp->b_bufobj == bo, 2705 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2706 #endif 2707 BO_UNLOCK(bo); 2708 } 2709 2710 static void 2711 v_init_counters(struct vnode *vp) 2712 { 2713 2714 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2715 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2716 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2717 2718 refcount_init(&vp->v_holdcnt, 1); 2719 refcount_init(&vp->v_usecount, 1); 2720 } 2721 2722 /* 2723 * Increment si_usecount of the associated device, if any. 2724 */ 2725 static void 2726 v_incr_devcount(struct vnode *vp) 2727 { 2728 2729 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2730 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2731 dev_lock(); 2732 vp->v_rdev->si_usecount++; 2733 dev_unlock(); 2734 } 2735 } 2736 2737 /* 2738 * Decrement si_usecount of the associated device, if any. 2739 */ 2740 static void 2741 v_decr_devcount(struct vnode *vp) 2742 { 2743 2744 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2745 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2746 dev_lock(); 2747 vp->v_rdev->si_usecount--; 2748 dev_unlock(); 2749 } 2750 } 2751 2752 /* 2753 * Grab a particular vnode from the free list, increment its 2754 * reference count and lock it. VIRF_DOOMED is set if the vnode 2755 * is being destroyed. Only callers who specify LK_RETRY will 2756 * see doomed vnodes. If inactive processing was delayed in 2757 * vput try to do it here. 2758 * 2759 * Both holdcnt and usecount can be manipulated using atomics without holding 2760 * any locks except in these cases which require the vnode interlock: 2761 * holdcnt: 1->0 and 0->1 2762 * usecount: 0->1 2763 * 2764 * usecount is permitted to transition 1->0 without the interlock because 2765 * vnode is kept live by holdcnt. 
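 * A lockless consumer, such as a name cache hit, typically uses the
 * two-step interface.  A rough sketch (the surrounding locking is the
 * caller's and only illustrative):
 *
 *	enum vgetstate vs;
 *
 *	vs = vget_prep(vp);
 *	<drop whatever lock made vp stable>
 *	error = vget_finish(vp, LK_SHARED, vs);
 *	if (error != 0)
 *		return (error);	<references taken by vget_prep are gone>
 *	<vp is now locked, held and referenced>
 *	...
 *	vput(vp);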
2766 */
2767 static enum vgetstate __always_inline
2768 _vget_prep(struct vnode *vp, bool interlock)
2769 {
2770 enum vgetstate vs;
2771
2772 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2773 vs = VGET_USECOUNT;
2774 } else {
2775 if (interlock)
2776 vholdl(vp);
2777 else
2778 vhold(vp);
2779 vs = VGET_HOLDCNT;
2780 }
2781 return (vs);
2782 }
2783
2784 enum vgetstate
2785 vget_prep(struct vnode *vp)
2786 {
2787
2788 return (_vget_prep(vp, false));
2789 }
2790
2791 int
2792 vget(struct vnode *vp, int flags, struct thread *td)
2793 {
2794 enum vgetstate vs;
2795
2796 MPASS(td == curthread);
2797
2798 vs = _vget_prep(vp, (flags & LK_INTERLOCK) != 0);
2799 return (vget_finish(vp, flags, vs));
2800 }
2801
2802 int
2803 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
2804 {
2805 int error, oweinact;
2806
2807 VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2808 ("%s: invalid lock operation", __func__));
2809
2810 if ((flags & LK_INTERLOCK) != 0)
2811 ASSERT_VI_LOCKED(vp, __func__);
2812 else
2813 ASSERT_VI_UNLOCKED(vp, __func__);
2814 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
2815 if (vs == VGET_USECOUNT) {
2816 VNASSERT(vp->v_usecount > 0, vp,
2817 ("%s: vnode without usecount when VGET_USECOUNT was passed",
2818 __func__));
2819 }
2820
2821 if ((error = vn_lock(vp, flags)) != 0) {
2822 if (vs == VGET_USECOUNT)
2823 vrele(vp);
2824 else
2825 vdrop(vp);
2826 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2827 vp);
2828 return (error);
2829 }
2830
2831 if (vs == VGET_USECOUNT) {
2832 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2833 ("%s: vnode with usecount and VI_OWEINACT set", __func__));
2834 return (0);
2835 }
2836
2837 /*
2838 * We hold the vnode. If the usecount is 0 it will be utilized to keep
2839 * the vnode around. Otherwise someone else lent their hold count and
2840 * we have to drop ours.
2841 */
2842 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2843 #ifdef INVARIANTS
2844 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
2845 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
2846 #else
2847 refcount_release(&vp->v_holdcnt);
2848 #endif
2849 VNODE_REFCOUNT_FENCE_ACQ();
2850 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2851 ("%s: vnode with usecount and VI_OWEINACT set", __func__));
2852 return (0);
2853 }
2854
2855 /*
2856 * We don't guarantee that any particular close will
2857 * trigger inactive processing so just make a best effort
2858 * here at preventing a reference to a removed file. If
2859 * we don't succeed no harm is done.
2860 *
2861 * Upgrade our holdcnt to a usecount.
2862 */
2863 VI_LOCK(vp);
2864 /*
2865 * See the previous section. By the time we get here we may find
2866 * ourselves in the same spot.
2867 */ 2868 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2869 #ifdef INVARIANTS 2870 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2871 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2872 #else 2873 refcount_release(&vp->v_holdcnt); 2874 #endif 2875 VNODE_REFCOUNT_FENCE_ACQ(); 2876 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2877 ("%s: vnode with usecount and VI_OWEINACT set", 2878 __func__)); 2879 VI_UNLOCK(vp); 2880 return (0); 2881 } 2882 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2883 oweinact = 0; 2884 } else { 2885 oweinact = 1; 2886 vp->v_iflag &= ~VI_OWEINACT; 2887 VNODE_REFCOUNT_FENCE_REL(); 2888 } 2889 v_incr_devcount(vp); 2890 refcount_acquire(&vp->v_usecount); 2891 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2892 (flags & LK_NOWAIT) == 0) 2893 vinactive(vp); 2894 VI_UNLOCK(vp); 2895 return (0); 2896 } 2897 2898 /* 2899 * Increase the reference (use) and hold count of a vnode. 2900 * This will also remove the vnode from the free list if it is presently free. 2901 */ 2902 void 2903 vref(struct vnode *vp) 2904 { 2905 2906 ASSERT_VI_UNLOCKED(vp, __func__); 2907 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2908 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2909 VNODE_REFCOUNT_FENCE_ACQ(); 2910 VNASSERT(vp->v_holdcnt > 0, vp, 2911 ("%s: active vnode not held", __func__)); 2912 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2913 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2914 return; 2915 } 2916 VI_LOCK(vp); 2917 vrefl(vp); 2918 VI_UNLOCK(vp); 2919 } 2920 2921 void 2922 vrefl(struct vnode *vp) 2923 { 2924 2925 ASSERT_VI_LOCKED(vp, __func__); 2926 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2927 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2928 VNODE_REFCOUNT_FENCE_ACQ(); 2929 VNASSERT(vp->v_holdcnt > 0, vp, 2930 ("%s: active vnode not held", __func__)); 2931 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2932 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2933 return; 2934 } 2935 vholdl(vp); 2936 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2937 vp->v_iflag &= ~VI_OWEINACT; 2938 VNODE_REFCOUNT_FENCE_REL(); 2939 } 2940 v_incr_devcount(vp); 2941 refcount_acquire(&vp->v_usecount); 2942 } 2943 2944 void 2945 vrefact(struct vnode *vp) 2946 { 2947 2948 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2949 #ifdef INVARIANTS 2950 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2951 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 2952 #else 2953 refcount_acquire(&vp->v_usecount); 2954 #endif 2955 } 2956 2957 /* 2958 * Return reference count of a vnode. 2959 * 2960 * The results of this call are only guaranteed when some mechanism is used to 2961 * stop other processes from gaining references to the vnode. This may be the 2962 * case if the caller holds the only reference. This is also useful when stale 2963 * data is acceptable as race conditions may be accounted for by some other 2964 * means. 
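 * For example, vflush() below only trusts the v_usecount it reads for
 * the root vnode because it holds the vnode interlock at that point and
 * has already accounted, via `rootrefs', for the references it knows
 * about.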
2965 */ 2966 int 2967 vrefcnt(struct vnode *vp) 2968 { 2969 2970 return (vp->v_usecount); 2971 } 2972 2973 void 2974 vlazy(struct vnode *vp) 2975 { 2976 struct mount *mp; 2977 2978 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2979 2980 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 2981 return; 2982 mp = vp->v_mount; 2983 mtx_lock(&mp->mnt_listmtx); 2984 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 2985 vp->v_mflag |= VMP_LAZYLIST; 2986 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 2987 mp->mnt_lazyvnodelistsize++; 2988 } 2989 mtx_unlock(&mp->mnt_listmtx); 2990 } 2991 2992 static void 2993 vdefer_inactive(struct vnode *vp) 2994 { 2995 2996 ASSERT_VI_LOCKED(vp, __func__); 2997 VNASSERT(vp->v_iflag & VI_OWEINACT, vp, 2998 ("%s: vnode without VI_OWEINACT", __func__)); 2999 if (VN_IS_DOOMED(vp)) { 3000 vdropl(vp); 3001 return; 3002 } 3003 if (vp->v_iflag & VI_DEFINACT) { 3004 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3005 vdropl(vp); 3006 return; 3007 } 3008 vlazy(vp); 3009 vp->v_iflag |= VI_DEFINACT; 3010 VI_UNLOCK(vp); 3011 counter_u64_add(deferred_inact, 1); 3012 } 3013 3014 static void 3015 vdefer_inactive_cond(struct vnode *vp) 3016 { 3017 3018 VI_LOCK(vp); 3019 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3020 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3021 vdropl(vp); 3022 return; 3023 } 3024 vdefer_inactive(vp); 3025 } 3026 3027 enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; 3028 3029 /* 3030 * Decrement the use and hold counts for a vnode. 3031 * 3032 * See an explanation near vget() as to why atomic operation is safe. 3033 */ 3034 static void 3035 vputx(struct vnode *vp, enum vputx_op func) 3036 { 3037 int error; 3038 3039 KASSERT(vp != NULL, ("vputx: null vp")); 3040 if (func == VPUTX_VUNREF) 3041 ASSERT_VOP_LOCKED(vp, "vunref"); 3042 ASSERT_VI_UNLOCKED(vp, __func__); 3043 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 3044 ("%s: wrong ref counts", __func__)); 3045 3046 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3047 3048 /* 3049 * We want to hold the vnode until the inactive finishes to 3050 * prevent vgone() races. We drop the use count here and the 3051 * hold count below when we're done. 3052 * 3053 * If we release the last usecount we take ownership of the hold 3054 * count which provides liveness of the vnode, in which case we 3055 * have to vdrop. 3056 */ 3057 if (!refcount_release(&vp->v_usecount)) 3058 return; 3059 VI_LOCK(vp); 3060 v_decr_devcount(vp); 3061 /* 3062 * By the time we got here someone else might have transitioned 3063 * the count back to > 0. 3064 */ 3065 if (vp->v_usecount > 0) { 3066 vdropl(vp); 3067 return; 3068 } 3069 if (vp->v_iflag & VI_DOINGINACT) { 3070 vdropl(vp); 3071 return; 3072 } 3073 3074 /* 3075 * Check if the fs wants to perform inactive processing. Note we 3076 * may be only holding the interlock, in which case it is possible 3077 * someone else called vgone on the vnode and ->v_data is now NULL. 3078 * Since vgone performs inactive on its own there is nothing to do 3079 * here but to drop our hold count. 3080 */ 3081 if (__predict_false(VN_IS_DOOMED(vp)) || 3082 VOP_NEED_INACTIVE(vp) == 0) { 3083 vdropl(vp); 3084 return; 3085 } 3086 3087 /* 3088 * We must call VOP_INACTIVE with the node locked. Mark 3089 * as VI_DOINGINACT to avoid recursion. 
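 * How the vnode lock is obtained depends on the entry point: vrele()
 * holds no vnode lock and may sleep for it, vput() has already unlocked
 * and therefore only makes a non-blocking attempt, and vunref() keeps
 * its caller's lock, try-upgrading a shared lock if necessary.  If the
 * lock cannot be obtained the inactive call is not lost; it is deferred
 * through vdefer_inactive() below.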
3090 */
3091 vp->v_iflag |= VI_OWEINACT;
3092 switch (func) {
3093 case VPUTX_VRELE:
3094 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
3095 VI_LOCK(vp);
3096 break;
3097 case VPUTX_VPUT:
3098 error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT);
3099 VI_LOCK(vp);
3100 break;
3101 case VPUTX_VUNREF:
3102 error = 0;
3103 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3104 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
3105 VI_LOCK(vp);
3106 }
3107 break;
3108 }
3109 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp,
3110 ("vnode with usecount and VI_OWEINACT set"));
3111 if (error == 0) {
3112 if (vp->v_iflag & VI_OWEINACT)
3113 vinactive(vp);
3114 if (func != VPUTX_VUNREF)
3115 VOP_UNLOCK(vp);
3116 vdropl(vp);
3117 } else if (vp->v_iflag & VI_OWEINACT) {
3118 vdefer_inactive(vp);
3119 } else {
3120 vdropl(vp);
3121 }
3122 }
3123
3124 /*
3125 * Vnode put/release.
3126 * If count drops to zero, call inactive routine and return to freelist.
3127 */
3128 void
3129 vrele(struct vnode *vp)
3130 {
3131
3132 vputx(vp, VPUTX_VRELE);
3133 }
3134
3135 /*
3136 * Release an already locked vnode. This gives the same effect as
3137 * unlock+vrele(), but takes less time and avoids releasing and
3138 * re-acquiring the lock (as vrele() acquires the lock internally).
3139 *
3140 * It is an invariant that all VOP_* calls operate on a held vnode.
3141 * We may only have an implicit hold stemming from our usecount,
3142 * which we are about to release. If we unlock the vnode afterwards we
3143 * open a time window where someone else dropped the last usecount and
3144 * proceeded to free the vnode before our unlock finished. For this
3145 * reason we unlock the vnode early. This is a little bit wasteful as
3146 * it may be that the vnode is exclusively locked and inactive processing is
3147 * needed, in which case we are adding work.
3148 */
3149 void
3150 vput(struct vnode *vp)
3151 {
3152
3153 VOP_UNLOCK(vp);
3154 vputx(vp, VPUTX_VPUT);
3155 }
3156
3157 /*
3158 * Release an exclusively locked vnode. Do not unlock the vnode lock.
3159 */
3160 void
3161 vunref(struct vnode *vp)
3162 {
3163
3164 vputx(vp, VPUTX_VUNREF);
3165 }
3166
3167 /*
3168 * Increase the hold count and activate if this is the first reference.
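 * A hold keeps the vnode memory and identity from being recycled, but
 * unlike a use reference it does not prevent the vnode from being
 * reclaimed (doomed) by, e.g., a forced unmount.  A rough usage sketch
 * (hypothetical caller, for illustration only):
 *
 *	VI_LOCK(vp);
 *	vholdl(vp);
 *	VI_UNLOCK(vp);
 *	<inspect or lock the vnode, possibly sleeping>
 *	vdrop(vp);
 *
 * sync_vnode() above relies on exactly this pattern.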
3169 */
3170 static void
3171 vhold_activate(struct vnode *vp)
3172 {
3173
3174 ASSERT_VI_LOCKED(vp, __func__);
3175 VNASSERT(vp->v_holdcnt == 0, vp,
3176 ("%s: wrong hold count", __func__));
3177 VNASSERT(vp->v_op != NULL, vp,
3178 ("%s: vnode already reclaimed.", __func__));
3179 atomic_subtract_long(&freevnodes, 1);
3180 refcount_acquire(&vp->v_holdcnt);
3181 }
3182
3183 void
3184 vhold(struct vnode *vp)
3185 {
3186
3187 ASSERT_VI_UNLOCKED(vp, __func__);
3188 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3189 if (refcount_acquire_if_not_zero(&vp->v_holdcnt))
3190 return;
3191 VI_LOCK(vp);
3192 vholdl(vp);
3193 VI_UNLOCK(vp);
3194 }
3195
3196 void
3197 vholdl(struct vnode *vp)
3198 {
3199
3200 ASSERT_VI_LOCKED(vp, __func__);
3201 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3202 if (vp->v_holdcnt > 0) {
3203 refcount_acquire(&vp->v_holdcnt);
3204 return;
3205 }
3206 vhold_activate(vp);
3207 }
3208
3209 void
3210 vholdnz(struct vnode *vp)
3211 {
3212
3213 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3214 #ifdef INVARIANTS
3215 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3216 VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old));
3217 #else
3218 atomic_add_int(&vp->v_holdcnt, 1);
3219 #endif
3220 }
3221
3222 static void __noinline
3223 vdbatch_process(struct vdbatch *vd)
3224 {
3225 struct vnode *vp;
3226 int i;
3227
3228 mtx_assert(&vd->lock, MA_OWNED);
3229 MPASS(vd->index == VDBATCH_SIZE);
3230
3231 mtx_lock(&vnode_list_mtx);
3232 for (i = 0; i < VDBATCH_SIZE; i++) {
3233 vp = vd->tab[i];
3234 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3235 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3236 MPASS(vp->v_dbatchcpu != NOCPU);
3237 vp->v_dbatchcpu = NOCPU;
3238 }
3239 bzero(vd->tab, sizeof(vd->tab));
3240 vd->index = 0;
3241 mtx_unlock(&vnode_list_mtx);
3242 }
3243
3244 static void
3245 vdbatch_enqueue(struct vnode *vp)
3246 {
3247 struct vdbatch *vd;
3248
3249 ASSERT_VI_LOCKED(vp, __func__);
3250 VNASSERT(!VN_IS_DOOMED(vp), vp,
3251 ("%s: deferring requeue of a doomed vnode", __func__));
3252
3253 if (vp->v_dbatchcpu != NOCPU) {
3254 VI_UNLOCK(vp);
3255 return;
3256 }
3257
3258 /*
3259 * A hack: pin us to the current CPU so that we know what to put in
3260 * ->v_dbatchcpu.
3261 */
3262 sched_pin();
3263 vd = DPCPU_PTR(vd);
3264 mtx_lock(&vd->lock);
3265 MPASS(vd->index < VDBATCH_SIZE);
3266 MPASS(vd->tab[vd->index] == NULL);
3267 vp->v_dbatchcpu = curcpu;
3268 vd->tab[vd->index] = vp;
3269 vd->index++;
3270 VI_UNLOCK(vp);
3271 if (vd->index == VDBATCH_SIZE)
3272 vdbatch_process(vd);
3273 mtx_unlock(&vd->lock);
3274 sched_unpin();
3275 }
3276
3277 /*
3278 * This routine must only be called for vnodes which are about to be
3279 * deallocated. Supporting dequeue for arbitrary vnodes would require
3280 * validating that the locked batch matches.
3281 */
3282 static void
3283 vdbatch_dequeue(struct vnode *vp)
3284 {
3285 struct vdbatch *vd;
3286 int i;
3287 short cpu;
3288
3289 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp,
3290 ("%s: called for a used vnode\n", __func__));
3291
3292 cpu = vp->v_dbatchcpu;
3293 if (cpu == NOCPU)
3294 return;
3295
3296 vd = DPCPU_ID_PTR(cpu, vd);
3297 mtx_lock(&vd->lock);
3298 for (i = 0; i < vd->index; i++) {
3299 if (vd->tab[i] != vp)
3300 continue;
3301 vp->v_dbatchcpu = NOCPU;
3302 vd->index--;
3303 vd->tab[i] = vd->tab[vd->index];
3304 vd->tab[vd->index] = NULL;
3305 break;
3306 }
3307 mtx_unlock(&vd->lock);
3308 /*
3309 * Either we dequeued the vnode above or the target CPU beat us to it.
3310 */ 3311 MPASS(vp->v_dbatchcpu == NOCPU); 3312 } 3313 3314 /* 3315 * Drop the hold count of the vnode. If this is the last reference to 3316 * the vnode we place it on the free list unless it has been vgone'd 3317 * (marked VIRF_DOOMED) in which case we will free it. 3318 * 3319 * Because the vnode vm object keeps a hold reference on the vnode if 3320 * there is at least one resident non-cached page, the vnode cannot 3321 * leave the active list without the page cleanup done. 3322 */ 3323 static void 3324 vdrop_deactivate(struct vnode *vp) 3325 { 3326 struct mount *mp; 3327 3328 ASSERT_VI_LOCKED(vp, __func__); 3329 /* 3330 * Mark a vnode as free: remove it from its active list 3331 * and put it up for recycling on the freelist. 3332 */ 3333 VNASSERT(!VN_IS_DOOMED(vp), vp, 3334 ("vdrop: returning doomed vnode")); 3335 VNASSERT(vp->v_op != NULL, vp, 3336 ("vdrop: vnode already reclaimed.")); 3337 VNASSERT(vp->v_holdcnt == 0, vp, 3338 ("vdrop: freeing when we shouldn't")); 3339 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3340 ("vnode with VI_OWEINACT set")); 3341 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3342 ("vnode with VI_DEFINACT set")); 3343 if (vp->v_mflag & VMP_LAZYLIST) { 3344 mp = vp->v_mount; 3345 mtx_lock(&mp->mnt_listmtx); 3346 vp->v_mflag &= ~VMP_LAZYLIST; 3347 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3348 mp->mnt_lazyvnodelistsize--; 3349 mtx_unlock(&mp->mnt_listmtx); 3350 } 3351 atomic_add_long(&freevnodes, 1); 3352 vdbatch_enqueue(vp); 3353 } 3354 3355 void 3356 vdrop(struct vnode *vp) 3357 { 3358 3359 ASSERT_VI_UNLOCKED(vp, __func__); 3360 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3361 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3362 return; 3363 VI_LOCK(vp); 3364 vdropl(vp); 3365 } 3366 3367 void 3368 vdropl(struct vnode *vp) 3369 { 3370 3371 ASSERT_VI_LOCKED(vp, __func__); 3372 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3373 if (!refcount_release(&vp->v_holdcnt)) { 3374 VI_UNLOCK(vp); 3375 return; 3376 } 3377 if (VN_IS_DOOMED(vp)) { 3378 freevnode(vp); 3379 return; 3380 } 3381 vdrop_deactivate(vp); 3382 } 3383 3384 /* 3385 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3386 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3387 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3388 * failed lock upgrade. 3389 */ 3390 void 3391 vinactive(struct vnode *vp) 3392 { 3393 struct vm_object *obj; 3394 3395 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3396 ASSERT_VI_LOCKED(vp, "vinactive"); 3397 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3398 ("vinactive: recursed on VI_DOINGINACT")); 3399 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3400 vp->v_iflag |= VI_DOINGINACT; 3401 vp->v_iflag &= ~VI_OWEINACT; 3402 VI_UNLOCK(vp); 3403 /* 3404 * Before moving off the active list, we must be sure that any 3405 * modified pages are converted into the vnode's dirty 3406 * buffers, since these will no longer be checked once the 3407 * vnode is on the inactive list. 3408 * 3409 * The write-out of the dirty pages is asynchronous. At the 3410 * point that VOP_INACTIVE() is called, there could still be 3411 * pending I/O and dirty pages in the object. 
3412 */
3413 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
3414 vm_object_mightbedirty(obj)) {
3415 VM_OBJECT_WLOCK(obj);
3416 vm_object_page_clean(obj, 0, 0, 0);
3417 VM_OBJECT_WUNLOCK(obj);
3418 }
3419 VOP_INACTIVE(vp, curthread);
3420 VI_LOCK(vp);
3421 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
3422 ("vinactive: lost VI_DOINGINACT"));
3423 vp->v_iflag &= ~VI_DOINGINACT;
3424 }
3425
3426 /*
3427 * Remove any vnodes in the vnode table belonging to mount point mp.
3428 *
3429 * If FORCECLOSE is not specified, there should not be any active ones,
3430 * return error if any are found (nb: this is a user error, not a
3431 * system error). If FORCECLOSE is specified, detach any active vnodes
3432 * that are found.
3433 *
3434 * If WRITECLOSE is set, only flush out regular file vnodes open for
3435 * writing.
3436 *
3437 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
3438 *
3439 * `rootrefs' specifies the base reference count for the root vnode
3440 * of this filesystem. The root vnode is considered busy if its
3441 * v_usecount exceeds this value. On a successful return, vflush()
3442 * will call vrele() on the root vnode exactly rootrefs times.
3443 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
3444 * be zero.
3445 */
3446 #ifdef DIAGNOSTIC
3447 static int busyprt = 0; /* print out busy vnodes */
3448 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
3449 #endif
3450
3451 int
3452 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
3453 {
3454 struct vnode *vp, *mvp, *rootvp = NULL;
3455 struct vattr vattr;
3456 int busy = 0, error;
3457
3458 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
3459 rootrefs, flags);
3460 if (rootrefs > 0) {
3461 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
3462 ("vflush: bad args"));
3463 /*
3464 * Get the filesystem root vnode. We can vput() it
3465 * immediately, since with rootrefs > 0, it won't go away.
3466 */
3467 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
3468 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
3469 __func__, error);
3470 return (error);
3471 }
3472 vput(rootvp);
3473 }
3474 loop:
3475 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
3476 vholdl(vp);
3477 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
3478 if (error) {
3479 vdrop(vp);
3480 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3481 goto loop;
3482 }
3483 /*
3484 * Skip over vnodes marked VV_SYSTEM.
3485 */
3486 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
3487 VOP_UNLOCK(vp);
3488 vdrop(vp);
3489 continue;
3490 }
3491 /*
3492 * If WRITECLOSE is set, flush out unlinked but still open
3493 * files (even if open only for reading) and regular file
3494 * vnodes open for writing.
3495 */ 3496 if (flags & WRITECLOSE) { 3497 if (vp->v_object != NULL) { 3498 VM_OBJECT_WLOCK(vp->v_object); 3499 vm_object_page_clean(vp->v_object, 0, 0, 0); 3500 VM_OBJECT_WUNLOCK(vp->v_object); 3501 } 3502 error = VOP_FSYNC(vp, MNT_WAIT, td); 3503 if (error != 0) { 3504 VOP_UNLOCK(vp); 3505 vdrop(vp); 3506 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3507 return (error); 3508 } 3509 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3510 VI_LOCK(vp); 3511 3512 if ((vp->v_type == VNON || 3513 (error == 0 && vattr.va_nlink > 0)) && 3514 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3515 VOP_UNLOCK(vp); 3516 vdropl(vp); 3517 continue; 3518 } 3519 } else 3520 VI_LOCK(vp); 3521 /* 3522 * With v_usecount == 0, all we need to do is clear out the 3523 * vnode data structures and we are done. 3524 * 3525 * If FORCECLOSE is set, forcibly close the vnode. 3526 */ 3527 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3528 vgonel(vp); 3529 } else { 3530 busy++; 3531 #ifdef DIAGNOSTIC 3532 if (busyprt) 3533 vn_printf(vp, "vflush: busy vnode "); 3534 #endif 3535 } 3536 VOP_UNLOCK(vp); 3537 vdropl(vp); 3538 } 3539 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3540 /* 3541 * If just the root vnode is busy, and if its refcount 3542 * is equal to `rootrefs', then go ahead and kill it. 3543 */ 3544 VI_LOCK(rootvp); 3545 KASSERT(busy > 0, ("vflush: not busy")); 3546 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3547 ("vflush: usecount %d < rootrefs %d", 3548 rootvp->v_usecount, rootrefs)); 3549 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3550 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3551 vgone(rootvp); 3552 VOP_UNLOCK(rootvp); 3553 busy = 0; 3554 } else 3555 VI_UNLOCK(rootvp); 3556 } 3557 if (busy) { 3558 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3559 busy); 3560 return (EBUSY); 3561 } 3562 for (; rootrefs > 0; rootrefs--) 3563 vrele(rootvp); 3564 return (0); 3565 } 3566 3567 /* 3568 * Recycle an unused vnode to the front of the free list. 3569 */ 3570 int 3571 vrecycle(struct vnode *vp) 3572 { 3573 int recycled; 3574 3575 VI_LOCK(vp); 3576 recycled = vrecyclel(vp); 3577 VI_UNLOCK(vp); 3578 return (recycled); 3579 } 3580 3581 /* 3582 * vrecycle, with the vp interlock held. 3583 */ 3584 int 3585 vrecyclel(struct vnode *vp) 3586 { 3587 int recycled; 3588 3589 ASSERT_VOP_ELOCKED(vp, __func__); 3590 ASSERT_VI_LOCKED(vp, __func__); 3591 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3592 recycled = 0; 3593 if (vp->v_usecount == 0) { 3594 recycled = 1; 3595 vgonel(vp); 3596 } 3597 return (recycled); 3598 } 3599 3600 /* 3601 * Eliminate all activity associated with a vnode 3602 * in preparation for reuse. 3603 */ 3604 void 3605 vgone(struct vnode *vp) 3606 { 3607 VI_LOCK(vp); 3608 vgonel(vp); 3609 VI_UNLOCK(vp); 3610 } 3611 3612 static void 3613 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3614 struct vnode *lowervp __unused) 3615 { 3616 } 3617 3618 /* 3619 * Notify upper mounts about reclaimed or unlinked vnode. 
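 * Stacked filesystems such as nullfs register themselves on the lower
 * mount's mnt_uppers list and provide vfs_reclaim_lowervp /
 * vfs_unlink_lowervp methods so they can purge their alias vnodes when
 * the lower vnode goes away.  The temporary marker mount inserted below
 * keeps our position in mnt_uppers stable while the mount interlock is
 * dropped around each callback.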
3620 */ 3621 void 3622 vfs_notify_upper(struct vnode *vp, int event) 3623 { 3624 static struct vfsops vgonel_vfsops = { 3625 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3626 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3627 }; 3628 struct mount *mp, *ump, *mmp; 3629 3630 mp = vp->v_mount; 3631 if (mp == NULL) 3632 return; 3633 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3634 return; 3635 3636 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3637 mmp->mnt_op = &vgonel_vfsops; 3638 mmp->mnt_kern_flag |= MNTK_MARKER; 3639 MNT_ILOCK(mp); 3640 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3641 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3642 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3643 ump = TAILQ_NEXT(ump, mnt_upper_link); 3644 continue; 3645 } 3646 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3647 MNT_IUNLOCK(mp); 3648 switch (event) { 3649 case VFS_NOTIFY_UPPER_RECLAIM: 3650 VFS_RECLAIM_LOWERVP(ump, vp); 3651 break; 3652 case VFS_NOTIFY_UPPER_UNLINK: 3653 VFS_UNLINK_LOWERVP(ump, vp); 3654 break; 3655 default: 3656 KASSERT(0, ("invalid event %d", event)); 3657 break; 3658 } 3659 MNT_ILOCK(mp); 3660 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3661 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3662 } 3663 free(mmp, M_TEMP); 3664 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3665 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3666 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3667 wakeup(&mp->mnt_uppers); 3668 } 3669 MNT_IUNLOCK(mp); 3670 } 3671 3672 /* 3673 * vgone, with the vp interlock held. 3674 */ 3675 static void 3676 vgonel(struct vnode *vp) 3677 { 3678 struct thread *td; 3679 struct mount *mp; 3680 vm_object_t object; 3681 bool active, oweinact; 3682 3683 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3684 ASSERT_VI_LOCKED(vp, "vgonel"); 3685 VNASSERT(vp->v_holdcnt, vp, 3686 ("vgonel: vp %p has no reference.", vp)); 3687 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3688 td = curthread; 3689 3690 /* 3691 * Don't vgonel if we're already doomed. 3692 */ 3693 if (vp->v_irflag & VIRF_DOOMED) 3694 return; 3695 vp->v_irflag |= VIRF_DOOMED; 3696 3697 /* 3698 * Check to see if the vnode is in use. If so, we have to call 3699 * VOP_CLOSE() and VOP_INACTIVE(). 3700 */ 3701 active = vp->v_usecount > 0; 3702 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3703 /* 3704 * If we need to do inactive VI_OWEINACT will be set. 3705 */ 3706 if (vp->v_iflag & VI_DEFINACT) { 3707 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3708 vp->v_iflag &= ~VI_DEFINACT; 3709 vdropl(vp); 3710 } else { 3711 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3712 VI_UNLOCK(vp); 3713 } 3714 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3715 3716 /* 3717 * If purging an active vnode, it must be closed and 3718 * deactivated before being reclaimed. 3719 */ 3720 if (active) 3721 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3722 if (oweinact || active) { 3723 VI_LOCK(vp); 3724 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3725 vinactive(vp); 3726 VI_UNLOCK(vp); 3727 } 3728 if (vp->v_type == VSOCK) 3729 vfs_unp_reclaim(vp); 3730 3731 /* 3732 * Clean out any buffers associated with the vnode. 3733 * If the flush fails, just toss the buffers. 
3734 */ 3735 mp = NULL; 3736 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3737 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3738 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3739 while (vinvalbuf(vp, 0, 0, 0) != 0) 3740 ; 3741 } 3742 3743 BO_LOCK(&vp->v_bufobj); 3744 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3745 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3746 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3747 vp->v_bufobj.bo_clean.bv_cnt == 0, 3748 ("vp %p bufobj not invalidated", vp)); 3749 3750 /* 3751 * For VMIO bufobj, BO_DEAD is set later, or in 3752 * vm_object_terminate() after the object's page queue is 3753 * flushed. 3754 */ 3755 object = vp->v_bufobj.bo_object; 3756 if (object == NULL) 3757 vp->v_bufobj.bo_flag |= BO_DEAD; 3758 BO_UNLOCK(&vp->v_bufobj); 3759 3760 /* 3761 * Handle the VM part. Tmpfs handles v_object on its own (the 3762 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3763 * should not touch the object borrowed from the lower vnode 3764 * (the handle check). 3765 */ 3766 if (object != NULL && object->type == OBJT_VNODE && 3767 object->handle == vp) 3768 vnode_destroy_vobject(vp); 3769 3770 /* 3771 * Reclaim the vnode. 3772 */ 3773 if (VOP_RECLAIM(vp, td)) 3774 panic("vgone: cannot reclaim"); 3775 if (mp != NULL) 3776 vn_finished_secondary_write(mp); 3777 VNASSERT(vp->v_object == NULL, vp, 3778 ("vop_reclaim left v_object vp=%p", vp)); 3779 /* 3780 * Clear the advisory locks and wake up waiting threads. 3781 */ 3782 (void)VOP_ADVLOCKPURGE(vp); 3783 vp->v_lockf = NULL; 3784 /* 3785 * Delete from old mount point vnode list. 3786 */ 3787 delmntque(vp); 3788 cache_purge(vp); 3789 /* 3790 * Done with purge, reset to the standard lock and invalidate 3791 * the vnode. 3792 */ 3793 VI_LOCK(vp); 3794 vp->v_vnlock = &vp->v_lock; 3795 vp->v_op = &dead_vnodeops; 3796 vp->v_type = VBAD; 3797 } 3798 3799 /* 3800 * Calculate the total number of references to a special device. 3801 */ 3802 int 3803 vcount(struct vnode *vp) 3804 { 3805 int count; 3806 3807 dev_lock(); 3808 count = vp->v_rdev->si_usecount; 3809 dev_unlock(); 3810 return (count); 3811 } 3812 3813 /* 3814 * Print out a description of a vnode. 3815 */ 3816 static char *typename[] = 3817 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3818 "VMARKER"}; 3819 3820 void 3821 vn_printf(struct vnode *vp, const char *fmt, ...) 
3822 { 3823 va_list ap; 3824 char buf[256], buf2[16]; 3825 u_long flags; 3826 3827 va_start(ap, fmt); 3828 vprintf(fmt, ap); 3829 va_end(ap); 3830 printf("%p: ", (void *)vp); 3831 printf("type %s\n", typename[vp->v_type]); 3832 printf(" usecount %d, writecount %d, refcount %d", 3833 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3834 switch (vp->v_type) { 3835 case VDIR: 3836 printf(" mountedhere %p\n", vp->v_mountedhere); 3837 break; 3838 case VCHR: 3839 printf(" rdev %p\n", vp->v_rdev); 3840 break; 3841 case VSOCK: 3842 printf(" socket %p\n", vp->v_unpcb); 3843 break; 3844 case VFIFO: 3845 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3846 break; 3847 default: 3848 printf("\n"); 3849 break; 3850 } 3851 buf[0] = '\0'; 3852 buf[1] = '\0'; 3853 if (vp->v_irflag & VIRF_DOOMED) 3854 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 3855 flags = vp->v_irflag & ~(VIRF_DOOMED); 3856 if (flags != 0) { 3857 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 3858 strlcat(buf, buf2, sizeof(buf)); 3859 } 3860 if (vp->v_vflag & VV_ROOT) 3861 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3862 if (vp->v_vflag & VV_ISTTY) 3863 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3864 if (vp->v_vflag & VV_NOSYNC) 3865 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3866 if (vp->v_vflag & VV_ETERNALDEV) 3867 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3868 if (vp->v_vflag & VV_CACHEDLABEL) 3869 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3870 if (vp->v_vflag & VV_VMSIZEVNLOCK) 3871 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 3872 if (vp->v_vflag & VV_COPYONWRITE) 3873 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3874 if (vp->v_vflag & VV_SYSTEM) 3875 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3876 if (vp->v_vflag & VV_PROCDEP) 3877 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3878 if (vp->v_vflag & VV_NOKNOTE) 3879 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3880 if (vp->v_vflag & VV_DELETED) 3881 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3882 if (vp->v_vflag & VV_MD) 3883 strlcat(buf, "|VV_MD", sizeof(buf)); 3884 if (vp->v_vflag & VV_FORCEINSMQ) 3885 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3886 if (vp->v_vflag & VV_READLINK) 3887 strlcat(buf, "|VV_READLINK", sizeof(buf)); 3888 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3889 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3890 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3891 if (flags != 0) { 3892 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3893 strlcat(buf, buf2, sizeof(buf)); 3894 } 3895 if (vp->v_iflag & VI_TEXT_REF) 3896 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 3897 if (vp->v_iflag & VI_MOUNT) 3898 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3899 if (vp->v_iflag & VI_DOINGINACT) 3900 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3901 if (vp->v_iflag & VI_OWEINACT) 3902 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3903 if (vp->v_iflag & VI_DEFINACT) 3904 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 3905 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 3906 VI_OWEINACT | VI_DEFINACT); 3907 if (flags != 0) { 3908 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3909 strlcat(buf, buf2, sizeof(buf)); 3910 } 3911 if (vp->v_mflag & VMP_LAZYLIST) 3912 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 3913 flags = vp->v_mflag & ~(VMP_LAZYLIST); 3914 if (flags != 0) { 3915 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 3916 strlcat(buf, buf2, sizeof(buf)); 3917 } 3918 printf(" flags (%s)\n", buf + 1); 3919 if (mtx_owned(VI_MTX(vp))) 3920 printf(" VI_LOCKed"); 3921 if (vp->v_object != NULL) 3922 printf(" v_object %p ref %d 
pages %d " 3923 "cleanbuf %d dirtybuf %d\n", 3924 vp->v_object, vp->v_object->ref_count, 3925 vp->v_object->resident_page_count, 3926 vp->v_bufobj.bo_clean.bv_cnt, 3927 vp->v_bufobj.bo_dirty.bv_cnt); 3928 printf(" "); 3929 lockmgr_printinfo(vp->v_vnlock); 3930 if (vp->v_data != NULL) 3931 VOP_PRINT(vp); 3932 } 3933 3934 #ifdef DDB 3935 /* 3936 * List all of the locked vnodes in the system. 3937 * Called when debugging the kernel. 3938 */ 3939 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3940 { 3941 struct mount *mp; 3942 struct vnode *vp; 3943 3944 /* 3945 * Note: because this is DDB, we can't obey the locking semantics 3946 * for these structures, which means we could catch an inconsistent 3947 * state and dereference a nasty pointer. Not much to be done 3948 * about that. 3949 */ 3950 db_printf("Locked vnodes\n"); 3951 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3952 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3953 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3954 vn_printf(vp, "vnode "); 3955 } 3956 } 3957 } 3958 3959 /* 3960 * Show details about the given vnode. 3961 */ 3962 DB_SHOW_COMMAND(vnode, db_show_vnode) 3963 { 3964 struct vnode *vp; 3965 3966 if (!have_addr) 3967 return; 3968 vp = (struct vnode *)addr; 3969 vn_printf(vp, "vnode "); 3970 } 3971 3972 /* 3973 * Show details about the given mount point. 3974 */ 3975 DB_SHOW_COMMAND(mount, db_show_mount) 3976 { 3977 struct mount *mp; 3978 struct vfsopt *opt; 3979 struct statfs *sp; 3980 struct vnode *vp; 3981 char buf[512]; 3982 uint64_t mflags; 3983 u_int flags; 3984 3985 if (!have_addr) { 3986 /* No address given, print short info about all mount points. */ 3987 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3988 db_printf("%p %s on %s (%s)\n", mp, 3989 mp->mnt_stat.f_mntfromname, 3990 mp->mnt_stat.f_mntonname, 3991 mp->mnt_stat.f_fstypename); 3992 if (db_pager_quit) 3993 break; 3994 } 3995 db_printf("\nMore info: show mount <addr>\n"); 3996 return; 3997 } 3998 3999 mp = (struct mount *)addr; 4000 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4001 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4002 4003 buf[0] = '\0'; 4004 mflags = mp->mnt_flag; 4005 #define MNT_FLAG(flag) do { \ 4006 if (mflags & (flag)) { \ 4007 if (buf[0] != '\0') \ 4008 strlcat(buf, ", ", sizeof(buf)); \ 4009 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4010 mflags &= ~(flag); \ 4011 } \ 4012 } while (0) 4013 MNT_FLAG(MNT_RDONLY); 4014 MNT_FLAG(MNT_SYNCHRONOUS); 4015 MNT_FLAG(MNT_NOEXEC); 4016 MNT_FLAG(MNT_NOSUID); 4017 MNT_FLAG(MNT_NFS4ACLS); 4018 MNT_FLAG(MNT_UNION); 4019 MNT_FLAG(MNT_ASYNC); 4020 MNT_FLAG(MNT_SUIDDIR); 4021 MNT_FLAG(MNT_SOFTDEP); 4022 MNT_FLAG(MNT_NOSYMFOLLOW); 4023 MNT_FLAG(MNT_GJOURNAL); 4024 MNT_FLAG(MNT_MULTILABEL); 4025 MNT_FLAG(MNT_ACLS); 4026 MNT_FLAG(MNT_NOATIME); 4027 MNT_FLAG(MNT_NOCLUSTERR); 4028 MNT_FLAG(MNT_NOCLUSTERW); 4029 MNT_FLAG(MNT_SUJ); 4030 MNT_FLAG(MNT_EXRDONLY); 4031 MNT_FLAG(MNT_EXPORTED); 4032 MNT_FLAG(MNT_DEFEXPORTED); 4033 MNT_FLAG(MNT_EXPORTANON); 4034 MNT_FLAG(MNT_EXKERB); 4035 MNT_FLAG(MNT_EXPUBLIC); 4036 MNT_FLAG(MNT_LOCAL); 4037 MNT_FLAG(MNT_QUOTA); 4038 MNT_FLAG(MNT_ROOTFS); 4039 MNT_FLAG(MNT_USER); 4040 MNT_FLAG(MNT_IGNORE); 4041 MNT_FLAG(MNT_UPDATE); 4042 MNT_FLAG(MNT_DELEXPORT); 4043 MNT_FLAG(MNT_RELOAD); 4044 MNT_FLAG(MNT_FORCE); 4045 MNT_FLAG(MNT_SNAPSHOT); 4046 MNT_FLAG(MNT_BYFSID); 4047 #undef MNT_FLAG 4048 if (mflags != 0) { 4049 if (buf[0] != '\0') 4050 strlcat(buf, ", ", sizeof(buf)); 4051 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4052 "0x%016jx", mflags); 
4053 } 4054 db_printf(" mnt_flag = %s\n", buf); 4055 4056 buf[0] = '\0'; 4057 flags = mp->mnt_kern_flag; 4058 #define MNT_KERN_FLAG(flag) do { \ 4059 if (flags & (flag)) { \ 4060 if (buf[0] != '\0') \ 4061 strlcat(buf, ", ", sizeof(buf)); \ 4062 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4063 flags &= ~(flag); \ 4064 } \ 4065 } while (0) 4066 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4067 MNT_KERN_FLAG(MNTK_ASYNC); 4068 MNT_KERN_FLAG(MNTK_SOFTDEP); 4069 MNT_KERN_FLAG(MNTK_DRAINING); 4070 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4071 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4072 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4073 MNT_KERN_FLAG(MNTK_NO_IOPF); 4074 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4075 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4076 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4077 MNT_KERN_FLAG(MNTK_MARKER); 4078 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4079 MNT_KERN_FLAG(MNTK_NOASYNC); 4080 MNT_KERN_FLAG(MNTK_UNMOUNT); 4081 MNT_KERN_FLAG(MNTK_MWAIT); 4082 MNT_KERN_FLAG(MNTK_SUSPEND); 4083 MNT_KERN_FLAG(MNTK_SUSPEND2); 4084 MNT_KERN_FLAG(MNTK_SUSPENDED); 4085 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4086 MNT_KERN_FLAG(MNTK_NOKNOTE); 4087 #undef MNT_KERN_FLAG 4088 if (flags != 0) { 4089 if (buf[0] != '\0') 4090 strlcat(buf, ", ", sizeof(buf)); 4091 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4092 "0x%08x", flags); 4093 } 4094 db_printf(" mnt_kern_flag = %s\n", buf); 4095 4096 db_printf(" mnt_opt = "); 4097 opt = TAILQ_FIRST(mp->mnt_opt); 4098 if (opt != NULL) { 4099 db_printf("%s", opt->name); 4100 opt = TAILQ_NEXT(opt, link); 4101 while (opt != NULL) { 4102 db_printf(", %s", opt->name); 4103 opt = TAILQ_NEXT(opt, link); 4104 } 4105 } 4106 db_printf("\n"); 4107 4108 sp = &mp->mnt_stat; 4109 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4110 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4111 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4112 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4113 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4114 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4115 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4116 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4117 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4118 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4119 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4120 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4121 4122 db_printf(" mnt_cred = { uid=%u ruid=%u", 4123 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4124 if (jailed(mp->mnt_cred)) 4125 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4126 db_printf(" }\n"); 4127 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4128 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4129 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4130 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4131 db_printf(" mnt_lazyvnodelistsize = %d\n", 4132 mp->mnt_lazyvnodelistsize); 4133 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4134 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4135 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4136 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4137 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4138 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4139 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4140 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4141 db_printf(" mnt_secondary_accwrites = 
%d\n", 4142 mp->mnt_secondary_accwrites); 4143 db_printf(" mnt_gjprovider = %s\n", 4144 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4145 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4146 4147 db_printf("\n\nList of active vnodes\n"); 4148 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4149 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4150 vn_printf(vp, "vnode "); 4151 if (db_pager_quit) 4152 break; 4153 } 4154 } 4155 db_printf("\n\nList of inactive vnodes\n"); 4156 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4157 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4158 vn_printf(vp, "vnode "); 4159 if (db_pager_quit) 4160 break; 4161 } 4162 } 4163 } 4164 #endif /* DDB */ 4165 4166 /* 4167 * Fill in a struct xvfsconf based on a struct vfsconf. 4168 */ 4169 static int 4170 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4171 { 4172 struct xvfsconf xvfsp; 4173 4174 bzero(&xvfsp, sizeof(xvfsp)); 4175 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4176 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4177 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4178 xvfsp.vfc_flags = vfsp->vfc_flags; 4179 /* 4180 * These are unused in userland, we keep them 4181 * to not break binary compatibility. 4182 */ 4183 xvfsp.vfc_vfsops = NULL; 4184 xvfsp.vfc_next = NULL; 4185 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4186 } 4187 4188 #ifdef COMPAT_FREEBSD32 4189 struct xvfsconf32 { 4190 uint32_t vfc_vfsops; 4191 char vfc_name[MFSNAMELEN]; 4192 int32_t vfc_typenum; 4193 int32_t vfc_refcount; 4194 int32_t vfc_flags; 4195 uint32_t vfc_next; 4196 }; 4197 4198 static int 4199 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4200 { 4201 struct xvfsconf32 xvfsp; 4202 4203 bzero(&xvfsp, sizeof(xvfsp)); 4204 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4205 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4206 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4207 xvfsp.vfc_flags = vfsp->vfc_flags; 4208 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4209 } 4210 #endif 4211 4212 /* 4213 * Top level filesystem related information gathering. 4214 */ 4215 static int 4216 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4217 { 4218 struct vfsconf *vfsp; 4219 int error; 4220 4221 error = 0; 4222 vfsconf_slock(); 4223 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4224 #ifdef COMPAT_FREEBSD32 4225 if (req->flags & SCTL_MASK32) 4226 error = vfsconf2x32(req, vfsp); 4227 else 4228 #endif 4229 error = vfsconf2x(req, vfsp); 4230 if (error) 4231 break; 4232 } 4233 vfsconf_sunlock(); 4234 return (error); 4235 } 4236 4237 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4238 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4239 "S,xvfsconf", "List of all configured filesystems"); 4240 4241 #ifndef BURN_BRIDGES 4242 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4243 4244 static int 4245 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4246 { 4247 int *name = (int *)arg1 - 1; /* XXX */ 4248 u_int namelen = arg2 + 1; /* XXX */ 4249 struct vfsconf *vfsp; 4250 4251 log(LOG_WARNING, "userland calling deprecated sysctl, " 4252 "please rebuild world\n"); 4253 4254 #if 1 || defined(COMPAT_PRELITE2) 4255 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4256 if (namelen == 1) 4257 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4258 #endif 4259 4260 switch (name[1]) { 4261 case VFS_MAXTYPENUM: 4262 if (namelen != 2) 4263 return (ENOTDIR); 4264 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4265 case VFS_CONF: 4266 if (namelen != 3) 4267 return (ENOTDIR); /* overloaded */ 4268 vfsconf_slock(); 4269 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4270 if (vfsp->vfc_typenum == name[2]) 4271 break; 4272 } 4273 vfsconf_sunlock(); 4274 if (vfsp == NULL) 4275 return (EOPNOTSUPP); 4276 #ifdef COMPAT_FREEBSD32 4277 if (req->flags & SCTL_MASK32) 4278 return (vfsconf2x32(req, vfsp)); 4279 else 4280 #endif 4281 return (vfsconf2x(req, vfsp)); 4282 } 4283 return (EOPNOTSUPP); 4284 } 4285 4286 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4287 CTLFLAG_MPSAFE, vfs_sysctl, 4288 "Generic filesystem"); 4289 4290 #if 1 || defined(COMPAT_PRELITE2) 4291 4292 static int 4293 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4294 { 4295 int error; 4296 struct vfsconf *vfsp; 4297 struct ovfsconf ovfs; 4298 4299 vfsconf_slock(); 4300 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4301 bzero(&ovfs, sizeof(ovfs)); 4302 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4303 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4304 ovfs.vfc_index = vfsp->vfc_typenum; 4305 ovfs.vfc_refcount = vfsp->vfc_refcount; 4306 ovfs.vfc_flags = vfsp->vfc_flags; 4307 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4308 if (error != 0) { 4309 vfsconf_sunlock(); 4310 return (error); 4311 } 4312 } 4313 vfsconf_sunlock(); 4314 return (0); 4315 } 4316 4317 #endif /* 1 || COMPAT_PRELITE2 */ 4318 #endif /* !BURN_BRIDGES */ 4319 4320 #define KINFO_VNODESLOP 10 4321 #ifdef notyet 4322 /* 4323 * Dump vnode list (via sysctl). 4324 */ 4325 /* ARGSUSED */ 4326 static int 4327 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4328 { 4329 struct xvnode *xvn; 4330 struct mount *mp; 4331 struct vnode *vp; 4332 int error, len, n; 4333 4334 /* 4335 * Stale numvnodes access is not fatal here. 4336 */ 4337 req->lock = 0; 4338 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4339 if (!req->oldptr) 4340 /* Make an estimate */ 4341 return (SYSCTL_OUT(req, 0, len)); 4342 4343 error = sysctl_wire_old_buffer(req, 0); 4344 if (error != 0) 4345 return (error); 4346 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4347 n = 0; 4348 mtx_lock(&mountlist_mtx); 4349 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4350 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4351 continue; 4352 MNT_ILOCK(mp); 4353 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4354 if (n == len) 4355 break; 4356 vref(vp); 4357 xvn[n].xv_size = sizeof *xvn; 4358 xvn[n].xv_vnode = vp; 4359 xvn[n].xv_id = 0; /* XXX compat */ 4360 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4361 XV_COPY(usecount); 4362 XV_COPY(writecount); 4363 XV_COPY(holdcnt); 4364 XV_COPY(mount); 4365 XV_COPY(numoutput); 4366 XV_COPY(type); 4367 #undef XV_COPY 4368 xvn[n].xv_flag = vp->v_vflag; 4369 4370 switch (vp->v_type) { 4371 case VREG: 4372 case VDIR: 4373 case VLNK: 4374 break; 4375 case VBLK: 4376 case VCHR: 4377 if (vp->v_rdev == NULL) { 4378 vrele(vp); 4379 continue; 4380 } 4381 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4382 break; 4383 case VSOCK: 4384 xvn[n].xv_socket = vp->v_socket; 4385 break; 4386 case VFIFO: 4387 xvn[n].xv_fifo = vp->v_fifoinfo; 4388 break; 4389 case VNON: 4390 case VBAD: 4391 default: 4392 /* shouldn't happen? 
*/ 4393 vrele(vp); 4394 continue; 4395 } 4396 vrele(vp); 4397 ++n; 4398 } 4399 MNT_IUNLOCK(mp); 4400 mtx_lock(&mountlist_mtx); 4401 vfs_unbusy(mp); 4402 if (n == len) 4403 break; 4404 } 4405 mtx_unlock(&mountlist_mtx); 4406 4407 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4408 free(xvn, M_TEMP); 4409 return (error); 4410 } 4411 4412 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4413 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4414 ""); 4415 #endif 4416 4417 static void 4418 unmount_or_warn(struct mount *mp) 4419 { 4420 int error; 4421 4422 error = dounmount(mp, MNT_FORCE, curthread); 4423 if (error != 0) { 4424 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4425 if (error == EBUSY) 4426 printf("BUSY)\n"); 4427 else 4428 printf("%d)\n", error); 4429 } 4430 } 4431 4432 /* 4433 * Unmount all filesystems. The list is traversed in reverse order 4434 * of mounting to avoid dependencies. 4435 */ 4436 void 4437 vfs_unmountall(void) 4438 { 4439 struct mount *mp, *tmp; 4440 4441 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4442 4443 /* 4444 * Since this only runs when rebooting, it is not interlocked. 4445 */ 4446 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4447 vfs_ref(mp); 4448 4449 /* 4450 * Forcibly unmounting "/dev" before "/" would prevent clean 4451 * unmount of the latter. 4452 */ 4453 if (mp == rootdevmp) 4454 continue; 4455 4456 unmount_or_warn(mp); 4457 } 4458 4459 if (rootdevmp != NULL) 4460 unmount_or_warn(rootdevmp); 4461 } 4462 4463 static void 4464 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4465 { 4466 4467 ASSERT_VI_LOCKED(vp, __func__); 4468 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4469 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4470 vdropl(vp); 4471 return; 4472 } 4473 if (vn_lock(vp, lkflags) == 0) { 4474 VI_LOCK(vp); 4475 if ((vp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == VI_OWEINACT) 4476 vinactive(vp); 4477 VOP_UNLOCK(vp); 4478 vdropl(vp); 4479 return; 4480 } 4481 vdefer_inactive_cond(vp); 4482 } 4483 4484 static int 4485 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4486 { 4487 4488 return (vp->v_iflag & VI_DEFINACT); 4489 } 4490 4491 static void __noinline 4492 vfs_periodic_inactive(struct mount *mp, int flags) 4493 { 4494 struct vnode *vp, *mvp; 4495 int lkflags; 4496 4497 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4498 if (flags != MNT_WAIT) 4499 lkflags |= LK_NOWAIT; 4500 4501 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4502 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4503 VI_UNLOCK(vp); 4504 continue; 4505 } 4506 vp->v_iflag &= ~VI_DEFINACT; 4507 vfs_deferred_inactive(vp, lkflags); 4508 } 4509 } 4510 4511 static inline bool 4512 vfs_want_msync(struct vnode *vp) 4513 { 4514 struct vm_object *obj; 4515 4516 /* 4517 * This test may be performed without any locks held. 4518 * We rely on vm_object's type stability. 
4519 */ 4520 if (vp->v_vflag & VV_NOSYNC) 4521 return (false); 4522 obj = vp->v_object; 4523 return (obj != NULL && vm_object_mightbedirty(obj)); 4524 } 4525 4526 static int 4527 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4528 { 4529 4530 if (vp->v_vflag & VV_NOSYNC) 4531 return (false); 4532 if (vp->v_iflag & VI_DEFINACT) 4533 return (true); 4534 return (vfs_want_msync(vp)); 4535 } 4536 4537 static void __noinline 4538 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4539 { 4540 struct vnode *vp, *mvp; 4541 struct vm_object *obj; 4542 struct thread *td; 4543 int lkflags, objflags; 4544 bool seen_defer; 4545 4546 td = curthread; 4547 4548 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4549 if (flags != MNT_WAIT) { 4550 lkflags |= LK_NOWAIT; 4551 objflags = OBJPC_NOSYNC; 4552 } else { 4553 objflags = OBJPC_SYNC; 4554 } 4555 4556 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4557 seen_defer = false; 4558 if (vp->v_iflag & VI_DEFINACT) { 4559 vp->v_iflag &= ~VI_DEFINACT; 4560 seen_defer = true; 4561 } 4562 if (!vfs_want_msync(vp)) { 4563 if (seen_defer) 4564 vfs_deferred_inactive(vp, lkflags); 4565 else 4566 VI_UNLOCK(vp); 4567 continue; 4568 } 4569 if (vget(vp, lkflags, td) == 0) { 4570 obj = vp->v_object; 4571 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4572 VM_OBJECT_WLOCK(obj); 4573 vm_object_page_clean(obj, 0, 0, objflags); 4574 VM_OBJECT_WUNLOCK(obj); 4575 } 4576 vput(vp); 4577 if (seen_defer) 4578 vdrop(vp); 4579 } else { 4580 if (seen_defer) 4581 vdefer_inactive_cond(vp); 4582 } 4583 } 4584 } 4585 4586 void 4587 vfs_periodic(struct mount *mp, int flags) 4588 { 4589 4590 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4591 4592 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4593 vfs_periodic_inactive(mp, flags); 4594 else 4595 vfs_periodic_msync_inactive(mp, flags); 4596 } 4597 4598 static void 4599 destroy_vpollinfo_free(struct vpollinfo *vi) 4600 { 4601 4602 knlist_destroy(&vi->vpi_selinfo.si_note); 4603 mtx_destroy(&vi->vpi_lock); 4604 uma_zfree(vnodepoll_zone, vi); 4605 } 4606 4607 static void 4608 destroy_vpollinfo(struct vpollinfo *vi) 4609 { 4610 4611 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4612 seldrain(&vi->vpi_selinfo); 4613 destroy_vpollinfo_free(vi); 4614 } 4615 4616 /* 4617 * Initialize per-vnode helper structure to hold poll-related state. 4618 */ 4619 void 4620 v_addpollinfo(struct vnode *vp) 4621 { 4622 struct vpollinfo *vi; 4623 4624 if (vp->v_pollinfo != NULL) 4625 return; 4626 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4627 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4628 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4629 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4630 VI_LOCK(vp); 4631 if (vp->v_pollinfo != NULL) { 4632 VI_UNLOCK(vp); 4633 destroy_vpollinfo_free(vi); 4634 return; 4635 } 4636 vp->v_pollinfo = vi; 4637 VI_UNLOCK(vp); 4638 } 4639 4640 /* 4641 * Record a process's interest in events which might happen to 4642 * a vnode. Because poll uses the historic select-style interface 4643 * internally, this routine serves as both the ``check for any 4644 * pending events'' and the ``record my interest in future events'' 4645 * functions. (These are done together, while the lock is held, 4646 * to avoid race conditions.) 
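 *
 * As a rough illustration (hypothetical "xxxfs_poll"; not taken from this
 * file), a filesystem that supports poll notifications could implement its
 * VOP_POLL directly on top of this routine:
 *
 *	static int
 *	xxxfs_poll(struct vop_poll_args *ap)
 *	{
 *
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}
 *
 * Events already noted in vpi_revents are consumed and returned at once;
 * otherwise the interest is recorded and reported by a later wakeup.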
4647 */ 4648 int 4649 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4650 { 4651 4652 v_addpollinfo(vp); 4653 mtx_lock(&vp->v_pollinfo->vpi_lock); 4654 if (vp->v_pollinfo->vpi_revents & events) { 4655 /* 4656 * This leaves events we are not interested 4657 * in available for the other process which 4658 * which presumably had requested them 4659 * (otherwise they would never have been 4660 * recorded). 4661 */ 4662 events &= vp->v_pollinfo->vpi_revents; 4663 vp->v_pollinfo->vpi_revents &= ~events; 4664 4665 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4666 return (events); 4667 } 4668 vp->v_pollinfo->vpi_events |= events; 4669 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4670 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4671 return (0); 4672 } 4673 4674 /* 4675 * Routine to create and manage a filesystem syncer vnode. 4676 */ 4677 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4678 static int sync_fsync(struct vop_fsync_args *); 4679 static int sync_inactive(struct vop_inactive_args *); 4680 static int sync_reclaim(struct vop_reclaim_args *); 4681 4682 static struct vop_vector sync_vnodeops = { 4683 .vop_bypass = VOP_EOPNOTSUPP, 4684 .vop_close = sync_close, /* close */ 4685 .vop_fsync = sync_fsync, /* fsync */ 4686 .vop_inactive = sync_inactive, /* inactive */ 4687 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4688 .vop_reclaim = sync_reclaim, /* reclaim */ 4689 .vop_lock1 = vop_stdlock, /* lock */ 4690 .vop_unlock = vop_stdunlock, /* unlock */ 4691 .vop_islocked = vop_stdislocked, /* islocked */ 4692 }; 4693 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4694 4695 /* 4696 * Create a new filesystem syncer vnode for the specified mount point. 4697 */ 4698 void 4699 vfs_allocate_syncvnode(struct mount *mp) 4700 { 4701 struct vnode *vp; 4702 struct bufobj *bo; 4703 static long start, incr, next; 4704 int error; 4705 4706 /* Allocate a new vnode */ 4707 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4708 if (error != 0) 4709 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4710 vp->v_type = VNON; 4711 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4712 vp->v_vflag |= VV_FORCEINSMQ; 4713 error = insmntque(vp, mp); 4714 if (error != 0) 4715 panic("vfs_allocate_syncvnode: insmntque() failed"); 4716 vp->v_vflag &= ~VV_FORCEINSMQ; 4717 VOP_UNLOCK(vp); 4718 /* 4719 * Place the vnode onto the syncer worklist. We attempt to 4720 * scatter them about on the list so that they will go off 4721 * at evenly distributed times even if all the filesystems 4722 * are mounted at once. 4723 */ 4724 next += incr; 4725 if (next == 0 || next > syncer_maxdelay) { 4726 start /= 2; 4727 incr /= 2; 4728 if (start == 0) { 4729 start = syncer_maxdelay / 2; 4730 incr = syncer_maxdelay; 4731 } 4732 next = start; 4733 } 4734 bo = &vp->v_bufobj; 4735 BO_LOCK(bo); 4736 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4737 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. 
*/ 4738 mtx_lock(&sync_mtx); 4739 sync_vnode_count++; 4740 if (mp->mnt_syncer == NULL) { 4741 mp->mnt_syncer = vp; 4742 vp = NULL; 4743 } 4744 mtx_unlock(&sync_mtx); 4745 BO_UNLOCK(bo); 4746 if (vp != NULL) { 4747 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4748 vgone(vp); 4749 vput(vp); 4750 } 4751 } 4752 4753 void 4754 vfs_deallocate_syncvnode(struct mount *mp) 4755 { 4756 struct vnode *vp; 4757 4758 mtx_lock(&sync_mtx); 4759 vp = mp->mnt_syncer; 4760 if (vp != NULL) 4761 mp->mnt_syncer = NULL; 4762 mtx_unlock(&sync_mtx); 4763 if (vp != NULL) 4764 vrele(vp); 4765 } 4766 4767 /* 4768 * Do a lazy sync of the filesystem. 4769 */ 4770 static int 4771 sync_fsync(struct vop_fsync_args *ap) 4772 { 4773 struct vnode *syncvp = ap->a_vp; 4774 struct mount *mp = syncvp->v_mount; 4775 int error, save; 4776 struct bufobj *bo; 4777 4778 /* 4779 * We only need to do something if this is a lazy evaluation. 4780 */ 4781 if (ap->a_waitfor != MNT_LAZY) 4782 return (0); 4783 4784 /* 4785 * Move ourselves to the back of the sync list. 4786 */ 4787 bo = &syncvp->v_bufobj; 4788 BO_LOCK(bo); 4789 vn_syncer_add_to_worklist(bo, syncdelay); 4790 BO_UNLOCK(bo); 4791 4792 /* 4793 * Walk the list of vnodes pushing all that are dirty and 4794 * not already on the sync list. 4795 */ 4796 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4797 return (0); 4798 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4799 vfs_unbusy(mp); 4800 return (0); 4801 } 4802 save = curthread_pflags_set(TDP_SYNCIO); 4803 /* 4804 * The filesystem at hand may be idle with free vnodes stored in the 4805 * batch. Return them instead of letting them stay there indefinitely. 4806 */ 4807 vfs_periodic(mp, MNT_NOWAIT); 4808 error = VFS_SYNC(mp, MNT_LAZY); 4809 curthread_pflags_restore(save); 4810 vn_finished_write(mp); 4811 vfs_unbusy(mp); 4812 return (error); 4813 } 4814 4815 /* 4816 * The syncer vnode is no longer referenced. 4817 */ 4818 static int 4819 sync_inactive(struct vop_inactive_args *ap) 4820 { 4821 4822 vgone(ap->a_vp); 4823 return (0); 4824 } 4825 4826 /* 4827 * The syncer vnode is no longer needed and is being decommissioned. 4828 * 4829 * Modifications to the worklist must be protected by sync_mtx.
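 *
 * As in the function body below, the bufobj lock is taken before sync_mtx,
 * matching the order used when the vnode was put on the worklist by
 * vn_syncer_add_to_worklist().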
4830 */ 4831 static int 4832 sync_reclaim(struct vop_reclaim_args *ap) 4833 { 4834 struct vnode *vp = ap->a_vp; 4835 struct bufobj *bo; 4836 4837 bo = &vp->v_bufobj; 4838 BO_LOCK(bo); 4839 mtx_lock(&sync_mtx); 4840 if (vp->v_mount->mnt_syncer == vp) 4841 vp->v_mount->mnt_syncer = NULL; 4842 if (bo->bo_flag & BO_ONWORKLST) { 4843 LIST_REMOVE(bo, bo_synclist); 4844 syncer_worklist_len--; 4845 sync_vnode_count--; 4846 bo->bo_flag &= ~BO_ONWORKLST; 4847 } 4848 mtx_unlock(&sync_mtx); 4849 BO_UNLOCK(bo); 4850 4851 return (0); 4852 } 4853 4854 int 4855 vn_need_pageq_flush(struct vnode *vp) 4856 { 4857 struct vm_object *obj; 4858 int need; 4859 4860 MPASS(mtx_owned(VI_MTX(vp))); 4861 need = 0; 4862 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4863 vm_object_mightbedirty(obj)) 4864 need = 1; 4865 return (need); 4866 } 4867 4868 /* 4869 * Check if vnode represents a disk device 4870 */ 4871 int 4872 vn_isdisk(struct vnode *vp, int *errp) 4873 { 4874 int error; 4875 4876 if (vp->v_type != VCHR) { 4877 error = ENOTBLK; 4878 goto out; 4879 } 4880 error = 0; 4881 dev_lock(); 4882 if (vp->v_rdev == NULL) 4883 error = ENXIO; 4884 else if (vp->v_rdev->si_devsw == NULL) 4885 error = ENXIO; 4886 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4887 error = ENOTBLK; 4888 dev_unlock(); 4889 out: 4890 if (errp != NULL) 4891 *errp = error; 4892 return (error == 0); 4893 } 4894 4895 /* 4896 * Common filesystem object access control check routine. Accepts a 4897 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4898 * and optional call-by-reference privused argument allowing vaccess() 4899 * to indicate to the caller whether privilege was used to satisfy the 4900 * request (obsoleted). Returns 0 on success, or an errno on failure. 4901 */ 4902 int 4903 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4904 accmode_t accmode, struct ucred *cred, int *privused) 4905 { 4906 accmode_t dac_granted; 4907 accmode_t priv_granted; 4908 4909 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4910 ("invalid bit in accmode")); 4911 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4912 ("VAPPEND without VWRITE")); 4913 4914 /* 4915 * Look for a normal, non-privileged way to access the file/directory 4916 * as requested. If it exists, go with that. 4917 */ 4918 4919 if (privused != NULL) 4920 *privused = 0; 4921 4922 dac_granted = 0; 4923 4924 /* Check the owner. */ 4925 if (cred->cr_uid == file_uid) { 4926 dac_granted |= VADMIN; 4927 if (file_mode & S_IXUSR) 4928 dac_granted |= VEXEC; 4929 if (file_mode & S_IRUSR) 4930 dac_granted |= VREAD; 4931 if (file_mode & S_IWUSR) 4932 dac_granted |= (VWRITE | VAPPEND); 4933 4934 if ((accmode & dac_granted) == accmode) 4935 return (0); 4936 4937 goto privcheck; 4938 } 4939 4940 /* Otherwise, check the groups (first match) */ 4941 if (groupmember(file_gid, cred)) { 4942 if (file_mode & S_IXGRP) 4943 dac_granted |= VEXEC; 4944 if (file_mode & S_IRGRP) 4945 dac_granted |= VREAD; 4946 if (file_mode & S_IWGRP) 4947 dac_granted |= (VWRITE | VAPPEND); 4948 4949 if ((accmode & dac_granted) == accmode) 4950 return (0); 4951 4952 goto privcheck; 4953 } 4954 4955 /* Otherwise, check everyone else. 
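 *
 * Worked example (illustrative): for a mode 0644 file, a caller that is
 * neither the owner nor a group member and asks for VREAD | VWRITE collects
 * only VREAD from the "other" bits, so the request falls through to
 * privcheck below and succeeds only if PRIV_VFS_WRITE is granted.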
*/ 4956 if (file_mode & S_IXOTH) 4957 dac_granted |= VEXEC; 4958 if (file_mode & S_IROTH) 4959 dac_granted |= VREAD; 4960 if (file_mode & S_IWOTH) 4961 dac_granted |= (VWRITE | VAPPEND); 4962 if ((accmode & dac_granted) == accmode) 4963 return (0); 4964 4965 privcheck: 4966 /* 4967 * Build a privilege mask to determine if the set of privileges 4968 * satisfies the requirements when combined with the granted mask 4969 * from above. For each privilege, if the privilege is required, 4970 * bitwise or the request type onto the priv_granted mask. 4971 */ 4972 priv_granted = 0; 4973 4974 if (type == VDIR) { 4975 /* 4976 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4977 * requests, instead of PRIV_VFS_EXEC. 4978 */ 4979 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4980 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4981 priv_granted |= VEXEC; 4982 } else { 4983 /* 4984 * Ensure that at least one execute bit is on. Otherwise, 4985 * a privileged user will always succeed, and we don't want 4986 * this to happen unless the file really is executable. 4987 */ 4988 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4989 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4990 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4991 priv_granted |= VEXEC; 4992 } 4993 4994 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4995 !priv_check_cred(cred, PRIV_VFS_READ)) 4996 priv_granted |= VREAD; 4997 4998 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4999 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5000 priv_granted |= (VWRITE | VAPPEND); 5001 5002 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5003 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5004 priv_granted |= VADMIN; 5005 5006 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5007 /* XXX audit: privilege used */ 5008 if (privused != NULL) 5009 *privused = 1; 5010 return (0); 5011 } 5012 5013 return ((accmode & VADMIN) ? EPERM : EACCES); 5014 } 5015 5016 /* 5017 * Credential check based on process requesting service, and per-attribute 5018 * permissions. 5019 */ 5020 int 5021 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5022 struct thread *td, accmode_t accmode) 5023 { 5024 5025 /* 5026 * Kernel-invoked always succeeds. 5027 */ 5028 if (cred == NOCRED) 5029 return (0); 5030 5031 /* 5032 * Do not allow privileged processes in jail to directly manipulate 5033 * system attributes. 5034 */ 5035 switch (attrnamespace) { 5036 case EXTATTR_NAMESPACE_SYSTEM: 5037 /* Potentially should be: return (EPERM); */ 5038 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5039 case EXTATTR_NAMESPACE_USER: 5040 return (VOP_ACCESS(vp, accmode, cred, td)); 5041 default: 5042 return (EPERM); 5043 } 5044 } 5045 5046 #ifdef DEBUG_VFS_LOCKS 5047 /* 5048 * This only exists to suppress warnings from unlocked specfs accesses. It is 5049 * no longer ok to have an unlocked VFS. 5050 */ 5051 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5052 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5053 5054 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5055 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5056 "Drop into debugger on lock violation"); 5057 5058 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5059 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5060 0, "Check for interlock across VOPs"); 5061 5062 int vfs_badlock_print = 1; /* Print lock violations. 
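 *
 * All of the vfs_badlock_* knobs in this block can also be toggled at run
 * time through their debug.* sysctls, e.g. (illustrative usage):
 *	sysctl debug.vfs_badlock_print=0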
*/ 5063 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5064 0, "Print lock violations"); 5065 5066 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5067 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5068 0, "Print vnode details on lock violations"); 5069 5070 #ifdef KDB 5071 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5072 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5073 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5074 #endif 5075 5076 static void 5077 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5078 { 5079 5080 #ifdef KDB 5081 if (vfs_badlock_backtrace) 5082 kdb_backtrace(); 5083 #endif 5084 if (vfs_badlock_vnode) 5085 vn_printf(vp, "vnode "); 5086 if (vfs_badlock_print) 5087 printf("%s: %p %s\n", str, (void *)vp, msg); 5088 if (vfs_badlock_ddb) 5089 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5090 } 5091 5092 void 5093 assert_vi_locked(struct vnode *vp, const char *str) 5094 { 5095 5096 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5097 vfs_badlock("interlock is not locked but should be", str, vp); 5098 } 5099 5100 void 5101 assert_vi_unlocked(struct vnode *vp, const char *str) 5102 { 5103 5104 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5105 vfs_badlock("interlock is locked but should not be", str, vp); 5106 } 5107 5108 void 5109 assert_vop_locked(struct vnode *vp, const char *str) 5110 { 5111 int locked; 5112 5113 if (!IGNORE_LOCK(vp)) { 5114 locked = VOP_ISLOCKED(vp); 5115 if (locked == 0 || locked == LK_EXCLOTHER) 5116 vfs_badlock("is not locked but should be", str, vp); 5117 } 5118 } 5119 5120 void 5121 assert_vop_unlocked(struct vnode *vp, const char *str) 5122 { 5123 5124 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5125 vfs_badlock("is locked but should not be", str, vp); 5126 } 5127 5128 void 5129 assert_vop_elocked(struct vnode *vp, const char *str) 5130 { 5131 5132 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5133 vfs_badlock("is not exclusive locked but should be", str, vp); 5134 } 5135 #endif /* DEBUG_VFS_LOCKS */ 5136 5137 void 5138 vop_rename_fail(struct vop_rename_args *ap) 5139 { 5140 5141 if (ap->a_tvp != NULL) 5142 vput(ap->a_tvp); 5143 if (ap->a_tdvp == ap->a_tvp) 5144 vrele(ap->a_tdvp); 5145 else 5146 vput(ap->a_tdvp); 5147 vrele(ap->a_fdvp); 5148 vrele(ap->a_fvp); 5149 } 5150 5151 void 5152 vop_rename_pre(void *ap) 5153 { 5154 struct vop_rename_args *a = ap; 5155 5156 #ifdef DEBUG_VFS_LOCKS 5157 if (a->a_tvp) 5158 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5159 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5160 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5161 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5162 5163 /* Check the source (from). */ 5164 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5165 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5166 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5167 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5168 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5169 5170 /* Check the target. 
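 *
 * The target directory, and the target vnode when one exists, are expected
 * to be locked by the caller; the source vnodes are expected to be unlocked
 * unless they happen to share a lock with the target, which is what the
 * assertions here encode.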
*/ 5171 if (a->a_tvp) 5172 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5173 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5174 #endif 5175 if (a->a_tdvp != a->a_fdvp) 5176 vhold(a->a_fdvp); 5177 if (a->a_tvp != a->a_fvp) 5178 vhold(a->a_fvp); 5179 vhold(a->a_tdvp); 5180 if (a->a_tvp) 5181 vhold(a->a_tvp); 5182 } 5183 5184 #ifdef DEBUG_VFS_LOCKS 5185 void 5186 vop_strategy_pre(void *ap) 5187 { 5188 struct vop_strategy_args *a; 5189 struct buf *bp; 5190 5191 a = ap; 5192 bp = a->a_bp; 5193 5194 /* 5195 * Cluster ops lock their component buffers but not the IO container. 5196 */ 5197 if ((bp->b_flags & B_CLUSTER) != 0) 5198 return; 5199 5200 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5201 if (vfs_badlock_print) 5202 printf( 5203 "VOP_STRATEGY: bp is not locked but should be\n"); 5204 if (vfs_badlock_ddb) 5205 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5206 } 5207 } 5208 5209 void 5210 vop_lock_pre(void *ap) 5211 { 5212 struct vop_lock1_args *a = ap; 5213 5214 if ((a->a_flags & LK_INTERLOCK) == 0) 5215 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5216 else 5217 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5218 } 5219 5220 void 5221 vop_lock_post(void *ap, int rc) 5222 { 5223 struct vop_lock1_args *a = ap; 5224 5225 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5226 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5227 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5228 } 5229 5230 void 5231 vop_unlock_pre(void *ap) 5232 { 5233 struct vop_unlock_args *a = ap; 5234 5235 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5236 } 5237 5238 void 5239 vop_unlock_post(void *ap, int rc) 5240 { 5241 return; 5242 } 5243 5244 void 5245 vop_need_inactive_pre(void *ap) 5246 { 5247 struct vop_need_inactive_args *a = ap; 5248 5249 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5250 } 5251 5252 void 5253 vop_need_inactive_post(void *ap, int rc) 5254 { 5255 struct vop_need_inactive_args *a = ap; 5256 5257 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5258 } 5259 #endif 5260 5261 void 5262 vop_create_post(void *ap, int rc) 5263 { 5264 struct vop_create_args *a = ap; 5265 5266 if (!rc) 5267 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5268 } 5269 5270 void 5271 vop_deleteextattr_post(void *ap, int rc) 5272 { 5273 struct vop_deleteextattr_args *a = ap; 5274 5275 if (!rc) 5276 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5277 } 5278 5279 void 5280 vop_link_post(void *ap, int rc) 5281 { 5282 struct vop_link_args *a = ap; 5283 5284 if (!rc) { 5285 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5286 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5287 } 5288 } 5289 5290 void 5291 vop_mkdir_post(void *ap, int rc) 5292 { 5293 struct vop_mkdir_args *a = ap; 5294 5295 if (!rc) 5296 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5297 } 5298 5299 void 5300 vop_mknod_post(void *ap, int rc) 5301 { 5302 struct vop_mknod_args *a = ap; 5303 5304 if (!rc) 5305 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5306 } 5307 5308 void 5309 vop_reclaim_post(void *ap, int rc) 5310 { 5311 struct vop_reclaim_args *a = ap; 5312 5313 if (!rc) 5314 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5315 } 5316 5317 void 5318 vop_remove_post(void *ap, int rc) 5319 { 5320 struct vop_remove_args *a = ap; 5321 5322 if (!rc) { 5323 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5324 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5325 } 5326 } 5327 5328 void 5329 vop_rename_post(void *ap, int rc) 5330 { 5331 struct vop_rename_args *a = ap; 5332 long hint; 5333 5334 if (!rc) { 5335 hint = NOTE_WRITE; 5336 if (a->a_fdvp == a->a_tdvp) { 5337 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5338 
hint |= NOTE_LINK; 5339 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5340 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5341 } else { 5342 hint |= NOTE_EXTEND; 5343 if (a->a_fvp->v_type == VDIR) 5344 hint |= NOTE_LINK; 5345 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5346 5347 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5348 a->a_tvp->v_type == VDIR) 5349 hint &= ~NOTE_LINK; 5350 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5351 } 5352 5353 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5354 if (a->a_tvp) 5355 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5356 } 5357 if (a->a_tdvp != a->a_fdvp) 5358 vdrop(a->a_fdvp); 5359 if (a->a_tvp != a->a_fvp) 5360 vdrop(a->a_fvp); 5361 vdrop(a->a_tdvp); 5362 if (a->a_tvp) 5363 vdrop(a->a_tvp); 5364 } 5365 5366 void 5367 vop_rmdir_post(void *ap, int rc) 5368 { 5369 struct vop_rmdir_args *a = ap; 5370 5371 if (!rc) { 5372 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5373 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5374 } 5375 } 5376 5377 void 5378 vop_setattr_post(void *ap, int rc) 5379 { 5380 struct vop_setattr_args *a = ap; 5381 5382 if (!rc) 5383 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5384 } 5385 5386 void 5387 vop_setextattr_post(void *ap, int rc) 5388 { 5389 struct vop_setextattr_args *a = ap; 5390 5391 if (!rc) 5392 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5393 } 5394 5395 void 5396 vop_symlink_post(void *ap, int rc) 5397 { 5398 struct vop_symlink_args *a = ap; 5399 5400 if (!rc) 5401 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5402 } 5403 5404 void 5405 vop_open_post(void *ap, int rc) 5406 { 5407 struct vop_open_args *a = ap; 5408 5409 if (!rc) 5410 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5411 } 5412 5413 void 5414 vop_close_post(void *ap, int rc) 5415 { 5416 struct vop_close_args *a = ap; 5417 5418 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5419 !VN_IS_DOOMED(a->a_vp))) { 5420 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5421 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5422 } 5423 } 5424 5425 void 5426 vop_read_post(void *ap, int rc) 5427 { 5428 struct vop_read_args *a = ap; 5429 5430 if (!rc) 5431 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5432 } 5433 5434 void 5435 vop_readdir_post(void *ap, int rc) 5436 { 5437 struct vop_readdir_args *a = ap; 5438 5439 if (!rc) 5440 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5441 } 5442 5443 static struct knlist fs_knlist; 5444 5445 static void 5446 vfs_event_init(void *arg) 5447 { 5448 knlist_init_mtx(&fs_knlist, NULL); 5449 } 5450 /* XXX - correct order? 
*/ 5451 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5452 5453 void 5454 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5455 { 5456 5457 KNOTE_UNLOCKED(&fs_knlist, event); 5458 } 5459 5460 static int filt_fsattach(struct knote *kn); 5461 static void filt_fsdetach(struct knote *kn); 5462 static int filt_fsevent(struct knote *kn, long hint); 5463 5464 struct filterops fs_filtops = { 5465 .f_isfd = 0, 5466 .f_attach = filt_fsattach, 5467 .f_detach = filt_fsdetach, 5468 .f_event = filt_fsevent 5469 }; 5470 5471 static int 5472 filt_fsattach(struct knote *kn) 5473 { 5474 5475 kn->kn_flags |= EV_CLEAR; 5476 knlist_add(&fs_knlist, kn, 0); 5477 return (0); 5478 } 5479 5480 static void 5481 filt_fsdetach(struct knote *kn) 5482 { 5483 5484 knlist_remove(&fs_knlist, kn, 0); 5485 } 5486 5487 static int 5488 filt_fsevent(struct knote *kn, long hint) 5489 { 5490 5491 kn->kn_fflags |= hint; 5492 return (kn->kn_fflags != 0); 5493 } 5494 5495 static int 5496 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5497 { 5498 struct vfsidctl vc; 5499 int error; 5500 struct mount *mp; 5501 5502 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5503 if (error) 5504 return (error); 5505 if (vc.vc_vers != VFS_CTL_VERS1) 5506 return (EINVAL); 5507 mp = vfs_getvfs(&vc.vc_fsid); 5508 if (mp == NULL) 5509 return (ENOENT); 5510 /* ensure that a specific sysctl goes to the right filesystem. */ 5511 if (strcmp(vc.vc_fstypename, "*") != 0 && 5512 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5513 vfs_rel(mp); 5514 return (EINVAL); 5515 } 5516 VCTLTOREQ(&vc, req); 5517 error = VFS_SYSCTL(mp, vc.vc_op, req); 5518 vfs_rel(mp); 5519 return (error); 5520 } 5521 5522 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 5523 NULL, 0, sysctl_vfs_ctl, "", 5524 "Sysctl by fsid"); 5525 5526 /* 5527 * Function to initialize a va_filerev field sensibly. 5528 * XXX: Wouldn't a random number make a lot more sense ?? 
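 *
 * The value built below packs the boot-relative seconds into the upper
 * 32 bits and the high half of the binary fraction into the lower 32 bits,
 * so successive calls within one boot yield a non-decreasing value.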
5529 */ 5530 u_quad_t 5531 init_va_filerev(void) 5532 { 5533 struct bintime bt; 5534 5535 getbinuptime(&bt); 5536 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5537 } 5538 5539 static int filt_vfsread(struct knote *kn, long hint); 5540 static int filt_vfswrite(struct knote *kn, long hint); 5541 static int filt_vfsvnode(struct knote *kn, long hint); 5542 static void filt_vfsdetach(struct knote *kn); 5543 static struct filterops vfsread_filtops = { 5544 .f_isfd = 1, 5545 .f_detach = filt_vfsdetach, 5546 .f_event = filt_vfsread 5547 }; 5548 static struct filterops vfswrite_filtops = { 5549 .f_isfd = 1, 5550 .f_detach = filt_vfsdetach, 5551 .f_event = filt_vfswrite 5552 }; 5553 static struct filterops vfsvnode_filtops = { 5554 .f_isfd = 1, 5555 .f_detach = filt_vfsdetach, 5556 .f_event = filt_vfsvnode 5557 }; 5558 5559 static void 5560 vfs_knllock(void *arg) 5561 { 5562 struct vnode *vp = arg; 5563 5564 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5565 } 5566 5567 static void 5568 vfs_knlunlock(void *arg) 5569 { 5570 struct vnode *vp = arg; 5571 5572 VOP_UNLOCK(vp); 5573 } 5574 5575 static void 5576 vfs_knl_assert_locked(void *arg) 5577 { 5578 #ifdef DEBUG_VFS_LOCKS 5579 struct vnode *vp = arg; 5580 5581 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5582 #endif 5583 } 5584 5585 static void 5586 vfs_knl_assert_unlocked(void *arg) 5587 { 5588 #ifdef DEBUG_VFS_LOCKS 5589 struct vnode *vp = arg; 5590 5591 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5592 #endif 5593 } 5594 5595 int 5596 vfs_kqfilter(struct vop_kqfilter_args *ap) 5597 { 5598 struct vnode *vp = ap->a_vp; 5599 struct knote *kn = ap->a_kn; 5600 struct knlist *knl; 5601 5602 switch (kn->kn_filter) { 5603 case EVFILT_READ: 5604 kn->kn_fop = &vfsread_filtops; 5605 break; 5606 case EVFILT_WRITE: 5607 kn->kn_fop = &vfswrite_filtops; 5608 break; 5609 case EVFILT_VNODE: 5610 kn->kn_fop = &vfsvnode_filtops; 5611 break; 5612 default: 5613 return (EINVAL); 5614 } 5615 5616 kn->kn_hook = (caddr_t)vp; 5617 5618 v_addpollinfo(vp); 5619 if (vp->v_pollinfo == NULL) 5620 return (ENOMEM); 5621 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5622 vhold(vp); 5623 knlist_add(knl, kn, 0); 5624 5625 return (0); 5626 } 5627 5628 /* 5629 * Detach knote from vnode 5630 */ 5631 static void 5632 filt_vfsdetach(struct knote *kn) 5633 { 5634 struct vnode *vp = (struct vnode *)kn->kn_hook; 5635 5636 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5637 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5638 vdrop(vp); 5639 } 5640 5641 /*ARGSUSED*/ 5642 static int 5643 filt_vfsread(struct knote *kn, long hint) 5644 { 5645 struct vnode *vp = (struct vnode *)kn->kn_hook; 5646 struct vattr va; 5647 int res; 5648 5649 /* 5650 * filesystem is gone, so set the EOF flag and schedule 5651 * the knote for deletion. 5652 */ 5653 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5654 VI_LOCK(vp); 5655 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5656 VI_UNLOCK(vp); 5657 return (1); 5658 } 5659 5660 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5661 return (0); 5662 5663 VI_LOCK(vp); 5664 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5665 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5666 VI_UNLOCK(vp); 5667 return (res); 5668 } 5669 5670 /*ARGSUSED*/ 5671 static int 5672 filt_vfswrite(struct knote *kn, long hint) 5673 { 5674 struct vnode *vp = (struct vnode *)kn->kn_hook; 5675 5676 VI_LOCK(vp); 5677 5678 /* 5679 * filesystem is gone, so set the EOF flag and schedule 5680 * the knote for deletion. 
5681 */ 5682 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5683 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5684 5685 kn->kn_data = 0; 5686 VI_UNLOCK(vp); 5687 return (1); 5688 } 5689 5690 static int 5691 filt_vfsvnode(struct knote *kn, long hint) 5692 { 5693 struct vnode *vp = (struct vnode *)kn->kn_hook; 5694 int res; 5695 5696 VI_LOCK(vp); 5697 if (kn->kn_sfflags & hint) 5698 kn->kn_fflags |= hint; 5699 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5700 kn->kn_flags |= EV_EOF; 5701 VI_UNLOCK(vp); 5702 return (1); 5703 } 5704 res = (kn->kn_fflags != 0); 5705 VI_UNLOCK(vp); 5706 return (res); 5707 } 5708 5709 /* 5710 * Returns whether the directory is empty or not. 5711 * If it is empty, the return value is 0; otherwise 5712 * the return value is an error value (which may 5713 * be ENOTEMPTY). 5714 */ 5715 int 5716 vfs_emptydir(struct vnode *vp) 5717 { 5718 struct uio uio; 5719 struct iovec iov; 5720 struct dirent *dirent, *dp, *endp; 5721 int error, eof; 5722 5723 error = 0; 5724 eof = 0; 5725 5726 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 5727 5728 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 5729 iov.iov_base = dirent; 5730 iov.iov_len = sizeof(struct dirent); 5731 5732 uio.uio_iov = &iov; 5733 uio.uio_iovcnt = 1; 5734 uio.uio_offset = 0; 5735 uio.uio_resid = sizeof(struct dirent); 5736 uio.uio_segflg = UIO_SYSSPACE; 5737 uio.uio_rw = UIO_READ; 5738 uio.uio_td = curthread; 5739 5740 while (eof == 0 && error == 0) { 5741 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 5742 NULL, NULL); 5743 if (error != 0) 5744 break; 5745 endp = (void *)((uint8_t *)dirent + 5746 sizeof(struct dirent) - uio.uio_resid); 5747 for (dp = dirent; dp < endp; 5748 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 5749 if (dp->d_type == DT_WHT) 5750 continue; 5751 if (dp->d_namlen == 0) 5752 continue; 5753 if (dp->d_type != DT_DIR && 5754 dp->d_type != DT_UNKNOWN) { 5755 error = ENOTEMPTY; 5756 break; 5757 } 5758 if (dp->d_namlen > 2) { 5759 error = ENOTEMPTY; 5760 break; 5761 } 5762 if (dp->d_namlen == 1 && 5763 dp->d_name[0] != '.') { 5764 error = ENOTEMPTY; 5765 break; 5766 } 5767 if (dp->d_namlen == 2 && 5768 dp->d_name[1] != '.') { 5769 error = ENOTEMPTY; 5770 break; 5771 } 5772 uio.uio_resid = sizeof(struct dirent); 5773 } 5774 } 5775 free(dirent, M_TEMP); 5776 return (error); 5777 } 5778 5779 int 5780 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5781 { 5782 int error; 5783 5784 if (dp->d_reclen > ap->a_uio->uio_resid) 5785 return (ENAMETOOLONG); 5786 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5787 if (error) { 5788 if (ap->a_ncookies != NULL) { 5789 if (ap->a_cookies != NULL) 5790 free(ap->a_cookies, M_TEMP); 5791 ap->a_cookies = NULL; 5792 *ap->a_ncookies = 0; 5793 } 5794 return (error); 5795 } 5796 if (ap->a_ncookies == NULL) 5797 return (0); 5798 5799 KASSERT(ap->a_cookies, 5800 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5801 5802 *ap->a_cookies = realloc(*ap->a_cookies, 5803 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5804 (*ap->a_cookies)[*ap->a_ncookies] = off; 5805 *ap->a_ncookies += 1; 5806 return (0); 5807 } 5808 5809 /* 5810 * Mark for update the access time of the file if the filesystem 5811 * supports VOP_MARKATIME. This functionality is used by execve and 5812 * mmap, so we want to avoid the I/O implied by directly setting 5813 * va_atime for the sake of efficiency. 
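 *
 * A hypothetical call site (illustrative only) would simply do, with the
 * vnode still locked:
 *
 *	vfs_mark_atime(vp, td->td_ucred);
 *
 * leaving it to the filesystem to fold the access-time update into a later
 * inode write rather than issuing one immediately.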
5814 */ 5815 void 5816 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5817 { 5818 struct mount *mp; 5819 5820 mp = vp->v_mount; 5821 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5822 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5823 (void)VOP_MARKATIME(vp); 5824 } 5825 5826 /* 5827 * The purpose of this routine is to remove granularity from accmode_t, 5828 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5829 * VADMIN and VAPPEND. 5830 * 5831 * If it returns 0, the caller is supposed to continue with the usual 5832 * access checks using 'accmode' as modified by this routine. If it 5833 * returns nonzero value, the caller is supposed to return that value 5834 * as errno. 5835 * 5836 * Note that after this routine runs, accmode may be zero. 5837 */ 5838 int 5839 vfs_unixify_accmode(accmode_t *accmode) 5840 { 5841 /* 5842 * There is no way to specify explicit "deny" rule using 5843 * file mode or POSIX.1e ACLs. 5844 */ 5845 if (*accmode & VEXPLICIT_DENY) { 5846 *accmode = 0; 5847 return (0); 5848 } 5849 5850 /* 5851 * None of these can be translated into usual access bits. 5852 * Also, the common case for NFSv4 ACLs is to not contain 5853 * either of these bits. Caller should check for VWRITE 5854 * on the containing directory instead. 5855 */ 5856 if (*accmode & (VDELETE_CHILD | VDELETE)) 5857 return (EPERM); 5858 5859 if (*accmode & VADMIN_PERMS) { 5860 *accmode &= ~VADMIN_PERMS; 5861 *accmode |= VADMIN; 5862 } 5863 5864 /* 5865 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5866 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5867 */ 5868 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5869 5870 return (0); 5871 } 5872 5873 /* 5874 * Clear out a doomed vnode (if any) and replace it with a new one as long 5875 * as the fs is not being unmounted. Return the root vnode to the caller. 5876 */ 5877 static int __noinline 5878 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 5879 { 5880 struct vnode *vp; 5881 int error; 5882 5883 restart: 5884 if (mp->mnt_rootvnode != NULL) { 5885 MNT_ILOCK(mp); 5886 vp = mp->mnt_rootvnode; 5887 if (vp != NULL) { 5888 if (!VN_IS_DOOMED(vp)) { 5889 vrefact(vp); 5890 MNT_IUNLOCK(mp); 5891 error = vn_lock(vp, flags); 5892 if (error == 0) { 5893 *vpp = vp; 5894 return (0); 5895 } 5896 vrele(vp); 5897 goto restart; 5898 } 5899 /* 5900 * Clear the old one. 5901 */ 5902 mp->mnt_rootvnode = NULL; 5903 } 5904 MNT_IUNLOCK(mp); 5905 if (vp != NULL) { 5906 /* 5907 * Paired with a fence in vfs_op_thread_exit(). 
5908 */ 5909 atomic_thread_fence_acq(); 5910 vfs_op_barrier_wait(mp); 5911 vrele(vp); 5912 } 5913 } 5914 error = VFS_CACHEDROOT(mp, flags, vpp); 5915 if (error != 0) 5916 return (error); 5917 if (mp->mnt_vfs_ops == 0) { 5918 MNT_ILOCK(mp); 5919 if (mp->mnt_vfs_ops != 0) { 5920 MNT_IUNLOCK(mp); 5921 return (0); 5922 } 5923 if (mp->mnt_rootvnode == NULL) { 5924 vrefact(*vpp); 5925 mp->mnt_rootvnode = *vpp; 5926 } else { 5927 if (mp->mnt_rootvnode != *vpp) { 5928 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 5929 panic("%s: mismatch between vnode returned " 5930 " by VFS_CACHEDROOT and the one cached " 5931 " (%p != %p)", 5932 __func__, *vpp, mp->mnt_rootvnode); 5933 } 5934 } 5935 } 5936 MNT_IUNLOCK(mp); 5937 } 5938 return (0); 5939 } 5940 5941 int 5942 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 5943 { 5944 struct vnode *vp; 5945 int error; 5946 5947 if (!vfs_op_thread_enter(mp)) 5948 return (vfs_cache_root_fallback(mp, flags, vpp)); 5949 vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); 5950 if (vp == NULL || VN_IS_DOOMED(vp)) { 5951 vfs_op_thread_exit(mp); 5952 return (vfs_cache_root_fallback(mp, flags, vpp)); 5953 } 5954 vrefact(vp); 5955 vfs_op_thread_exit(mp); 5956 error = vn_lock(vp, flags); 5957 if (error != 0) { 5958 vrele(vp); 5959 return (vfs_cache_root_fallback(mp, flags, vpp)); 5960 } 5961 *vpp = vp; 5962 return (0); 5963 } 5964 5965 struct vnode * 5966 vfs_cache_root_clear(struct mount *mp) 5967 { 5968 struct vnode *vp; 5969 5970 /* 5971 * ops > 0 guarantees there is nobody who can see this vnode 5972 */ 5973 MPASS(mp->mnt_vfs_ops > 0); 5974 vp = mp->mnt_rootvnode; 5975 mp->mnt_rootvnode = NULL; 5976 return (vp); 5977 } 5978 5979 void 5980 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 5981 { 5982 5983 MPASS(mp->mnt_vfs_ops > 0); 5984 vrefact(vp); 5985 mp->mnt_rootvnode = vp; 5986 } 5987 5988 /* 5989 * These are helper functions for filesystems to traverse all 5990 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5991 * 5992 * This interface replaces MNT_VNODE_FOREACH. 5993 */ 5994 5995 5996 struct vnode * 5997 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5998 { 5999 struct vnode *vp; 6000 6001 if (should_yield()) 6002 kern_yield(PRI_USER); 6003 MNT_ILOCK(mp); 6004 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6005 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6006 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6007 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6008 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6009 continue; 6010 VI_LOCK(vp); 6011 if (VN_IS_DOOMED(vp)) { 6012 VI_UNLOCK(vp); 6013 continue; 6014 } 6015 break; 6016 } 6017 if (vp == NULL) { 6018 __mnt_vnode_markerfree_all(mvp, mp); 6019 /* MNT_IUNLOCK(mp); -- done in above function */ 6020 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6021 return (NULL); 6022 } 6023 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6024 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6025 MNT_IUNLOCK(mp); 6026 return (vp); 6027 } 6028 6029 struct vnode * 6030 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6031 { 6032 struct vnode *vp; 6033 6034 *mvp = vn_alloc_marker(mp); 6035 MNT_ILOCK(mp); 6036 MNT_REF(mp); 6037 6038 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6039 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
*/ 6040 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6041 continue; 6042 VI_LOCK(vp); 6043 if (VN_IS_DOOMED(vp)) { 6044 VI_UNLOCK(vp); 6045 continue; 6046 } 6047 break; 6048 } 6049 if (vp == NULL) { 6050 MNT_REL(mp); 6051 MNT_IUNLOCK(mp); 6052 vn_free_marker(*mvp); 6053 *mvp = NULL; 6054 return (NULL); 6055 } 6056 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6057 MNT_IUNLOCK(mp); 6058 return (vp); 6059 } 6060 6061 void 6062 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6063 { 6064 6065 if (*mvp == NULL) { 6066 MNT_IUNLOCK(mp); 6067 return; 6068 } 6069 6070 mtx_assert(MNT_MTX(mp), MA_OWNED); 6071 6072 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6073 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6074 MNT_REL(mp); 6075 MNT_IUNLOCK(mp); 6076 vn_free_marker(*mvp); 6077 *mvp = NULL; 6078 } 6079 6080 /* 6081 * These are helper functions for filesystems to traverse their 6082 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6083 */ 6084 static void 6085 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6086 { 6087 6088 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6089 6090 MNT_ILOCK(mp); 6091 MNT_REL(mp); 6092 MNT_IUNLOCK(mp); 6093 vn_free_marker(*mvp); 6094 *mvp = NULL; 6095 } 6096 6097 /* 6098 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6099 * conventional lock order during mnt_vnode_next_lazy iteration. 6100 * 6101 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6102 * The list lock is dropped and reacquired. On success, both locks are held. 6103 * On failure, the mount vnode list lock is held but the vnode interlock is 6104 * not, and the procedure may have yielded. 6105 */ 6106 static bool 6107 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6108 struct vnode *vp) 6109 { 6110 const struct vnode *tmp; 6111 bool held, ret; 6112 6113 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6114 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6115 ("%s: bad marker", __func__)); 6116 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6117 ("%s: inappropriate vnode", __func__)); 6118 ASSERT_VI_UNLOCKED(vp, __func__); 6119 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6120 6121 ret = false; 6122 6123 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6124 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6125 6126 /* 6127 * Use a hold to prevent vp from disappearing while the mount vnode 6128 * list lock is dropped and reacquired. Normally a hold would be 6129 * acquired with vhold(), but that might try to acquire the vnode 6130 * interlock, which would be a LOR with the mount vnode list lock. 6131 */ 6132 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 6133 mtx_unlock(&mp->mnt_listmtx); 6134 if (!held) 6135 goto abort; 6136 VI_LOCK(vp); 6137 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 6138 vdropl(vp); 6139 goto abort; 6140 } 6141 mtx_lock(&mp->mnt_listmtx); 6142 6143 /* 6144 * Determine whether the vnode is still the next one after the marker, 6145 * excepting any other markers. If the vnode has not been doomed by 6146 * vgone() then the hold should have ensured that it remained on the 6147 * lazy list. If it has been doomed but is still on the lazy list, 6148 * don't abort, but rather skip over it (avoid spinning on doomed 6149 * vnodes). 
	 */
	tmp = mvp;
	do {
		tmp = TAILQ_NEXT(tmp, v_lazylist);
	} while (tmp != NULL && tmp->v_type == VMARKER);
	if (tmp != vp) {
		mtx_unlock(&mp->mnt_listmtx);
		VI_UNLOCK(vp);
		goto abort;
	}

	ret = true;
	goto out;
abort:
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
out:
	if (ret)
		ASSERT_VI_LOCKED(vp, __func__);
	else
		ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	return (ret);
}

static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp, *nvp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note we may encounter
		 * a long string of vnodes we don't care about and hog the
		 * list as a result.  Check for it and requeue the marker.
		 */
		if (VN_IS_DOOMED(vp) || !cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.  If that does
		 * not succeed, drop the mount vnode list lock and try to
		 * reacquire it and the vnode interlock in the right order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		if (vp->v_mount == mp && !VN_IS_DOOMED(vp))
			break;
		nvp = TAILQ_NEXT(vp, v_lazylist);
		VI_UNLOCK(vp);
		vp = nvp;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

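/*
 * Illustrative sketch only (not part of the original code): typical use of
 * MNT_VNODE_FOREACH_LAZY() by a filesystem.  The cb/cbarg pair is a
 * pre-filter invoked without the vnode interlock; vnodes that pass it are
 * returned with the interlock held, and a loop that terminates early must
 * free the marker with MNT_VNODE_FOREACH_LAZY_ABORT().  The callback name
 * and its v_object test below are made-up placeholders.
 *
 *	static bool
 *	example_want_vnode(struct vnode *vp, void *arg __unused)
 *	{
 *
 *		return (vp->v_object != NULL);
 *	}
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, example_want_vnode, NULL) {
 *		(per-vnode work goes here, with the interlock held)
 *		VI_UNLOCK(vp);
 *	}
 */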