1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/capsicum.h> 54 #include <sys/condvar.h> 55 #include <sys/conf.h> 56 #include <sys/counter.h> 57 #include <sys/dirent.h> 58 #include <sys/event.h> 59 #include <sys/eventhandler.h> 60 #include <sys/extattr.h> 61 #include <sys/file.h> 62 #include <sys/fcntl.h> 63 #include <sys/jail.h> 64 #include <sys/kdb.h> 65 #include <sys/kernel.h> 66 #include <sys/kthread.h> 67 #include <sys/ktr.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smp.h> 80 #include <sys/stat.h> 81 #include <sys/sysctl.h> 82 #include <sys/syslog.h> 83 #include <sys/vmmeter.h> 84 #include <sys/vnode.h> 85 #include <sys/watchdog.h> 86 87 #include <machine/stdarg.h> 88 89 #include <security/mac/mac_framework.h> 90 91 #include <vm/vm.h> 92 #include <vm/vm_object.h> 93 #include <vm/vm_extern.h> 94 #include <vm/pmap.h> 95 #include <vm/vm_map.h> 96 #include <vm/vm_page.h> 97 #include <vm/vm_kern.h> 98 #include <vm/uma.h> 99 100 #ifdef DDB 101 #include <ddb/ddb.h> 102 #endif 103 104 static void delmntque(struct vnode *vp); 105 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 106 int slpflag, int slptimeo); 107 static void syncer_shutdown(void *arg, int howto); 108 static int vtryrecycle(struct vnode *vp); 109 static void v_init_counters(struct vnode *); 110 static void v_incr_devcount(struct vnode *); 111 static void v_decr_devcount(struct vnode *); 112 static void vgonel(struct vnode *); 113 static void vfs_knllock(void *arg); 114 static void vfs_knlunlock(void *arg); 115 static void vfs_knl_assert_locked(void *arg); 116 static void vfs_knl_assert_unlocked(void *arg); 117 static void vnlru_return_batches(struct vfsops *mnt_op); 118 static void destroy_vpollinfo(struct vpollinfo *vi); 119 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 120 daddr_t startlbn, daddr_t endlbn); 121 122 /* 123 * These fences are intended for cases where some synchronization is 124 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt 125 * and v_usecount) updates. Access to v_iflags is generally synchronized 126 * by the interlock, but we have some internal assertions that check vnode 127 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only 128 * for now. 129 */ 130 #ifdef INVARIANTS 131 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq() 132 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() 133 #else 134 #define VNODE_REFCOUNT_FENCE_ACQ() 135 #define VNODE_REFCOUNT_FENCE_REL() 136 #endif 137 138 /* 139 * Number of vnodes in existence. Increased whenever getnewvnode() 140 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 
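 *
 * The count is maintained with atomic operations; some updaters, e.g.
 * getnewvnode_reserve() and freevnode(), adjust it without holding
 * vnode_free_list_mtx.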
 */
static unsigned long numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

static u_long mnt_free_list_batch = 128;
SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
    &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS,
    &reassignbufcalls, 0, "Number of calls to reassignbuf");

static counter_u64_t free_owe_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
    "Number of times free vnodes kept on active list due to VFS "
    "owing inactivation");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata on
 * mounts backed by block devices is delayed only about half the time
 * that file data is delayed.  Similarly, directory updates are more
 * critical, so they are delayed only about a third of the time that
 * file data is delayed.  Thus, there are SYNCER_MAXDELAY queues that
 * are processed round-robin at a rate of one each second (driven off
 * the filesystem syncer process).  The syncer_delayno variable
 * indicates the next queue that is to be processed.
256 * Items that need to be processed soon are placed in this queue: 257 * 258 * syncer_workitem_pending[syncer_delayno] 259 * 260 * A delay of fifteen seconds is done by placing the request fifteen 261 * entries later in the queue: 262 * 263 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 264 * 265 */ 266 static int syncer_delayno; 267 static long syncer_mask; 268 LIST_HEAD(synclist, bufobj); 269 static struct synclist *syncer_workitem_pending; 270 /* 271 * The sync_mtx protects: 272 * bo->bo_synclist 273 * sync_vnode_count 274 * syncer_delayno 275 * syncer_state 276 * syncer_workitem_pending 277 * syncer_worklist_len 278 * rushjob 279 */ 280 static struct mtx sync_mtx; 281 static struct cv sync_wakeup; 282 283 #define SYNCER_MAXDELAY 32 284 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 285 static int syncdelay = 30; /* max time to delay syncing data */ 286 static int filedelay = 30; /* time to delay syncing files */ 287 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 288 "Time to delay syncing files (in seconds)"); 289 static int dirdelay = 29; /* time to delay syncing directories */ 290 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 291 "Time to delay syncing directories (in seconds)"); 292 static int metadelay = 28; /* time to delay syncing metadata */ 293 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 294 "Time to delay syncing metadata (in seconds)"); 295 static int rushjob; /* number of slots to run ASAP */ 296 static int stat_rush_requests; /* number of times I/O speeded up */ 297 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 298 "Number of times I/O speeded up (rush requests)"); 299 300 /* 301 * When shutting down the syncer, run it at four times normal speed. 302 */ 303 #define SYNCER_SHUTDOWN_SPEEDUP 4 304 static int sync_vnode_count; 305 static int syncer_worklist_len; 306 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 307 syncer_state; 308 309 /* Target for maximum number of vnodes. */ 310 int desiredvnodes; 311 static int gapvnodes; /* gap between wanted and desired */ 312 static int vhiwat; /* enough extras after expansion */ 313 static int vlowat; /* minimal extras before expansion */ 314 static int vstir; /* nonzero to stir non-free vnodes */ 315 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 316 317 static int 318 sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) 319 { 320 int error, old_desiredvnodes; 321 322 old_desiredvnodes = desiredvnodes; 323 if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0) 324 return (error); 325 if (old_desiredvnodes != desiredvnodes) { 326 wantfreevnodes = desiredvnodes / 4; 327 /* XXX locking seems to be incomplete. 
*/ 328 vfs_hash_changesize(desiredvnodes); 329 cache_changesize(desiredvnodes); 330 } 331 return (0); 332 } 333 334 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 335 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, 336 sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes"); 337 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 338 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 339 static int vnlru_nowhere; 340 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 341 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 342 343 static int 344 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 345 { 346 struct vnode *vp; 347 struct nameidata nd; 348 char *buf; 349 unsigned long ndflags; 350 int error; 351 352 if (req->newptr == NULL) 353 return (EINVAL); 354 if (req->newlen >= PATH_MAX) 355 return (E2BIG); 356 357 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 358 error = SYSCTL_IN(req, buf, req->newlen); 359 if (error != 0) 360 goto out; 361 362 buf[req->newlen] = '\0'; 363 364 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME; 365 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 366 if ((error = namei(&nd)) != 0) 367 goto out; 368 vp = nd.ni_vp; 369 370 if (VN_IS_DOOMED(vp)) { 371 /* 372 * This vnode is being recycled. Return != 0 to let the caller 373 * know that the sysctl had no effect. Return EAGAIN because a 374 * subsequent call will likely succeed (since namei will create 375 * a new vnode if necessary) 376 */ 377 error = EAGAIN; 378 goto putvnode; 379 } 380 381 counter_u64_add(recycles_count, 1); 382 vgone(vp); 383 putvnode: 384 NDFREE(&nd, 0); 385 out: 386 free(buf, M_TEMP); 387 return (error); 388 } 389 390 static int 391 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 392 { 393 struct thread *td = curthread; 394 struct vnode *vp; 395 struct file *fp; 396 int error; 397 int fd; 398 399 if (req->newptr == NULL) 400 return (EBADF); 401 402 error = sysctl_handle_int(oidp, &fd, 0, req); 403 if (error != 0) 404 return (error); 405 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 406 if (error != 0) 407 return (error); 408 vp = fp->f_vnode; 409 410 error = vn_lock(vp, LK_EXCLUSIVE); 411 if (error != 0) 412 goto drop; 413 414 counter_u64_add(recycles_count, 1); 415 vgone(vp); 416 VOP_UNLOCK(vp, 0); 417 drop: 418 fdrop(fp, td); 419 return (error); 420 } 421 422 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 423 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 424 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 425 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 426 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 427 sysctl_ftry_reclaim_vnode, "I", 428 "Try to reclaim a vnode by its file descriptor"); 429 430 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 431 static int vnsz2log; 432 433 /* 434 * Support for the bufobj clean & dirty pctrie. 435 */ 436 static void * 437 buf_trie_alloc(struct pctrie *ptree) 438 { 439 440 return uma_zalloc(buf_trie_zone, M_NOWAIT); 441 } 442 443 static void 444 buf_trie_free(struct pctrie *ptree, void *node) 445 { 446 447 uma_zfree(buf_trie_zone, node); 448 } 449 PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 450 451 /* 452 * Initialize the vnode management data structures. 453 * 454 * Reevaluate the following cap on the number of vnodes after the physical 455 * memory size exceeds 512GB. 
In the limit, as the physical memory size 456 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 457 */ 458 #ifndef MAXVNODES_MAX 459 #define MAXVNODES_MAX (512 * 1024 * 1024 / 64) /* 8M */ 460 #endif 461 462 /* 463 * Initialize a vnode as it first enters the zone. 464 */ 465 static int 466 vnode_init(void *mem, int size, int flags) 467 { 468 struct vnode *vp; 469 470 vp = mem; 471 bzero(vp, size); 472 /* 473 * Setup locks. 474 */ 475 vp->v_vnlock = &vp->v_lock; 476 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 477 /* 478 * By default, don't allow shared locks unless filesystems opt-in. 479 */ 480 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 481 LK_NOSHARE | LK_IS_VNODE); 482 /* 483 * Initialize bufobj. 484 */ 485 bufobj_init(&vp->v_bufobj, vp); 486 /* 487 * Initialize namecache. 488 */ 489 LIST_INIT(&vp->v_cache_src); 490 TAILQ_INIT(&vp->v_cache_dst); 491 /* 492 * Initialize rangelocks. 493 */ 494 rangelock_init(&vp->v_rl); 495 return (0); 496 } 497 498 /* 499 * Free a vnode when it is cleared from the zone. 500 */ 501 static void 502 vnode_fini(void *mem, int size) 503 { 504 struct vnode *vp; 505 struct bufobj *bo; 506 507 vp = mem; 508 rangelock_destroy(&vp->v_rl); 509 lockdestroy(vp->v_vnlock); 510 mtx_destroy(&vp->v_interlock); 511 bo = &vp->v_bufobj; 512 rw_destroy(BO_LOCKPTR(bo)); 513 } 514 515 /* 516 * Provide the size of NFS nclnode and NFS fh for calculation of the 517 * vnode memory consumption. The size is specified directly to 518 * eliminate dependency on NFS-private header. 519 * 520 * Other filesystems may use bigger or smaller (like UFS and ZFS) 521 * private inode data, but the NFS-based estimation is ample enough. 522 * Still, we care about differences in the size between 64- and 32-bit 523 * platforms. 524 * 525 * Namecache structure size is heuristically 526 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 527 */ 528 #ifdef _LP64 529 #define NFS_NCLNODE_SZ (528 + 64) 530 #define NC_SZ 148 531 #else 532 #define NFS_NCLNODE_SZ (360 + 32) 533 #define NC_SZ 92 534 #endif 535 536 static void 537 vntblinit(void *dummy __unused) 538 { 539 u_int i; 540 int physvnodes, virtvnodes; 541 542 /* 543 * Desiredvnodes is a function of the physical memory size and the 544 * kernel's heap size. Generally speaking, it scales with the 545 * physical memory size. The ratio of desiredvnodes to the physical 546 * memory size is 1:16 until desiredvnodes exceeds 98,304. 547 * Thereafter, the 548 * marginal ratio of desiredvnodes to the physical memory size is 549 * 1:64. However, desiredvnodes is limited by the kernel's heap 550 * size. The memory required by desiredvnodes vnodes and vm objects 551 * must not exceed 1/10th of the kernel's heap size. 
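	 *
	 * As a rough illustrative example: with 16GB of physical memory,
	 * pgtok(vm_cnt.v_page_count) is about 16M (KB), so the physvnodes
	 * term below evaluates to roughly
	 * maxproc + 16M/64 + 3 * (98304 * 16)/64 = maxproc + 262144 + 73728,
	 * i.e. a few hundred thousand vnodes, before the virtvnodes and
	 * MAXVNODES_MAX limits are applied.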
552 */ 553 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 554 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 555 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 556 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 557 desiredvnodes = min(physvnodes, virtvnodes); 558 if (desiredvnodes > MAXVNODES_MAX) { 559 if (bootverbose) 560 printf("Reducing kern.maxvnodes %d -> %d\n", 561 desiredvnodes, MAXVNODES_MAX); 562 desiredvnodes = MAXVNODES_MAX; 563 } 564 wantfreevnodes = desiredvnodes / 4; 565 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 566 TAILQ_INIT(&vnode_free_list); 567 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 568 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 569 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 570 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 571 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 572 /* 573 * Preallocate enough nodes to support one-per buf so that 574 * we can not fail an insert. reassignbuf() callers can not 575 * tolerate the insertion failure. 576 */ 577 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 578 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 579 UMA_ZONE_NOFREE | UMA_ZONE_VM); 580 uma_prealloc(buf_trie_zone, nbuf); 581 582 vnodes_created = counter_u64_alloc(M_WAITOK); 583 recycles_count = counter_u64_alloc(M_WAITOK); 584 free_owe_inact = counter_u64_alloc(M_WAITOK); 585 586 /* 587 * Initialize the filesystem syncer. 588 */ 589 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 590 &syncer_mask); 591 syncer_maxdelay = syncer_mask + 1; 592 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 593 cv_init(&sync_wakeup, "syncer"); 594 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 595 vnsz2log++; 596 vnsz2log--; 597 } 598 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 599 600 601 /* 602 * Mark a mount point as busy. Used to synchronize access and to delay 603 * unmounting. Eventually, mountlist_mtx is not released on failure. 604 * 605 * vfs_busy() is a custom lock, it can block the caller. 606 * vfs_busy() only sleeps if the unmount is active on the mount point. 607 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 608 * vnode belonging to mp. 609 * 610 * Lookup uses vfs_busy() to traverse mount points. 611 * root fs var fs 612 * / vnode lock A / vnode lock (/var) D 613 * /var vnode lock B /log vnode lock(/var/log) E 614 * vfs_busy lock C vfs_busy lock F 615 * 616 * Within each file system, the lock order is C->A->B and F->D->E. 617 * 618 * When traversing across mounts, the system follows that lock order: 619 * 620 * C->A->B 621 * | 622 * +->F->D->E 623 * 624 * The lookup() process for namei("/var") illustrates the process: 625 * VOP_LOOKUP() obtains B while A is held 626 * vfs_busy() obtains a shared lock on F while A and B are held 627 * vput() releases lock on B 628 * vput() releases lock on A 629 * VFS_ROOT() obtains lock on D while shared lock on F is held 630 * vfs_unbusy() releases shared lock on F 631 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 632 * Attempt to lock A (instead of vp_crossmp) while D is held would 633 * violate the global order, causing deadlocks. 634 * 635 * dounmount() locks B while F is drained. 
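 *
 * Callers that iterate the mount list under mountlist_mtx (e.g.
 * vnlru_proc() and vnlru_return_batches()) pass
 * MBF_NOWAIT | MBF_MNTLSTLOCK: on success vfs_busy() drops
 * mountlist_mtx for them, and on failure it returns immediately with
 * the mutex still held rather than sleeping.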
636 */ 637 int 638 vfs_busy(struct mount *mp, int flags) 639 { 640 641 MPASS((flags & ~MBF_MASK) == 0); 642 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 643 644 if (vfs_op_thread_enter(mp)) { 645 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 646 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 647 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 648 vfs_mp_count_add_pcpu(mp, ref, 1); 649 vfs_mp_count_add_pcpu(mp, lockref, 1); 650 vfs_op_thread_exit(mp); 651 if (flags & MBF_MNTLSTLOCK) 652 mtx_unlock(&mountlist_mtx); 653 return (0); 654 } 655 656 MNT_ILOCK(mp); 657 vfs_assert_mount_counters(mp); 658 MNT_REF(mp); 659 /* 660 * If mount point is currently being unmounted, sleep until the 661 * mount point fate is decided. If thread doing the unmounting fails, 662 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 663 * that this mount point has survived the unmount attempt and vfs_busy 664 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 665 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 666 * about to be really destroyed. vfs_busy needs to release its 667 * reference on the mount point in this case and return with ENOENT, 668 * telling the caller that mount mount it tried to busy is no longer 669 * valid. 670 */ 671 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 672 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 673 MNT_REL(mp); 674 MNT_IUNLOCK(mp); 675 CTR1(KTR_VFS, "%s: failed busying before sleeping", 676 __func__); 677 return (ENOENT); 678 } 679 if (flags & MBF_MNTLSTLOCK) 680 mtx_unlock(&mountlist_mtx); 681 mp->mnt_kern_flag |= MNTK_MWAIT; 682 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 683 if (flags & MBF_MNTLSTLOCK) 684 mtx_lock(&mountlist_mtx); 685 MNT_ILOCK(mp); 686 } 687 if (flags & MBF_MNTLSTLOCK) 688 mtx_unlock(&mountlist_mtx); 689 mp->mnt_lockref++; 690 MNT_IUNLOCK(mp); 691 return (0); 692 } 693 694 /* 695 * Free a busy filesystem. 696 */ 697 void 698 vfs_unbusy(struct mount *mp) 699 { 700 int c; 701 702 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 703 704 if (vfs_op_thread_enter(mp)) { 705 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 706 vfs_mp_count_sub_pcpu(mp, lockref, 1); 707 vfs_mp_count_sub_pcpu(mp, ref, 1); 708 vfs_op_thread_exit(mp); 709 return; 710 } 711 712 MNT_ILOCK(mp); 713 vfs_assert_mount_counters(mp); 714 MNT_REL(mp); 715 c = --mp->mnt_lockref; 716 if (mp->mnt_vfs_ops == 0) { 717 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 718 MNT_IUNLOCK(mp); 719 return; 720 } 721 if (c < 0) 722 vfs_dump_mount_counters(mp); 723 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 724 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 725 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 726 mp->mnt_kern_flag &= ~MNTK_DRAINING; 727 wakeup(&mp->mnt_lockref); 728 } 729 MNT_IUNLOCK(mp); 730 } 731 732 /* 733 * Lookup a mount point by filesystem identifier. 734 */ 735 struct mount * 736 vfs_getvfs(fsid_t *fsid) 737 { 738 struct mount *mp; 739 740 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 741 mtx_lock(&mountlist_mtx); 742 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 743 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 744 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 745 vfs_ref(mp); 746 mtx_unlock(&mountlist_mtx); 747 return (mp); 748 } 749 } 750 mtx_unlock(&mountlist_mtx); 751 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 752 return ((struct mount *) 0); 753 } 754 755 /* 756 * Lookup a mount point by filesystem identifier, busying it before 757 * returning. 
 *
 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and fall back to the slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
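 *
 * val[1] carries the raw vfc_typenum.  val[0] is built from a fake
 * device number whose minor bits combine the low byte of the filesystem
 * type (shifted into the top byte) with the 16-bit mntid_base split
 * across two byte positions; vfs_getvfs() is used to verify that a
 * candidate fsid is not already in use before it is installed.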
863 */ 864 void 865 vfs_getnewfsid(struct mount *mp) 866 { 867 static uint16_t mntid_base; 868 struct mount *nmp; 869 fsid_t tfsid; 870 int mtype; 871 872 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 873 mtx_lock(&mntid_mtx); 874 mtype = mp->mnt_vfc->vfc_typenum; 875 tfsid.val[1] = mtype; 876 mtype = (mtype & 0xFF) << 24; 877 for (;;) { 878 tfsid.val[0] = makedev(255, 879 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 880 mntid_base++; 881 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 882 break; 883 vfs_rel(nmp); 884 } 885 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 886 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 887 mtx_unlock(&mntid_mtx); 888 } 889 890 /* 891 * Knob to control the precision of file timestamps: 892 * 893 * 0 = seconds only; nanoseconds zeroed. 894 * 1 = seconds and nanoseconds, accurate within 1/HZ. 895 * 2 = seconds and nanoseconds, truncated to microseconds. 896 * >=3 = seconds and nanoseconds, maximum precision. 897 */ 898 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 899 900 static int timestamp_precision = TSP_USEC; 901 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 902 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 903 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 904 "3+: sec + ns (max. precision))"); 905 906 /* 907 * Get a current timestamp. 908 */ 909 void 910 vfs_timestamp(struct timespec *tsp) 911 { 912 struct timeval tv; 913 914 switch (timestamp_precision) { 915 case TSP_SEC: 916 tsp->tv_sec = time_second; 917 tsp->tv_nsec = 0; 918 break; 919 case TSP_HZ: 920 getnanotime(tsp); 921 break; 922 case TSP_USEC: 923 microtime(&tv); 924 TIMEVAL_TO_TIMESPEC(&tv, tsp); 925 break; 926 case TSP_NSEC: 927 default: 928 nanotime(tsp); 929 break; 930 } 931 } 932 933 /* 934 * Set vnode attributes to VNOVAL 935 */ 936 void 937 vattr_null(struct vattr *vap) 938 { 939 940 vap->va_type = VNON; 941 vap->va_size = VNOVAL; 942 vap->va_bytes = VNOVAL; 943 vap->va_mode = VNOVAL; 944 vap->va_nlink = VNOVAL; 945 vap->va_uid = VNOVAL; 946 vap->va_gid = VNOVAL; 947 vap->va_fsid = VNOVAL; 948 vap->va_fileid = VNOVAL; 949 vap->va_blocksize = VNOVAL; 950 vap->va_rdev = VNOVAL; 951 vap->va_atime.tv_sec = VNOVAL; 952 vap->va_atime.tv_nsec = VNOVAL; 953 vap->va_mtime.tv_sec = VNOVAL; 954 vap->va_mtime.tv_nsec = VNOVAL; 955 vap->va_ctime.tv_sec = VNOVAL; 956 vap->va_ctime.tv_nsec = VNOVAL; 957 vap->va_birthtime.tv_sec = VNOVAL; 958 vap->va_birthtime.tv_nsec = VNOVAL; 959 vap->va_flags = VNOVAL; 960 vap->va_gen = VNOVAL; 961 vap->va_vaflags = 0; 962 } 963 964 /* 965 * This routine is called when we have too many vnodes. It attempts 966 * to free <count> vnodes and will potentially free vnodes that still 967 * have VM backing store (VM backing store is typically the cause 968 * of a vnode blowout so we want to do this). Therefore, this operation 969 * is not considered cheap. 970 * 971 * A number of conditions may prevent a vnode from being reclaimed. 972 * the buffer cache may have references on the vnode, a directory 973 * vnode may still have references due to the namei cache representing 974 * underlying files, or the vnode may be in active use. It is not 975 * desirable to reuse such vnodes. These conditions may cause the 976 * number of vnodes to reach some minimum value regardless of what 977 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
 *
 * @param mp Try to reclaim vnodes from this mountpoint
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *        entries if this argument is true
 * @param trigger Only reclaim vnodes with fewer than this many resident
 *        pages.
 * @return The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it doing anything.)  The
		 * order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    ((vp->v_iflag & VI_FREE) != 0) ||
		    VN_IS_DOOMED(vp) || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VIRF_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
1059 */ 1060 if (vp->v_usecount || 1061 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1062 (vp->v_iflag & VI_FREE) != 0 || 1063 (vp->v_object != NULL && 1064 vp->v_object->resident_page_count > trigger)) { 1065 VOP_UNLOCK(vp, 0); 1066 vdropl(vp); 1067 goto next_iter_mntunlocked; 1068 } 1069 KASSERT(!VN_IS_DOOMED(vp), 1070 ("VIRF_DOOMED unexpectedly detected in vlrureclaim()")); 1071 counter_u64_add(recycles_count, 1); 1072 vgonel(vp); 1073 VOP_UNLOCK(vp, 0); 1074 vdropl(vp); 1075 done++; 1076 next_iter_mntunlocked: 1077 if (!should_yield()) 1078 goto relock_mnt; 1079 goto yield; 1080 next_iter: 1081 if (!should_yield()) 1082 continue; 1083 MNT_IUNLOCK(mp); 1084 yield: 1085 kern_yield(PRI_USER); 1086 relock_mnt: 1087 MNT_ILOCK(mp); 1088 } 1089 MNT_IUNLOCK(mp); 1090 vn_finished_write(mp); 1091 return done; 1092 } 1093 1094 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1095 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1096 0, 1097 "limit on vnode free requests per call to the vnlru_free routine"); 1098 1099 /* 1100 * Attempt to reduce the free list by the requested amount. 1101 */ 1102 static void 1103 vnlru_free_locked(int count, struct vfsops *mnt_op) 1104 { 1105 struct vnode *vp; 1106 struct mount *mp; 1107 bool tried_batches; 1108 1109 tried_batches = false; 1110 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1111 if (count > max_vnlru_free) 1112 count = max_vnlru_free; 1113 for (; count > 0; count--) { 1114 vp = TAILQ_FIRST(&vnode_free_list); 1115 /* 1116 * The list can be modified while the free_list_mtx 1117 * has been dropped and vp could be NULL here. 1118 */ 1119 if (vp == NULL) { 1120 if (tried_batches) 1121 break; 1122 mtx_unlock(&vnode_free_list_mtx); 1123 vnlru_return_batches(mnt_op); 1124 tried_batches = true; 1125 mtx_lock(&vnode_free_list_mtx); 1126 continue; 1127 } 1128 1129 VNASSERT(vp->v_op != NULL, vp, 1130 ("vnlru_free: vnode already reclaimed.")); 1131 KASSERT((vp->v_iflag & VI_FREE) != 0, 1132 ("Removing vnode not on freelist")); 1133 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1134 ("Mangling active vnode")); 1135 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 1136 1137 /* 1138 * Don't recycle if our vnode is from different type 1139 * of mount point. Note that mp is type-safe, the 1140 * check does not reach unmapped address even if 1141 * vnode is reclaimed. 1142 * Don't recycle if we can't get the interlock without 1143 * blocking. 1144 */ 1145 if ((mnt_op != NULL && (mp = vp->v_mount) != NULL && 1146 mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) { 1147 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 1148 continue; 1149 } 1150 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 1151 vp, ("vp inconsistent on freelist")); 1152 1153 /* 1154 * The clear of VI_FREE prevents activation of the 1155 * vnode. There is no sense in putting the vnode on 1156 * the mount point active list, only to remove it 1157 * later during recycling. Inline the relevant part 1158 * of vholdl(), to avoid triggering assertions or 1159 * activating. 1160 */ 1161 freevnodes--; 1162 vp->v_iflag &= ~VI_FREE; 1163 VNODE_REFCOUNT_FENCE_REL(); 1164 refcount_acquire(&vp->v_holdcnt); 1165 1166 mtx_unlock(&vnode_free_list_mtx); 1167 VI_UNLOCK(vp); 1168 vtryrecycle(vp); 1169 /* 1170 * If the recycled succeeded this vdrop will actually free 1171 * the vnode. If not it will simply place it back on 1172 * the free list. 
1173 */ 1174 vdrop(vp); 1175 mtx_lock(&vnode_free_list_mtx); 1176 } 1177 } 1178 1179 void 1180 vnlru_free(int count, struct vfsops *mnt_op) 1181 { 1182 1183 mtx_lock(&vnode_free_list_mtx); 1184 vnlru_free_locked(count, mnt_op); 1185 mtx_unlock(&vnode_free_list_mtx); 1186 } 1187 1188 1189 /* XXX some names and initialization are bad for limits and watermarks. */ 1190 static int 1191 vspace(void) 1192 { 1193 int space; 1194 1195 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1196 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1197 vlowat = vhiwat / 2; 1198 if (numvnodes > desiredvnodes) 1199 return (0); 1200 space = desiredvnodes - numvnodes; 1201 if (freevnodes > wantfreevnodes) 1202 space += freevnodes - wantfreevnodes; 1203 return (space); 1204 } 1205 1206 static void 1207 vnlru_return_batch_locked(struct mount *mp) 1208 { 1209 struct vnode *vp; 1210 1211 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 1212 1213 if (mp->mnt_tmpfreevnodelistsize == 0) 1214 return; 1215 1216 TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) { 1217 VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp, 1218 ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist")); 1219 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 1220 } 1221 mtx_lock(&vnode_free_list_mtx); 1222 TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist); 1223 freevnodes += mp->mnt_tmpfreevnodelistsize; 1224 mtx_unlock(&vnode_free_list_mtx); 1225 mp->mnt_tmpfreevnodelistsize = 0; 1226 } 1227 1228 static void 1229 vnlru_return_batch(struct mount *mp) 1230 { 1231 1232 mtx_lock(&mp->mnt_listmtx); 1233 vnlru_return_batch_locked(mp); 1234 mtx_unlock(&mp->mnt_listmtx); 1235 } 1236 1237 static void 1238 vnlru_return_batches(struct vfsops *mnt_op) 1239 { 1240 struct mount *mp, *nmp; 1241 bool need_unbusy; 1242 1243 mtx_lock(&mountlist_mtx); 1244 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1245 need_unbusy = false; 1246 if (mnt_op != NULL && mp->mnt_op != mnt_op) 1247 goto next; 1248 if (mp->mnt_tmpfreevnodelistsize == 0) 1249 goto next; 1250 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) { 1251 vnlru_return_batch(mp); 1252 need_unbusy = true; 1253 mtx_lock(&mountlist_mtx); 1254 } 1255 next: 1256 nmp = TAILQ_NEXT(mp, mnt_list); 1257 if (need_unbusy) 1258 vfs_unbusy(mp); 1259 } 1260 mtx_unlock(&mountlist_mtx); 1261 } 1262 1263 /* 1264 * Attempt to recycle vnodes in a context that is always safe to block. 1265 * Calling vlrurecycle() from the bowels of filesystem code has some 1266 * interesting deadlock problems. 1267 */ 1268 static struct proc *vnlruproc; 1269 static int vnlruproc_sig; 1270 1271 static void 1272 vnlru_proc(void) 1273 { 1274 struct mount *mp, *nmp; 1275 unsigned long onumvnodes; 1276 int done, force, trigger, usevnodes, vsp; 1277 bool reclaim_nc_src; 1278 1279 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1280 SHUTDOWN_PRI_FIRST); 1281 1282 force = 0; 1283 for (;;) { 1284 kproc_suspend_check(vnlruproc); 1285 mtx_lock(&vnode_free_list_mtx); 1286 /* 1287 * If numvnodes is too large (due to desiredvnodes being 1288 * adjusted using its sysctl, or emergency growth), first 1289 * try to reduce it by discarding from the free list. 1290 */ 1291 if (numvnodes > desiredvnodes) 1292 vnlru_free_locked(numvnodes - desiredvnodes, NULL); 1293 /* 1294 * Sleep if the vnode cache is in a good state. This is 1295 * when it is not over-full and has space for about a 4% 1296 * or 9% expansion (by growing its size or inexcessively 1297 * reducing its free list). 
 * Otherwise, try to reclaim space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		vsp = vspace();
		if (vsp >= vlowat && force == 0) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		onumvnodes = numvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (numvnodes <= desiredvnodes)
			usevnodes = numvnodes - freevnodes;
		else
			usevnodes = numvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, reclaim_nc_src, trigger);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
		/*
		 * After becoming active to expand above low water, keep
		 * active until above high water.
		 */
		vsp = vspace();
		force = vsp < vhiwat;
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
1415 */ 1416 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1417 VOP_UNLOCK(vp, 0); 1418 CTR2(KTR_VFS, 1419 "%s: impossible to recycle, cannot start the write for %p", 1420 __func__, vp); 1421 return (EBUSY); 1422 } 1423 /* 1424 * If we got this far, we need to acquire the interlock and see if 1425 * anyone picked up this vnode from another list. If not, we will 1426 * mark it with DOOMED via vgonel() so that anyone who does find it 1427 * will skip over it. 1428 */ 1429 VI_LOCK(vp); 1430 if (vp->v_usecount) { 1431 VOP_UNLOCK(vp, 0); 1432 VI_UNLOCK(vp); 1433 vn_finished_write(vnmp); 1434 CTR2(KTR_VFS, 1435 "%s: impossible to recycle, %p is already referenced", 1436 __func__, vp); 1437 return (EBUSY); 1438 } 1439 if (!VN_IS_DOOMED(vp)) { 1440 counter_u64_add(recycles_count, 1); 1441 vgonel(vp); 1442 } 1443 VOP_UNLOCK(vp, 0); 1444 VI_UNLOCK(vp); 1445 vn_finished_write(vnmp); 1446 return (0); 1447 } 1448 1449 static void 1450 vcheckspace(void) 1451 { 1452 int vsp; 1453 1454 vsp = vspace(); 1455 if (vsp < vlowat && vnlruproc_sig == 0) { 1456 vnlruproc_sig = 1; 1457 wakeup(vnlruproc); 1458 } 1459 } 1460 1461 /* 1462 * Wait if necessary for space for a new vnode. 1463 */ 1464 static int 1465 getnewvnode_wait(int suspended) 1466 { 1467 1468 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1469 if (numvnodes >= desiredvnodes) { 1470 if (suspended) { 1471 /* 1472 * The file system is being suspended. We cannot 1473 * risk a deadlock here, so allow allocation of 1474 * another vnode even if this would give too many. 1475 */ 1476 return (0); 1477 } 1478 if (vnlruproc_sig == 0) { 1479 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1480 wakeup(vnlruproc); 1481 } 1482 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1483 "vlruwk", hz); 1484 } 1485 /* Post-adjust like the pre-adjust in getnewvnode(). */ 1486 if (numvnodes + 1 > desiredvnodes && freevnodes > 1) 1487 vnlru_free_locked(1, NULL); 1488 return (numvnodes >= desiredvnodes ? ENFILE : 0); 1489 } 1490 1491 /* 1492 * This hack is fragile, and probably not needed any more now that the 1493 * watermark handling works. 1494 */ 1495 void 1496 getnewvnode_reserve(u_int count) 1497 { 1498 struct thread *td; 1499 1500 /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */ 1501 /* XXX no longer so quick, but this part is not racy. */ 1502 mtx_lock(&vnode_free_list_mtx); 1503 if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes) 1504 vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes, 1505 freevnodes - wantfreevnodes), NULL); 1506 mtx_unlock(&vnode_free_list_mtx); 1507 1508 td = curthread; 1509 /* First try to be quick and racy. */ 1510 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1511 td->td_vp_reserv += count; 1512 vcheckspace(); /* XXX no longer so quick, but more racy */ 1513 return; 1514 } else 1515 atomic_subtract_long(&numvnodes, count); 1516 1517 mtx_lock(&vnode_free_list_mtx); 1518 while (count > 0) { 1519 if (getnewvnode_wait(0) == 0) { 1520 count--; 1521 td->td_vp_reserv++; 1522 atomic_add_long(&numvnodes, 1); 1523 } 1524 } 1525 vcheckspace(); 1526 mtx_unlock(&vnode_free_list_mtx); 1527 } 1528 1529 /* 1530 * This hack is fragile, especially if desiredvnodes or wantvnodes are 1531 * misconfgured or changed significantly. 
 * Reducing desiredvnodes below the reserved amount should cause bizarre
 * behaviour like reducing it below the number of active vnodes -- the
 * system will try to reduce numvnodes to match, but should fail, so the
 * subtraction below should not overflow.
 */
void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;
	static int cyclecount;
	int error __unused;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_free_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	counter_u64_add(vnodes_created, 1);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
1634 */ 1635 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1636 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1637 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1638 vp->v_type = VNON; 1639 vp->v_tag = tag; 1640 vp->v_op = vops; 1641 v_init_counters(vp); 1642 vp->v_bufobj.bo_ops = &buf_ops_bio; 1643 #ifdef DIAGNOSTIC 1644 if (mp == NULL && vops != &dead_vnodeops) 1645 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1646 #endif 1647 #ifdef MAC 1648 mac_vnode_init(vp); 1649 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1650 mac_vnode_associate_singlelabel(mp, vp); 1651 #endif 1652 if (mp != NULL) { 1653 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1654 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1655 vp->v_vflag |= VV_NOKNOTE; 1656 } 1657 1658 /* 1659 * For the filesystems which do not use vfs_hash_insert(), 1660 * still initialize v_hash to have vfs_hash_index() useful. 1661 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1662 * its own hashing. 1663 */ 1664 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1665 1666 *vpp = vp; 1667 return (0); 1668 } 1669 1670 static void 1671 freevnode(struct vnode *vp) 1672 { 1673 struct bufobj *bo; 1674 1675 /* 1676 * The vnode has been marked for destruction, so free it. 1677 * 1678 * The vnode will be returned to the zone where it will 1679 * normally remain until it is needed for another vnode. We 1680 * need to cleanup (or verify that the cleanup has already 1681 * been done) any residual data left from its current use 1682 * so as not to contaminate the freshly allocated vnode. 1683 */ 1684 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1685 atomic_subtract_long(&numvnodes, 1); 1686 bo = &vp->v_bufobj; 1687 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 1688 ("cleaned vnode still on the free list.")); 1689 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1690 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 1691 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1692 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1693 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1694 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1695 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1696 ("clean blk trie not empty")); 1697 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1698 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1699 ("dirty blk trie not empty")); 1700 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1701 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1702 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1703 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1704 ("Dangling rangelock waiters")); 1705 VI_UNLOCK(vp); 1706 #ifdef MAC 1707 mac_vnode_destroy(vp); 1708 #endif 1709 if (vp->v_pollinfo != NULL) { 1710 destroy_vpollinfo(vp->v_pollinfo); 1711 vp->v_pollinfo = NULL; 1712 } 1713 #ifdef INVARIANTS 1714 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 1715 vp->v_op = NULL; 1716 #endif 1717 vp->v_mountedhere = NULL; 1718 vp->v_unpcb = NULL; 1719 vp->v_rdev = NULL; 1720 vp->v_fifoinfo = NULL; 1721 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 1722 vp->v_irflag = 0; 1723 vp->v_iflag = 0; 1724 vp->v_vflag = 0; 1725 bo->bo_flag = 0; 1726 uma_zfree(vnode_zone, vp); 1727 } 1728 1729 /* 1730 * Delete from old mount point vnode list, if on one. 
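 * The vnode is removed from the mount's active list if it is on it,
 * taken off the mount's vnode list, has its v_mount cleared, and the
 * mount reference acquired in insmntque1() is released.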
1731 */ 1732 static void 1733 delmntque(struct vnode *vp) 1734 { 1735 struct mount *mp; 1736 1737 mp = vp->v_mount; 1738 if (mp == NULL) 1739 return; 1740 MNT_ILOCK(mp); 1741 VI_LOCK(vp); 1742 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1743 ("Active vnode list size %d > Vnode list size %d", 1744 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1745 if (vp->v_iflag & VI_ACTIVE) { 1746 vp->v_iflag &= ~VI_ACTIVE; 1747 mtx_lock(&mp->mnt_listmtx); 1748 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1749 mp->mnt_activevnodelistsize--; 1750 mtx_unlock(&mp->mnt_listmtx); 1751 } 1752 vp->v_mount = NULL; 1753 VI_UNLOCK(vp); 1754 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1755 ("bad mount point vnode list size")); 1756 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1757 mp->mnt_nvnodelistsize--; 1758 MNT_REL(mp); 1759 MNT_IUNLOCK(mp); 1760 } 1761 1762 static void 1763 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1764 { 1765 1766 vp->v_data = NULL; 1767 vp->v_op = &dead_vnodeops; 1768 vgone(vp); 1769 vput(vp); 1770 } 1771 1772 /* 1773 * Insert into list of vnodes for the new mount point, if available. 1774 */ 1775 int 1776 insmntque1(struct vnode *vp, struct mount *mp, 1777 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1778 { 1779 1780 KASSERT(vp->v_mount == NULL, 1781 ("insmntque: vnode already on per mount vnode list")); 1782 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1783 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1784 1785 /* 1786 * We acquire the vnode interlock early to ensure that the 1787 * vnode cannot be recycled by another process releasing a 1788 * holdcnt on it before we get it on both the vnode list 1789 * and the active vnode list. The mount mutex protects only 1790 * manipulation of the vnode list and the vnode freelist 1791 * mutex protects only manipulation of the active vnode list. 1792 * Hence the need to hold the vnode interlock throughout. 1793 */ 1794 MNT_ILOCK(mp); 1795 VI_LOCK(vp); 1796 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1797 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1798 mp->mnt_nvnodelistsize == 0)) && 1799 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1800 VI_UNLOCK(vp); 1801 MNT_IUNLOCK(mp); 1802 if (dtr != NULL) 1803 dtr(vp, dtr_arg); 1804 return (EBUSY); 1805 } 1806 vp->v_mount = mp; 1807 MNT_REF(mp); 1808 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1809 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1810 ("neg mount point vnode list size")); 1811 mp->mnt_nvnodelistsize++; 1812 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1813 ("Activating already active vnode")); 1814 vp->v_iflag |= VI_ACTIVE; 1815 mtx_lock(&mp->mnt_listmtx); 1816 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1817 mp->mnt_activevnodelistsize++; 1818 mtx_unlock(&mp->mnt_listmtx); 1819 VI_UNLOCK(vp); 1820 MNT_IUNLOCK(mp); 1821 return (0); 1822 } 1823 1824 int 1825 insmntque(struct vnode *vp, struct mount *mp) 1826 { 1827 1828 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1829 } 1830 1831 /* 1832 * Flush out and invalidate all buffers associated with a bufobj 1833 * Called with the underlying object locked. 
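 *
 * The flags select how much work is done: V_SAVE writes out dirty
 * buffers before invalidating them, V_CLEANONLY limits the flush to
 * the clean buffer list, and V_VMIO leaves the pages of the backing
 * VM object in place.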
1834 */ 1835 int 1836 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1837 { 1838 int error; 1839 1840 BO_LOCK(bo); 1841 if (flags & V_SAVE) { 1842 error = bufobj_wwait(bo, slpflag, slptimeo); 1843 if (error) { 1844 BO_UNLOCK(bo); 1845 return (error); 1846 } 1847 if (bo->bo_dirty.bv_cnt > 0) { 1848 BO_UNLOCK(bo); 1849 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1850 return (error); 1851 /* 1852 * XXX We could save a lock/unlock if this was only 1853 * enabled under INVARIANTS 1854 */ 1855 BO_LOCK(bo); 1856 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1857 panic("vinvalbuf: dirty bufs"); 1858 } 1859 } 1860 /* 1861 * If you alter this loop please notice that interlock is dropped and 1862 * reacquired in flushbuflist. Special care is needed to ensure that 1863 * no race conditions occur from this. 1864 */ 1865 do { 1866 error = flushbuflist(&bo->bo_clean, 1867 flags, bo, slpflag, slptimeo); 1868 if (error == 0 && !(flags & V_CLEANONLY)) 1869 error = flushbuflist(&bo->bo_dirty, 1870 flags, bo, slpflag, slptimeo); 1871 if (error != 0 && error != EAGAIN) { 1872 BO_UNLOCK(bo); 1873 return (error); 1874 } 1875 } while (error != 0); 1876 1877 /* 1878 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1879 * have write I/O in-progress but if there is a VM object then the 1880 * VM object can also have read-I/O in-progress. 1881 */ 1882 do { 1883 bufobj_wwait(bo, 0, 0); 1884 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1885 BO_UNLOCK(bo); 1886 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1887 BO_LOCK(bo); 1888 } 1889 } while (bo->bo_numoutput > 0); 1890 BO_UNLOCK(bo); 1891 1892 /* 1893 * Destroy the copy in the VM cache, too. 1894 */ 1895 if (bo->bo_object != NULL && 1896 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1897 VM_OBJECT_WLOCK(bo->bo_object); 1898 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1899 OBJPR_CLEANONLY : 0); 1900 VM_OBJECT_WUNLOCK(bo->bo_object); 1901 } 1902 1903 #ifdef INVARIANTS 1904 BO_LOCK(bo); 1905 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1906 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1907 bo->bo_clean.bv_cnt > 0)) 1908 panic("vinvalbuf: flush failed"); 1909 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1910 bo->bo_dirty.bv_cnt > 0) 1911 panic("vinvalbuf: flush dirty failed"); 1912 BO_UNLOCK(bo); 1913 #endif 1914 return (0); 1915 } 1916 1917 /* 1918 * Flush out and invalidate all buffers associated with a vnode. 1919 * Called with the underlying object locked. 1920 */ 1921 int 1922 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1923 { 1924 1925 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1926 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1927 if (vp->v_object != NULL && vp->v_object->handle != vp) 1928 return (0); 1929 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1930 } 1931 1932 /* 1933 * Flush out buffers on the specified list. 1934 * 1935 */ 1936 static int 1937 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1938 int slptimeo) 1939 { 1940 struct buf *bp, *nbp; 1941 int retval, error; 1942 daddr_t lblkno; 1943 b_xflags_t xflags; 1944 1945 ASSERT_BO_WLOCKED(bo); 1946 1947 retval = 0; 1948 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1949 /* 1950 * If we are flushing both V_NORMAL and V_ALT buffers then 1951 * do not skip any buffers. If we are flushing only V_NORMAL 1952 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 1953 * flushing only V_ALT buffers then skip buffers not marked 1954 * as BX_ALTDATA. 1955 */ 1956 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 1957 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 1958 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 1959 continue; 1960 } 1961 if (nbp != NULL) { 1962 lblkno = nbp->b_lblkno; 1963 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1964 } 1965 retval = EAGAIN; 1966 error = BUF_TIMELOCK(bp, 1967 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1968 "flushbuf", slpflag, slptimeo); 1969 if (error) { 1970 BO_LOCK(bo); 1971 return (error != ENOLCK ? error : EAGAIN); 1972 } 1973 KASSERT(bp->b_bufobj == bo, 1974 ("bp %p wrong b_bufobj %p should be %p", 1975 bp, bp->b_bufobj, bo)); 1976 /* 1977 * XXX Since there are no node locks for NFS, I 1978 * believe there is a slight chance that a delayed 1979 * write will occur while sleeping just above, so 1980 * check for it. 1981 */ 1982 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1983 (flags & V_SAVE)) { 1984 bremfree(bp); 1985 bp->b_flags |= B_ASYNC; 1986 bwrite(bp); 1987 BO_LOCK(bo); 1988 return (EAGAIN); /* XXX: why not loop ? */ 1989 } 1990 bremfree(bp); 1991 bp->b_flags |= (B_INVAL | B_RELBUF); 1992 bp->b_flags &= ~B_ASYNC; 1993 brelse(bp); 1994 BO_LOCK(bo); 1995 if (nbp == NULL) 1996 break; 1997 nbp = gbincore(bo, lblkno); 1998 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1999 != xflags) 2000 break; /* nbp invalid */ 2001 } 2002 return (retval); 2003 } 2004 2005 int 2006 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2007 { 2008 struct buf *bp; 2009 int error; 2010 daddr_t lblkno; 2011 2012 ASSERT_BO_LOCKED(bo); 2013 2014 for (lblkno = startn;;) { 2015 again: 2016 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2017 if (bp == NULL || bp->b_lblkno >= endn || 2018 bp->b_lblkno < startn) 2019 break; 2020 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2021 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2022 if (error != 0) { 2023 BO_RLOCK(bo); 2024 if (error == ENOLCK) 2025 goto again; 2026 return (error); 2027 } 2028 KASSERT(bp->b_bufobj == bo, 2029 ("bp %p wrong b_bufobj %p should be %p", 2030 bp, bp->b_bufobj, bo)); 2031 lblkno = bp->b_lblkno + 1; 2032 if ((bp->b_flags & B_MANAGED) == 0) 2033 bremfree(bp); 2034 bp->b_flags |= B_RELBUF; 2035 /* 2036 * In the VMIO case, use the B_NOREUSE flag to hint that the 2037 * pages backing each buffer in the range are unlikely to be 2038 * reused. Dirty buffers will have the hint applied once 2039 * they've been written. 2040 */ 2041 if ((bp->b_flags & B_VMIO) != 0) 2042 bp->b_flags |= B_NOREUSE; 2043 brelse(bp); 2044 BO_RLOCK(bo); 2045 } 2046 return (0); 2047 } 2048 2049 /* 2050 * Truncate a file's buffer and pages to a specified length. This 2051 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2052 * sync activity. 2053 */ 2054 int 2055 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2056 { 2057 struct buf *bp, *nbp; 2058 struct bufobj *bo; 2059 daddr_t startlbn; 2060 2061 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2062 vp, blksize, (uintmax_t)length); 2063 2064 /* 2065 * Round up to the *next* lbn. 
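 * As a worked example, with an assumed blksize of 16384 a truncation to
 * length 20000 yields startlbn = howmany(20000, 16384) = 2: block 1 still
 * holds the new end of file and is preserved, while blocks 2 and above
 * are invalidated below.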
2066 */ 2067 startlbn = howmany(length, blksize); 2068 2069 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2070 2071 bo = &vp->v_bufobj; 2072 restart_unlocked: 2073 BO_LOCK(bo); 2074 2075 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2076 ; 2077 2078 if (length > 0) { 2079 restartsync: 2080 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2081 if (bp->b_lblkno > 0) 2082 continue; 2083 /* 2084 * Since we hold the vnode lock this should only 2085 * fail if we're racing with the buf daemon. 2086 */ 2087 if (BUF_LOCK(bp, 2088 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2089 BO_LOCKPTR(bo)) == ENOLCK) 2090 goto restart_unlocked; 2091 2092 VNASSERT((bp->b_flags & B_DELWRI), vp, 2093 ("buf(%p) on dirty queue without DELWRI", bp)); 2094 2095 bremfree(bp); 2096 bawrite(bp); 2097 BO_LOCK(bo); 2098 goto restartsync; 2099 } 2100 } 2101 2102 bufobj_wwait(bo, 0, 0); 2103 BO_UNLOCK(bo); 2104 vnode_pager_setsize(vp, length); 2105 2106 return (0); 2107 } 2108 2109 /* 2110 * Invalidate the cached pages of a file's buffer within the range of block 2111 * numbers [startlbn, endlbn). 2112 */ 2113 void 2114 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2115 int blksize) 2116 { 2117 struct bufobj *bo; 2118 off_t start, end; 2119 2120 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2121 2122 start = blksize * startlbn; 2123 end = blksize * endlbn; 2124 2125 bo = &vp->v_bufobj; 2126 BO_LOCK(bo); 2127 MPASS(blksize == bo->bo_bsize); 2128 2129 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2130 ; 2131 2132 BO_UNLOCK(bo); 2133 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2134 } 2135 2136 static int 2137 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2138 daddr_t startlbn, daddr_t endlbn) 2139 { 2140 struct buf *bp, *nbp; 2141 bool anyfreed; 2142 2143 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2144 ASSERT_BO_LOCKED(bo); 2145 2146 do { 2147 anyfreed = false; 2148 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2149 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2150 continue; 2151 if (BUF_LOCK(bp, 2152 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2153 BO_LOCKPTR(bo)) == ENOLCK) { 2154 BO_LOCK(bo); 2155 return (EAGAIN); 2156 } 2157 2158 bremfree(bp); 2159 bp->b_flags |= B_INVAL | B_RELBUF; 2160 bp->b_flags &= ~B_ASYNC; 2161 brelse(bp); 2162 anyfreed = true; 2163 2164 BO_LOCK(bo); 2165 if (nbp != NULL && 2166 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2167 nbp->b_vp != vp || 2168 (nbp->b_flags & B_DELWRI) != 0)) 2169 return (EAGAIN); 2170 } 2171 2172 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2173 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2174 continue; 2175 if (BUF_LOCK(bp, 2176 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2177 BO_LOCKPTR(bo)) == ENOLCK) { 2178 BO_LOCK(bo); 2179 return (EAGAIN); 2180 } 2181 bremfree(bp); 2182 bp->b_flags |= B_INVAL | B_RELBUF; 2183 bp->b_flags &= ~B_ASYNC; 2184 brelse(bp); 2185 anyfreed = true; 2186 2187 BO_LOCK(bo); 2188 if (nbp != NULL && 2189 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2190 (nbp->b_vp != vp) || 2191 (nbp->b_flags & B_DELWRI) == 0)) 2192 return (EAGAIN); 2193 } 2194 } while (anyfreed); 2195 return (0); 2196 } 2197 2198 static void 2199 buf_vlist_remove(struct buf *bp) 2200 { 2201 struct bufv *bv; 2202 2203 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2204 ASSERT_BO_WLOCKED(bp->b_bufobj); 2205 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2206 (BX_VNDIRTY|BX_VNCLEAN), 2207 ("buf_vlist_remove: 
Buf %p is on two lists", bp)); 2208 if (bp->b_xflags & BX_VNDIRTY) 2209 bv = &bp->b_bufobj->bo_dirty; 2210 else 2211 bv = &bp->b_bufobj->bo_clean; 2212 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2213 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2214 bv->bv_cnt--; 2215 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2216 } 2217 2218 /* 2219 * Add the buffer to the sorted clean or dirty block list. 2220 * 2221 * NOTE: xflags is passed as a constant, optimizing this inline function! 2222 */ 2223 static void 2224 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2225 { 2226 struct bufv *bv; 2227 struct buf *n; 2228 int error; 2229 2230 ASSERT_BO_WLOCKED(bo); 2231 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2232 ("dead bo %p", bo)); 2233 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2234 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2235 bp->b_xflags |= xflags; 2236 if (xflags & BX_VNDIRTY) 2237 bv = &bo->bo_dirty; 2238 else 2239 bv = &bo->bo_clean; 2240 2241 /* 2242 * Keep the list ordered. Optimize empty list insertion. Assume 2243 * we tend to grow at the tail so lookup_le should usually be cheaper 2244 * than _ge. 2245 */ 2246 if (bv->bv_cnt == 0 || 2247 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2248 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2249 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2250 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2251 else 2252 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2253 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2254 if (error) 2255 panic("buf_vlist_add: Preallocated nodes insufficient."); 2256 bv->bv_cnt++; 2257 } 2258 2259 /* 2260 * Look up a buffer using the buffer tries. 2261 */ 2262 struct buf * 2263 gbincore(struct bufobj *bo, daddr_t lblkno) 2264 { 2265 struct buf *bp; 2266 2267 ASSERT_BO_LOCKED(bo); 2268 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2269 if (bp != NULL) 2270 return (bp); 2271 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2272 } 2273 2274 /* 2275 * Associate a buffer with a vnode. 2276 */ 2277 void 2278 bgetvp(struct vnode *vp, struct buf *bp) 2279 { 2280 struct bufobj *bo; 2281 2282 bo = &vp->v_bufobj; 2283 ASSERT_BO_WLOCKED(bo); 2284 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2285 2286 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2287 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2288 ("bgetvp: bp already attached! %p", bp)); 2289 2290 vhold(vp); 2291 bp->b_vp = vp; 2292 bp->b_bufobj = bo; 2293 /* 2294 * Insert onto list for new vnode. 2295 */ 2296 buf_vlist_add(bp, bo, BX_VNCLEAN); 2297 } 2298 2299 /* 2300 * Disassociate a buffer from a vnode. 2301 */ 2302 void 2303 brelvp(struct buf *bp) 2304 { 2305 struct bufobj *bo; 2306 struct vnode *vp; 2307 2308 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2309 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2310 2311 /* 2312 * Delete from old vnode list, if on one. 
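 * brelvp() is the counterpart of bgetvp() above: it takes the buffer off
 * whichever clean/dirty list it is on, and the vdrop() at the end releases
 * the hold that bgetvp() acquired with vhold().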
2313 */ 2314 vp = bp->b_vp; /* XXX */ 2315 bo = bp->b_bufobj; 2316 BO_LOCK(bo); 2317 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2318 buf_vlist_remove(bp); 2319 else 2320 panic("brelvp: Buffer %p not on queue.", bp); 2321 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2322 bo->bo_flag &= ~BO_ONWORKLST; 2323 mtx_lock(&sync_mtx); 2324 LIST_REMOVE(bo, bo_synclist); 2325 syncer_worklist_len--; 2326 mtx_unlock(&sync_mtx); 2327 } 2328 bp->b_vp = NULL; 2329 bp->b_bufobj = NULL; 2330 BO_UNLOCK(bo); 2331 vdrop(vp); 2332 } 2333 2334 /* 2335 * Add an item to the syncer work queue. 2336 */ 2337 static void 2338 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2339 { 2340 int slot; 2341 2342 ASSERT_BO_WLOCKED(bo); 2343 2344 mtx_lock(&sync_mtx); 2345 if (bo->bo_flag & BO_ONWORKLST) 2346 LIST_REMOVE(bo, bo_synclist); 2347 else { 2348 bo->bo_flag |= BO_ONWORKLST; 2349 syncer_worklist_len++; 2350 } 2351 2352 if (delay > syncer_maxdelay - 2) 2353 delay = syncer_maxdelay - 2; 2354 slot = (syncer_delayno + delay) & syncer_mask; 2355 2356 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2357 mtx_unlock(&sync_mtx); 2358 } 2359 2360 static int 2361 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2362 { 2363 int error, len; 2364 2365 mtx_lock(&sync_mtx); 2366 len = syncer_worklist_len - sync_vnode_count; 2367 mtx_unlock(&sync_mtx); 2368 error = SYSCTL_OUT(req, &len, sizeof(len)); 2369 return (error); 2370 } 2371 2372 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2373 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2374 2375 static struct proc *updateproc; 2376 static void sched_sync(void); 2377 static struct kproc_desc up_kp = { 2378 "syncer", 2379 sched_sync, 2380 &updateproc 2381 }; 2382 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2383 2384 static int 2385 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2386 { 2387 struct vnode *vp; 2388 struct mount *mp; 2389 2390 *bo = LIST_FIRST(slp); 2391 if (*bo == NULL) 2392 return (0); 2393 vp = bo2vnode(*bo); 2394 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2395 return (1); 2396 /* 2397 * We use vhold in case the vnode does not 2398 * successfully sync. vhold prevents the vnode from 2399 * going away when we unlock the sync_mtx so that 2400 * we can acquire the vnode interlock. 2401 */ 2402 vholdl(vp); 2403 mtx_unlock(&sync_mtx); 2404 VI_UNLOCK(vp); 2405 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2406 vdrop(vp); 2407 mtx_lock(&sync_mtx); 2408 return (*bo == LIST_FIRST(slp)); 2409 } 2410 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2411 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2412 VOP_UNLOCK(vp, 0); 2413 vn_finished_write(mp); 2414 BO_LOCK(*bo); 2415 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2416 /* 2417 * Put us back on the worklist. The worklist 2418 * routine will remove us from our current 2419 * position and then add us back in at a later 2420 * position. 2421 */ 2422 vn_syncer_add_to_worklist(*bo, syncdelay); 2423 } 2424 BO_UNLOCK(*bo); 2425 vdrop(vp); 2426 mtx_lock(&sync_mtx); 2427 return (0); 2428 } 2429 2430 static int first_printf = 1; 2431 2432 /* 2433 * System filesystem synchronizer daemon. 
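 *
 * The worklist is a wheel of syncer_maxdelay one-second buckets indexed
 * by syncer_delayno.  As an illustration, vn_syncer_add_to_worklist(bo, 30)
 * places the bufobj 30 buckets ahead of the hand; with the loop below
 * advancing one bucket per second, that vnode is synced roughly 30 seconds
 * later (sooner when rushjob or a shutdown speeds the wheel up).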
2434 */ 2435 static void 2436 sched_sync(void) 2437 { 2438 struct synclist *next, *slp; 2439 struct bufobj *bo; 2440 long starttime; 2441 struct thread *td = curthread; 2442 int last_work_seen; 2443 int net_worklist_len; 2444 int syncer_final_iter; 2445 int error; 2446 2447 last_work_seen = 0; 2448 syncer_final_iter = 0; 2449 syncer_state = SYNCER_RUNNING; 2450 starttime = time_uptime; 2451 td->td_pflags |= TDP_NORUNNINGBUF; 2452 2453 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2454 SHUTDOWN_PRI_LAST); 2455 2456 mtx_lock(&sync_mtx); 2457 for (;;) { 2458 if (syncer_state == SYNCER_FINAL_DELAY && 2459 syncer_final_iter == 0) { 2460 mtx_unlock(&sync_mtx); 2461 kproc_suspend_check(td->td_proc); 2462 mtx_lock(&sync_mtx); 2463 } 2464 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2465 if (syncer_state != SYNCER_RUNNING && 2466 starttime != time_uptime) { 2467 if (first_printf) { 2468 printf("\nSyncing disks, vnodes remaining... "); 2469 first_printf = 0; 2470 } 2471 printf("%d ", net_worklist_len); 2472 } 2473 starttime = time_uptime; 2474 2475 /* 2476 * Push files whose dirty time has expired. Be careful 2477 * of interrupt race on slp queue. 2478 * 2479 * Skip over empty worklist slots when shutting down. 2480 */ 2481 do { 2482 slp = &syncer_workitem_pending[syncer_delayno]; 2483 syncer_delayno += 1; 2484 if (syncer_delayno == syncer_maxdelay) 2485 syncer_delayno = 0; 2486 next = &syncer_workitem_pending[syncer_delayno]; 2487 /* 2488 * If the worklist has wrapped since the 2489 * it was emptied of all but syncer vnodes, 2490 * switch to the FINAL_DELAY state and run 2491 * for one more second. 2492 */ 2493 if (syncer_state == SYNCER_SHUTTING_DOWN && 2494 net_worklist_len == 0 && 2495 last_work_seen == syncer_delayno) { 2496 syncer_state = SYNCER_FINAL_DELAY; 2497 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2498 } 2499 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2500 syncer_worklist_len > 0); 2501 2502 /* 2503 * Keep track of the last time there was anything 2504 * on the worklist other than syncer vnodes. 2505 * Return to the SHUTTING_DOWN state if any 2506 * new work appears. 2507 */ 2508 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2509 last_work_seen = syncer_delayno; 2510 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2511 syncer_state = SYNCER_SHUTTING_DOWN; 2512 while (!LIST_EMPTY(slp)) { 2513 error = sync_vnode(slp, &bo, td); 2514 if (error == 1) { 2515 LIST_REMOVE(bo, bo_synclist); 2516 LIST_INSERT_HEAD(next, bo, bo_synclist); 2517 continue; 2518 } 2519 2520 if (first_printf == 0) { 2521 /* 2522 * Drop the sync mutex, because some watchdog 2523 * drivers need to sleep while patting 2524 */ 2525 mtx_unlock(&sync_mtx); 2526 wdog_kern_pat(WD_LASTVAL); 2527 mtx_lock(&sync_mtx); 2528 } 2529 2530 } 2531 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2532 syncer_final_iter--; 2533 /* 2534 * The variable rushjob allows the kernel to speed up the 2535 * processing of the filesystem syncer process. A rushjob 2536 * value of N tells the filesystem syncer to process the next 2537 * N seconds worth of work on its queue ASAP. Currently rushjob 2538 * is used by the soft update code to speed up the filesystem 2539 * syncer process when the incore state is getting so far 2540 * ahead of the disk that the kernel memory pool is being 2541 * threatened with exhaustion. 
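 *
 * Concretely: each speedup_syncer() call below bumps rushjob by one (capped
 * at syncdelay / 2), and every unit of rushjob makes this loop skip one
 * sleep, so a rushjob of N advances the wheel N extra buckets and pushes
 * roughly N seconds worth of queued work out immediately.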
2542 */ 2543 if (rushjob > 0) { 2544 rushjob -= 1; 2545 continue; 2546 } 2547 /* 2548 * Just sleep for a short period of time between 2549 * iterations when shutting down to allow some I/O 2550 * to happen. 2551 * 2552 * If it has taken us less than a second to process the 2553 * current work, then wait. Otherwise start right over 2554 * again. We can still lose time if any single round 2555 * takes more than two seconds, but it does not really 2556 * matter as we are just trying to generally pace the 2557 * filesystem activity. 2558 */ 2559 if (syncer_state != SYNCER_RUNNING || 2560 time_uptime == starttime) { 2561 thread_lock(td); 2562 sched_prio(td, PPAUSE); 2563 thread_unlock(td); 2564 } 2565 if (syncer_state != SYNCER_RUNNING) 2566 cv_timedwait(&sync_wakeup, &sync_mtx, 2567 hz / SYNCER_SHUTDOWN_SPEEDUP); 2568 else if (time_uptime == starttime) 2569 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2570 } 2571 } 2572 2573 /* 2574 * Request the syncer daemon to speed up its work. 2575 * We never push it to speed up more than half of its 2576 * normal turn time, otherwise it could take over the cpu. 2577 */ 2578 int 2579 speedup_syncer(void) 2580 { 2581 int ret = 0; 2582 2583 mtx_lock(&sync_mtx); 2584 if (rushjob < syncdelay / 2) { 2585 rushjob += 1; 2586 stat_rush_requests += 1; 2587 ret = 1; 2588 } 2589 mtx_unlock(&sync_mtx); 2590 cv_broadcast(&sync_wakeup); 2591 return (ret); 2592 } 2593 2594 /* 2595 * Tell the syncer to speed up its work and run though its work 2596 * list several times, then tell it to shut down. 2597 */ 2598 static void 2599 syncer_shutdown(void *arg, int howto) 2600 { 2601 2602 if (howto & RB_NOSYNC) 2603 return; 2604 mtx_lock(&sync_mtx); 2605 syncer_state = SYNCER_SHUTTING_DOWN; 2606 rushjob = 0; 2607 mtx_unlock(&sync_mtx); 2608 cv_broadcast(&sync_wakeup); 2609 kproc_shutdown(arg, howto); 2610 } 2611 2612 void 2613 syncer_suspend(void) 2614 { 2615 2616 syncer_shutdown(updateproc, 0); 2617 } 2618 2619 void 2620 syncer_resume(void) 2621 { 2622 2623 mtx_lock(&sync_mtx); 2624 first_printf = 1; 2625 syncer_state = SYNCER_RUNNING; 2626 mtx_unlock(&sync_mtx); 2627 cv_broadcast(&sync_wakeup); 2628 kproc_resume(updateproc); 2629 } 2630 2631 /* 2632 * Reassign a buffer from one vnode to another. 2633 * Used to assign file specific control information 2634 * (indirect blocks) to the vnode to which they belong. 2635 */ 2636 void 2637 reassignbuf(struct buf *bp) 2638 { 2639 struct vnode *vp; 2640 struct bufobj *bo; 2641 int delay; 2642 #ifdef INVARIANTS 2643 struct bufv *bv; 2644 #endif 2645 2646 vp = bp->b_vp; 2647 bo = bp->b_bufobj; 2648 ++reassignbufcalls; 2649 2650 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2651 bp, bp->b_vp, bp->b_flags); 2652 /* 2653 * B_PAGING flagged buffers cannot be reassigned because their vp 2654 * is not fully linked in. 2655 */ 2656 if (bp->b_flags & B_PAGING) 2657 panic("cannot reassign paging buffer"); 2658 2659 /* 2660 * Delete from old vnode list, if on one. 2661 */ 2662 BO_LOCK(bo); 2663 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2664 buf_vlist_remove(bp); 2665 else 2666 panic("reassignbuf: Buffer %p not on queue.", bp); 2667 /* 2668 * If dirty, put on list of dirty buffers; otherwise insert onto list 2669 * of clean buffers. 
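 * (For example, bdirty() -- and therefore bdwrite() -- sets B_DELWRI and
 * calls reassignbuf(), which moves the buffer to the dirty list and, if
 * necessary, queues the vnode on the syncer worklist with a delay chosen
 * from filedelay/dirdelay/metadelay below; bundirty() later reassigns it
 * back to the clean list.)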
2670 */ 2671 if (bp->b_flags & B_DELWRI) { 2672 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2673 switch (vp->v_type) { 2674 case VDIR: 2675 delay = dirdelay; 2676 break; 2677 case VCHR: 2678 delay = metadelay; 2679 break; 2680 default: 2681 delay = filedelay; 2682 } 2683 vn_syncer_add_to_worklist(bo, delay); 2684 } 2685 buf_vlist_add(bp, bo, BX_VNDIRTY); 2686 } else { 2687 buf_vlist_add(bp, bo, BX_VNCLEAN); 2688 2689 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2690 mtx_lock(&sync_mtx); 2691 LIST_REMOVE(bo, bo_synclist); 2692 syncer_worklist_len--; 2693 mtx_unlock(&sync_mtx); 2694 bo->bo_flag &= ~BO_ONWORKLST; 2695 } 2696 } 2697 #ifdef INVARIANTS 2698 bv = &bo->bo_clean; 2699 bp = TAILQ_FIRST(&bv->bv_hd); 2700 KASSERT(bp == NULL || bp->b_bufobj == bo, 2701 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2702 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2703 KASSERT(bp == NULL || bp->b_bufobj == bo, 2704 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2705 bv = &bo->bo_dirty; 2706 bp = TAILQ_FIRST(&bv->bv_hd); 2707 KASSERT(bp == NULL || bp->b_bufobj == bo, 2708 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2709 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2710 KASSERT(bp == NULL || bp->b_bufobj == bo, 2711 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2712 #endif 2713 BO_UNLOCK(bo); 2714 } 2715 2716 static void 2717 v_init_counters(struct vnode *vp) 2718 { 2719 2720 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2721 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2722 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2723 2724 refcount_init(&vp->v_holdcnt, 1); 2725 refcount_init(&vp->v_usecount, 1); 2726 } 2727 2728 /* 2729 * Increment si_usecount of the associated device, if any. 2730 */ 2731 static void 2732 v_incr_devcount(struct vnode *vp) 2733 { 2734 2735 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2736 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2737 dev_lock(); 2738 vp->v_rdev->si_usecount++; 2739 dev_unlock(); 2740 } 2741 } 2742 2743 /* 2744 * Decrement si_usecount of the associated device, if any. 2745 */ 2746 static void 2747 v_decr_devcount(struct vnode *vp) 2748 { 2749 2750 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2751 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2752 dev_lock(); 2753 vp->v_rdev->si_usecount--; 2754 dev_unlock(); 2755 } 2756 } 2757 2758 /* 2759 * Grab a particular vnode from the free list, increment its 2760 * reference count and lock it. VIRF_DOOMED is set if the vnode 2761 * is being destroyed. Only callers who specify LK_RETRY will 2762 * see doomed vnodes. If inactive processing was delayed in 2763 * vput try to do it here. 2764 * 2765 * Both holdcnt and usecount can be manipulated using atomics without holding 2766 * any locks except in these cases which require the vnode interlock: 2767 * holdcnt: 1->0 and 0->1 2768 * usecount: 0->1 2769 * 2770 * usecount is permitted to transition 1->0 without the interlock because 2771 * vnode is kept live by holdcnt. 
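 *
 * A typical lockless-lookup consumer therefore splits the acquisition in
 * two steps (illustrative sketch):
 *
 *	vs = vget_prep(vp);	/* secure liveness, no vnode lock taken */
 *	... drop whatever stabilized vp, e.g. a hash bucket lock ...
 *	error = vget_finish(vp, LK_EXCLUSIVE, vs);
 *
 * while plain vget() below performs both steps at once for callers that
 * do not need the split.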
2772 */ 2773 static enum vgetstate __always_inline 2774 _vget_prep(struct vnode *vp, bool interlock) 2775 { 2776 enum vgetstate vs; 2777 2778 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2779 vs = VGET_USECOUNT; 2780 } else { 2781 if (interlock) 2782 vholdl(vp); 2783 else 2784 vhold(vp); 2785 vs = VGET_HOLDCNT; 2786 } 2787 return (vs); 2788 } 2789 2790 enum vgetstate 2791 vget_prep(struct vnode *vp) 2792 { 2793 2794 return (_vget_prep(vp, false)); 2795 } 2796 2797 int 2798 vget(struct vnode *vp, int flags, struct thread *td) 2799 { 2800 enum vgetstate vs; 2801 2802 MPASS(td == curthread); 2803 2804 vs = _vget_prep(vp, (flags & LK_INTERLOCK) != 0); 2805 return (vget_finish(vp, flags, vs)); 2806 } 2807 2808 int 2809 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2810 { 2811 int error, oweinact; 2812 2813 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2814 ("%s: invalid lock operation", __func__)); 2815 2816 if ((flags & LK_INTERLOCK) != 0) 2817 ASSERT_VI_LOCKED(vp, __func__); 2818 else 2819 ASSERT_VI_UNLOCKED(vp, __func__); 2820 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2821 if (vs == VGET_USECOUNT) { 2822 VNASSERT(vp->v_usecount > 0, vp, 2823 ("%s: vnode without usecount when VGET_USECOUNT was passed", 2824 __func__)); 2825 } 2826 2827 if ((error = vn_lock(vp, flags)) != 0) { 2828 if (vs == VGET_USECOUNT) 2829 vrele(vp); 2830 else 2831 vdrop(vp); 2832 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2833 vp); 2834 return (error); 2835 } 2836 2837 if (vs == VGET_USECOUNT) { 2838 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2839 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2840 return (0); 2841 } 2842 2843 /* 2844 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2845 * the vnode around. Otherwise someone else lended their hold count and 2846 * we have to drop ours. 2847 */ 2848 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2849 #ifdef INVARIANTS 2850 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1; 2851 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2852 #else 2853 refcount_release(&vp->v_holdcnt); 2854 #endif 2855 VNODE_REFCOUNT_FENCE_ACQ(); 2856 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2857 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2858 return (0); 2859 } 2860 2861 /* 2862 * We don't guarantee that any particular close will 2863 * trigger inactive processing so just make a best effort 2864 * here at preventing a reference to a removed file. If 2865 * we don't succeed no harm is done. 2866 * 2867 * Upgrade our holdcnt to a usecount. 2868 */ 2869 VI_LOCK(vp); 2870 /* 2871 * See the previous section. By the time we get here we may find 2872 * ourselves in the same spot. 
2873 */ 2874 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2875 #ifdef INVARIANTS 2876 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1; 2877 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2878 #else 2879 refcount_release(&vp->v_holdcnt); 2880 #endif 2881 VNODE_REFCOUNT_FENCE_ACQ(); 2882 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2883 ("%s: vnode with usecount and VI_OWEINACT set", 2884 __func__)); 2885 VI_UNLOCK(vp); 2886 return (0); 2887 } 2888 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2889 oweinact = 0; 2890 } else { 2891 oweinact = 1; 2892 vp->v_iflag &= ~VI_OWEINACT; 2893 VNODE_REFCOUNT_FENCE_REL(); 2894 } 2895 v_incr_devcount(vp); 2896 refcount_acquire(&vp->v_usecount); 2897 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2898 (flags & LK_NOWAIT) == 0) 2899 vinactive(vp, curthread); 2900 VI_UNLOCK(vp); 2901 return (0); 2902 } 2903 2904 /* 2905 * Increase the reference (use) and hold count of a vnode. 2906 * This will also remove the vnode from the free list if it is presently free. 2907 */ 2908 void 2909 vref(struct vnode *vp) 2910 { 2911 2912 ASSERT_VI_UNLOCKED(vp, __func__); 2913 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2914 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2915 VNODE_REFCOUNT_FENCE_ACQ(); 2916 VNASSERT(vp->v_holdcnt > 0, vp, 2917 ("%s: active vnode not held", __func__)); 2918 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2919 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2920 return; 2921 } 2922 VI_LOCK(vp); 2923 vrefl(vp); 2924 VI_UNLOCK(vp); 2925 } 2926 2927 void 2928 vrefl(struct vnode *vp) 2929 { 2930 2931 ASSERT_VI_LOCKED(vp, __func__); 2932 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2933 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2934 VNODE_REFCOUNT_FENCE_ACQ(); 2935 VNASSERT(vp->v_holdcnt > 0, vp, 2936 ("%s: active vnode not held", __func__)); 2937 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2938 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2939 return; 2940 } 2941 vholdl(vp); 2942 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2943 vp->v_iflag &= ~VI_OWEINACT; 2944 VNODE_REFCOUNT_FENCE_REL(); 2945 } 2946 v_incr_devcount(vp); 2947 refcount_acquire(&vp->v_usecount); 2948 } 2949 2950 void 2951 vrefact(struct vnode *vp) 2952 { 2953 2954 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2955 #ifdef INVARIANTS 2956 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2957 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2958 #else 2959 refcount_acquire(&vp->v_usecount); 2960 #endif 2961 } 2962 2963 /* 2964 * Return reference count of a vnode. 2965 * 2966 * The results of this call are only guaranteed when some mechanism is used to 2967 * stop other processes from gaining references to the vnode. This may be the 2968 * case if the caller holds the only reference. This is also useful when stale 2969 * data is acceptable as race conditions may be accounted for by some other 2970 * means. 2971 */ 2972 int 2973 vrefcnt(struct vnode *vp) 2974 { 2975 2976 return (vp->v_usecount); 2977 } 2978 2979 enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; 2980 2981 /* 2982 * Decrement the use and hold counts for a vnode. 2983 * 2984 * See an explanation near vget() as to why atomic operation is safe. 
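 *
 * The public wrappers below differ only in the lock state they expect
 * (sketch):
 *
 *	vrele(vp);	/* vp unlocked; may lock it to run VOP_INACTIVE() */
 *	vput(vp);	/* vp locked on entry, unlocked on return */
 *	vunref(vp);	/* vp locked on entry, still locked on return */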
2985 */ 2986 static void 2987 vputx(struct vnode *vp, enum vputx_op func) 2988 { 2989 int error; 2990 2991 KASSERT(vp != NULL, ("vputx: null vp")); 2992 if (func == VPUTX_VUNREF) 2993 ASSERT_VOP_LOCKED(vp, "vunref"); 2994 ASSERT_VI_UNLOCKED(vp, __func__); 2995 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2996 ("%s: wrong ref counts", __func__)); 2997 2998 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2999 3000 /* 3001 * We want to hold the vnode until the inactive finishes to 3002 * prevent vgone() races. We drop the use count here and the 3003 * hold count below when we're done. 3004 * 3005 * If we release the last usecount we take ownership of the hold 3006 * count which provides liveness of the vnode, in which case we 3007 * have to vdrop. 3008 */ 3009 if (!refcount_release(&vp->v_usecount)) 3010 return; 3011 VI_LOCK(vp); 3012 v_decr_devcount(vp); 3013 /* 3014 * By the time we got here someone else might have transitioned 3015 * the count back to > 0. 3016 */ 3017 if (vp->v_usecount > 0) { 3018 vdropl(vp); 3019 return; 3020 } 3021 if (vp->v_iflag & VI_DOINGINACT) { 3022 vdropl(vp); 3023 return; 3024 } 3025 3026 /* 3027 * Check if the fs wants to perform inactive processing. Note we 3028 * may be only holding the interlock, in which case it is possible 3029 * someone else called vgone on the vnode and ->v_data is now NULL. 3030 * Since vgone performs inactive on its own there is nothing to do 3031 * here but to drop our hold count. 3032 */ 3033 if (__predict_false(VN_IS_DOOMED(vp)) || 3034 VOP_NEED_INACTIVE(vp) == 0) { 3035 vdropl(vp); 3036 return; 3037 } 3038 3039 /* 3040 * We must call VOP_INACTIVE with the node locked. Mark 3041 * as VI_DOINGINACT to avoid recursion. 3042 */ 3043 vp->v_iflag |= VI_OWEINACT; 3044 switch (func) { 3045 case VPUTX_VRELE: 3046 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3047 VI_LOCK(vp); 3048 break; 3049 case VPUTX_VPUT: 3050 error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT); 3051 VI_LOCK(vp); 3052 break; 3053 case VPUTX_VUNREF: 3054 error = 0; 3055 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3056 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3057 VI_LOCK(vp); 3058 } 3059 break; 3060 } 3061 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 3062 ("vnode with usecount and VI_OWEINACT set")); 3063 if (error == 0) { 3064 if (vp->v_iflag & VI_OWEINACT) 3065 vinactive(vp, curthread); 3066 if (func != VPUTX_VUNREF) 3067 VOP_UNLOCK(vp, 0); 3068 } 3069 vdropl(vp); 3070 } 3071 3072 /* 3073 * Vnode put/release. 3074 * If count drops to zero, call inactive routine and return to freelist. 3075 */ 3076 void 3077 vrele(struct vnode *vp) 3078 { 3079 3080 vputx(vp, VPUTX_VRELE); 3081 } 3082 3083 /* 3084 * Release an already locked vnode. This give the same effects as 3085 * unlock+vrele(), but takes less time and avoids releasing and 3086 * re-aquiring the lock (as vrele() acquires the lock internally.) 3087 * 3088 * It is an invariant that all VOP_* calls operate on a held vnode. 3089 * We may be only having an implicit hold stemming from our usecount, 3090 * which we are about to release. If we unlock the vnode afterwards we 3091 * open a time window where someone else dropped the last usecount and 3092 * proceeded to free the vnode before our unlock finished. For this 3093 * reason we unlock the vnode early. This is a little bit wasteful as 3094 * it may be the vnode is exclusively locked and inactive processing is 3095 * needed, in which case we are adding work. 
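 *
 * (The alternative ordering -- vputx() first, VOP_UNLOCK() afterwards --
 * would unlock a vnode on which we may no longer hold any reference at
 * all, which is exactly the window described above.)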
3096 */ 3097 void 3098 vput(struct vnode *vp) 3099 { 3100 3101 VOP_UNLOCK(vp, 0); 3102 vputx(vp, VPUTX_VPUT); 3103 } 3104 3105 /* 3106 * Release an exclusively locked vnode. Do not unlock the vnode lock. 3107 */ 3108 void 3109 vunref(struct vnode *vp) 3110 { 3111 3112 vputx(vp, VPUTX_VUNREF); 3113 } 3114 3115 /* 3116 * Increase the hold count and activate if this is the first reference. 3117 */ 3118 static void 3119 vhold_activate(struct vnode *vp) 3120 { 3121 struct mount *mp; 3122 3123 ASSERT_VI_LOCKED(vp, __func__); 3124 VNASSERT(vp->v_holdcnt == 0, vp, 3125 ("%s: wrong hold count", __func__)); 3126 VNASSERT(vp->v_op != NULL, vp, 3127 ("%s: vnode already reclaimed.", __func__)); 3128 /* 3129 * Remove a vnode from the free list, mark it as in use, 3130 * and put it on the active list. 3131 */ 3132 VNASSERT(vp->v_mount != NULL, vp, 3133 ("_vhold: vnode not on per mount vnode list")); 3134 mp = vp->v_mount; 3135 mtx_lock(&mp->mnt_listmtx); 3136 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 3137 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3138 mp->mnt_tmpfreevnodelistsize--; 3139 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 3140 } else { 3141 mtx_lock(&vnode_free_list_mtx); 3142 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 3143 freevnodes--; 3144 mtx_unlock(&vnode_free_list_mtx); 3145 } 3146 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 3147 ("Activating already active vnode")); 3148 vp->v_iflag &= ~VI_FREE; 3149 vp->v_iflag |= VI_ACTIVE; 3150 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 3151 mp->mnt_activevnodelistsize++; 3152 mtx_unlock(&mp->mnt_listmtx); 3153 refcount_acquire(&vp->v_holdcnt); 3154 } 3155 3156 void 3157 vhold(struct vnode *vp) 3158 { 3159 3160 ASSERT_VI_UNLOCKED(vp, __func__); 3161 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3162 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 3163 VNODE_REFCOUNT_FENCE_ACQ(); 3164 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3165 ("vhold: vnode with holdcnt is free")); 3166 return; 3167 } 3168 VI_LOCK(vp); 3169 vholdl(vp); 3170 VI_UNLOCK(vp); 3171 } 3172 3173 void 3174 vholdl(struct vnode *vp) 3175 { 3176 3177 ASSERT_VI_LOCKED(vp, __func__); 3178 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3179 if ((vp->v_iflag & VI_FREE) == 0) { 3180 refcount_acquire(&vp->v_holdcnt); 3181 return; 3182 } 3183 vhold_activate(vp); 3184 } 3185 3186 void 3187 vholdnz(struct vnode *vp) 3188 { 3189 3190 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3191 #ifdef INVARIANTS 3192 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3193 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 3194 #else 3195 atomic_add_int(&vp->v_holdcnt, 1); 3196 #endif 3197 } 3198 3199 /* 3200 * Drop the hold count of the vnode. If this is the last reference to 3201 * the vnode we place it on the free list unless it has been vgone'd 3202 * (marked VIRF_DOOMED) in which case we will free it. 3203 * 3204 * Because the vnode vm object keeps a hold reference on the vnode if 3205 * there is at least one resident non-cached page, the vnode cannot 3206 * leave the active list without the page cleanup done. 3207 */ 3208 static void 3209 vdrop_deactivate(struct vnode *vp) 3210 { 3211 struct mount *mp; 3212 3213 ASSERT_VI_LOCKED(vp, __func__); 3214 /* 3215 * Mark a vnode as free: remove it from its active list 3216 * and put it up for recycling on the freelist. 
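 * The vnode first lands on the per-mount mnt_tmpfreevnodelist; once that
 * batch reaches mnt_free_list_batch entries it is handed back to the
 * global free list in one go (vnlru_return_batch_locked() below), so the
 * global vnode_free_list_mtx is not taken for every single vdrop.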
3217 */ 3218 VNASSERT(!VN_IS_DOOMED(vp), vp, 3219 ("vdrop: returning doomed vnode")); 3220 VNASSERT(vp->v_op != NULL, vp, 3221 ("vdrop: vnode already reclaimed.")); 3222 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3223 ("vnode already free")); 3224 VNASSERT(vp->v_holdcnt == 0, vp, 3225 ("vdrop: freeing when we shouldn't")); 3226 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3227 mp = vp->v_mount; 3228 mtx_lock(&mp->mnt_listmtx); 3229 if (vp->v_iflag & VI_ACTIVE) { 3230 vp->v_iflag &= ~VI_ACTIVE; 3231 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 3232 mp->mnt_activevnodelistsize--; 3233 } 3234 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3235 mp->mnt_tmpfreevnodelistsize++; 3236 vp->v_iflag |= VI_FREE; 3237 vp->v_mflag |= VMP_TMPMNTFREELIST; 3238 VI_UNLOCK(vp); 3239 if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch) 3240 vnlru_return_batch_locked(mp); 3241 mtx_unlock(&mp->mnt_listmtx); 3242 } else { 3243 VI_UNLOCK(vp); 3244 counter_u64_add(free_owe_inact, 1); 3245 } 3246 } 3247 3248 void 3249 vdrop(struct vnode *vp) 3250 { 3251 3252 ASSERT_VI_UNLOCKED(vp, __func__); 3253 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3254 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3255 return; 3256 VI_LOCK(vp); 3257 vdropl(vp); 3258 } 3259 3260 void 3261 vdropl(struct vnode *vp) 3262 { 3263 3264 ASSERT_VI_LOCKED(vp, __func__); 3265 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3266 if (!refcount_release(&vp->v_holdcnt)) { 3267 VI_UNLOCK(vp); 3268 return; 3269 } 3270 if (VN_IS_DOOMED(vp)) { 3271 freevnode(vp); 3272 return; 3273 } 3274 vdrop_deactivate(vp); 3275 } 3276 3277 /* 3278 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3279 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3280 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3281 * failed lock upgrade. 3282 */ 3283 void 3284 vinactive(struct vnode *vp, struct thread *td) 3285 { 3286 struct vm_object *obj; 3287 3288 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3289 ASSERT_VI_LOCKED(vp, "vinactive"); 3290 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3291 ("vinactive: recursed on VI_DOINGINACT")); 3292 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3293 vp->v_iflag |= VI_DOINGINACT; 3294 vp->v_iflag &= ~VI_OWEINACT; 3295 VI_UNLOCK(vp); 3296 /* 3297 * Before moving off the active list, we must be sure that any 3298 * modified pages are converted into the vnode's dirty 3299 * buffers, since these will no longer be checked once the 3300 * vnode is on the inactive list. 3301 * 3302 * The write-out of the dirty pages is asynchronous. At the 3303 * point that VOP_INACTIVE() is called, there could still be 3304 * pending I/O and dirty pages in the object. 3305 */ 3306 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3307 vm_object_mightbedirty(obj)) { 3308 VM_OBJECT_WLOCK(obj); 3309 vm_object_page_clean(obj, 0, 0, 0); 3310 VM_OBJECT_WUNLOCK(obj); 3311 } 3312 VOP_INACTIVE(vp, td); 3313 VI_LOCK(vp); 3314 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3315 ("vinactive: lost VI_DOINGINACT")); 3316 vp->v_iflag &= ~VI_DOINGINACT; 3317 } 3318 3319 /* 3320 * Remove any vnodes in the vnode table belonging to mount point mp. 3321 * 3322 * If FORCECLOSE is not specified, there should not be any active ones, 3323 * return error if any are found (nb: this is a user error, not a 3324 * system error). If FORCECLOSE is specified, detach any active vnodes 3325 * that are found. 3326 * 3327 * If WRITECLOSE is set, only flush out regular file vnodes open for 3328 * writing. 
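 *
 * An illustrative unmount-side caller (hypothetical filesystem, local
 * variables assumed) might look like:
 *
 *	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
 *	error = vflush(mp, 0, flags, td);
 *
 * while a remount from read-write to read-only would typically pass
 * WRITECLOSE (often together with SKIPSYSTEM) instead.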
3329 * 3330 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3331 * 3332 * `rootrefs' specifies the base reference count for the root vnode 3333 * of this filesystem. The root vnode is considered busy if its 3334 * v_usecount exceeds this value. On a successful return, vflush(, td) 3335 * will call vrele() on the root vnode exactly rootrefs times. 3336 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3337 * be zero. 3338 */ 3339 #ifdef DIAGNOSTIC 3340 static int busyprt = 0; /* print out busy vnodes */ 3341 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3342 #endif 3343 3344 int 3345 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3346 { 3347 struct vnode *vp, *mvp, *rootvp = NULL; 3348 struct vattr vattr; 3349 int busy = 0, error; 3350 3351 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3352 rootrefs, flags); 3353 if (rootrefs > 0) { 3354 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3355 ("vflush: bad args")); 3356 /* 3357 * Get the filesystem root vnode. We can vput() it 3358 * immediately, since with rootrefs > 0, it won't go away. 3359 */ 3360 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3361 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3362 __func__, error); 3363 return (error); 3364 } 3365 vput(rootvp); 3366 } 3367 loop: 3368 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3369 vholdl(vp); 3370 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3371 if (error) { 3372 vdrop(vp); 3373 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3374 goto loop; 3375 } 3376 /* 3377 * Skip over a vnodes marked VV_SYSTEM. 3378 */ 3379 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3380 VOP_UNLOCK(vp, 0); 3381 vdrop(vp); 3382 continue; 3383 } 3384 /* 3385 * If WRITECLOSE is set, flush out unlinked but still open 3386 * files (even if open only for reading) and regular file 3387 * vnodes open for writing. 3388 */ 3389 if (flags & WRITECLOSE) { 3390 if (vp->v_object != NULL) { 3391 VM_OBJECT_WLOCK(vp->v_object); 3392 vm_object_page_clean(vp->v_object, 0, 0, 0); 3393 VM_OBJECT_WUNLOCK(vp->v_object); 3394 } 3395 error = VOP_FSYNC(vp, MNT_WAIT, td); 3396 if (error != 0) { 3397 VOP_UNLOCK(vp, 0); 3398 vdrop(vp); 3399 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3400 return (error); 3401 } 3402 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3403 VI_LOCK(vp); 3404 3405 if ((vp->v_type == VNON || 3406 (error == 0 && vattr.va_nlink > 0)) && 3407 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3408 VOP_UNLOCK(vp, 0); 3409 vdropl(vp); 3410 continue; 3411 } 3412 } else 3413 VI_LOCK(vp); 3414 /* 3415 * With v_usecount == 0, all we need to do is clear out the 3416 * vnode data structures and we are done. 3417 * 3418 * If FORCECLOSE is set, forcibly close the vnode. 3419 */ 3420 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3421 vgonel(vp); 3422 } else { 3423 busy++; 3424 #ifdef DIAGNOSTIC 3425 if (busyprt) 3426 vn_printf(vp, "vflush: busy vnode "); 3427 #endif 3428 } 3429 VOP_UNLOCK(vp, 0); 3430 vdropl(vp); 3431 } 3432 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3433 /* 3434 * If just the root vnode is busy, and if its refcount 3435 * is equal to `rootrefs', then go ahead and kill it. 
3436 */ 3437 VI_LOCK(rootvp); 3438 KASSERT(busy > 0, ("vflush: not busy")); 3439 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3440 ("vflush: usecount %d < rootrefs %d", 3441 rootvp->v_usecount, rootrefs)); 3442 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3443 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3444 vgone(rootvp); 3445 VOP_UNLOCK(rootvp, 0); 3446 busy = 0; 3447 } else 3448 VI_UNLOCK(rootvp); 3449 } 3450 if (busy) { 3451 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3452 busy); 3453 return (EBUSY); 3454 } 3455 for (; rootrefs > 0; rootrefs--) 3456 vrele(rootvp); 3457 return (0); 3458 } 3459 3460 /* 3461 * Recycle an unused vnode to the front of the free list. 3462 */ 3463 int 3464 vrecycle(struct vnode *vp) 3465 { 3466 int recycled; 3467 3468 VI_LOCK(vp); 3469 recycled = vrecyclel(vp); 3470 VI_UNLOCK(vp); 3471 return (recycled); 3472 } 3473 3474 /* 3475 * vrecycle, with the vp interlock held. 3476 */ 3477 int 3478 vrecyclel(struct vnode *vp) 3479 { 3480 int recycled; 3481 3482 ASSERT_VOP_ELOCKED(vp, __func__); 3483 ASSERT_VI_LOCKED(vp, __func__); 3484 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3485 recycled = 0; 3486 if (vp->v_usecount == 0) { 3487 recycled = 1; 3488 vgonel(vp); 3489 } 3490 return (recycled); 3491 } 3492 3493 /* 3494 * Eliminate all activity associated with a vnode 3495 * in preparation for reuse. 3496 */ 3497 void 3498 vgone(struct vnode *vp) 3499 { 3500 VI_LOCK(vp); 3501 vgonel(vp); 3502 VI_UNLOCK(vp); 3503 } 3504 3505 static void 3506 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3507 struct vnode *lowervp __unused) 3508 { 3509 } 3510 3511 /* 3512 * Notify upper mounts about reclaimed or unlinked vnode. 3513 */ 3514 void 3515 vfs_notify_upper(struct vnode *vp, int event) 3516 { 3517 static struct vfsops vgonel_vfsops = { 3518 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3519 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3520 }; 3521 struct mount *mp, *ump, *mmp; 3522 3523 mp = vp->v_mount; 3524 if (mp == NULL) 3525 return; 3526 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3527 return; 3528 3529 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3530 mmp->mnt_op = &vgonel_vfsops; 3531 mmp->mnt_kern_flag |= MNTK_MARKER; 3532 MNT_ILOCK(mp); 3533 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3534 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3535 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3536 ump = TAILQ_NEXT(ump, mnt_upper_link); 3537 continue; 3538 } 3539 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3540 MNT_IUNLOCK(mp); 3541 switch (event) { 3542 case VFS_NOTIFY_UPPER_RECLAIM: 3543 VFS_RECLAIM_LOWERVP(ump, vp); 3544 break; 3545 case VFS_NOTIFY_UPPER_UNLINK: 3546 VFS_UNLINK_LOWERVP(ump, vp); 3547 break; 3548 default: 3549 KASSERT(0, ("invalid event %d", event)); 3550 break; 3551 } 3552 MNT_ILOCK(mp); 3553 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3554 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3555 } 3556 free(mmp, M_TEMP); 3557 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3558 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3559 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3560 wakeup(&mp->mnt_uppers); 3561 } 3562 MNT_IUNLOCK(mp); 3563 } 3564 3565 /* 3566 * vgone, with the vp interlock held. 
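 *
 * In outline (see the code below): mark the vnode VIRF_DOOMED, notify any
 * upper (e.g. nullfs) mounts, close and deactivate it if it was in use,
 * flush or toss its buffers, tear down the VM object where the vnode owns
 * one, VOP_RECLAIM() the filesystem state, purge advisory locks and the
 * namecache, drop it from the mount point's vnode list and finally point
 * it at dead_vnodeops with type VBAD.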
3567 */ 3568 static void 3569 vgonel(struct vnode *vp) 3570 { 3571 struct thread *td; 3572 struct mount *mp; 3573 vm_object_t object; 3574 bool active, oweinact; 3575 3576 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3577 ASSERT_VI_LOCKED(vp, "vgonel"); 3578 VNASSERT(vp->v_holdcnt, vp, 3579 ("vgonel: vp %p has no reference.", vp)); 3580 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3581 td = curthread; 3582 3583 /* 3584 * Don't vgonel if we're already doomed. 3585 */ 3586 if (vp->v_irflag & VIRF_DOOMED) 3587 return; 3588 vp->v_irflag |= VIRF_DOOMED; 3589 3590 /* 3591 * Check to see if the vnode is in use. If so, we have to call 3592 * VOP_CLOSE() and VOP_INACTIVE(). 3593 */ 3594 active = vp->v_usecount > 0; 3595 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3596 VI_UNLOCK(vp); 3597 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3598 3599 /* 3600 * If purging an active vnode, it must be closed and 3601 * deactivated before being reclaimed. 3602 */ 3603 if (active) 3604 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3605 if (oweinact || active) { 3606 VI_LOCK(vp); 3607 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3608 vinactive(vp, td); 3609 VI_UNLOCK(vp); 3610 } 3611 if (vp->v_type == VSOCK) 3612 vfs_unp_reclaim(vp); 3613 3614 /* 3615 * Clean out any buffers associated with the vnode. 3616 * If the flush fails, just toss the buffers. 3617 */ 3618 mp = NULL; 3619 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3620 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3621 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3622 while (vinvalbuf(vp, 0, 0, 0) != 0) 3623 ; 3624 } 3625 3626 BO_LOCK(&vp->v_bufobj); 3627 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3628 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3629 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3630 vp->v_bufobj.bo_clean.bv_cnt == 0, 3631 ("vp %p bufobj not invalidated", vp)); 3632 3633 /* 3634 * For VMIO bufobj, BO_DEAD is set later, or in 3635 * vm_object_terminate() after the object's page queue is 3636 * flushed. 3637 */ 3638 object = vp->v_bufobj.bo_object; 3639 if (object == NULL) 3640 vp->v_bufobj.bo_flag |= BO_DEAD; 3641 BO_UNLOCK(&vp->v_bufobj); 3642 3643 /* 3644 * Handle the VM part. Tmpfs handles v_object on its own (the 3645 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3646 * should not touch the object borrowed from the lower vnode 3647 * (the handle check). 3648 */ 3649 if (object != NULL && object->type == OBJT_VNODE && 3650 object->handle == vp) 3651 vnode_destroy_vobject(vp); 3652 3653 /* 3654 * Reclaim the vnode. 3655 */ 3656 if (VOP_RECLAIM(vp, td)) 3657 panic("vgone: cannot reclaim"); 3658 if (mp != NULL) 3659 vn_finished_secondary_write(mp); 3660 VNASSERT(vp->v_object == NULL, vp, 3661 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3662 /* 3663 * Clear the advisory locks and wake up waiting threads. 3664 */ 3665 (void)VOP_ADVLOCKPURGE(vp); 3666 vp->v_lockf = NULL; 3667 /* 3668 * Delete from old mount point vnode list. 3669 */ 3670 delmntque(vp); 3671 cache_purge(vp); 3672 /* 3673 * Done with purge, reset to the standard lock and invalidate 3674 * the vnode. 3675 */ 3676 VI_LOCK(vp); 3677 vp->v_vnlock = &vp->v_lock; 3678 vp->v_op = &dead_vnodeops; 3679 vp->v_tag = "none"; 3680 vp->v_type = VBAD; 3681 } 3682 3683 /* 3684 * Calculate the total number of references to a special device. 3685 */ 3686 int 3687 vcount(struct vnode *vp) 3688 { 3689 int count; 3690 3691 dev_lock(); 3692 count = vp->v_rdev->si_usecount; 3693 dev_unlock(); 3694 return (count); 3695 } 3696 3697 /* 3698 * Print out a description of a vnode. 
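 * Reached both from kernel diagnostics (e.g. the DIAGNOSTIC
 * "vflush: busy vnode" report above) and from the DDB "show vnode" and
 * "show lockedvnods" commands below.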
3699 */ 3700 static char *typename[] = 3701 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3702 "VMARKER"}; 3703 3704 void 3705 vn_printf(struct vnode *vp, const char *fmt, ...) 3706 { 3707 va_list ap; 3708 char buf[256], buf2[16]; 3709 u_long flags; 3710 3711 va_start(ap, fmt); 3712 vprintf(fmt, ap); 3713 va_end(ap); 3714 printf("%p: ", (void *)vp); 3715 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3716 printf(" usecount %d, writecount %d, refcount %d", 3717 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3718 switch (vp->v_type) { 3719 case VDIR: 3720 printf(" mountedhere %p\n", vp->v_mountedhere); 3721 break; 3722 case VCHR: 3723 printf(" rdev %p\n", vp->v_rdev); 3724 break; 3725 case VSOCK: 3726 printf(" socket %p\n", vp->v_unpcb); 3727 break; 3728 case VFIFO: 3729 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3730 break; 3731 default: 3732 printf("\n"); 3733 break; 3734 } 3735 buf[0] = '\0'; 3736 buf[1] = '\0'; 3737 if (vp->v_irflag & VIRF_DOOMED) 3738 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 3739 flags = vp->v_irflag & ~(VIRF_DOOMED); 3740 if (flags != 0) { 3741 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 3742 strlcat(buf, buf2, sizeof(buf)); 3743 } 3744 if (vp->v_vflag & VV_ROOT) 3745 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3746 if (vp->v_vflag & VV_ISTTY) 3747 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3748 if (vp->v_vflag & VV_NOSYNC) 3749 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3750 if (vp->v_vflag & VV_ETERNALDEV) 3751 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3752 if (vp->v_vflag & VV_CACHEDLABEL) 3753 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3754 if (vp->v_vflag & VV_VMSIZEVNLOCK) 3755 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 3756 if (vp->v_vflag & VV_COPYONWRITE) 3757 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3758 if (vp->v_vflag & VV_SYSTEM) 3759 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3760 if (vp->v_vflag & VV_PROCDEP) 3761 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3762 if (vp->v_vflag & VV_NOKNOTE) 3763 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3764 if (vp->v_vflag & VV_DELETED) 3765 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3766 if (vp->v_vflag & VV_MD) 3767 strlcat(buf, "|VV_MD", sizeof(buf)); 3768 if (vp->v_vflag & VV_FORCEINSMQ) 3769 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3770 if (vp->v_vflag & VV_READLINK) 3771 strlcat(buf, "|VV_READLINK", sizeof(buf)); 3772 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3773 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3774 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3775 if (flags != 0) { 3776 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3777 strlcat(buf, buf2, sizeof(buf)); 3778 } 3779 if (vp->v_iflag & VI_TEXT_REF) 3780 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 3781 if (vp->v_iflag & VI_MOUNT) 3782 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3783 if (vp->v_iflag & VI_FREE) 3784 strlcat(buf, "|VI_FREE", sizeof(buf)); 3785 if (vp->v_iflag & VI_ACTIVE) 3786 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3787 if (vp->v_iflag & VI_DOINGINACT) 3788 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3789 if (vp->v_iflag & VI_OWEINACT) 3790 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3791 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_FREE | VI_ACTIVE | 3792 VI_DOINGINACT | VI_OWEINACT); 3793 if (flags != 0) { 3794 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3795 strlcat(buf, buf2, sizeof(buf)); 3796 } 3797 if (vp->v_mflag & VMP_TMPMNTFREELIST) 3798 strlcat(buf, "|VMP_TMPMNTFREELIST", sizeof(buf)); 3799 flags = 
vp->v_mflag & ~(VMP_TMPMNTFREELIST); 3800 if (flags != 0) { 3801 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 3802 strlcat(buf, buf2, sizeof(buf)); 3803 } 3804 printf(" flags (%s)\n", buf + 1); 3805 if (mtx_owned(VI_MTX(vp))) 3806 printf(" VI_LOCKed"); 3807 if (vp->v_object != NULL) 3808 printf(" v_object %p ref %d pages %d " 3809 "cleanbuf %d dirtybuf %d\n", 3810 vp->v_object, vp->v_object->ref_count, 3811 vp->v_object->resident_page_count, 3812 vp->v_bufobj.bo_clean.bv_cnt, 3813 vp->v_bufobj.bo_dirty.bv_cnt); 3814 printf(" "); 3815 lockmgr_printinfo(vp->v_vnlock); 3816 if (vp->v_data != NULL) 3817 VOP_PRINT(vp); 3818 } 3819 3820 #ifdef DDB 3821 /* 3822 * List all of the locked vnodes in the system. 3823 * Called when debugging the kernel. 3824 */ 3825 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3826 { 3827 struct mount *mp; 3828 struct vnode *vp; 3829 3830 /* 3831 * Note: because this is DDB, we can't obey the locking semantics 3832 * for these structures, which means we could catch an inconsistent 3833 * state and dereference a nasty pointer. Not much to be done 3834 * about that. 3835 */ 3836 db_printf("Locked vnodes\n"); 3837 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3838 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3839 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3840 vn_printf(vp, "vnode "); 3841 } 3842 } 3843 } 3844 3845 /* 3846 * Show details about the given vnode. 3847 */ 3848 DB_SHOW_COMMAND(vnode, db_show_vnode) 3849 { 3850 struct vnode *vp; 3851 3852 if (!have_addr) 3853 return; 3854 vp = (struct vnode *)addr; 3855 vn_printf(vp, "vnode "); 3856 } 3857 3858 /* 3859 * Show details about the given mount point. 3860 */ 3861 DB_SHOW_COMMAND(mount, db_show_mount) 3862 { 3863 struct mount *mp; 3864 struct vfsopt *opt; 3865 struct statfs *sp; 3866 struct vnode *vp; 3867 char buf[512]; 3868 uint64_t mflags; 3869 u_int flags; 3870 3871 if (!have_addr) { 3872 /* No address given, print short info about all mount points. 
*/ 3873 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3874 db_printf("%p %s on %s (%s)\n", mp, 3875 mp->mnt_stat.f_mntfromname, 3876 mp->mnt_stat.f_mntonname, 3877 mp->mnt_stat.f_fstypename); 3878 if (db_pager_quit) 3879 break; 3880 } 3881 db_printf("\nMore info: show mount <addr>\n"); 3882 return; 3883 } 3884 3885 mp = (struct mount *)addr; 3886 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3887 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3888 3889 buf[0] = '\0'; 3890 mflags = mp->mnt_flag; 3891 #define MNT_FLAG(flag) do { \ 3892 if (mflags & (flag)) { \ 3893 if (buf[0] != '\0') \ 3894 strlcat(buf, ", ", sizeof(buf)); \ 3895 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3896 mflags &= ~(flag); \ 3897 } \ 3898 } while (0) 3899 MNT_FLAG(MNT_RDONLY); 3900 MNT_FLAG(MNT_SYNCHRONOUS); 3901 MNT_FLAG(MNT_NOEXEC); 3902 MNT_FLAG(MNT_NOSUID); 3903 MNT_FLAG(MNT_NFS4ACLS); 3904 MNT_FLAG(MNT_UNION); 3905 MNT_FLAG(MNT_ASYNC); 3906 MNT_FLAG(MNT_SUIDDIR); 3907 MNT_FLAG(MNT_SOFTDEP); 3908 MNT_FLAG(MNT_NOSYMFOLLOW); 3909 MNT_FLAG(MNT_GJOURNAL); 3910 MNT_FLAG(MNT_MULTILABEL); 3911 MNT_FLAG(MNT_ACLS); 3912 MNT_FLAG(MNT_NOATIME); 3913 MNT_FLAG(MNT_NOCLUSTERR); 3914 MNT_FLAG(MNT_NOCLUSTERW); 3915 MNT_FLAG(MNT_SUJ); 3916 MNT_FLAG(MNT_EXRDONLY); 3917 MNT_FLAG(MNT_EXPORTED); 3918 MNT_FLAG(MNT_DEFEXPORTED); 3919 MNT_FLAG(MNT_EXPORTANON); 3920 MNT_FLAG(MNT_EXKERB); 3921 MNT_FLAG(MNT_EXPUBLIC); 3922 MNT_FLAG(MNT_LOCAL); 3923 MNT_FLAG(MNT_QUOTA); 3924 MNT_FLAG(MNT_ROOTFS); 3925 MNT_FLAG(MNT_USER); 3926 MNT_FLAG(MNT_IGNORE); 3927 MNT_FLAG(MNT_UPDATE); 3928 MNT_FLAG(MNT_DELEXPORT); 3929 MNT_FLAG(MNT_RELOAD); 3930 MNT_FLAG(MNT_FORCE); 3931 MNT_FLAG(MNT_SNAPSHOT); 3932 MNT_FLAG(MNT_BYFSID); 3933 #undef MNT_FLAG 3934 if (mflags != 0) { 3935 if (buf[0] != '\0') 3936 strlcat(buf, ", ", sizeof(buf)); 3937 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3938 "0x%016jx", mflags); 3939 } 3940 db_printf(" mnt_flag = %s\n", buf); 3941 3942 buf[0] = '\0'; 3943 flags = mp->mnt_kern_flag; 3944 #define MNT_KERN_FLAG(flag) do { \ 3945 if (flags & (flag)) { \ 3946 if (buf[0] != '\0') \ 3947 strlcat(buf, ", ", sizeof(buf)); \ 3948 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3949 flags &= ~(flag); \ 3950 } \ 3951 } while (0) 3952 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3953 MNT_KERN_FLAG(MNTK_ASYNC); 3954 MNT_KERN_FLAG(MNTK_SOFTDEP); 3955 MNT_KERN_FLAG(MNTK_DRAINING); 3956 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3957 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3958 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3959 MNT_KERN_FLAG(MNTK_NO_IOPF); 3960 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3961 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3962 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3963 MNT_KERN_FLAG(MNTK_MARKER); 3964 MNT_KERN_FLAG(MNTK_USES_BCACHE); 3965 MNT_KERN_FLAG(MNTK_NOASYNC); 3966 MNT_KERN_FLAG(MNTK_UNMOUNT); 3967 MNT_KERN_FLAG(MNTK_MWAIT); 3968 MNT_KERN_FLAG(MNTK_SUSPEND); 3969 MNT_KERN_FLAG(MNTK_SUSPEND2); 3970 MNT_KERN_FLAG(MNTK_SUSPENDED); 3971 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3972 MNT_KERN_FLAG(MNTK_NOKNOTE); 3973 #undef MNT_KERN_FLAG 3974 if (flags != 0) { 3975 if (buf[0] != '\0') 3976 strlcat(buf, ", ", sizeof(buf)); 3977 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3978 "0x%08x", flags); 3979 } 3980 db_printf(" mnt_kern_flag = %s\n", buf); 3981 3982 db_printf(" mnt_opt = "); 3983 opt = TAILQ_FIRST(mp->mnt_opt); 3984 if (opt != NULL) { 3985 db_printf("%s", opt->name); 3986 opt = TAILQ_NEXT(opt, link); 3987 while (opt != NULL) { 3988 db_printf(", %s", opt->name); 3989 opt = TAILQ_NEXT(opt, link); 3990 } 3991 } 3992 db_printf("\n"); 3993 3994 
sp = &mp->mnt_stat; 3995 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3996 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3997 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3998 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3999 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4000 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4001 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4002 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4003 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4004 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4005 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4006 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4007 4008 db_printf(" mnt_cred = { uid=%u ruid=%u", 4009 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4010 if (jailed(mp->mnt_cred)) 4011 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4012 db_printf(" }\n"); 4013 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4014 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4015 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4016 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4017 db_printf(" mnt_activevnodelistsize = %d\n", 4018 mp->mnt_activevnodelistsize); 4019 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4020 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4021 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4022 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4023 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4024 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4025 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4026 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4027 db_printf(" mnt_secondary_accwrites = %d\n", 4028 mp->mnt_secondary_accwrites); 4029 db_printf(" mnt_gjprovider = %s\n", 4030 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4031 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4032 4033 db_printf("\n\nList of active vnodes\n"); 4034 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 4035 if (vp->v_type != VMARKER) { 4036 vn_printf(vp, "vnode "); 4037 if (db_pager_quit) 4038 break; 4039 } 4040 } 4041 db_printf("\n\nList of inactive vnodes\n"); 4042 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4043 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 4044 vn_printf(vp, "vnode "); 4045 if (db_pager_quit) 4046 break; 4047 } 4048 } 4049 } 4050 #endif /* DDB */ 4051 4052 /* 4053 * Fill in a struct xvfsconf based on a struct vfsconf. 4054 */ 4055 static int 4056 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4057 { 4058 struct xvfsconf xvfsp; 4059 4060 bzero(&xvfsp, sizeof(xvfsp)); 4061 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4062 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4063 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4064 xvfsp.vfc_flags = vfsp->vfc_flags; 4065 /* 4066 * These are unused in userland, we keep them 4067 * to not break binary compatibility. 
4068 */ 4069 xvfsp.vfc_vfsops = NULL; 4070 xvfsp.vfc_next = NULL; 4071 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4072 } 4073 4074 #ifdef COMPAT_FREEBSD32 4075 struct xvfsconf32 { 4076 uint32_t vfc_vfsops; 4077 char vfc_name[MFSNAMELEN]; 4078 int32_t vfc_typenum; 4079 int32_t vfc_refcount; 4080 int32_t vfc_flags; 4081 uint32_t vfc_next; 4082 }; 4083 4084 static int 4085 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4086 { 4087 struct xvfsconf32 xvfsp; 4088 4089 bzero(&xvfsp, sizeof(xvfsp)); 4090 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4091 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4092 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4093 xvfsp.vfc_flags = vfsp->vfc_flags; 4094 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4095 } 4096 #endif 4097 4098 /* 4099 * Top level filesystem related information gathering. 4100 */ 4101 static int 4102 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4103 { 4104 struct vfsconf *vfsp; 4105 int error; 4106 4107 error = 0; 4108 vfsconf_slock(); 4109 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4110 #ifdef COMPAT_FREEBSD32 4111 if (req->flags & SCTL_MASK32) 4112 error = vfsconf2x32(req, vfsp); 4113 else 4114 #endif 4115 error = vfsconf2x(req, vfsp); 4116 if (error) 4117 break; 4118 } 4119 vfsconf_sunlock(); 4120 return (error); 4121 } 4122 4123 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4124 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4125 "S,xvfsconf", "List of all configured filesystems"); 4126 4127 #ifndef BURN_BRIDGES 4128 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4129 4130 static int 4131 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4132 { 4133 int *name = (int *)arg1 - 1; /* XXX */ 4134 u_int namelen = arg2 + 1; /* XXX */ 4135 struct vfsconf *vfsp; 4136 4137 log(LOG_WARNING, "userland calling deprecated sysctl, " 4138 "please rebuild world\n"); 4139 4140 #if 1 || defined(COMPAT_PRELITE2) 4141 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4142 if (namelen == 1) 4143 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4144 #endif 4145 4146 switch (name[1]) { 4147 case VFS_MAXTYPENUM: 4148 if (namelen != 2) 4149 return (ENOTDIR); 4150 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4151 case VFS_CONF: 4152 if (namelen != 3) 4153 return (ENOTDIR); /* overloaded */ 4154 vfsconf_slock(); 4155 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4156 if (vfsp->vfc_typenum == name[2]) 4157 break; 4158 } 4159 vfsconf_sunlock(); 4160 if (vfsp == NULL) 4161 return (EOPNOTSUPP); 4162 #ifdef COMPAT_FREEBSD32 4163 if (req->flags & SCTL_MASK32) 4164 return (vfsconf2x32(req, vfsp)); 4165 else 4166 #endif 4167 return (vfsconf2x(req, vfsp)); 4168 } 4169 return (EOPNOTSUPP); 4170 } 4171 4172 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4173 CTLFLAG_MPSAFE, vfs_sysctl, 4174 "Generic filesystem"); 4175 4176 #if 1 || defined(COMPAT_PRELITE2) 4177 4178 static int 4179 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4180 { 4181 int error; 4182 struct vfsconf *vfsp; 4183 struct ovfsconf ovfs; 4184 4185 vfsconf_slock(); 4186 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4187 bzero(&ovfs, sizeof(ovfs)); 4188 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4189 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4190 ovfs.vfc_index = vfsp->vfc_typenum; 4191 ovfs.vfc_refcount = vfsp->vfc_refcount; 4192 ovfs.vfc_flags = vfsp->vfc_flags; 4193 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4194 if (error != 0) { 4195 vfsconf_sunlock(); 4196 return (error); 4197 } 4198 } 4199 vfsconf_sunlock(); 4200 return (0); 4201 } 4202 4203 #endif /* 1 || COMPAT_PRELITE2 */ 4204 #endif /* !BURN_BRIDGES */ 4205 4206 #define KINFO_VNODESLOP 10 4207 #ifdef notyet 4208 /* 4209 * Dump vnode list (via sysctl). 4210 */ 4211 /* ARGSUSED */ 4212 static int 4213 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4214 { 4215 struct xvnode *xvn; 4216 struct mount *mp; 4217 struct vnode *vp; 4218 int error, len, n; 4219 4220 /* 4221 * Stale numvnodes access is not fatal here. 4222 */ 4223 req->lock = 0; 4224 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4225 if (!req->oldptr) 4226 /* Make an estimate */ 4227 return (SYSCTL_OUT(req, 0, len)); 4228 4229 error = sysctl_wire_old_buffer(req, 0); 4230 if (error != 0) 4231 return (error); 4232 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4233 n = 0; 4234 mtx_lock(&mountlist_mtx); 4235 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4236 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4237 continue; 4238 MNT_ILOCK(mp); 4239 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4240 if (n == len) 4241 break; 4242 vref(vp); 4243 xvn[n].xv_size = sizeof *xvn; 4244 xvn[n].xv_vnode = vp; 4245 xvn[n].xv_id = 0; /* XXX compat */ 4246 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4247 XV_COPY(usecount); 4248 XV_COPY(writecount); 4249 XV_COPY(holdcnt); 4250 XV_COPY(mount); 4251 XV_COPY(numoutput); 4252 XV_COPY(type); 4253 #undef XV_COPY 4254 xvn[n].xv_flag = vp->v_vflag; 4255 4256 switch (vp->v_type) { 4257 case VREG: 4258 case VDIR: 4259 case VLNK: 4260 break; 4261 case VBLK: 4262 case VCHR: 4263 if (vp->v_rdev == NULL) { 4264 vrele(vp); 4265 continue; 4266 } 4267 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4268 break; 4269 case VSOCK: 4270 xvn[n].xv_socket = vp->v_socket; 4271 break; 4272 case VFIFO: 4273 xvn[n].xv_fifo = vp->v_fifoinfo; 4274 break; 4275 case VNON: 4276 case VBAD: 4277 default: 4278 /* shouldn't happen? 
*/ 4279 vrele(vp); 4280 continue; 4281 } 4282 vrele(vp); 4283 ++n; 4284 } 4285 MNT_IUNLOCK(mp); 4286 mtx_lock(&mountlist_mtx); 4287 vfs_unbusy(mp); 4288 if (n == len) 4289 break; 4290 } 4291 mtx_unlock(&mountlist_mtx); 4292 4293 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4294 free(xvn, M_TEMP); 4295 return (error); 4296 } 4297 4298 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4299 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4300 ""); 4301 #endif 4302 4303 static void 4304 unmount_or_warn(struct mount *mp) 4305 { 4306 int error; 4307 4308 error = dounmount(mp, MNT_FORCE, curthread); 4309 if (error != 0) { 4310 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4311 if (error == EBUSY) 4312 printf("BUSY)\n"); 4313 else 4314 printf("%d)\n", error); 4315 } 4316 } 4317 4318 /* 4319 * Unmount all filesystems. The list is traversed in reverse order 4320 * of mounting to avoid dependencies. 4321 */ 4322 void 4323 vfs_unmountall(void) 4324 { 4325 struct mount *mp, *tmp; 4326 4327 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4328 4329 /* 4330 * Since this only runs when rebooting, it is not interlocked. 4331 */ 4332 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4333 vfs_ref(mp); 4334 4335 /* 4336 * Forcibly unmounting "/dev" before "/" would prevent clean 4337 * unmount of the latter. 4338 */ 4339 if (mp == rootdevmp) 4340 continue; 4341 4342 unmount_or_warn(mp); 4343 } 4344 4345 if (rootdevmp != NULL) 4346 unmount_or_warn(rootdevmp); 4347 } 4348 4349 /* 4350 * perform msync on all vnodes under a mount point 4351 * the mount point must be locked. 4352 */ 4353 void 4354 vfs_msync(struct mount *mp, int flags) 4355 { 4356 struct vnode *vp, *mvp; 4357 struct vm_object *obj; 4358 4359 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4360 4361 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4362 return; 4363 4364 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4365 obj = vp->v_object; 4366 if (obj != NULL && vm_object_mightbedirty(obj) && 4367 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4368 if (!vget(vp, 4369 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4370 curthread)) { 4371 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4372 vput(vp); 4373 continue; 4374 } 4375 4376 obj = vp->v_object; 4377 if (obj != NULL) { 4378 VM_OBJECT_WLOCK(obj); 4379 vm_object_page_clean(obj, 0, 0, 4380 flags == MNT_WAIT ? 4381 OBJPC_SYNC : OBJPC_NOSYNC); 4382 VM_OBJECT_WUNLOCK(obj); 4383 } 4384 vput(vp); 4385 } 4386 } else 4387 VI_UNLOCK(vp); 4388 } 4389 } 4390 4391 static void 4392 destroy_vpollinfo_free(struct vpollinfo *vi) 4393 { 4394 4395 knlist_destroy(&vi->vpi_selinfo.si_note); 4396 mtx_destroy(&vi->vpi_lock); 4397 uma_zfree(vnodepoll_zone, vi); 4398 } 4399 4400 static void 4401 destroy_vpollinfo(struct vpollinfo *vi) 4402 { 4403 4404 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4405 seldrain(&vi->vpi_selinfo); 4406 destroy_vpollinfo_free(vi); 4407 } 4408 4409 /* 4410 * Initialize per-vnode helper structure to hold poll-related state. 
4411 */ 4412 void 4413 v_addpollinfo(struct vnode *vp) 4414 { 4415 struct vpollinfo *vi; 4416 4417 if (vp->v_pollinfo != NULL) 4418 return; 4419 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4420 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4421 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4422 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4423 VI_LOCK(vp); 4424 if (vp->v_pollinfo != NULL) { 4425 VI_UNLOCK(vp); 4426 destroy_vpollinfo_free(vi); 4427 return; 4428 } 4429 vp->v_pollinfo = vi; 4430 VI_UNLOCK(vp); 4431 } 4432 4433 /* 4434 * Record a process's interest in events which might happen to 4435 * a vnode. Because poll uses the historic select-style interface 4436 * internally, this routine serves as both the ``check for any 4437 * pending events'' and the ``record my interest in future events'' 4438 * functions. (These are done together, while the lock is held, 4439 * to avoid race conditions.) 4440 */ 4441 int 4442 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4443 { 4444 4445 v_addpollinfo(vp); 4446 mtx_lock(&vp->v_pollinfo->vpi_lock); 4447 if (vp->v_pollinfo->vpi_revents & events) { 4448 /* 4449 * This leaves events we are not interested 4450 * in available for the other process which 4451 * presumably had requested them 4452 * (otherwise they would never have been 4453 * recorded). 4454 */ 4455 events &= vp->v_pollinfo->vpi_revents; 4456 vp->v_pollinfo->vpi_revents &= ~events; 4457 4458 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4459 return (events); 4460 } 4461 vp->v_pollinfo->vpi_events |= events; 4462 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4463 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4464 return (0); 4465 } 4466 4467 /* 4468 * Routine to create and manage a filesystem syncer vnode. 4469 */ 4470 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4471 static int sync_fsync(struct vop_fsync_args *); 4472 static int sync_inactive(struct vop_inactive_args *); 4473 static int sync_reclaim(struct vop_reclaim_args *); 4474 4475 static struct vop_vector sync_vnodeops = { 4476 .vop_bypass = VOP_EOPNOTSUPP, 4477 .vop_close = sync_close, /* close */ 4478 .vop_fsync = sync_fsync, /* fsync */ 4479 .vop_inactive = sync_inactive, /* inactive */ 4480 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4481 .vop_reclaim = sync_reclaim, /* reclaim */ 4482 .vop_lock1 = vop_stdlock, /* lock */ 4483 .vop_unlock = vop_stdunlock, /* unlock */ 4484 .vop_islocked = vop_stdislocked, /* islocked */ 4485 }; 4486 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4487 4488 /* 4489 * Create a new filesystem syncer vnode for the specified mount point. 4490 */ 4491 void 4492 vfs_allocate_syncvnode(struct mount *mp) 4493 { 4494 struct vnode *vp; 4495 struct bufobj *bo; 4496 static long start, incr, next; 4497 int error; 4498 4499 /* Allocate a new vnode */ 4500 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4501 if (error != 0) 4502 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4503 vp->v_type = VNON; 4504 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4505 vp->v_vflag |= VV_FORCEINSMQ; 4506 error = insmntque(vp, mp); 4507 if (error != 0) 4508 panic("vfs_allocate_syncvnode: insmntque() failed"); 4509 vp->v_vflag &= ~VV_FORCEINSMQ; 4510 VOP_UNLOCK(vp, 0); 4511 /* 4512 * Place the vnode onto the syncer worklist. We attempt to 4513 * scatter them about on the list so that they will go off 4514 * at evenly distributed times even if all the filesystems 4515 * are mounted at once.
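 * For example, if syncer_maxdelay were 32 (an illustrative value, not necessarily the compiled-in default), successive calls would compute next = 16, 8, 24, 4, 12, 20, 28, 2, ..., so the per-mount slots (next % syncdelay) end up spread roughly evenly across the syncer wheel instead of clustering.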
4516 */ 4517 next += incr; 4518 if (next == 0 || next > syncer_maxdelay) { 4519 start /= 2; 4520 incr /= 2; 4521 if (start == 0) { 4522 start = syncer_maxdelay / 2; 4523 incr = syncer_maxdelay; 4524 } 4525 next = start; 4526 } 4527 bo = &vp->v_bufobj; 4528 BO_LOCK(bo); 4529 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4530 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 4531 mtx_lock(&sync_mtx); 4532 sync_vnode_count++; 4533 if (mp->mnt_syncer == NULL) { 4534 mp->mnt_syncer = vp; 4535 vp = NULL; 4536 } 4537 mtx_unlock(&sync_mtx); 4538 BO_UNLOCK(bo); 4539 if (vp != NULL) { 4540 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4541 vgone(vp); 4542 vput(vp); 4543 } 4544 } 4545 4546 void 4547 vfs_deallocate_syncvnode(struct mount *mp) 4548 { 4549 struct vnode *vp; 4550 4551 mtx_lock(&sync_mtx); 4552 vp = mp->mnt_syncer; 4553 if (vp != NULL) 4554 mp->mnt_syncer = NULL; 4555 mtx_unlock(&sync_mtx); 4556 if (vp != NULL) 4557 vrele(vp); 4558 } 4559 4560 /* 4561 * Do a lazy sync of the filesystem. 4562 */ 4563 static int 4564 sync_fsync(struct vop_fsync_args *ap) 4565 { 4566 struct vnode *syncvp = ap->a_vp; 4567 struct mount *mp = syncvp->v_mount; 4568 int error, save; 4569 struct bufobj *bo; 4570 4571 /* 4572 * We only need to do something if this is a lazy evaluation. 4573 */ 4574 if (ap->a_waitfor != MNT_LAZY) 4575 return (0); 4576 4577 /* 4578 * Move ourselves to the back of the sync list. 4579 */ 4580 bo = &syncvp->v_bufobj; 4581 BO_LOCK(bo); 4582 vn_syncer_add_to_worklist(bo, syncdelay); 4583 BO_UNLOCK(bo); 4584 4585 /* 4586 * Walk the list of vnodes pushing all that are dirty and 4587 * not already on the sync list. 4588 */ 4589 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4590 return (0); 4591 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4592 vfs_unbusy(mp); 4593 return (0); 4594 } 4595 save = curthread_pflags_set(TDP_SYNCIO); 4596 /* 4597 * The filesystem at hand may be idle with free vnodes stored in the 4598 * batch. Return them instead of letting them stay there indefinitely. 4599 */ 4600 vnlru_return_batch(mp); 4601 vfs_msync(mp, MNT_NOWAIT); 4602 error = VFS_SYNC(mp, MNT_LAZY); 4603 curthread_pflags_restore(save); 4604 vn_finished_write(mp); 4605 vfs_unbusy(mp); 4606 return (error); 4607 } 4608 4609 /* 4610 * The syncer vnode is no longer referenced. 4611 */ 4612 static int 4613 sync_inactive(struct vop_inactive_args *ap) 4614 { 4615 4616 vgone(ap->a_vp); 4617 return (0); 4618 } 4619 4620 /* 4621 * The syncer vnode is no longer needed and is being decommissioned. 4622 * 4623 * Modifications to the worklist must be protected by sync_mtx.
4624 */ 4625 static int 4626 sync_reclaim(struct vop_reclaim_args *ap) 4627 { 4628 struct vnode *vp = ap->a_vp; 4629 struct bufobj *bo; 4630 4631 bo = &vp->v_bufobj; 4632 BO_LOCK(bo); 4633 mtx_lock(&sync_mtx); 4634 if (vp->v_mount->mnt_syncer == vp) 4635 vp->v_mount->mnt_syncer = NULL; 4636 if (bo->bo_flag & BO_ONWORKLST) { 4637 LIST_REMOVE(bo, bo_synclist); 4638 syncer_worklist_len--; 4639 sync_vnode_count--; 4640 bo->bo_flag &= ~BO_ONWORKLST; 4641 } 4642 mtx_unlock(&sync_mtx); 4643 BO_UNLOCK(bo); 4644 4645 return (0); 4646 } 4647 4648 int 4649 vn_need_pageq_flush(struct vnode *vp) 4650 { 4651 struct vm_object *obj; 4652 int need; 4653 4654 MPASS(mtx_owned(VI_MTX(vp))); 4655 need = 0; 4656 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4657 vm_object_mightbedirty(obj)) 4658 need = 1; 4659 return (need); 4660 } 4661 4662 /* 4663 * Check if vnode represents a disk device 4664 */ 4665 int 4666 vn_isdisk(struct vnode *vp, int *errp) 4667 { 4668 int error; 4669 4670 if (vp->v_type != VCHR) { 4671 error = ENOTBLK; 4672 goto out; 4673 } 4674 error = 0; 4675 dev_lock(); 4676 if (vp->v_rdev == NULL) 4677 error = ENXIO; 4678 else if (vp->v_rdev->si_devsw == NULL) 4679 error = ENXIO; 4680 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4681 error = ENOTBLK; 4682 dev_unlock(); 4683 out: 4684 if (errp != NULL) 4685 *errp = error; 4686 return (error == 0); 4687 } 4688 4689 /* 4690 * Common filesystem object access control check routine. Accepts a 4691 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4692 * and optional call-by-reference privused argument allowing vaccess() 4693 * to indicate to the caller whether privilege was used to satisfy the 4694 * request (obsoleted). Returns 0 on success, or an errno on failure. 4695 */ 4696 int 4697 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4698 accmode_t accmode, struct ucred *cred, int *privused) 4699 { 4700 accmode_t dac_granted; 4701 accmode_t priv_granted; 4702 4703 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4704 ("invalid bit in accmode")); 4705 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4706 ("VAPPEND without VWRITE")); 4707 4708 /* 4709 * Look for a normal, non-privileged way to access the file/directory 4710 * as requested. If it exists, go with that. 4711 */ 4712 4713 if (privused != NULL) 4714 *privused = 0; 4715 4716 dac_granted = 0; 4717 4718 /* Check the owner. */ 4719 if (cred->cr_uid == file_uid) { 4720 dac_granted |= VADMIN; 4721 if (file_mode & S_IXUSR) 4722 dac_granted |= VEXEC; 4723 if (file_mode & S_IRUSR) 4724 dac_granted |= VREAD; 4725 if (file_mode & S_IWUSR) 4726 dac_granted |= (VWRITE | VAPPEND); 4727 4728 if ((accmode & dac_granted) == accmode) 4729 return (0); 4730 4731 goto privcheck; 4732 } 4733 4734 /* Otherwise, check the groups (first match) */ 4735 if (groupmember(file_gid, cred)) { 4736 if (file_mode & S_IXGRP) 4737 dac_granted |= VEXEC; 4738 if (file_mode & S_IRGRP) 4739 dac_granted |= VREAD; 4740 if (file_mode & S_IWGRP) 4741 dac_granted |= (VWRITE | VAPPEND); 4742 4743 if ((accmode & dac_granted) == accmode) 4744 return (0); 4745 4746 goto privcheck; 4747 } 4748 4749 /* Otherwise, check everyone else. 
*/ 4750 if (file_mode & S_IXOTH) 4751 dac_granted |= VEXEC; 4752 if (file_mode & S_IROTH) 4753 dac_granted |= VREAD; 4754 if (file_mode & S_IWOTH) 4755 dac_granted |= (VWRITE | VAPPEND); 4756 if ((accmode & dac_granted) == accmode) 4757 return (0); 4758 4759 privcheck: 4760 /* 4761 * Build a privilege mask to determine if the set of privileges 4762 * satisfies the requirements when combined with the granted mask 4763 * from above. For each privilege, if the privilege is required, 4764 * bitwise or the request type onto the priv_granted mask. 4765 */ 4766 priv_granted = 0; 4767 4768 if (type == VDIR) { 4769 /* 4770 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4771 * requests, instead of PRIV_VFS_EXEC. 4772 */ 4773 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4774 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4775 priv_granted |= VEXEC; 4776 } else { 4777 /* 4778 * Ensure that at least one execute bit is on. Otherwise, 4779 * a privileged user will always succeed, and we don't want 4780 * this to happen unless the file really is executable. 4781 */ 4782 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4783 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4784 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4785 priv_granted |= VEXEC; 4786 } 4787 4788 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4789 !priv_check_cred(cred, PRIV_VFS_READ)) 4790 priv_granted |= VREAD; 4791 4792 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4793 !priv_check_cred(cred, PRIV_VFS_WRITE)) 4794 priv_granted |= (VWRITE | VAPPEND); 4795 4796 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4797 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 4798 priv_granted |= VADMIN; 4799 4800 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4801 /* XXX audit: privilege used */ 4802 if (privused != NULL) 4803 *privused = 1; 4804 return (0); 4805 } 4806 4807 return ((accmode & VADMIN) ? EPERM : EACCES); 4808 } 4809 4810 /* 4811 * Credential check based on process requesting service, and per-attribute 4812 * permissions. 4813 */ 4814 int 4815 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4816 struct thread *td, accmode_t accmode) 4817 { 4818 4819 /* 4820 * Kernel-invoked always succeeds. 4821 */ 4822 if (cred == NOCRED) 4823 return (0); 4824 4825 /* 4826 * Do not allow privileged processes in jail to directly manipulate 4827 * system attributes. 4828 */ 4829 switch (attrnamespace) { 4830 case EXTATTR_NAMESPACE_SYSTEM: 4831 /* Potentially should be: return (EPERM); */ 4832 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 4833 case EXTATTR_NAMESPACE_USER: 4834 return (VOP_ACCESS(vp, accmode, cred, td)); 4835 default: 4836 return (EPERM); 4837 } 4838 } 4839 4840 #ifdef DEBUG_VFS_LOCKS 4841 /* 4842 * This only exists to suppress warnings from unlocked specfs accesses. It is 4843 * no longer ok to have an unlocked VFS. 4844 */ 4845 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4846 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4847 4848 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4849 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4850 "Drop into debugger on lock violation"); 4851 4852 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4853 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4854 0, "Check for interlock across VOPs"); 4855 4856 int vfs_badlock_print = 1; /* Print lock violations. 
*/ 4857 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4858 0, "Print lock violations"); 4859 4860 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4861 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4862 0, "Print vnode details on lock violations"); 4863 4864 #ifdef KDB 4865 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 4866 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4867 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4868 #endif 4869 4870 static void 4871 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4872 { 4873 4874 #ifdef KDB 4875 if (vfs_badlock_backtrace) 4876 kdb_backtrace(); 4877 #endif 4878 if (vfs_badlock_vnode) 4879 vn_printf(vp, "vnode "); 4880 if (vfs_badlock_print) 4881 printf("%s: %p %s\n", str, (void *)vp, msg); 4882 if (vfs_badlock_ddb) 4883 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4884 } 4885 4886 void 4887 assert_vi_locked(struct vnode *vp, const char *str) 4888 { 4889 4890 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4891 vfs_badlock("interlock is not locked but should be", str, vp); 4892 } 4893 4894 void 4895 assert_vi_unlocked(struct vnode *vp, const char *str) 4896 { 4897 4898 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4899 vfs_badlock("interlock is locked but should not be", str, vp); 4900 } 4901 4902 void 4903 assert_vop_locked(struct vnode *vp, const char *str) 4904 { 4905 int locked; 4906 4907 if (!IGNORE_LOCK(vp)) { 4908 locked = VOP_ISLOCKED(vp); 4909 if (locked == 0 || locked == LK_EXCLOTHER) 4910 vfs_badlock("is not locked but should be", str, vp); 4911 } 4912 } 4913 4914 void 4915 assert_vop_unlocked(struct vnode *vp, const char *str) 4916 { 4917 4918 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4919 vfs_badlock("is locked but should not be", str, vp); 4920 } 4921 4922 void 4923 assert_vop_elocked(struct vnode *vp, const char *str) 4924 { 4925 4926 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4927 vfs_badlock("is not exclusive locked but should be", str, vp); 4928 } 4929 #endif /* DEBUG_VFS_LOCKS */ 4930 4931 void 4932 vop_rename_fail(struct vop_rename_args *ap) 4933 { 4934 4935 if (ap->a_tvp != NULL) 4936 vput(ap->a_tvp); 4937 if (ap->a_tdvp == ap->a_tvp) 4938 vrele(ap->a_tdvp); 4939 else 4940 vput(ap->a_tdvp); 4941 vrele(ap->a_fdvp); 4942 vrele(ap->a_fvp); 4943 } 4944 4945 void 4946 vop_rename_pre(void *ap) 4947 { 4948 struct vop_rename_args *a = ap; 4949 4950 #ifdef DEBUG_VFS_LOCKS 4951 if (a->a_tvp) 4952 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4953 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4954 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4955 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4956 4957 /* Check the source (from). */ 4958 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4959 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4960 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4961 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4962 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4963 4964 /* Check the target. 
*/ 4965 if (a->a_tvp) 4966 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4967 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4968 #endif 4969 if (a->a_tdvp != a->a_fdvp) 4970 vhold(a->a_fdvp); 4971 if (a->a_tvp != a->a_fvp) 4972 vhold(a->a_fvp); 4973 vhold(a->a_tdvp); 4974 if (a->a_tvp) 4975 vhold(a->a_tvp); 4976 } 4977 4978 #ifdef DEBUG_VFS_LOCKS 4979 void 4980 vop_strategy_pre(void *ap) 4981 { 4982 struct vop_strategy_args *a; 4983 struct buf *bp; 4984 4985 a = ap; 4986 bp = a->a_bp; 4987 4988 /* 4989 * Cluster ops lock their component buffers but not the IO container. 4990 */ 4991 if ((bp->b_flags & B_CLUSTER) != 0) 4992 return; 4993 4994 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4995 if (vfs_badlock_print) 4996 printf( 4997 "VOP_STRATEGY: bp is not locked but should be\n"); 4998 if (vfs_badlock_ddb) 4999 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5000 } 5001 } 5002 5003 void 5004 vop_lock_pre(void *ap) 5005 { 5006 struct vop_lock1_args *a = ap; 5007 5008 if ((a->a_flags & LK_INTERLOCK) == 0) 5009 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5010 else 5011 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5012 } 5013 5014 void 5015 vop_lock_post(void *ap, int rc) 5016 { 5017 struct vop_lock1_args *a = ap; 5018 5019 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5020 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5021 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5022 } 5023 5024 void 5025 vop_unlock_pre(void *ap) 5026 { 5027 struct vop_unlock_args *a = ap; 5028 5029 if (a->a_flags & LK_INTERLOCK) 5030 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 5031 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5032 } 5033 5034 void 5035 vop_unlock_post(void *ap, int rc) 5036 { 5037 struct vop_unlock_args *a = ap; 5038 5039 if (a->a_flags & LK_INTERLOCK) 5040 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 5041 } 5042 5043 void 5044 vop_need_inactive_pre(void *ap) 5045 { 5046 struct vop_need_inactive_args *a = ap; 5047 5048 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5049 } 5050 5051 void 5052 vop_need_inactive_post(void *ap, int rc) 5053 { 5054 struct vop_need_inactive_args *a = ap; 5055 5056 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5057 } 5058 #endif 5059 5060 void 5061 vop_create_post(void *ap, int rc) 5062 { 5063 struct vop_create_args *a = ap; 5064 5065 if (!rc) 5066 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5067 } 5068 5069 void 5070 vop_deleteextattr_post(void *ap, int rc) 5071 { 5072 struct vop_deleteextattr_args *a = ap; 5073 5074 if (!rc) 5075 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5076 } 5077 5078 void 5079 vop_link_post(void *ap, int rc) 5080 { 5081 struct vop_link_args *a = ap; 5082 5083 if (!rc) { 5084 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5085 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5086 } 5087 } 5088 5089 void 5090 vop_mkdir_post(void *ap, int rc) 5091 { 5092 struct vop_mkdir_args *a = ap; 5093 5094 if (!rc) 5095 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5096 } 5097 5098 void 5099 vop_mknod_post(void *ap, int rc) 5100 { 5101 struct vop_mknod_args *a = ap; 5102 5103 if (!rc) 5104 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5105 } 5106 5107 void 5108 vop_reclaim_post(void *ap, int rc) 5109 { 5110 struct vop_reclaim_args *a = ap; 5111 5112 if (!rc) 5113 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5114 } 5115 5116 void 5117 vop_remove_post(void *ap, int rc) 5118 { 5119 struct vop_remove_args *a = ap; 5120 5121 if (!rc) { 5122 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5123 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5124 } 5125 } 5126 5127 void 5128 vop_rename_post(void *ap, int rc) 5129 { 
5130 struct vop_rename_args *a = ap; 5131 long hint; 5132 5133 if (!rc) { 5134 hint = NOTE_WRITE; 5135 if (a->a_fdvp == a->a_tdvp) { 5136 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5137 hint |= NOTE_LINK; 5138 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5139 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5140 } else { 5141 hint |= NOTE_EXTEND; 5142 if (a->a_fvp->v_type == VDIR) 5143 hint |= NOTE_LINK; 5144 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5145 5146 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5147 a->a_tvp->v_type == VDIR) 5148 hint &= ~NOTE_LINK; 5149 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5150 } 5151 5152 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5153 if (a->a_tvp) 5154 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5155 } 5156 if (a->a_tdvp != a->a_fdvp) 5157 vdrop(a->a_fdvp); 5158 if (a->a_tvp != a->a_fvp) 5159 vdrop(a->a_fvp); 5160 vdrop(a->a_tdvp); 5161 if (a->a_tvp) 5162 vdrop(a->a_tvp); 5163 } 5164 5165 void 5166 vop_rmdir_post(void *ap, int rc) 5167 { 5168 struct vop_rmdir_args *a = ap; 5169 5170 if (!rc) { 5171 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5172 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5173 } 5174 } 5175 5176 void 5177 vop_setattr_post(void *ap, int rc) 5178 { 5179 struct vop_setattr_args *a = ap; 5180 5181 if (!rc) 5182 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5183 } 5184 5185 void 5186 vop_setextattr_post(void *ap, int rc) 5187 { 5188 struct vop_setextattr_args *a = ap; 5189 5190 if (!rc) 5191 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5192 } 5193 5194 void 5195 vop_symlink_post(void *ap, int rc) 5196 { 5197 struct vop_symlink_args *a = ap; 5198 5199 if (!rc) 5200 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5201 } 5202 5203 void 5204 vop_open_post(void *ap, int rc) 5205 { 5206 struct vop_open_args *a = ap; 5207 5208 if (!rc) 5209 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5210 } 5211 5212 void 5213 vop_close_post(void *ap, int rc) 5214 { 5215 struct vop_close_args *a = ap; 5216 5217 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5218 !VN_IS_DOOMED(a->a_vp))) { 5219 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5220 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5221 } 5222 } 5223 5224 void 5225 vop_read_post(void *ap, int rc) 5226 { 5227 struct vop_read_args *a = ap; 5228 5229 if (!rc) 5230 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5231 } 5232 5233 void 5234 vop_readdir_post(void *ap, int rc) 5235 { 5236 struct vop_readdir_args *a = ap; 5237 5238 if (!rc) 5239 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5240 } 5241 5242 static struct knlist fs_knlist; 5243 5244 static void 5245 vfs_event_init(void *arg) 5246 { 5247 knlist_init_mtx(&fs_knlist, NULL); 5248 } 5249 /* XXX - correct order? 
*/ 5250 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5251 5252 void 5253 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5254 { 5255 5256 KNOTE_UNLOCKED(&fs_knlist, event); 5257 } 5258 5259 static int filt_fsattach(struct knote *kn); 5260 static void filt_fsdetach(struct knote *kn); 5261 static int filt_fsevent(struct knote *kn, long hint); 5262 5263 struct filterops fs_filtops = { 5264 .f_isfd = 0, 5265 .f_attach = filt_fsattach, 5266 .f_detach = filt_fsdetach, 5267 .f_event = filt_fsevent 5268 }; 5269 5270 static int 5271 filt_fsattach(struct knote *kn) 5272 { 5273 5274 kn->kn_flags |= EV_CLEAR; 5275 knlist_add(&fs_knlist, kn, 0); 5276 return (0); 5277 } 5278 5279 static void 5280 filt_fsdetach(struct knote *kn) 5281 { 5282 5283 knlist_remove(&fs_knlist, kn, 0); 5284 } 5285 5286 static int 5287 filt_fsevent(struct knote *kn, long hint) 5288 { 5289 5290 kn->kn_fflags |= hint; 5291 return (kn->kn_fflags != 0); 5292 } 5293 5294 static int 5295 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5296 { 5297 struct vfsidctl vc; 5298 int error; 5299 struct mount *mp; 5300 5301 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5302 if (error) 5303 return (error); 5304 if (vc.vc_vers != VFS_CTL_VERS1) 5305 return (EINVAL); 5306 mp = vfs_getvfs(&vc.vc_fsid); 5307 if (mp == NULL) 5308 return (ENOENT); 5309 /* ensure that a specific sysctl goes to the right filesystem. */ 5310 if (strcmp(vc.vc_fstypename, "*") != 0 && 5311 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5312 vfs_rel(mp); 5313 return (EINVAL); 5314 } 5315 VCTLTOREQ(&vc, req); 5316 error = VFS_SYSCTL(mp, vc.vc_op, req); 5317 vfs_rel(mp); 5318 return (error); 5319 } 5320 5321 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 5322 NULL, 0, sysctl_vfs_ctl, "", 5323 "Sysctl by fsid"); 5324 5325 /* 5326 * Function to initialize a va_filerev field sensibly. 5327 * XXX: Wouldn't a random number make a lot more sense ?? 
5328 */ 5329 u_quad_t 5330 init_va_filerev(void) 5331 { 5332 struct bintime bt; 5333 5334 getbinuptime(&bt); 5335 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5336 } 5337 5338 static int filt_vfsread(struct knote *kn, long hint); 5339 static int filt_vfswrite(struct knote *kn, long hint); 5340 static int filt_vfsvnode(struct knote *kn, long hint); 5341 static void filt_vfsdetach(struct knote *kn); 5342 static struct filterops vfsread_filtops = { 5343 .f_isfd = 1, 5344 .f_detach = filt_vfsdetach, 5345 .f_event = filt_vfsread 5346 }; 5347 static struct filterops vfswrite_filtops = { 5348 .f_isfd = 1, 5349 .f_detach = filt_vfsdetach, 5350 .f_event = filt_vfswrite 5351 }; 5352 static struct filterops vfsvnode_filtops = { 5353 .f_isfd = 1, 5354 .f_detach = filt_vfsdetach, 5355 .f_event = filt_vfsvnode 5356 }; 5357 5358 static void 5359 vfs_knllock(void *arg) 5360 { 5361 struct vnode *vp = arg; 5362 5363 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5364 } 5365 5366 static void 5367 vfs_knlunlock(void *arg) 5368 { 5369 struct vnode *vp = arg; 5370 5371 VOP_UNLOCK(vp, 0); 5372 } 5373 5374 static void 5375 vfs_knl_assert_locked(void *arg) 5376 { 5377 #ifdef DEBUG_VFS_LOCKS 5378 struct vnode *vp = arg; 5379 5380 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5381 #endif 5382 } 5383 5384 static void 5385 vfs_knl_assert_unlocked(void *arg) 5386 { 5387 #ifdef DEBUG_VFS_LOCKS 5388 struct vnode *vp = arg; 5389 5390 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5391 #endif 5392 } 5393 5394 int 5395 vfs_kqfilter(struct vop_kqfilter_args *ap) 5396 { 5397 struct vnode *vp = ap->a_vp; 5398 struct knote *kn = ap->a_kn; 5399 struct knlist *knl; 5400 5401 switch (kn->kn_filter) { 5402 case EVFILT_READ: 5403 kn->kn_fop = &vfsread_filtops; 5404 break; 5405 case EVFILT_WRITE: 5406 kn->kn_fop = &vfswrite_filtops; 5407 break; 5408 case EVFILT_VNODE: 5409 kn->kn_fop = &vfsvnode_filtops; 5410 break; 5411 default: 5412 return (EINVAL); 5413 } 5414 5415 kn->kn_hook = (caddr_t)vp; 5416 5417 v_addpollinfo(vp); 5418 if (vp->v_pollinfo == NULL) 5419 return (ENOMEM); 5420 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5421 vhold(vp); 5422 knlist_add(knl, kn, 0); 5423 5424 return (0); 5425 } 5426 5427 /* 5428 * Detach knote from vnode 5429 */ 5430 static void 5431 filt_vfsdetach(struct knote *kn) 5432 { 5433 struct vnode *vp = (struct vnode *)kn->kn_hook; 5434 5435 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5436 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5437 vdrop(vp); 5438 } 5439 5440 /*ARGSUSED*/ 5441 static int 5442 filt_vfsread(struct knote *kn, long hint) 5443 { 5444 struct vnode *vp = (struct vnode *)kn->kn_hook; 5445 struct vattr va; 5446 int res; 5447 5448 /* 5449 * filesystem is gone, so set the EOF flag and schedule 5450 * the knote for deletion. 5451 */ 5452 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5453 VI_LOCK(vp); 5454 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5455 VI_UNLOCK(vp); 5456 return (1); 5457 } 5458 5459 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5460 return (0); 5461 5462 VI_LOCK(vp); 5463 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5464 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5465 VI_UNLOCK(vp); 5466 return (res); 5467 } 5468 5469 /*ARGSUSED*/ 5470 static int 5471 filt_vfswrite(struct knote *kn, long hint) 5472 { 5473 struct vnode *vp = (struct vnode *)kn->kn_hook; 5474 5475 VI_LOCK(vp); 5476 5477 /* 5478 * filesystem is gone, so set the EOF flag and schedule 5479 * the knote for deletion. 
5480 */ 5481 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5482 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5483 5484 kn->kn_data = 0; 5485 VI_UNLOCK(vp); 5486 return (1); 5487 } 5488 5489 static int 5490 filt_vfsvnode(struct knote *kn, long hint) 5491 { 5492 struct vnode *vp = (struct vnode *)kn->kn_hook; 5493 int res; 5494 5495 VI_LOCK(vp); 5496 if (kn->kn_sfflags & hint) 5497 kn->kn_fflags |= hint; 5498 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5499 kn->kn_flags |= EV_EOF; 5500 VI_UNLOCK(vp); 5501 return (1); 5502 } 5503 res = (kn->kn_fflags != 0); 5504 VI_UNLOCK(vp); 5505 return (res); 5506 } 5507 5508 /* 5509 * Returns whether the directory is empty or not. 5510 * If it is empty, the return value is 0; otherwise 5511 * the return value is an error value (which may 5512 * be ENOTEMPTY). 5513 */ 5514 int 5515 vfs_emptydir(struct vnode *vp) 5516 { 5517 struct uio uio; 5518 struct iovec iov; 5519 struct dirent *dirent, *dp, *endp; 5520 int error, eof; 5521 5522 error = 0; 5523 eof = 0; 5524 5525 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 5526 5527 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 5528 iov.iov_base = dirent; 5529 iov.iov_len = sizeof(struct dirent); 5530 5531 uio.uio_iov = &iov; 5532 uio.uio_iovcnt = 1; 5533 uio.uio_offset = 0; 5534 uio.uio_resid = sizeof(struct dirent); 5535 uio.uio_segflg = UIO_SYSSPACE; 5536 uio.uio_rw = UIO_READ; 5537 uio.uio_td = curthread; 5538 5539 while (eof == 0 && error == 0) { 5540 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 5541 NULL, NULL); 5542 if (error != 0) 5543 break; 5544 endp = (void *)((uint8_t *)dirent + 5545 sizeof(struct dirent) - uio.uio_resid); 5546 for (dp = dirent; dp < endp; 5547 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 5548 if (dp->d_type == DT_WHT) 5549 continue; 5550 if (dp->d_namlen == 0) 5551 continue; 5552 if (dp->d_type != DT_DIR && 5553 dp->d_type != DT_UNKNOWN) { 5554 error = ENOTEMPTY; 5555 break; 5556 } 5557 if (dp->d_namlen > 2) { 5558 error = ENOTEMPTY; 5559 break; 5560 } 5561 if (dp->d_namlen == 1 && 5562 dp->d_name[0] != '.') { 5563 error = ENOTEMPTY; 5564 break; 5565 } 5566 if (dp->d_namlen == 2 && 5567 dp->d_name[1] != '.') { 5568 error = ENOTEMPTY; 5569 break; 5570 } 5571 uio.uio_resid = sizeof(struct dirent); 5572 } 5573 } 5574 free(dirent, M_TEMP); 5575 return (error); 5576 } 5577 5578 int 5579 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5580 { 5581 int error; 5582 5583 if (dp->d_reclen > ap->a_uio->uio_resid) 5584 return (ENAMETOOLONG); 5585 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5586 if (error) { 5587 if (ap->a_ncookies != NULL) { 5588 if (ap->a_cookies != NULL) 5589 free(ap->a_cookies, M_TEMP); 5590 ap->a_cookies = NULL; 5591 *ap->a_ncookies = 0; 5592 } 5593 return (error); 5594 } 5595 if (ap->a_ncookies == NULL) 5596 return (0); 5597 5598 KASSERT(ap->a_cookies, 5599 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5600 5601 *ap->a_cookies = realloc(*ap->a_cookies, 5602 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5603 (*ap->a_cookies)[*ap->a_ncookies] = off; 5604 *ap->a_ncookies += 1; 5605 return (0); 5606 } 5607 5608 /* 5609 * Mark for update the access time of the file if the filesystem 5610 * supports VOP_MARKATIME. This functionality is used by execve and 5611 * mmap, so we want to avoid the I/O implied by directly setting 5612 * va_atime for the sake of efficiency. 
5613 */ 5614 void 5615 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5616 { 5617 struct mount *mp; 5618 5619 mp = vp->v_mount; 5620 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5621 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5622 (void)VOP_MARKATIME(vp); 5623 } 5624 5625 /* 5626 * The purpose of this routine is to remove granularity from accmode_t, 5627 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5628 * VADMIN and VAPPEND. 5629 * 5630 * If it returns 0, the caller is supposed to continue with the usual 5631 * access checks using 'accmode' as modified by this routine. If it 5632 * returns nonzero value, the caller is supposed to return that value 5633 * as errno. 5634 * 5635 * Note that after this routine runs, accmode may be zero. 5636 */ 5637 int 5638 vfs_unixify_accmode(accmode_t *accmode) 5639 { 5640 /* 5641 * There is no way to specify explicit "deny" rule using 5642 * file mode or POSIX.1e ACLs. 5643 */ 5644 if (*accmode & VEXPLICIT_DENY) { 5645 *accmode = 0; 5646 return (0); 5647 } 5648 5649 /* 5650 * None of these can be translated into usual access bits. 5651 * Also, the common case for NFSv4 ACLs is to not contain 5652 * either of these bits. Caller should check for VWRITE 5653 * on the containing directory instead. 5654 */ 5655 if (*accmode & (VDELETE_CHILD | VDELETE)) 5656 return (EPERM); 5657 5658 if (*accmode & VADMIN_PERMS) { 5659 *accmode &= ~VADMIN_PERMS; 5660 *accmode |= VADMIN; 5661 } 5662 5663 /* 5664 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5665 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5666 */ 5667 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5668 5669 return (0); 5670 } 5671 5672 /* 5673 * Clear out a doomed vnode (if any) and replace it with a new one as long 5674 * as the fs is not being unmounted. Return the root vnode to the caller. 5675 */ 5676 static int __noinline 5677 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 5678 { 5679 struct vnode *vp; 5680 int error; 5681 5682 restart: 5683 if (mp->mnt_rootvnode != NULL) { 5684 MNT_ILOCK(mp); 5685 vp = mp->mnt_rootvnode; 5686 if (vp != NULL) { 5687 if (!VN_IS_DOOMED(vp)) { 5688 vrefact(vp); 5689 MNT_IUNLOCK(mp); 5690 error = vn_lock(vp, flags); 5691 if (error == 0) { 5692 *vpp = vp; 5693 return (0); 5694 } 5695 vrele(vp); 5696 goto restart; 5697 } 5698 /* 5699 * Clear the old one. 5700 */ 5701 mp->mnt_rootvnode = NULL; 5702 } 5703 MNT_IUNLOCK(mp); 5704 if (vp != NULL) { 5705 /* 5706 * Paired with a fence in vfs_op_thread_exit(). 
5707 */ 5708 atomic_thread_fence_acq(); 5709 vfs_op_barrier_wait(mp); 5710 vrele(vp); 5711 } 5712 } 5713 error = VFS_CACHEDROOT(mp, flags, vpp); 5714 if (error != 0) 5715 return (error); 5716 if (mp->mnt_vfs_ops == 0) { 5717 MNT_ILOCK(mp); 5718 if (mp->mnt_vfs_ops != 0) { 5719 MNT_IUNLOCK(mp); 5720 return (0); 5721 } 5722 if (mp->mnt_rootvnode == NULL) { 5723 vrefact(*vpp); 5724 mp->mnt_rootvnode = *vpp; 5725 } else { 5726 if (mp->mnt_rootvnode != *vpp) { 5727 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 5728 panic("%s: mismatch between vnode returned " 5729 " by VFS_CACHEDROOT and the one cached " 5730 " (%p != %p)", 5731 __func__, *vpp, mp->mnt_rootvnode); 5732 } 5733 } 5734 } 5735 MNT_IUNLOCK(mp); 5736 } 5737 return (0); 5738 } 5739 5740 int 5741 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 5742 { 5743 struct vnode *vp; 5744 int error; 5745 5746 if (!vfs_op_thread_enter(mp)) 5747 return (vfs_cache_root_fallback(mp, flags, vpp)); 5748 vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); 5749 if (vp == NULL || VN_IS_DOOMED(vp)) { 5750 vfs_op_thread_exit(mp); 5751 return (vfs_cache_root_fallback(mp, flags, vpp)); 5752 } 5753 vrefact(vp); 5754 vfs_op_thread_exit(mp); 5755 error = vn_lock(vp, flags); 5756 if (error != 0) { 5757 vrele(vp); 5758 return (vfs_cache_root_fallback(mp, flags, vpp)); 5759 } 5760 *vpp = vp; 5761 return (0); 5762 } 5763 5764 struct vnode * 5765 vfs_cache_root_clear(struct mount *mp) 5766 { 5767 struct vnode *vp; 5768 5769 /* 5770 * ops > 0 guarantees there is nobody who can see this vnode 5771 */ 5772 MPASS(mp->mnt_vfs_ops > 0); 5773 vp = mp->mnt_rootvnode; 5774 mp->mnt_rootvnode = NULL; 5775 return (vp); 5776 } 5777 5778 void 5779 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 5780 { 5781 5782 MPASS(mp->mnt_vfs_ops > 0); 5783 vrefact(vp); 5784 mp->mnt_rootvnode = vp; 5785 } 5786 5787 /* 5788 * These are helper functions for filesystems to traverse all 5789 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5790 * 5791 * This interface replaces MNT_VNODE_FOREACH. 5792 */ 5793 5794 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5795 5796 struct vnode * 5797 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5798 { 5799 struct vnode *vp; 5800 5801 if (should_yield()) 5802 kern_yield(PRI_USER); 5803 MNT_ILOCK(mp); 5804 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5805 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5806 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5807 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 5808 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 5809 continue; 5810 VI_LOCK(vp); 5811 if (VN_IS_DOOMED(vp)) { 5812 VI_UNLOCK(vp); 5813 continue; 5814 } 5815 break; 5816 } 5817 if (vp == NULL) { 5818 __mnt_vnode_markerfree_all(mvp, mp); 5819 /* MNT_IUNLOCK(mp); -- done in above function */ 5820 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5821 return (NULL); 5822 } 5823 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5824 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5825 MNT_IUNLOCK(mp); 5826 return (vp); 5827 } 5828 5829 struct vnode * 5830 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5831 { 5832 struct vnode *vp; 5833 5834 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5835 MNT_ILOCK(mp); 5836 MNT_REF(mp); 5837 (*mvp)->v_mount = mp; 5838 (*mvp)->v_type = VMARKER; 5839 5840 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5841 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
*/ 5842 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 5843 continue; 5844 VI_LOCK(vp); 5845 if (VN_IS_DOOMED(vp)) { 5846 VI_UNLOCK(vp); 5847 continue; 5848 } 5849 break; 5850 } 5851 if (vp == NULL) { 5852 MNT_REL(mp); 5853 MNT_IUNLOCK(mp); 5854 free(*mvp, M_VNODE_MARKER); 5855 *mvp = NULL; 5856 return (NULL); 5857 } 5858 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5859 MNT_IUNLOCK(mp); 5860 return (vp); 5861 } 5862 5863 void 5864 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5865 { 5866 5867 if (*mvp == NULL) { 5868 MNT_IUNLOCK(mp); 5869 return; 5870 } 5871 5872 mtx_assert(MNT_MTX(mp), MA_OWNED); 5873 5874 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5875 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5876 MNT_REL(mp); 5877 MNT_IUNLOCK(mp); 5878 free(*mvp, M_VNODE_MARKER); 5879 *mvp = NULL; 5880 } 5881 5882 /* 5883 * These are helper functions for filesystems to traverse their 5884 * active vnodes. See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5885 */ 5886 static void 5887 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5888 { 5889 5890 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5891 5892 MNT_ILOCK(mp); 5893 MNT_REL(mp); 5894 MNT_IUNLOCK(mp); 5895 free(*mvp, M_VNODE_MARKER); 5896 *mvp = NULL; 5897 } 5898 5899 /* 5900 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5901 * conventional lock order during mnt_vnode_next_active iteration. 5902 * 5903 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5904 * The list lock is dropped and reacquired. On success, both locks are held. 5905 * On failure, the mount vnode list lock is held but the vnode interlock is 5906 * not, and the procedure may have yielded. 5907 */ 5908 static bool 5909 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5910 struct vnode *vp) 5911 { 5912 const struct vnode *tmp; 5913 bool held, ret; 5914 5915 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5916 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5917 ("%s: bad marker", __func__)); 5918 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5919 ("%s: inappropriate vnode", __func__)); 5920 ASSERT_VI_UNLOCKED(vp, __func__); 5921 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5922 5923 ret = false; 5924 5925 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5926 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5927 5928 /* 5929 * Use a hold to prevent vp from disappearing while the mount vnode 5930 * list lock is dropped and reacquired. Normally a hold would be 5931 * acquired with vhold(), but that might try to acquire the vnode 5932 * interlock, which would be a LOR with the mount vnode list lock. 5933 */ 5934 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5935 mtx_unlock(&mp->mnt_listmtx); 5936 if (!held) 5937 goto abort; 5938 VI_LOCK(vp); 5939 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5940 vdropl(vp); 5941 goto abort; 5942 } 5943 mtx_lock(&mp->mnt_listmtx); 5944 5945 /* 5946 * Determine whether the vnode is still the next one after the marker, 5947 * excepting any other markers. If the vnode has not been doomed by 5948 * vgone() then the hold should have ensured that it remained on the 5949 * active list. If it has been doomed but is still on the active list, 5950 * don't abort, but rather skip over it (avoid spinning on doomed 5951 * vnodes). 
5952 */ 5953 tmp = mvp; 5954 do { 5955 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5956 } while (tmp != NULL && tmp->v_type == VMARKER); 5957 if (tmp != vp) { 5958 mtx_unlock(&mp->mnt_listmtx); 5959 VI_UNLOCK(vp); 5960 goto abort; 5961 } 5962 5963 ret = true; 5964 goto out; 5965 abort: 5966 maybe_yield(); 5967 mtx_lock(&mp->mnt_listmtx); 5968 out: 5969 if (ret) 5970 ASSERT_VI_LOCKED(vp, __func__); 5971 else 5972 ASSERT_VI_UNLOCKED(vp, __func__); 5973 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5974 return (ret); 5975 } 5976 5977 static struct vnode * 5978 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 5979 { 5980 struct vnode *vp, *nvp; 5981 5982 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5983 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5984 restart: 5985 vp = TAILQ_NEXT(*mvp, v_actfreelist); 5986 while (vp != NULL) { 5987 if (vp->v_type == VMARKER) { 5988 vp = TAILQ_NEXT(vp, v_actfreelist); 5989 continue; 5990 } 5991 /* 5992 * Try-lock because this is the wrong lock order. If that does 5993 * not succeed, drop the mount vnode list lock and try to 5994 * reacquire it and the vnode interlock in the right order. 5995 */ 5996 if (!VI_TRYLOCK(vp) && 5997 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 5998 goto restart; 5999 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6000 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6001 ("alien vnode on the active list %p %p", vp, mp)); 6002 if (vp->v_mount == mp && !VN_IS_DOOMED(vp)) 6003 break; 6004 nvp = TAILQ_NEXT(vp, v_actfreelist); 6005 VI_UNLOCK(vp); 6006 vp = nvp; 6007 } 6008 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6009 6010 /* Check if we are done */ 6011 if (vp == NULL) { 6012 mtx_unlock(&mp->mnt_listmtx); 6013 mnt_vnode_markerfree_active(mvp, mp); 6014 return (NULL); 6015 } 6016 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 6017 mtx_unlock(&mp->mnt_listmtx); 6018 ASSERT_VI_LOCKED(vp, "active iter"); 6019 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 6020 return (vp); 6021 } 6022 6023 struct vnode * 6024 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6025 { 6026 6027 if (should_yield()) 6028 kern_yield(PRI_USER); 6029 mtx_lock(&mp->mnt_listmtx); 6030 return (mnt_vnode_next_active(mvp, mp)); 6031 } 6032 6033 struct vnode * 6034 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 6035 { 6036 struct vnode *vp; 6037 6038 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 6039 MNT_ILOCK(mp); 6040 MNT_REF(mp); 6041 MNT_IUNLOCK(mp); 6042 (*mvp)->v_type = VMARKER; 6043 (*mvp)->v_mount = mp; 6044 6045 mtx_lock(&mp->mnt_listmtx); 6046 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 6047 if (vp == NULL) { 6048 mtx_unlock(&mp->mnt_listmtx); 6049 mnt_vnode_markerfree_active(mvp, mp); 6050 return (NULL); 6051 } 6052 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 6053 return (mnt_vnode_next_active(mvp, mp)); 6054 } 6055 6056 void 6057 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 6058 { 6059 6060 if (*mvp == NULL) 6061 return; 6062 6063 mtx_lock(&mp->mnt_listmtx); 6064 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6065 mtx_unlock(&mp->mnt_listmtx); 6066 mnt_vnode_markerfree_active(mvp, mp); 6067 } 6068