/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	vnlru_return_batches(struct vfsops *mnt_op);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif
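
/*
 * Illustrative, non-compiled sketch of how these fences pair up.  The
 * writer side mirrors the inlined hold in vnlru_free_locked() below; the
 * reader side is a hypothetical assertion-style consumer.  Both function
 * names are made up for illustration.
 */
#if 0
static void
fence_writer_sketch(struct vnode *vp)
{
	/* Interlock held: publish the flag change before the refcount. */
	vp->v_iflag &= ~VI_FREE;
	VNODE_REFCOUNT_FENCE_REL();
	refcount_acquire(&vp->v_holdcnt);
}

static void
fence_reader_sketch(struct vnode *vp)
{
	/* Lockless reader: observe the refcount first, then the flags. */
	if (vp->v_holdcnt > 0) {
		VNODE_REFCOUNT_FENCE_ACQ();
		VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
		    ("VI_FREE set on held vnode"));
	}
}
#endif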
/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
 */
static unsigned long numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

static u_long mnt_free_list_batch = 128;
SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
    &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};
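
/*
 * The tables above back the IFTOVT() and VTTOIF() macros from sys/vnode.h.
 * A non-compiled sketch of the round trip (the function name is made up):
 */
#if 0
static void
vtype_conversion_sketch(void)
{
	enum vtype vt;
	mode_t mode;

	/* S_IFDIR (0040000) indexes iftovt_tab[4], which is VDIR. */
	vt = IFTOVT(S_IFDIR);
	KASSERT(vt == VDIR, ("IFTOVT(S_IFDIR) != VDIR"));

	/* ...and vttoif_tab[VDIR] recovers the S_IFMT bits. */
	mode = VTTOIF(VDIR);
	KASSERT(mode == S_IFDIR, ("VTTOIF(VDIR) != S_IFDIR"));
}
#endif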
/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS,
    &reassignbufcalls, 0, "Number of calls to reassignbuf");

static counter_u64_t free_owe_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
    "Number of times free vnodes kept on active list due to VFS "
    "owing inactivation");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define	SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define	SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;
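
/*
 * A non-compiled sketch of how an entry lands on the syncer wheel
 * declared above; the real logic lives in vn_syncer_add_to_worklist().
 * The function name here is made up, and locking is reduced to an assert.
 */
#if 0
static void
syncer_enqueue_sketch(struct bufobj *bo, int delay)
{
	int slot;

	mtx_assert(&sync_mtx, MA_OWNED);
	/* Clamp the delay so the slot stays within the wheel. */
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
}
#endif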
/* Target for maximum number of vnodes. */
int desiredvnodes;
static int gapvnodes;		/* gap between wanted and desired */
static int vhiwat;		/* enough extras after expansion */
static int vlowat;		/* minimal extras before expansion */
static int vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static int
sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
{
	int error, old_desiredvnodes;

	old_desiredvnodes = desiredvnodes;
	if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
		return (error);
	if (old_desiredvnodes != desiredvnodes) {
		wantfreevnodes = desiredvnodes / 4;
		/* XXX locking seems to be incomplete. */
		vfs_hash_changesize(desiredvnodes);
		cache_changesize(desiredvnodes);
	}
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
    sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if ((vp->v_iflag & VI_DOOMED) != 0) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the
		 * caller know that the sysctl had no effect.  Return EAGAIN
		 * because a subsequent call will likely succeed (since namei
		 * will create a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp, 0);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");
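
/*
 * The two knobs above are reachable from userland via sysctlbyname(3).
 * An illustrative userland program (not kernel code, not compiled here);
 * note the handler rejects strings of PATH_MAX or longer and returns
 * EAGAIN if the vnode is already being recycled:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <string.h>

int
main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/tmp/somefile";

	/* The kernel copies in the string and namei()s it. */
	if (sysctlbyname("debug.try_reclaim_vnode", NULL, NULL,
	    path, strlen(path) + 1) == -1)
		err(1, "debug.try_reclaim_vnode");
	return (0);
}
#endif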
/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * 1024 * 1024 / 64)	/* 8M */
#endif

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif
static void
vntblinit(void *dummy __unused)
{
	u_int i;
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof(struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	free_owe_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
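
/*
 * Worked example for the physvnodes sizing above (illustrative numbers):
 * on a 4 GiB machine, pgtok(vm_cnt.v_page_count) is about 4194304 (KB), so
 *
 *	physvnodes ~= maxproc + 4194304 / 64 + 3 * 1572864 / 64
 *	           ~= maxproc + 65536 + 73728 ~= 140000 vnodes,
 *
 * where 1572864 = 98304 * 16 caps the second term.  Below that cap the two
 * memory terms sum to memory/16; above it the marginal rate is 1/64.
 */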

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs				var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mp, ref, 1);
		vfs_mp_count_add_pcpu(mp, lockref, 1);
		vfs_op_thread_exit(mp);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount point it tried to busy is no
	 * longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mp, lockref, 1);
		vfs_mp_count_sub_pcpu(mp, ref, 1);
		vfs_op_thread_exit(mp);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}
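
/*
 * A sketch of the canonical mountlist traversal using vfs_busy(); the
 * same pattern appears in vnlru_return_batches() and vnlru_proc() below.
 * The function name is made up; the body is not compiled.
 */
#if 0
static void
mountlist_walk_sketch(void)
{
	struct mount *mp, *nmp;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
			/* Failure: mountlist_mtx is still held. */
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		/* Success: mountlist_mtx was dropped; mp cannot unmount. */
		/* ... do per-mount work here ... */
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}
#endif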
/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we
 * have to check what we got, and fall back to the slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
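
/*
 * vattr_null() lets VOP_SETATTR() implementations treat VNOVAL as "not
 * requested".  A non-compiled sketch of a hypothetical consumer, in the
 * style used by filesystem setattr routines:
 */
#if 0
static int
setattr_consumer_sketch(struct vnode *vp, struct vattr *vap)
{

	/* Only fields explicitly set by the caller differ from VNOVAL. */
	if (vap->va_size != (u_quad_t)VNOVAL) {
		/* ... handle truncation or extension ... */
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		/* ... handle a mode change ... */
	}
	return (0);
}
#endif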
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param mp		 Try to reclaim vnodes from this mountpoint
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it doing anything.)  The
		 * order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    ((vp->v_iflag & VI_FREE) != 0) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_iflag & VI_FREE) != 0 ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, 0);
			vdropl(vp);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_USER);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0, "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static void
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp;
	struct mount *mp;
	bool tried_batches;

	tried_batches = false;
	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (vp == NULL) {
			if (tried_batches)
				break;
			mtx_unlock(&vnode_free_list_mtx);
			vnlru_return_batches(mnt_op);
			tried_batches = true;
			mtx_lock(&vnode_free_list_mtx);
			continue;
		}

		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		KASSERT((vp->v_iflag & VI_FREE) != 0,
		    ("Removing vnode not on freelist"));
		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
		    ("Mangling active vnode"));
		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);

		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if ((mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
			continue;
		}
		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
		    vp, ("vp inconsistent on freelist"));

		/*
		 * The clear of VI_FREE prevents activation of the
		 * vnode.  There is no sense in putting the vnode on
		 * the mount point active list, only to remove it
		 * later during recycling.  Inline the relevant part
		 * of vholdl(), to avoid triggering assertions or
		 * activating.
		 */
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		VNODE_REFCOUNT_FENCE_REL();
		refcount_acquire(&vp->v_holdcnt);

		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_free_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_free_list_mtx);
}
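
/*
 * vnlru_free() is the exported entry point; a caller under vnode pressure
 * can ask for a batch of free vnodes to be recycled.  A minimal usage
 * sketch (not compiled); passing a filesystem's vfsops would restrict the
 * recycling to that filesystem type, NULL means any:
 */
#if 0
	/* Recycle up to 32 free vnodes belonging to any filesystem. */
	vnlru_free(32, NULL);
#endif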
/* XXX some names and initialization are bad for limits and watermarks. */
static int
vspace(void)
{
	int space;

	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
	if (numvnodes > desiredvnodes)
		return (0);
	space = desiredvnodes - numvnodes;
	if (freevnodes > wantfreevnodes)
		space += freevnodes - wantfreevnodes;
	return (space);
}

static void
vnlru_return_batch_locked(struct mount *mp)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	if (mp->mnt_tmpfreevnodelistsize == 0)
		return;

	TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) {
		VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp,
		    ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist"));
		vp->v_mflag &= ~VMP_TMPMNTFREELIST;
	}
	mtx_lock(&vnode_free_list_mtx);
	TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist);
	freevnodes += mp->mnt_tmpfreevnodelistsize;
	mtx_unlock(&vnode_free_list_mtx);
	mp->mnt_tmpfreevnodelistsize = 0;
}

static void
vnlru_return_batch(struct mount *mp)
{

	mtx_lock(&mp->mnt_listmtx);
	vnlru_return_batch_locked(mp);
	mtx_unlock(&mp->mnt_listmtx);
}

static void
vnlru_return_batches(struct vfsops *mnt_op)
{
	struct mount *mp, *nmp;
	bool need_unbusy;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		need_unbusy = false;
		if (mnt_op != NULL && mp->mnt_op != mnt_op)
			goto next;
		if (mp->mnt_tmpfreevnodelistsize == 0)
			goto next;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) {
			vnlru_return_batch(mp);
			need_unbusy = true;
			mtx_lock(&mountlist_mtx);
		}
next:
		nmp = TAILQ_NEXT(mp, mnt_list);
		if (need_unbusy)
			vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes, vsp;
	bool reclaim_nc_src;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_free_list_mtx);
		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (numvnodes > desiredvnodes)
			vnlru_free_locked(numvnodes - desiredvnodes, NULL);
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or not excessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		vsp = vspace();
		if (vsp >= vlowat && force == 0) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		onumvnodes = numvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (numvnodes <= desiredvnodes)
			usevnodes = numvnodes - freevnodes;
		else
			usevnodes = numvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, reclaim_nc_src, trigger);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
		/*
		 * After becoming active to expand above low water, keep
		 * active until above high water.
		 */
		vsp = vspace();
		force = vsp < vhiwat;
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
1415 */ 1416 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1417 VOP_UNLOCK(vp, 0); 1418 CTR2(KTR_VFS, 1419 "%s: impossible to recycle, cannot start the write for %p", 1420 __func__, vp); 1421 return (EBUSY); 1422 } 1423 /* 1424 * If we got this far, we need to acquire the interlock and see if 1425 * anyone picked up this vnode from another list. If not, we will 1426 * mark it with DOOMED via vgonel() so that anyone who does find it 1427 * will skip over it. 1428 */ 1429 VI_LOCK(vp); 1430 if (vp->v_usecount) { 1431 VOP_UNLOCK(vp, 0); 1432 VI_UNLOCK(vp); 1433 vn_finished_write(vnmp); 1434 CTR2(KTR_VFS, 1435 "%s: impossible to recycle, %p is already referenced", 1436 __func__, vp); 1437 return (EBUSY); 1438 } 1439 if ((vp->v_iflag & VI_DOOMED) == 0) { 1440 counter_u64_add(recycles_count, 1); 1441 vgonel(vp); 1442 } 1443 VOP_UNLOCK(vp, 0); 1444 VI_UNLOCK(vp); 1445 vn_finished_write(vnmp); 1446 return (0); 1447 } 1448 1449 static void 1450 vcheckspace(void) 1451 { 1452 int vsp; 1453 1454 vsp = vspace(); 1455 if (vsp < vlowat && vnlruproc_sig == 0) { 1456 vnlruproc_sig = 1; 1457 wakeup(vnlruproc); 1458 } 1459 } 1460 1461 /* 1462 * Wait if necessary for space for a new vnode. 1463 */ 1464 static int 1465 getnewvnode_wait(int suspended) 1466 { 1467 1468 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1469 if (numvnodes >= desiredvnodes) { 1470 if (suspended) { 1471 /* 1472 * The file system is being suspended. We cannot 1473 * risk a deadlock here, so allow allocation of 1474 * another vnode even if this would give too many. 1475 */ 1476 return (0); 1477 } 1478 if (vnlruproc_sig == 0) { 1479 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1480 wakeup(vnlruproc); 1481 } 1482 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1483 "vlruwk", hz); 1484 } 1485 /* Post-adjust like the pre-adjust in getnewvnode(). */ 1486 if (numvnodes + 1 > desiredvnodes && freevnodes > 1) 1487 vnlru_free_locked(1, NULL); 1488 return (numvnodes >= desiredvnodes ? ENFILE : 0); 1489 } 1490 1491 /* 1492 * This hack is fragile, and probably not needed any more now that the 1493 * watermark handling works. 1494 */ 1495 void 1496 getnewvnode_reserve(u_int count) 1497 { 1498 struct thread *td; 1499 1500 /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */ 1501 /* XXX no longer so quick, but this part is not racy. */ 1502 mtx_lock(&vnode_free_list_mtx); 1503 if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes) 1504 vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes, 1505 freevnodes - wantfreevnodes), NULL); 1506 mtx_unlock(&vnode_free_list_mtx); 1507 1508 td = curthread; 1509 /* First try to be quick and racy. */ 1510 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1511 td->td_vp_reserv += count; 1512 vcheckspace(); /* XXX no longer so quick, but more racy */ 1513 return; 1514 } else 1515 atomic_subtract_long(&numvnodes, count); 1516 1517 mtx_lock(&vnode_free_list_mtx); 1518 while (count > 0) { 1519 if (getnewvnode_wait(0) == 0) { 1520 count--; 1521 td->td_vp_reserv++; 1522 atomic_add_long(&numvnodes, 1); 1523 } 1524 } 1525 vcheckspace(); 1526 mtx_unlock(&vnode_free_list_mtx); 1527 } 1528 1529 /* 1530 * This hack is fragile, especially if desiredvnodes or wantvnodes are 1531 * misconfgured or changed significantly. 

/*
 * This hack is fragile, especially if desiredvnodes or wantfreevnodes are
 * misconfigured or changed significantly.  Reducing desiredvnodes below
 * the reserved amount should cause bizarre behaviour like reducing it
 * below the number of active vnodes -- the system will try to reduce
 * numvnodes to match, but should fail, so the subtraction below should
 * not overflow.
 */
void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;
	static int cyclecount;
	int error __unused;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_free_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	counter_u64_add(vnodes_created, 1);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
1630 */ 1631 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1632 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1633 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1634 vp->v_type = VNON; 1635 vp->v_tag = tag; 1636 vp->v_op = vops; 1637 v_init_counters(vp); 1638 vp->v_bufobj.bo_ops = &buf_ops_bio; 1639 #ifdef DIAGNOSTIC 1640 if (mp == NULL && vops != &dead_vnodeops) 1641 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1642 #endif 1643 #ifdef MAC 1644 mac_vnode_init(vp); 1645 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1646 mac_vnode_associate_singlelabel(mp, vp); 1647 #endif 1648 if (mp != NULL) { 1649 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1650 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1651 vp->v_vflag |= VV_NOKNOTE; 1652 } 1653 1654 /* 1655 * For the filesystems which do not use vfs_hash_insert(), 1656 * still initialize v_hash to have vfs_hash_index() useful. 1657 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1658 * its own hashing. 1659 */ 1660 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1661 1662 *vpp = vp; 1663 return (0); 1664 } 1665 1666 /* 1667 * Delete from old mount point vnode list, if on one. 1668 */ 1669 static void 1670 delmntque(struct vnode *vp) 1671 { 1672 struct mount *mp; 1673 int active; 1674 1675 mp = vp->v_mount; 1676 if (mp == NULL) 1677 return; 1678 MNT_ILOCK(mp); 1679 VI_LOCK(vp); 1680 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1681 ("Active vnode list size %d > Vnode list size %d", 1682 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1683 active = vp->v_iflag & VI_ACTIVE; 1684 vp->v_iflag &= ~VI_ACTIVE; 1685 if (active) { 1686 mtx_lock(&mp->mnt_listmtx); 1687 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1688 mp->mnt_activevnodelistsize--; 1689 mtx_unlock(&mp->mnt_listmtx); 1690 } 1691 vp->v_mount = NULL; 1692 VI_UNLOCK(vp); 1693 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1694 ("bad mount point vnode list size")); 1695 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1696 mp->mnt_nvnodelistsize--; 1697 MNT_REL(mp); 1698 MNT_IUNLOCK(mp); 1699 } 1700 1701 static void 1702 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1703 { 1704 1705 vp->v_data = NULL; 1706 vp->v_op = &dead_vnodeops; 1707 vgone(vp); 1708 vput(vp); 1709 } 1710 1711 /* 1712 * Insert into list of vnodes for the new mount point, if available. 1713 */ 1714 int 1715 insmntque1(struct vnode *vp, struct mount *mp, 1716 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1717 { 1718 1719 KASSERT(vp->v_mount == NULL, 1720 ("insmntque: vnode already on per mount vnode list")); 1721 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1722 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1723 1724 /* 1725 * We acquire the vnode interlock early to ensure that the 1726 * vnode cannot be recycled by another process releasing a 1727 * holdcnt on it before we get it on both the vnode list 1728 * and the active vnode list. The mount mutex protects only 1729 * manipulation of the vnode list and the vnode freelist 1730 * mutex protects only manipulation of the active vnode list. 1731 * Hence the need to hold the vnode interlock throughout. 

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;
	int active;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
	    ("Active vnode list size %d > Vnode list size %d",
	    mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
	active = vp->v_iflag & VI_ACTIVE;
	vp->v_iflag &= ~VI_ACTIVE;
	if (active) {
		mtx_lock(&mp->mnt_listmtx);
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
		mtx_unlock(&mp->mnt_listmtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list, and the mnt_listmtx protects
	 * only manipulation of the active vnode list.  Hence the need
	 * to hold the vnode interlock throughout.
	 */
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) &&
	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		if (dtr != NULL)
			dtr(vp, dtr_arg);
		return (EBUSY);
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
	    ("Activating already active vnode"));
	vp->v_iflag |= VI_ACTIVE;
	mtx_lock(&mp->mnt_listmtx);
	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
	mp->mnt_activevnodelistsize++;
	mtx_unlock(&mp->mnt_listmtx);
	VI_UNLOCK(vp);
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0 && !(flags & V_CLEANONLY))
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
			BO_UNLOCK(bo);
			vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
			BO_LOCK(bo);
		}
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL &&
	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
		VM_OBJECT_WLOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
1838 OBJPR_CLEANONLY : 0); 1839 VM_OBJECT_WUNLOCK(bo->bo_object); 1840 } 1841 1842 #ifdef INVARIANTS 1843 BO_LOCK(bo); 1844 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1845 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1846 bo->bo_clean.bv_cnt > 0)) 1847 panic("vinvalbuf: flush failed"); 1848 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1849 bo->bo_dirty.bv_cnt > 0) 1850 panic("vinvalbuf: flush dirty failed"); 1851 BO_UNLOCK(bo); 1852 #endif 1853 return (0); 1854 } 1855 1856 /* 1857 * Flush out and invalidate all buffers associated with a vnode. 1858 * Called with the underlying object locked. 1859 */ 1860 int 1861 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1862 { 1863 1864 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1865 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1866 if (vp->v_object != NULL && vp->v_object->handle != vp) 1867 return (0); 1868 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1869 } 1870 1871 /* 1872 * Flush out buffers on the specified list. 1873 * 1874 */ 1875 static int 1876 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1877 int slptimeo) 1878 { 1879 struct buf *bp, *nbp; 1880 int retval, error; 1881 daddr_t lblkno; 1882 b_xflags_t xflags; 1883 1884 ASSERT_BO_WLOCKED(bo); 1885 1886 retval = 0; 1887 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1888 /* 1889 * If we are flushing both V_NORMAL and V_ALT buffers then 1890 * do not skip any buffers. If we are flushing only V_NORMAL 1891 * buffers then skip buffers marked as BX_ALTDATA. If we are 1892 * flushing only V_ALT buffers then skip buffers not marked 1893 * as BX_ALTDATA. 1894 */ 1895 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 1896 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 1897 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 1898 continue; 1899 } 1900 if (nbp != NULL) { 1901 lblkno = nbp->b_lblkno; 1902 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1903 } 1904 retval = EAGAIN; 1905 error = BUF_TIMELOCK(bp, 1906 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1907 "flushbuf", slpflag, slptimeo); 1908 if (error) { 1909 BO_LOCK(bo); 1910 return (error != ENOLCK ? error : EAGAIN); 1911 } 1912 KASSERT(bp->b_bufobj == bo, 1913 ("bp %p wrong b_bufobj %p should be %p", 1914 bp, bp->b_bufobj, bo)); 1915 /* 1916 * XXX Since there are no node locks for NFS, I 1917 * believe there is a slight chance that a delayed 1918 * write will occur while sleeping just above, so 1919 * check for it. 1920 */ 1921 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1922 (flags & V_SAVE)) { 1923 bremfree(bp); 1924 bp->b_flags |= B_ASYNC; 1925 bwrite(bp); 1926 BO_LOCK(bo); 1927 return (EAGAIN); /* XXX: why not loop ? 
*/ 1928 } 1929 bremfree(bp); 1930 bp->b_flags |= (B_INVAL | B_RELBUF); 1931 bp->b_flags &= ~B_ASYNC; 1932 brelse(bp); 1933 BO_LOCK(bo); 1934 if (nbp == NULL) 1935 break; 1936 nbp = gbincore(bo, lblkno); 1937 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1938 != xflags) 1939 break; /* nbp invalid */ 1940 } 1941 return (retval); 1942 } 1943 1944 int 1945 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 1946 { 1947 struct buf *bp; 1948 int error; 1949 daddr_t lblkno; 1950 1951 ASSERT_BO_LOCKED(bo); 1952 1953 for (lblkno = startn;;) { 1954 again: 1955 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 1956 if (bp == NULL || bp->b_lblkno >= endn || 1957 bp->b_lblkno < startn) 1958 break; 1959 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 1960 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 1961 if (error != 0) { 1962 BO_RLOCK(bo); 1963 if (error == ENOLCK) 1964 goto again; 1965 return (error); 1966 } 1967 KASSERT(bp->b_bufobj == bo, 1968 ("bp %p wrong b_bufobj %p should be %p", 1969 bp, bp->b_bufobj, bo)); 1970 lblkno = bp->b_lblkno + 1; 1971 if ((bp->b_flags & B_MANAGED) == 0) 1972 bremfree(bp); 1973 bp->b_flags |= B_RELBUF; 1974 /* 1975 * In the VMIO case, use the B_NOREUSE flag to hint that the 1976 * pages backing each buffer in the range are unlikely to be 1977 * reused. Dirty buffers will have the hint applied once 1978 * they've been written. 1979 */ 1980 if ((bp->b_flags & B_VMIO) != 0) 1981 bp->b_flags |= B_NOREUSE; 1982 brelse(bp); 1983 BO_RLOCK(bo); 1984 } 1985 return (0); 1986 } 1987 1988 /* 1989 * Truncate a file's buffer and pages to a specified length. This 1990 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1991 * sync activity. 1992 */ 1993 int 1994 vtruncbuf(struct vnode *vp, off_t length, int blksize) 1995 { 1996 struct buf *bp, *nbp; 1997 struct bufobj *bo; 1998 daddr_t startlbn; 1999 2000 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2001 vp, blksize, (uintmax_t)length); 2002 2003 /* 2004 * Round up to the *next* lbn. 2005 */ 2006 startlbn = howmany(length, blksize); 2007 2008 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2009 2010 bo = &vp->v_bufobj; 2011 restart_unlocked: 2012 BO_LOCK(bo); 2013 2014 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2015 ; 2016 2017 if (length > 0) { 2018 restartsync: 2019 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2020 if (bp->b_lblkno > 0) 2021 continue; 2022 /* 2023 * Since we hold the vnode lock this should only 2024 * fail if we're racing with the buf daemon. 2025 */ 2026 if (BUF_LOCK(bp, 2027 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2028 BO_LOCKPTR(bo)) == ENOLCK) 2029 goto restart_unlocked; 2030 2031 VNASSERT((bp->b_flags & B_DELWRI), vp, 2032 ("buf(%p) on dirty queue without DELWRI", bp)); 2033 2034 bremfree(bp); 2035 bawrite(bp); 2036 BO_LOCK(bo); 2037 goto restartsync; 2038 } 2039 } 2040 2041 bufobj_wwait(bo, 0, 0); 2042 BO_UNLOCK(bo); 2043 vnode_pager_setsize(vp, length); 2044 2045 return (0); 2046 } 2047 2048 /* 2049 * Invalidate the cached pages of a file's buffer within the range of block 2050 * numbers [startlbn, endlbn). 
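 *
 * As a worked example: with blksize 32768, dropping cached data for
 * byte offsets [65536, 131072) means startlbn 2 and endlbn 4; the
 * byte range handed to vn_pages_remove() below is computed as
 * start = blksize * startlbn and end = blksize * endlbn, recovering
 * exactly those offsets.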
2051 */ 2052 void 2053 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2054 int blksize) 2055 { 2056 struct bufobj *bo; 2057 off_t start, end; 2058 2059 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2060 2061 start = blksize * startlbn; 2062 end = blksize * endlbn; 2063 2064 bo = &vp->v_bufobj; 2065 BO_LOCK(bo); 2066 MPASS(blksize == bo->bo_bsize); 2067 2068 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2069 ; 2070 2071 BO_UNLOCK(bo); 2072 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2073 } 2074 2075 static int 2076 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2077 daddr_t startlbn, daddr_t endlbn) 2078 { 2079 struct buf *bp, *nbp; 2080 bool anyfreed; 2081 2082 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2083 ASSERT_BO_LOCKED(bo); 2084 2085 do { 2086 anyfreed = false; 2087 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2088 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2089 continue; 2090 if (BUF_LOCK(bp, 2091 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2092 BO_LOCKPTR(bo)) == ENOLCK) { 2093 BO_LOCK(bo); 2094 return (EAGAIN); 2095 } 2096 2097 bremfree(bp); 2098 bp->b_flags |= B_INVAL | B_RELBUF; 2099 bp->b_flags &= ~B_ASYNC; 2100 brelse(bp); 2101 anyfreed = true; 2102 2103 BO_LOCK(bo); 2104 if (nbp != NULL && 2105 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2106 nbp->b_vp != vp || 2107 (nbp->b_flags & B_DELWRI) != 0)) 2108 return (EAGAIN); 2109 } 2110 2111 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2112 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2113 continue; 2114 if (BUF_LOCK(bp, 2115 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2116 BO_LOCKPTR(bo)) == ENOLCK) { 2117 BO_LOCK(bo); 2118 return (EAGAIN); 2119 } 2120 bremfree(bp); 2121 bp->b_flags |= B_INVAL | B_RELBUF; 2122 bp->b_flags &= ~B_ASYNC; 2123 brelse(bp); 2124 anyfreed = true; 2125 2126 BO_LOCK(bo); 2127 if (nbp != NULL && 2128 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2129 (nbp->b_vp != vp) || 2130 (nbp->b_flags & B_DELWRI) == 0)) 2131 return (EAGAIN); 2132 } 2133 } while (anyfreed); 2134 return (0); 2135 } 2136 2137 static void 2138 buf_vlist_remove(struct buf *bp) 2139 { 2140 struct bufv *bv; 2141 2142 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2143 ASSERT_BO_WLOCKED(bp->b_bufobj); 2144 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2145 (BX_VNDIRTY|BX_VNCLEAN), 2146 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2147 if (bp->b_xflags & BX_VNDIRTY) 2148 bv = &bp->b_bufobj->bo_dirty; 2149 else 2150 bv = &bp->b_bufobj->bo_clean; 2151 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2152 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2153 bv->bv_cnt--; 2154 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2155 } 2156 2157 /* 2158 * Add the buffer to the sorted clean or dirty block list. 2159 * 2160 * NOTE: xflags is passed as a constant, optimizing this inline function! 2161 */ 2162 static void 2163 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2164 { 2165 struct bufv *bv; 2166 struct buf *n; 2167 int error; 2168 2169 ASSERT_BO_WLOCKED(bo); 2170 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2171 ("dead bo %p", bo)); 2172 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2173 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2174 bp->b_xflags |= xflags; 2175 if (xflags & BX_VNDIRTY) 2176 bv = &bo->bo_dirty; 2177 else 2178 bv = &bo->bo_clean; 2179 2180 /* 2181 * Keep the list ordered. Optimize empty list insertion. 
Assume 2182 * we tend to grow at the tail so lookup_le should usually be cheaper 2183 * than _ge. 2184 */ 2185 if (bv->bv_cnt == 0 || 2186 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2187 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2188 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2189 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2190 else 2191 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2192 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2193 if (error) 2194 panic("buf_vlist_add: Preallocated nodes insufficient."); 2195 bv->bv_cnt++; 2196 } 2197 2198 /* 2199 * Look up a buffer using the buffer tries. 2200 */ 2201 struct buf * 2202 gbincore(struct bufobj *bo, daddr_t lblkno) 2203 { 2204 struct buf *bp; 2205 2206 ASSERT_BO_LOCKED(bo); 2207 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2208 if (bp != NULL) 2209 return (bp); 2210 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2211 } 2212 2213 /* 2214 * Associate a buffer with a vnode. 2215 */ 2216 void 2217 bgetvp(struct vnode *vp, struct buf *bp) 2218 { 2219 struct bufobj *bo; 2220 2221 bo = &vp->v_bufobj; 2222 ASSERT_BO_WLOCKED(bo); 2223 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2224 2225 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2226 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2227 ("bgetvp: bp already attached! %p", bp)); 2228 2229 vhold(vp); 2230 bp->b_vp = vp; 2231 bp->b_bufobj = bo; 2232 /* 2233 * Insert onto list for new vnode. 2234 */ 2235 buf_vlist_add(bp, bo, BX_VNCLEAN); 2236 } 2237 2238 /* 2239 * Disassociate a buffer from a vnode. 2240 */ 2241 void 2242 brelvp(struct buf *bp) 2243 { 2244 struct bufobj *bo; 2245 struct vnode *vp; 2246 2247 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2248 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2249 2250 /* 2251 * Delete from old vnode list, if on one. 2252 */ 2253 vp = bp->b_vp; /* XXX */ 2254 bo = bp->b_bufobj; 2255 BO_LOCK(bo); 2256 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2257 buf_vlist_remove(bp); 2258 else 2259 panic("brelvp: Buffer %p not on queue.", bp); 2260 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2261 bo->bo_flag &= ~BO_ONWORKLST; 2262 mtx_lock(&sync_mtx); 2263 LIST_REMOVE(bo, bo_synclist); 2264 syncer_worklist_len--; 2265 mtx_unlock(&sync_mtx); 2266 } 2267 bp->b_vp = NULL; 2268 bp->b_bufobj = NULL; 2269 BO_UNLOCK(bo); 2270 vdrop(vp); 2271 } 2272 2273 /* 2274 * Add an item to the syncer work queue. 
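 *
 * The work queue is a wheel of syncer_maxdelay slots that the syncer
 * sweeps one slot per second; an item scheduled with a given delay
 * (clamped below to syncer_maxdelay - 2) is parked that many slots
 * ahead of the current hand. For example, with syncer_delayno at 10
 * and delay 15, the entry lands in slot (10 + 15) & syncer_mask and
 * is visited roughly 15 seconds later.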
2275 */ 2276 static void 2277 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2278 { 2279 int slot; 2280 2281 ASSERT_BO_WLOCKED(bo); 2282 2283 mtx_lock(&sync_mtx); 2284 if (bo->bo_flag & BO_ONWORKLST) 2285 LIST_REMOVE(bo, bo_synclist); 2286 else { 2287 bo->bo_flag |= BO_ONWORKLST; 2288 syncer_worklist_len++; 2289 } 2290 2291 if (delay > syncer_maxdelay - 2) 2292 delay = syncer_maxdelay - 2; 2293 slot = (syncer_delayno + delay) & syncer_mask; 2294 2295 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2296 mtx_unlock(&sync_mtx); 2297 } 2298 2299 static int 2300 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2301 { 2302 int error, len; 2303 2304 mtx_lock(&sync_mtx); 2305 len = syncer_worklist_len - sync_vnode_count; 2306 mtx_unlock(&sync_mtx); 2307 error = SYSCTL_OUT(req, &len, sizeof(len)); 2308 return (error); 2309 } 2310 2311 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2312 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2313 2314 static struct proc *updateproc; 2315 static void sched_sync(void); 2316 static struct kproc_desc up_kp = { 2317 "syncer", 2318 sched_sync, 2319 &updateproc 2320 }; 2321 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2322 2323 static int 2324 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2325 { 2326 struct vnode *vp; 2327 struct mount *mp; 2328 2329 *bo = LIST_FIRST(slp); 2330 if (*bo == NULL) 2331 return (0); 2332 vp = bo2vnode(*bo); 2333 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2334 return (1); 2335 /* 2336 * We use vhold in case the vnode does not 2337 * successfully sync. vhold prevents the vnode from 2338 * going away when we unlock the sync_mtx so that 2339 * we can acquire the vnode interlock. 2340 */ 2341 vholdl(vp); 2342 mtx_unlock(&sync_mtx); 2343 VI_UNLOCK(vp); 2344 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2345 vdrop(vp); 2346 mtx_lock(&sync_mtx); 2347 return (*bo == LIST_FIRST(slp)); 2348 } 2349 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2350 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2351 VOP_UNLOCK(vp, 0); 2352 vn_finished_write(mp); 2353 BO_LOCK(*bo); 2354 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2355 /* 2356 * Put us back on the worklist. The worklist 2357 * routine will remove us from our current 2358 * position and then add us back in at a later 2359 * position. 2360 */ 2361 vn_syncer_add_to_worklist(*bo, syncdelay); 2362 } 2363 BO_UNLOCK(*bo); 2364 vdrop(vp); 2365 mtx_lock(&sync_mtx); 2366 return (0); 2367 } 2368 2369 static int first_printf = 1; 2370 2371 /* 2372 * System filesystem synchronizer daemon. 
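 *
 * A condensed sketch of the loop below (shutdown, rushjob and
 * watchdog handling omitted):
 *
 *      for (;;) {
 *              slp = &syncer_workitem_pending[syncer_delayno];
 *              syncer_delayno = (syncer_delayno + 1) % syncer_maxdelay;
 *              while (!LIST_EMPTY(slp))
 *                      sync_vnode(slp, &bo, td);
 *              cv_timedwait(&sync_wakeup, &sync_mtx, hz);
 *      }
 *
 * When sync_vnode() fails to relock a vnode it is requeued one slot
 * ahead, so a busy vnode is simply retried on the next sweep.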
2373 */
2374 static void
2375 sched_sync(void)
2376 {
2377 struct synclist *next, *slp;
2378 struct bufobj *bo;
2379 long starttime;
2380 struct thread *td = curthread;
2381 int last_work_seen;
2382 int net_worklist_len;
2383 int syncer_final_iter;
2384 int error;
2385 
2386 last_work_seen = 0;
2387 syncer_final_iter = 0;
2388 syncer_state = SYNCER_RUNNING;
2389 starttime = time_uptime;
2390 td->td_pflags |= TDP_NORUNNINGBUF;
2391 
2392 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2393 SHUTDOWN_PRI_LAST);
2394 
2395 mtx_lock(&sync_mtx);
2396 for (;;) {
2397 if (syncer_state == SYNCER_FINAL_DELAY &&
2398 syncer_final_iter == 0) {
2399 mtx_unlock(&sync_mtx);
2400 kproc_suspend_check(td->td_proc);
2401 mtx_lock(&sync_mtx);
2402 }
2403 net_worklist_len = syncer_worklist_len - sync_vnode_count;
2404 if (syncer_state != SYNCER_RUNNING &&
2405 starttime != time_uptime) {
2406 if (first_printf) {
2407 printf("\nSyncing disks, vnodes remaining... ");
2408 first_printf = 0;
2409 }
2410 printf("%d ", net_worklist_len);
2411 }
2412 starttime = time_uptime;
2413 
2414 /*
2415 * Push files whose dirty time has expired. Be careful
2416 * of interrupt race on slp queue.
2417 *
2418 * Skip over empty worklist slots when shutting down.
2419 */
2420 do {
2421 slp = &syncer_workitem_pending[syncer_delayno];
2422 syncer_delayno += 1;
2423 if (syncer_delayno == syncer_maxdelay)
2424 syncer_delayno = 0;
2425 next = &syncer_workitem_pending[syncer_delayno];
2426 /*
2427 * If the worklist has wrapped since
2428 * it was emptied of all but syncer vnodes,
2429 * switch to the FINAL_DELAY state and run
2430 * for one more second.
2431 */
2432 if (syncer_state == SYNCER_SHUTTING_DOWN &&
2433 net_worklist_len == 0 &&
2434 last_work_seen == syncer_delayno) {
2435 syncer_state = SYNCER_FINAL_DELAY;
2436 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2437 }
2438 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2439 syncer_worklist_len > 0);
2440 
2441 /*
2442 * Keep track of the last time there was anything
2443 * on the worklist other than syncer vnodes.
2444 * Return to the SHUTTING_DOWN state if any
2445 * new work appears.
2446 */
2447 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2448 last_work_seen = syncer_delayno;
2449 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2450 syncer_state = SYNCER_SHUTTING_DOWN;
2451 while (!LIST_EMPTY(slp)) {
2452 error = sync_vnode(slp, &bo, td);
2453 if (error == 1) {
2454 LIST_REMOVE(bo, bo_synclist);
2455 LIST_INSERT_HEAD(next, bo, bo_synclist);
2456 continue;
2457 }
2458 
2459 if (first_printf == 0) {
2460 /*
2461 * Drop the sync mutex, because some watchdog
2462 * drivers need to sleep while patting the watchdog.
2463 */
2464 mtx_unlock(&sync_mtx);
2465 wdog_kern_pat(WD_LASTVAL);
2466 mtx_lock(&sync_mtx);
2467 }
2468 
2469 }
2470 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2471 syncer_final_iter--;
2472 /*
2473 * The variable rushjob allows the kernel to speed up the
2474 * processing of the filesystem syncer process. A rushjob
2475 * value of N tells the filesystem syncer to process the next
2476 * N seconds worth of work on its queue ASAP. Currently rushjob
2477 * is used by the soft update code to speed up the filesystem
2478 * syncer process when the incore state is getting so far
2479 * ahead of the disk that the kernel memory pool is being
2480 * threatened with exhaustion.
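 *
 * For example, after speedup_syncer() has raised rushjob to 3, the
 * "continue" below is taken three times in a row, so the wheel hand
 * sweeps three slots back to back without the intervening one-second
 * cv_timedwait(), draining about three seconds of queued work at once.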
2481 */ 2482 if (rushjob > 0) { 2483 rushjob -= 1; 2484 continue; 2485 } 2486 /* 2487 * Just sleep for a short period of time between 2488 * iterations when shutting down to allow some I/O 2489 * to happen. 2490 * 2491 * If it has taken us less than a second to process the 2492 * current work, then wait. Otherwise start right over 2493 * again. We can still lose time if any single round 2494 * takes more than two seconds, but it does not really 2495 * matter as we are just trying to generally pace the 2496 * filesystem activity. 2497 */ 2498 if (syncer_state != SYNCER_RUNNING || 2499 time_uptime == starttime) { 2500 thread_lock(td); 2501 sched_prio(td, PPAUSE); 2502 thread_unlock(td); 2503 } 2504 if (syncer_state != SYNCER_RUNNING) 2505 cv_timedwait(&sync_wakeup, &sync_mtx, 2506 hz / SYNCER_SHUTDOWN_SPEEDUP); 2507 else if (time_uptime == starttime) 2508 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2509 } 2510 } 2511 2512 /* 2513 * Request the syncer daemon to speed up its work. 2514 * We never push it to speed up more than half of its 2515 * normal turn time, otherwise it could take over the cpu. 2516 */ 2517 int 2518 speedup_syncer(void) 2519 { 2520 int ret = 0; 2521 2522 mtx_lock(&sync_mtx); 2523 if (rushjob < syncdelay / 2) { 2524 rushjob += 1; 2525 stat_rush_requests += 1; 2526 ret = 1; 2527 } 2528 mtx_unlock(&sync_mtx); 2529 cv_broadcast(&sync_wakeup); 2530 return (ret); 2531 } 2532 2533 /* 2534 * Tell the syncer to speed up its work and run though its work 2535 * list several times, then tell it to shut down. 2536 */ 2537 static void 2538 syncer_shutdown(void *arg, int howto) 2539 { 2540 2541 if (howto & RB_NOSYNC) 2542 return; 2543 mtx_lock(&sync_mtx); 2544 syncer_state = SYNCER_SHUTTING_DOWN; 2545 rushjob = 0; 2546 mtx_unlock(&sync_mtx); 2547 cv_broadcast(&sync_wakeup); 2548 kproc_shutdown(arg, howto); 2549 } 2550 2551 void 2552 syncer_suspend(void) 2553 { 2554 2555 syncer_shutdown(updateproc, 0); 2556 } 2557 2558 void 2559 syncer_resume(void) 2560 { 2561 2562 mtx_lock(&sync_mtx); 2563 first_printf = 1; 2564 syncer_state = SYNCER_RUNNING; 2565 mtx_unlock(&sync_mtx); 2566 cv_broadcast(&sync_wakeup); 2567 kproc_resume(updateproc); 2568 } 2569 2570 /* 2571 * Reassign a buffer from one vnode to another. 2572 * Used to assign file specific control information 2573 * (indirect blocks) to the vnode to which they belong. 2574 */ 2575 void 2576 reassignbuf(struct buf *bp) 2577 { 2578 struct vnode *vp; 2579 struct bufobj *bo; 2580 int delay; 2581 #ifdef INVARIANTS 2582 struct bufv *bv; 2583 #endif 2584 2585 vp = bp->b_vp; 2586 bo = bp->b_bufobj; 2587 ++reassignbufcalls; 2588 2589 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2590 bp, bp->b_vp, bp->b_flags); 2591 /* 2592 * B_PAGING flagged buffers cannot be reassigned because their vp 2593 * is not fully linked in. 2594 */ 2595 if (bp->b_flags & B_PAGING) 2596 panic("cannot reassign paging buffer"); 2597 2598 /* 2599 * Delete from old vnode list, if on one. 2600 */ 2601 BO_LOCK(bo); 2602 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2603 buf_vlist_remove(bp); 2604 else 2605 panic("reassignbuf: Buffer %p not on queue.", bp); 2606 /* 2607 * If dirty, put on list of dirty buffers; otherwise insert onto list 2608 * of clean buffers. 
2609 */ 2610 if (bp->b_flags & B_DELWRI) { 2611 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2612 switch (vp->v_type) { 2613 case VDIR: 2614 delay = dirdelay; 2615 break; 2616 case VCHR: 2617 delay = metadelay; 2618 break; 2619 default: 2620 delay = filedelay; 2621 } 2622 vn_syncer_add_to_worklist(bo, delay); 2623 } 2624 buf_vlist_add(bp, bo, BX_VNDIRTY); 2625 } else { 2626 buf_vlist_add(bp, bo, BX_VNCLEAN); 2627 2628 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2629 mtx_lock(&sync_mtx); 2630 LIST_REMOVE(bo, bo_synclist); 2631 syncer_worklist_len--; 2632 mtx_unlock(&sync_mtx); 2633 bo->bo_flag &= ~BO_ONWORKLST; 2634 } 2635 } 2636 #ifdef INVARIANTS 2637 bv = &bo->bo_clean; 2638 bp = TAILQ_FIRST(&bv->bv_hd); 2639 KASSERT(bp == NULL || bp->b_bufobj == bo, 2640 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2641 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2642 KASSERT(bp == NULL || bp->b_bufobj == bo, 2643 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2644 bv = &bo->bo_dirty; 2645 bp = TAILQ_FIRST(&bv->bv_hd); 2646 KASSERT(bp == NULL || bp->b_bufobj == bo, 2647 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2648 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2649 KASSERT(bp == NULL || bp->b_bufobj == bo, 2650 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2651 #endif 2652 BO_UNLOCK(bo); 2653 } 2654 2655 static void 2656 v_init_counters(struct vnode *vp) 2657 { 2658 2659 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2660 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2661 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2662 2663 refcount_init(&vp->v_holdcnt, 1); 2664 refcount_init(&vp->v_usecount, 1); 2665 } 2666 2667 /* 2668 * Increment si_usecount of the associated device, if any. 2669 */ 2670 static void 2671 v_incr_devcount(struct vnode *vp) 2672 { 2673 2674 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2675 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2676 dev_lock(); 2677 vp->v_rdev->si_usecount++; 2678 dev_unlock(); 2679 } 2680 } 2681 2682 /* 2683 * Decrement si_usecount of the associated device, if any. 2684 */ 2685 static void 2686 v_decr_devcount(struct vnode *vp) 2687 { 2688 2689 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2690 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2691 dev_lock(); 2692 vp->v_rdev->si_usecount--; 2693 dev_unlock(); 2694 } 2695 } 2696 2697 /* 2698 * Grab a particular vnode from the free list, increment its 2699 * reference count and lock it. VI_DOOMED is set if the vnode 2700 * is being destroyed. Only callers who specify LK_RETRY will 2701 * see doomed vnodes. If inactive processing was delayed in 2702 * vput try to do it here. 2703 * 2704 * Both holdcnt and usecount can be manipulated using atomics without holding 2705 * any locks except in these cases which require the vnode interlock: 2706 * holdcnt: 1->0 and 0->1 2707 * usecount: 0->1 2708 * 2709 * usecount is permitted to transition 1->0 without the interlock because 2710 * vnode is kept live by holdcnt. 
2711 */
2712 static enum vgetstate
2713 _vget_prep(struct vnode *vp, bool interlock)
2714 {
2715 enum vgetstate vs;
2716 
2717 if (__predict_true(vp->v_type != VCHR)) {
2718 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2719 vs = VGET_USECOUNT;
2720 } else {
2721 _vhold(vp, interlock);
2722 vs = VGET_HOLDCNT;
2723 }
2724 } else {
2725 if (!interlock)
2726 VI_LOCK(vp);
2727 if (vp->v_usecount == 0) {
2728 vholdl(vp);
2729 vs = VGET_HOLDCNT;
2730 } else {
2731 v_incr_devcount(vp);
2732 refcount_acquire(&vp->v_usecount);
2733 vs = VGET_USECOUNT;
2734 }
2735 if (!interlock)
2736 VI_UNLOCK(vp);
2737 }
2738 return (vs);
2739 }
2740 
2741 enum vgetstate
2742 vget_prep(struct vnode *vp)
2743 {
2744 
2745 return (_vget_prep(vp, false));
2746 }
2747 
2748 int
2749 vget(struct vnode *vp, int flags, struct thread *td)
2750 {
2751 enum vgetstate vs;
2752 
2753 MPASS(td == curthread);
2754 
2755 vs = _vget_prep(vp, (flags & LK_INTERLOCK) != 0);
2756 return (vget_finish(vp, flags, vs));
2757 }
2758 
2759 int
2760 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
2761 {
2762 int error, oweinact;
2763 
2764 VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2765 ("%s: invalid lock operation", __func__));
2766 
2767 if ((flags & LK_INTERLOCK) != 0)
2768 ASSERT_VI_LOCKED(vp, __func__);
2769 else
2770 ASSERT_VI_UNLOCKED(vp, __func__);
2771 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
2772 if (vs == VGET_USECOUNT) {
2773 VNASSERT(vp->v_usecount > 0, vp,
2774 ("%s: vnode without usecount when VGET_USECOUNT was passed",
2775 __func__));
2776 }
2777 
2778 if ((error = vn_lock(vp, flags)) != 0) {
2779 if (vs == VGET_USECOUNT)
2780 vrele(vp);
2781 else
2782 vdrop(vp);
2783 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2784 vp);
2785 return (error);
2786 }
2787 
2788 if (vs == VGET_USECOUNT) {
2789 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2790 ("%s: vnode with usecount and VI_OWEINACT set", __func__));
2791 return (0);
2792 }
2793 
2794 /*
2795 * We hold the vnode. If the usecount is 0 it will be utilized to keep
2796 * the vnode around. Otherwise someone else lent their hold count and
2797 * we have to drop ours.
2798 */
2799 if (vp->v_type != VCHR &&
2800 refcount_acquire_if_not_zero(&vp->v_usecount)) {
2801 #ifdef INVARIANTS
2802 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1;
2803 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
2804 #else
2805 refcount_release(&vp->v_holdcnt);
2806 #endif
2807 VNODE_REFCOUNT_FENCE_ACQ();
2808 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2809 ("%s: vnode with usecount and VI_OWEINACT set", __func__));
2810 return (0);
2811 }
2812 
2813 /*
2814 * We don't guarantee that any particular close will
2815 * trigger inactive processing, so just make a best effort
2816 * here at preventing a reference to a removed file. If
2817 * we don't succeed, no harm is done.
2818 *
2819 * Upgrade our holdcnt to a usecount.
2820 */
2821 VI_LOCK(vp);
2822 /*
2823 * See the previous section. By the time we get here we may find
2824 * ourselves in the same spot.
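 *
 * For example: thread A holds only a holdcnt and blocks in vn_lock();
 * thread B meanwhile grants the vnode a usecount. Once A resumes,
 * refcount_acquire_if_not_zero() on the usecount succeeds, so A
 * releases its holdcnt and rides on the liveness now provided by the
 * usecount.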
2825 */ 2826 if (vp->v_type != VCHR) { 2827 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2828 #ifdef INVARIANTS 2829 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1; 2830 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2831 #else 2832 refcount_release(&vp->v_holdcnt); 2833 #endif 2834 VNODE_REFCOUNT_FENCE_ACQ(); 2835 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2836 ("%s: vnode with usecount and VI_OWEINACT set", 2837 __func__)); 2838 VI_UNLOCK(vp); 2839 return (0); 2840 } 2841 } else { 2842 if (vp->v_usecount > 0) 2843 refcount_release(&vp->v_holdcnt); 2844 } 2845 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2846 oweinact = 0; 2847 } else { 2848 oweinact = 1; 2849 vp->v_iflag &= ~VI_OWEINACT; 2850 VNODE_REFCOUNT_FENCE_REL(); 2851 } 2852 v_incr_devcount(vp); 2853 refcount_acquire(&vp->v_usecount); 2854 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2855 (flags & LK_NOWAIT) == 0) 2856 vinactive(vp, curthread); 2857 VI_UNLOCK(vp); 2858 return (0); 2859 } 2860 2861 /* 2862 * Increase the reference (use) and hold count of a vnode. 2863 * This will also remove the vnode from the free list if it is presently free. 2864 */ 2865 void 2866 vref(struct vnode *vp) 2867 { 2868 2869 ASSERT_VI_UNLOCKED(vp, __func__); 2870 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2871 if (vp->v_type != VCHR && 2872 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2873 VNODE_REFCOUNT_FENCE_ACQ(); 2874 VNASSERT(vp->v_holdcnt > 0, vp, 2875 ("%s: active vnode not held", __func__)); 2876 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2877 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2878 return; 2879 } 2880 VI_LOCK(vp); 2881 vrefl(vp); 2882 VI_UNLOCK(vp); 2883 } 2884 2885 void 2886 vrefl(struct vnode *vp) 2887 { 2888 2889 ASSERT_VI_LOCKED(vp, __func__); 2890 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2891 if (vp->v_type != VCHR && 2892 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2893 VNODE_REFCOUNT_FENCE_ACQ(); 2894 VNASSERT(vp->v_holdcnt > 0, vp, 2895 ("%s: active vnode not held", __func__)); 2896 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2897 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2898 return; 2899 } 2900 if (vp->v_usecount == 0) 2901 vholdl(vp); 2902 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2903 vp->v_iflag &= ~VI_OWEINACT; 2904 VNODE_REFCOUNT_FENCE_REL(); 2905 } 2906 v_incr_devcount(vp); 2907 refcount_acquire(&vp->v_usecount); 2908 } 2909 2910 void 2911 vrefact(struct vnode *vp) 2912 { 2913 2914 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2915 if (__predict_false(vp->v_type == VCHR)) { 2916 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2917 ("%s: wrong ref counts", __func__)); 2918 vref(vp); 2919 return; 2920 } 2921 #ifdef INVARIANTS 2922 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2923 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2924 #else 2925 refcount_acquire(&vp->v_usecount); 2926 #endif 2927 } 2928 2929 /* 2930 * Return reference count of a vnode. 2931 * 2932 * The results of this call are only guaranteed when some mechanism is used to 2933 * stop other processes from gaining references to the vnode. This may be the 2934 * case if the caller holds the only reference. This is also useful when stale 2935 * data is acceptable as race conditions may be accounted for by some other 2936 * means. 
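 *
 * For example, a caller that is known to hold the only reference can
 * safely test
 *
 *      if (vrefcnt(vp) == 1)
 *
 * to decide that nobody else is using the vnode; the same test made
 * against a shared, unlocked vnode is only a hint, since the count
 * may change as soon as it is read.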
2937 */
2938 int
2939 vrefcnt(struct vnode *vp)
2940 {
2941 
2942 return (vp->v_usecount);
2943 }
2944 
2945 #define VPUTX_VRELE 1
2946 #define VPUTX_VPUT 2
2947 #define VPUTX_VUNREF 3
2948 
2949 /*
2950 * Decrement the use and hold counts for a vnode.
2951 *
2952 * See an explanation near vget() as to why the atomic operation is safe.
2953 */
2954 static void
2955 vputx(struct vnode *vp, int func)
2956 {
2957 int error;
2958 
2959 KASSERT(vp != NULL, ("vputx: null vp"));
2960 if (func == VPUTX_VUNREF)
2961 ASSERT_VOP_LOCKED(vp, "vunref");
2962 else if (func == VPUTX_VPUT)
2963 ASSERT_VOP_LOCKED(vp, "vput");
2964 else
2965 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
2966 ASSERT_VI_UNLOCKED(vp, __func__);
2967 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
2968 ("%s: wrong ref counts", __func__));
2969 
2970 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2971 
2972 /*
2973 * It is an invariant that all VOP_* calls operate on a held vnode.
2974 * We may hold only an implicit reference stemming from our usecount,
2975 * which we are about to release. If we unlock the vnode afterwards we
2976 * open a time window where someone else could drop the last usecount
2977 * and proceed to free the vnode before our unlock finished. For this
2978 * reason we unlock the vnode early. This is a little bit wasteful as
2979 * it may be that the vnode is exclusively locked and inactive
2980 * processing is needed, in which case we are adding work.
2981 */
2982 if (func == VPUTX_VPUT)
2983 VOP_UNLOCK(vp, 0);
2984 
2985 /*
2986 * We want to hold the vnode until the inactive finishes to
2987 * prevent vgone() races. We drop the use count here and the
2988 * hold count below when we're done.
2989 */
2990 if (vp->v_type != VCHR) {
2991 /*
2992 * If we release the last usecount we take ownership of the hold
2993 * count which provides liveness of the vnode, in which case we
2994 * have to vdrop.
2995 */
2996 if (!refcount_release(&vp->v_usecount))
2997 return;
2998 VI_LOCK(vp);
2999 /*
3000 * By the time we got here someone else might have transitioned
3001 * the count back to > 0.
3002 */
3003 if (vp->v_usecount > 0) {
3004 vdropl(vp);
3005 return;
3006 }
3007 } else {
3008 VI_LOCK(vp);
3009 v_decr_devcount(vp);
3010 if (!refcount_release(&vp->v_usecount)) {
3011 VI_UNLOCK(vp);
3012 return;
3013 }
3014 }
3015 if (vp->v_iflag & VI_DOINGINACT) {
3016 vdropl(vp);
3017 return;
3018 }
3019 
3020 error = 0;
3021 
3022 if (vp->v_usecount != 0) {
3023 vn_printf(vp, "vputx: usecount not zero for vnode ");
3024 panic("vputx: usecount not zero");
3025 }
3026 
3027 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
3028 
3029 /*
3030 * Check if the fs wants to perform inactive processing. Note we
3031 * may only be holding the interlock, in which case it is possible
3032 * someone else called vgone on the vnode and ->v_data is now NULL.
3033 * Since vgone performs inactive on its own there is nothing to do
3034 * here but to drop our hold count.
3035 */
3036 if (__predict_false(vp->v_iflag & VI_DOOMED) ||
3037 VOP_NEED_INACTIVE(vp) == 0) {
3038 vdropl(vp);
3039 return;
3040 }
3041 
3042 /*
3043 * We must call VOP_INACTIVE with the node locked. Mark
3044 * as VI_DOINGINACT to avoid recursion.
3045 */ 3046 vp->v_iflag |= VI_OWEINACT; 3047 switch (func) { 3048 case VPUTX_VRELE: 3049 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3050 VI_LOCK(vp); 3051 break; 3052 case VPUTX_VPUT: 3053 error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT); 3054 VI_LOCK(vp); 3055 break; 3056 case VPUTX_VUNREF: 3057 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3058 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3059 VI_LOCK(vp); 3060 } 3061 break; 3062 } 3063 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 3064 ("vnode with usecount and VI_OWEINACT set")); 3065 if (error == 0) { 3066 if (vp->v_iflag & VI_OWEINACT) 3067 vinactive(vp, curthread); 3068 if (func != VPUTX_VUNREF) 3069 VOP_UNLOCK(vp, 0); 3070 } 3071 vdropl(vp); 3072 } 3073 3074 /* 3075 * Vnode put/release. 3076 * If count drops to zero, call inactive routine and return to freelist. 3077 */ 3078 void 3079 vrele(struct vnode *vp) 3080 { 3081 3082 vputx(vp, VPUTX_VRELE); 3083 } 3084 3085 /* 3086 * Release an already locked vnode. This give the same effects as 3087 * unlock+vrele(), but takes less time and avoids releasing and 3088 * re-aquiring the lock (as vrele() acquires the lock internally.) 3089 */ 3090 void 3091 vput(struct vnode *vp) 3092 { 3093 3094 vputx(vp, VPUTX_VPUT); 3095 } 3096 3097 /* 3098 * Release an exclusively locked vnode. Do not unlock the vnode lock. 3099 */ 3100 void 3101 vunref(struct vnode *vp) 3102 { 3103 3104 vputx(vp, VPUTX_VUNREF); 3105 } 3106 3107 /* 3108 * Increase the hold count and activate if this is the first reference. 3109 */ 3110 void 3111 _vhold(struct vnode *vp, bool locked) 3112 { 3113 struct mount *mp; 3114 3115 if (locked) 3116 ASSERT_VI_LOCKED(vp, __func__); 3117 else 3118 ASSERT_VI_UNLOCKED(vp, __func__); 3119 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3120 if (!locked) { 3121 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 3122 VNODE_REFCOUNT_FENCE_ACQ(); 3123 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3124 ("_vhold: vnode with holdcnt is free")); 3125 return; 3126 } 3127 VI_LOCK(vp); 3128 } 3129 if ((vp->v_iflag & VI_FREE) == 0) { 3130 refcount_acquire(&vp->v_holdcnt); 3131 if (!locked) 3132 VI_UNLOCK(vp); 3133 return; 3134 } 3135 VNASSERT(vp->v_holdcnt == 0, vp, 3136 ("%s: wrong hold count", __func__)); 3137 VNASSERT(vp->v_op != NULL, vp, 3138 ("%s: vnode already reclaimed.", __func__)); 3139 /* 3140 * Remove a vnode from the free list, mark it as in use, 3141 * and put it on the active list. 
3142 */ 3143 VNASSERT(vp->v_mount != NULL, vp, 3144 ("_vhold: vnode not on per mount vnode list")); 3145 mp = vp->v_mount; 3146 mtx_lock(&mp->mnt_listmtx); 3147 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 3148 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3149 mp->mnt_tmpfreevnodelistsize--; 3150 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 3151 } else { 3152 mtx_lock(&vnode_free_list_mtx); 3153 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 3154 freevnodes--; 3155 mtx_unlock(&vnode_free_list_mtx); 3156 } 3157 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 3158 ("Activating already active vnode")); 3159 vp->v_iflag &= ~VI_FREE; 3160 vp->v_iflag |= VI_ACTIVE; 3161 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 3162 mp->mnt_activevnodelistsize++; 3163 mtx_unlock(&mp->mnt_listmtx); 3164 refcount_acquire(&vp->v_holdcnt); 3165 if (!locked) 3166 VI_UNLOCK(vp); 3167 } 3168 3169 void 3170 vholdnz(struct vnode *vp) 3171 { 3172 3173 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3174 #ifdef INVARIANTS 3175 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3176 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 3177 #else 3178 atomic_add_int(&vp->v_holdcnt, 1); 3179 #endif 3180 } 3181 3182 /* 3183 * Drop the hold count of the vnode. If this is the last reference to 3184 * the vnode we place it on the free list unless it has been vgone'd 3185 * (marked VI_DOOMED) in which case we will free it. 3186 * 3187 * Because the vnode vm object keeps a hold reference on the vnode if 3188 * there is at least one resident non-cached page, the vnode cannot 3189 * leave the active list without the page cleanup done. 3190 */ 3191 void 3192 _vdrop(struct vnode *vp, bool locked) 3193 { 3194 struct bufobj *bo; 3195 struct mount *mp; 3196 int active; 3197 3198 if (locked) 3199 ASSERT_VI_LOCKED(vp, __func__); 3200 else 3201 ASSERT_VI_UNLOCKED(vp, __func__); 3202 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3203 if (__predict_false((int)vp->v_holdcnt <= 0)) { 3204 vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt); 3205 panic("vdrop: wrong holdcnt"); 3206 } 3207 if (!locked) { 3208 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3209 return; 3210 VI_LOCK(vp); 3211 } 3212 if (refcount_release(&vp->v_holdcnt) == 0) { 3213 VI_UNLOCK(vp); 3214 return; 3215 } 3216 if ((vp->v_iflag & VI_DOOMED) == 0) { 3217 /* 3218 * Mark a vnode as free: remove it from its active list 3219 * and put it up for recycling on the freelist. 
3220 */ 3221 VNASSERT(vp->v_op != NULL, vp, 3222 ("vdropl: vnode already reclaimed.")); 3223 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3224 ("vnode already free")); 3225 VNASSERT(vp->v_holdcnt == 0, vp, 3226 ("vdropl: freeing when we shouldn't")); 3227 active = vp->v_iflag & VI_ACTIVE; 3228 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3229 vp->v_iflag &= ~VI_ACTIVE; 3230 mp = vp->v_mount; 3231 if (mp != NULL) { 3232 mtx_lock(&mp->mnt_listmtx); 3233 if (active) { 3234 TAILQ_REMOVE(&mp->mnt_activevnodelist, 3235 vp, v_actfreelist); 3236 mp->mnt_activevnodelistsize--; 3237 } 3238 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, 3239 vp, v_actfreelist); 3240 mp->mnt_tmpfreevnodelistsize++; 3241 vp->v_iflag |= VI_FREE; 3242 vp->v_mflag |= VMP_TMPMNTFREELIST; 3243 VI_UNLOCK(vp); 3244 if (mp->mnt_tmpfreevnodelistsize >= 3245 mnt_free_list_batch) 3246 vnlru_return_batch_locked(mp); 3247 mtx_unlock(&mp->mnt_listmtx); 3248 } else { 3249 VNASSERT(active == 0, vp, 3250 ("vdropl: active vnode not on per mount " 3251 "vnode list")); 3252 mtx_lock(&vnode_free_list_mtx); 3253 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 3254 v_actfreelist); 3255 freevnodes++; 3256 vp->v_iflag |= VI_FREE; 3257 VI_UNLOCK(vp); 3258 mtx_unlock(&vnode_free_list_mtx); 3259 } 3260 } else { 3261 VI_UNLOCK(vp); 3262 counter_u64_add(free_owe_inact, 1); 3263 } 3264 return; 3265 } 3266 /* 3267 * The vnode has been marked for destruction, so free it. 3268 * 3269 * The vnode will be returned to the zone where it will 3270 * normally remain until it is needed for another vnode. We 3271 * need to cleanup (or verify that the cleanup has already 3272 * been done) any residual data left from its current use 3273 * so as not to contaminate the freshly allocated vnode. 3274 */ 3275 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 3276 atomic_subtract_long(&numvnodes, 1); 3277 bo = &vp->v_bufobj; 3278 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3279 ("cleaned vnode still on the free list.")); 3280 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 3281 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 3282 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 3283 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 3284 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 3285 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 3286 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 3287 ("clean blk trie not empty")); 3288 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 3289 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 3290 ("dirty blk trie not empty")); 3291 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 3292 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 3293 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 3294 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 3295 ("Dangling rangelock waiters")); 3296 VI_UNLOCK(vp); 3297 #ifdef MAC 3298 mac_vnode_destroy(vp); 3299 #endif 3300 if (vp->v_pollinfo != NULL) { 3301 destroy_vpollinfo(vp->v_pollinfo); 3302 vp->v_pollinfo = NULL; 3303 } 3304 #ifdef INVARIANTS 3305 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. 
*/ 3306 vp->v_op = NULL; 3307 #endif 3308 vp->v_mountedhere = NULL; 3309 vp->v_unpcb = NULL; 3310 vp->v_rdev = NULL; 3311 vp->v_fifoinfo = NULL; 3312 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 3313 vp->v_iflag = 0; 3314 vp->v_vflag = 0; 3315 bo->bo_flag = 0; 3316 uma_zfree(vnode_zone, vp); 3317 } 3318 3319 /* 3320 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3321 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3322 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3323 * failed lock upgrade. 3324 */ 3325 void 3326 vinactive(struct vnode *vp, struct thread *td) 3327 { 3328 struct vm_object *obj; 3329 3330 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3331 ASSERT_VI_LOCKED(vp, "vinactive"); 3332 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3333 ("vinactive: recursed on VI_DOINGINACT")); 3334 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3335 vp->v_iflag |= VI_DOINGINACT; 3336 vp->v_iflag &= ~VI_OWEINACT; 3337 VI_UNLOCK(vp); 3338 /* 3339 * Before moving off the active list, we must be sure that any 3340 * modified pages are converted into the vnode's dirty 3341 * buffers, since these will no longer be checked once the 3342 * vnode is on the inactive list. 3343 * 3344 * The write-out of the dirty pages is asynchronous. At the 3345 * point that VOP_INACTIVE() is called, there could still be 3346 * pending I/O and dirty pages in the object. 3347 */ 3348 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3349 vm_object_mightbedirty(obj)) { 3350 VM_OBJECT_WLOCK(obj); 3351 vm_object_page_clean(obj, 0, 0, 0); 3352 VM_OBJECT_WUNLOCK(obj); 3353 } 3354 VOP_INACTIVE(vp, td); 3355 VI_LOCK(vp); 3356 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3357 ("vinactive: lost VI_DOINGINACT")); 3358 vp->v_iflag &= ~VI_DOINGINACT; 3359 } 3360 3361 /* 3362 * Remove any vnodes in the vnode table belonging to mount point mp. 3363 * 3364 * If FORCECLOSE is not specified, there should not be any active ones, 3365 * return error if any are found (nb: this is a user error, not a 3366 * system error). If FORCECLOSE is specified, detach any active vnodes 3367 * that are found. 3368 * 3369 * If WRITECLOSE is set, only flush out regular file vnodes open for 3370 * writing. 3371 * 3372 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3373 * 3374 * `rootrefs' specifies the base reference count for the root vnode 3375 * of this filesystem. The root vnode is considered busy if its 3376 * v_usecount exceeds this value. On a successful return, vflush(, td) 3377 * will call vrele() on the root vnode exactly rootrefs times. 3378 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3379 * be zero. 3380 */ 3381 #ifdef DIAGNOSTIC 3382 static int busyprt = 0; /* print out busy vnodes */ 3383 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3384 #endif 3385 3386 int 3387 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3388 { 3389 struct vnode *vp, *mvp, *rootvp = NULL; 3390 struct vattr vattr; 3391 int busy = 0, error; 3392 3393 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3394 rootrefs, flags); 3395 if (rootrefs > 0) { 3396 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3397 ("vflush: bad args")); 3398 /* 3399 * Get the filesystem root vnode. We can vput() it 3400 * immediately, since with rootrefs > 0, it won't go away. 
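 *
 * Unmount code typically reaches this function along the lines of
 * (sketch, flags derived from the unmount request):
 *
 *      flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
 *      error = vflush(mp, 0, flags, td);
 *
 * passing a nonzero rootrefs only when it keeps its root vnode
 * referenced across the call.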
3401 */ 3402 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3403 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3404 __func__, error); 3405 return (error); 3406 } 3407 vput(rootvp); 3408 } 3409 loop: 3410 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3411 vholdl(vp); 3412 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3413 if (error) { 3414 vdrop(vp); 3415 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3416 goto loop; 3417 } 3418 /* 3419 * Skip over a vnodes marked VV_SYSTEM. 3420 */ 3421 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3422 VOP_UNLOCK(vp, 0); 3423 vdrop(vp); 3424 continue; 3425 } 3426 /* 3427 * If WRITECLOSE is set, flush out unlinked but still open 3428 * files (even if open only for reading) and regular file 3429 * vnodes open for writing. 3430 */ 3431 if (flags & WRITECLOSE) { 3432 if (vp->v_object != NULL) { 3433 VM_OBJECT_WLOCK(vp->v_object); 3434 vm_object_page_clean(vp->v_object, 0, 0, 0); 3435 VM_OBJECT_WUNLOCK(vp->v_object); 3436 } 3437 error = VOP_FSYNC(vp, MNT_WAIT, td); 3438 if (error != 0) { 3439 VOP_UNLOCK(vp, 0); 3440 vdrop(vp); 3441 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3442 return (error); 3443 } 3444 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3445 VI_LOCK(vp); 3446 3447 if ((vp->v_type == VNON || 3448 (error == 0 && vattr.va_nlink > 0)) && 3449 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3450 VOP_UNLOCK(vp, 0); 3451 vdropl(vp); 3452 continue; 3453 } 3454 } else 3455 VI_LOCK(vp); 3456 /* 3457 * With v_usecount == 0, all we need to do is clear out the 3458 * vnode data structures and we are done. 3459 * 3460 * If FORCECLOSE is set, forcibly close the vnode. 3461 */ 3462 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3463 vgonel(vp); 3464 } else { 3465 busy++; 3466 #ifdef DIAGNOSTIC 3467 if (busyprt) 3468 vn_printf(vp, "vflush: busy vnode "); 3469 #endif 3470 } 3471 VOP_UNLOCK(vp, 0); 3472 vdropl(vp); 3473 } 3474 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3475 /* 3476 * If just the root vnode is busy, and if its refcount 3477 * is equal to `rootrefs', then go ahead and kill it. 3478 */ 3479 VI_LOCK(rootvp); 3480 KASSERT(busy > 0, ("vflush: not busy")); 3481 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3482 ("vflush: usecount %d < rootrefs %d", 3483 rootvp->v_usecount, rootrefs)); 3484 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3485 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3486 vgone(rootvp); 3487 VOP_UNLOCK(rootvp, 0); 3488 busy = 0; 3489 } else 3490 VI_UNLOCK(rootvp); 3491 } 3492 if (busy) { 3493 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3494 busy); 3495 return (EBUSY); 3496 } 3497 for (; rootrefs > 0; rootrefs--) 3498 vrele(rootvp); 3499 return (0); 3500 } 3501 3502 /* 3503 * Recycle an unused vnode to the front of the free list. 3504 */ 3505 int 3506 vrecycle(struct vnode *vp) 3507 { 3508 int recycled; 3509 3510 VI_LOCK(vp); 3511 recycled = vrecyclel(vp); 3512 VI_UNLOCK(vp); 3513 return (recycled); 3514 } 3515 3516 /* 3517 * vrecycle, with the vp interlock held. 3518 */ 3519 int 3520 vrecyclel(struct vnode *vp) 3521 { 3522 int recycled; 3523 3524 ASSERT_VOP_ELOCKED(vp, __func__); 3525 ASSERT_VI_LOCKED(vp, __func__); 3526 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3527 recycled = 0; 3528 if (vp->v_usecount == 0) { 3529 recycled = 1; 3530 vgonel(vp); 3531 } 3532 return (recycled); 3533 } 3534 3535 /* 3536 * Eliminate all activity associated with a vnode 3537 * in preparation for reuse. 
3538 */ 3539 void 3540 vgone(struct vnode *vp) 3541 { 3542 VI_LOCK(vp); 3543 vgonel(vp); 3544 VI_UNLOCK(vp); 3545 } 3546 3547 static void 3548 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3549 struct vnode *lowervp __unused) 3550 { 3551 } 3552 3553 /* 3554 * Notify upper mounts about reclaimed or unlinked vnode. 3555 */ 3556 void 3557 vfs_notify_upper(struct vnode *vp, int event) 3558 { 3559 static struct vfsops vgonel_vfsops = { 3560 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3561 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3562 }; 3563 struct mount *mp, *ump, *mmp; 3564 3565 mp = vp->v_mount; 3566 if (mp == NULL) 3567 return; 3568 3569 MNT_ILOCK(mp); 3570 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3571 goto unlock; 3572 MNT_IUNLOCK(mp); 3573 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3574 mmp->mnt_op = &vgonel_vfsops; 3575 mmp->mnt_kern_flag |= MNTK_MARKER; 3576 MNT_ILOCK(mp); 3577 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3578 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3579 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3580 ump = TAILQ_NEXT(ump, mnt_upper_link); 3581 continue; 3582 } 3583 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3584 MNT_IUNLOCK(mp); 3585 switch (event) { 3586 case VFS_NOTIFY_UPPER_RECLAIM: 3587 VFS_RECLAIM_LOWERVP(ump, vp); 3588 break; 3589 case VFS_NOTIFY_UPPER_UNLINK: 3590 VFS_UNLINK_LOWERVP(ump, vp); 3591 break; 3592 default: 3593 KASSERT(0, ("invalid event %d", event)); 3594 break; 3595 } 3596 MNT_ILOCK(mp); 3597 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3598 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3599 } 3600 free(mmp, M_TEMP); 3601 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3602 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3603 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3604 wakeup(&mp->mnt_uppers); 3605 } 3606 unlock: 3607 MNT_IUNLOCK(mp); 3608 } 3609 3610 /* 3611 * vgone, with the vp interlock held. 3612 */ 3613 static void 3614 vgonel(struct vnode *vp) 3615 { 3616 struct thread *td; 3617 struct mount *mp; 3618 vm_object_t object; 3619 bool active, oweinact; 3620 3621 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3622 ASSERT_VI_LOCKED(vp, "vgonel"); 3623 VNASSERT(vp->v_holdcnt, vp, 3624 ("vgonel: vp %p has no reference.", vp)); 3625 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3626 td = curthread; 3627 3628 /* 3629 * Don't vgonel if we're already doomed. 3630 */ 3631 if (vp->v_iflag & VI_DOOMED) 3632 return; 3633 vp->v_iflag |= VI_DOOMED; 3634 3635 /* 3636 * Check to see if the vnode is in use. If so, we have to call 3637 * VOP_CLOSE() and VOP_INACTIVE(). 3638 */ 3639 active = vp->v_usecount > 0; 3640 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3641 VI_UNLOCK(vp); 3642 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3643 3644 /* 3645 * If purging an active vnode, it must be closed and 3646 * deactivated before being reclaimed. 3647 */ 3648 if (active) 3649 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3650 if (oweinact || active) { 3651 VI_LOCK(vp); 3652 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3653 vinactive(vp, td); 3654 VI_UNLOCK(vp); 3655 } 3656 if (vp->v_type == VSOCK) 3657 vfs_unp_reclaim(vp); 3658 3659 /* 3660 * Clean out any buffers associated with the vnode. 3661 * If the flush fails, just toss the buffers. 
3662 */ 3663 mp = NULL; 3664 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3665 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3666 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3667 while (vinvalbuf(vp, 0, 0, 0) != 0) 3668 ; 3669 } 3670 3671 BO_LOCK(&vp->v_bufobj); 3672 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3673 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3674 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3675 vp->v_bufobj.bo_clean.bv_cnt == 0, 3676 ("vp %p bufobj not invalidated", vp)); 3677 3678 /* 3679 * For VMIO bufobj, BO_DEAD is set later, or in 3680 * vm_object_terminate() after the object's page queue is 3681 * flushed. 3682 */ 3683 object = vp->v_bufobj.bo_object; 3684 if (object == NULL) 3685 vp->v_bufobj.bo_flag |= BO_DEAD; 3686 BO_UNLOCK(&vp->v_bufobj); 3687 3688 /* 3689 * Handle the VM part. Tmpfs handles v_object on its own (the 3690 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3691 * should not touch the object borrowed from the lower vnode 3692 * (the handle check). 3693 */ 3694 if (object != NULL && object->type == OBJT_VNODE && 3695 object->handle == vp) 3696 vnode_destroy_vobject(vp); 3697 3698 /* 3699 * Reclaim the vnode. 3700 */ 3701 if (VOP_RECLAIM(vp, td)) 3702 panic("vgone: cannot reclaim"); 3703 if (mp != NULL) 3704 vn_finished_secondary_write(mp); 3705 VNASSERT(vp->v_object == NULL, vp, 3706 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3707 /* 3708 * Clear the advisory locks and wake up waiting threads. 3709 */ 3710 (void)VOP_ADVLOCKPURGE(vp); 3711 vp->v_lockf = NULL; 3712 /* 3713 * Delete from old mount point vnode list. 3714 */ 3715 delmntque(vp); 3716 cache_purge(vp); 3717 /* 3718 * Done with purge, reset to the standard lock and invalidate 3719 * the vnode. 3720 */ 3721 VI_LOCK(vp); 3722 vp->v_vnlock = &vp->v_lock; 3723 vp->v_op = &dead_vnodeops; 3724 vp->v_tag = "none"; 3725 vp->v_type = VBAD; 3726 } 3727 3728 /* 3729 * Calculate the total number of references to a special device. 3730 */ 3731 int 3732 vcount(struct vnode *vp) 3733 { 3734 int count; 3735 3736 dev_lock(); 3737 count = vp->v_rdev->si_usecount; 3738 dev_unlock(); 3739 return (count); 3740 } 3741 3742 /* 3743 * Same as above, but using the struct cdev *as argument 3744 */ 3745 int 3746 count_dev(struct cdev *dev) 3747 { 3748 int count; 3749 3750 dev_lock(); 3751 count = dev->si_usecount; 3752 dev_unlock(); 3753 return(count); 3754 } 3755 3756 /* 3757 * Print out a description of a vnode. 3758 */ 3759 static char *typename[] = 3760 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3761 "VMARKER"}; 3762 3763 void 3764 vn_printf(struct vnode *vp, const char *fmt, ...) 
3765 { 3766 va_list ap; 3767 char buf[256], buf2[16]; 3768 u_long flags; 3769 3770 va_start(ap, fmt); 3771 vprintf(fmt, ap); 3772 va_end(ap); 3773 printf("%p: ", (void *)vp); 3774 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3775 printf(" usecount %d, writecount %d, refcount %d", 3776 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3777 switch (vp->v_type) { 3778 case VDIR: 3779 printf(" mountedhere %p\n", vp->v_mountedhere); 3780 break; 3781 case VCHR: 3782 printf(" rdev %p\n", vp->v_rdev); 3783 break; 3784 case VSOCK: 3785 printf(" socket %p\n", vp->v_unpcb); 3786 break; 3787 case VFIFO: 3788 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3789 break; 3790 default: 3791 printf("\n"); 3792 break; 3793 } 3794 buf[0] = '\0'; 3795 buf[1] = '\0'; 3796 if (vp->v_vflag & VV_ROOT) 3797 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3798 if (vp->v_vflag & VV_ISTTY) 3799 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3800 if (vp->v_vflag & VV_NOSYNC) 3801 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3802 if (vp->v_vflag & VV_ETERNALDEV) 3803 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3804 if (vp->v_vflag & VV_CACHEDLABEL) 3805 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3806 if (vp->v_vflag & VV_COPYONWRITE) 3807 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3808 if (vp->v_vflag & VV_SYSTEM) 3809 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3810 if (vp->v_vflag & VV_PROCDEP) 3811 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3812 if (vp->v_vflag & VV_NOKNOTE) 3813 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3814 if (vp->v_vflag & VV_DELETED) 3815 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3816 if (vp->v_vflag & VV_MD) 3817 strlcat(buf, "|VV_MD", sizeof(buf)); 3818 if (vp->v_vflag & VV_FORCEINSMQ) 3819 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3820 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3821 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3822 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3823 if (flags != 0) { 3824 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3825 strlcat(buf, buf2, sizeof(buf)); 3826 } 3827 if (vp->v_iflag & VI_MOUNT) 3828 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3829 if (vp->v_iflag & VI_DOOMED) 3830 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3831 if (vp->v_iflag & VI_FREE) 3832 strlcat(buf, "|VI_FREE", sizeof(buf)); 3833 if (vp->v_iflag & VI_ACTIVE) 3834 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3835 if (vp->v_iflag & VI_DOINGINACT) 3836 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3837 if (vp->v_iflag & VI_OWEINACT) 3838 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3839 if (vp->v_iflag & VI_TEXT_REF) 3840 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 3841 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3842 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT | VI_TEXT_REF); 3843 if (flags != 0) { 3844 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3845 strlcat(buf, buf2, sizeof(buf)); 3846 } 3847 printf(" flags (%s)\n", buf + 1); 3848 if (mtx_owned(VI_MTX(vp))) 3849 printf(" VI_LOCKed"); 3850 if (vp->v_object != NULL) 3851 printf(" v_object %p ref %d pages %d " 3852 "cleanbuf %d dirtybuf %d\n", 3853 vp->v_object, vp->v_object->ref_count, 3854 vp->v_object->resident_page_count, 3855 vp->v_bufobj.bo_clean.bv_cnt, 3856 vp->v_bufobj.bo_dirty.bv_cnt); 3857 printf(" "); 3858 lockmgr_printinfo(vp->v_vnlock); 3859 if (vp->v_data != NULL) 3860 VOP_PRINT(vp); 3861 } 3862 3863 #ifdef DDB 3864 /* 3865 * List all of the locked vnodes in the system. 3866 * Called when debugging the kernel. 
3867 */ 3868 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3869 { 3870 struct mount *mp; 3871 struct vnode *vp; 3872 3873 /* 3874 * Note: because this is DDB, we can't obey the locking semantics 3875 * for these structures, which means we could catch an inconsistent 3876 * state and dereference a nasty pointer. Not much to be done 3877 * about that. 3878 */ 3879 db_printf("Locked vnodes\n"); 3880 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3881 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3882 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3883 vn_printf(vp, "vnode "); 3884 } 3885 } 3886 } 3887 3888 /* 3889 * Show details about the given vnode. 3890 */ 3891 DB_SHOW_COMMAND(vnode, db_show_vnode) 3892 { 3893 struct vnode *vp; 3894 3895 if (!have_addr) 3896 return; 3897 vp = (struct vnode *)addr; 3898 vn_printf(vp, "vnode "); 3899 } 3900 3901 /* 3902 * Show details about the given mount point. 3903 */ 3904 DB_SHOW_COMMAND(mount, db_show_mount) 3905 { 3906 struct mount *mp; 3907 struct vfsopt *opt; 3908 struct statfs *sp; 3909 struct vnode *vp; 3910 char buf[512]; 3911 uint64_t mflags; 3912 u_int flags; 3913 3914 if (!have_addr) { 3915 /* No address given, print short info about all mount points. */ 3916 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3917 db_printf("%p %s on %s (%s)\n", mp, 3918 mp->mnt_stat.f_mntfromname, 3919 mp->mnt_stat.f_mntonname, 3920 mp->mnt_stat.f_fstypename); 3921 if (db_pager_quit) 3922 break; 3923 } 3924 db_printf("\nMore info: show mount <addr>\n"); 3925 return; 3926 } 3927 3928 mp = (struct mount *)addr; 3929 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3930 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3931 3932 buf[0] = '\0'; 3933 mflags = mp->mnt_flag; 3934 #define MNT_FLAG(flag) do { \ 3935 if (mflags & (flag)) { \ 3936 if (buf[0] != '\0') \ 3937 strlcat(buf, ", ", sizeof(buf)); \ 3938 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3939 mflags &= ~(flag); \ 3940 } \ 3941 } while (0) 3942 MNT_FLAG(MNT_RDONLY); 3943 MNT_FLAG(MNT_SYNCHRONOUS); 3944 MNT_FLAG(MNT_NOEXEC); 3945 MNT_FLAG(MNT_NOSUID); 3946 MNT_FLAG(MNT_NFS4ACLS); 3947 MNT_FLAG(MNT_UNION); 3948 MNT_FLAG(MNT_ASYNC); 3949 MNT_FLAG(MNT_SUIDDIR); 3950 MNT_FLAG(MNT_SOFTDEP); 3951 MNT_FLAG(MNT_NOSYMFOLLOW); 3952 MNT_FLAG(MNT_GJOURNAL); 3953 MNT_FLAG(MNT_MULTILABEL); 3954 MNT_FLAG(MNT_ACLS); 3955 MNT_FLAG(MNT_NOATIME); 3956 MNT_FLAG(MNT_NOCLUSTERR); 3957 MNT_FLAG(MNT_NOCLUSTERW); 3958 MNT_FLAG(MNT_SUJ); 3959 MNT_FLAG(MNT_EXRDONLY); 3960 MNT_FLAG(MNT_EXPORTED); 3961 MNT_FLAG(MNT_DEFEXPORTED); 3962 MNT_FLAG(MNT_EXPORTANON); 3963 MNT_FLAG(MNT_EXKERB); 3964 MNT_FLAG(MNT_EXPUBLIC); 3965 MNT_FLAG(MNT_LOCAL); 3966 MNT_FLAG(MNT_QUOTA); 3967 MNT_FLAG(MNT_ROOTFS); 3968 MNT_FLAG(MNT_USER); 3969 MNT_FLAG(MNT_IGNORE); 3970 MNT_FLAG(MNT_UPDATE); 3971 MNT_FLAG(MNT_DELEXPORT); 3972 MNT_FLAG(MNT_RELOAD); 3973 MNT_FLAG(MNT_FORCE); 3974 MNT_FLAG(MNT_SNAPSHOT); 3975 MNT_FLAG(MNT_BYFSID); 3976 #undef MNT_FLAG 3977 if (mflags != 0) { 3978 if (buf[0] != '\0') 3979 strlcat(buf, ", ", sizeof(buf)); 3980 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3981 "0x%016jx", mflags); 3982 } 3983 db_printf(" mnt_flag = %s\n", buf); 3984 3985 buf[0] = '\0'; 3986 flags = mp->mnt_kern_flag; 3987 #define MNT_KERN_FLAG(flag) do { \ 3988 if (flags & (flag)) { \ 3989 if (buf[0] != '\0') \ 3990 strlcat(buf, ", ", sizeof(buf)); \ 3991 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3992 flags &= ~(flag); \ 3993 } \ 3994 } while (0) 3995 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3996 MNT_KERN_FLAG(MNTK_ASYNC); 3997 
MNT_KERN_FLAG(MNTK_SOFTDEP); 3998 MNT_KERN_FLAG(MNTK_DRAINING); 3999 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4000 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4001 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4002 MNT_KERN_FLAG(MNTK_NO_IOPF); 4003 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4004 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4005 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4006 MNT_KERN_FLAG(MNTK_MARKER); 4007 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4008 MNT_KERN_FLAG(MNTK_NOASYNC); 4009 MNT_KERN_FLAG(MNTK_UNMOUNT); 4010 MNT_KERN_FLAG(MNTK_MWAIT); 4011 MNT_KERN_FLAG(MNTK_SUSPEND); 4012 MNT_KERN_FLAG(MNTK_SUSPEND2); 4013 MNT_KERN_FLAG(MNTK_SUSPENDED); 4014 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4015 MNT_KERN_FLAG(MNTK_NOKNOTE); 4016 #undef MNT_KERN_FLAG 4017 if (flags != 0) { 4018 if (buf[0] != '\0') 4019 strlcat(buf, ", ", sizeof(buf)); 4020 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4021 "0x%08x", flags); 4022 } 4023 db_printf(" mnt_kern_flag = %s\n", buf); 4024 4025 db_printf(" mnt_opt = "); 4026 opt = TAILQ_FIRST(mp->mnt_opt); 4027 if (opt != NULL) { 4028 db_printf("%s", opt->name); 4029 opt = TAILQ_NEXT(opt, link); 4030 while (opt != NULL) { 4031 db_printf(", %s", opt->name); 4032 opt = TAILQ_NEXT(opt, link); 4033 } 4034 } 4035 db_printf("\n"); 4036 4037 sp = &mp->mnt_stat; 4038 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4039 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4040 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4041 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4042 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4043 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4044 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4045 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4046 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4047 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4048 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4049 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4050 4051 db_printf(" mnt_cred = { uid=%u ruid=%u", 4052 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4053 if (jailed(mp->mnt_cred)) 4054 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4055 db_printf(" }\n"); 4056 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4057 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4058 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4059 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4060 db_printf(" mnt_activevnodelistsize = %d\n", 4061 mp->mnt_activevnodelistsize); 4062 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4063 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4064 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4065 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4066 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4067 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4068 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4069 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4070 db_printf(" mnt_secondary_accwrites = %d\n", 4071 mp->mnt_secondary_accwrites); 4072 db_printf(" mnt_gjprovider = %s\n", 4073 mp->mnt_gjprovider != NULL ? 
mp->mnt_gjprovider : "NULL"); 4074 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4075 4076 db_printf("\n\nList of active vnodes\n"); 4077 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 4078 if (vp->v_type != VMARKER) { 4079 vn_printf(vp, "vnode "); 4080 if (db_pager_quit) 4081 break; 4082 } 4083 } 4084 db_printf("\n\nList of inactive vnodes\n"); 4085 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4086 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 4087 vn_printf(vp, "vnode "); 4088 if (db_pager_quit) 4089 break; 4090 } 4091 } 4092 } 4093 #endif /* DDB */ 4094 4095 /* 4096 * Fill in a struct xvfsconf based on a struct vfsconf. 4097 */ 4098 static int 4099 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4100 { 4101 struct xvfsconf xvfsp; 4102 4103 bzero(&xvfsp, sizeof(xvfsp)); 4104 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4105 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4106 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4107 xvfsp.vfc_flags = vfsp->vfc_flags; 4108 /* 4109 * These are unused in userland, we keep them 4110 * to not break binary compatibility. 4111 */ 4112 xvfsp.vfc_vfsops = NULL; 4113 xvfsp.vfc_next = NULL; 4114 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4115 } 4116 4117 #ifdef COMPAT_FREEBSD32 4118 struct xvfsconf32 { 4119 uint32_t vfc_vfsops; 4120 char vfc_name[MFSNAMELEN]; 4121 int32_t vfc_typenum; 4122 int32_t vfc_refcount; 4123 int32_t vfc_flags; 4124 uint32_t vfc_next; 4125 }; 4126 4127 static int 4128 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4129 { 4130 struct xvfsconf32 xvfsp; 4131 4132 bzero(&xvfsp, sizeof(xvfsp)); 4133 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4134 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4135 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4136 xvfsp.vfc_flags = vfsp->vfc_flags; 4137 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4138 } 4139 #endif 4140 4141 /* 4142 * Top level filesystem related information gathering. 4143 */ 4144 static int 4145 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4146 { 4147 struct vfsconf *vfsp; 4148 int error; 4149 4150 error = 0; 4151 vfsconf_slock(); 4152 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4153 #ifdef COMPAT_FREEBSD32 4154 if (req->flags & SCTL_MASK32) 4155 error = vfsconf2x32(req, vfsp); 4156 else 4157 #endif 4158 error = vfsconf2x(req, vfsp); 4159 if (error) 4160 break; 4161 } 4162 vfsconf_sunlock(); 4163 return (error); 4164 } 4165 4166 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4167 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4168 "S,xvfsconf", "List of all configured filesystems"); 4169 4170 #ifndef BURN_BRIDGES 4171 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4172 4173 static int 4174 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4175 { 4176 int *name = (int *)arg1 - 1; /* XXX */ 4177 u_int namelen = arg2 + 1; /* XXX */ 4178 struct vfsconf *vfsp; 4179 4180 log(LOG_WARNING, "userland calling deprecated sysctl, " 4181 "please rebuild world\n"); 4182 4183 #if 1 || defined(COMPAT_PRELITE2) 4184 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
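 * A single-component name here is the old-style top-level VFS_VFSCONF
 * request (an interpretation of the check below, not stated upstream),
 * so it is routed to the pre-Lite2 handler; longer names fall through
 * to the VFS_GENERIC-style switch.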
*/ 4185 if (namelen == 1) 4186 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4187 #endif 4188 4189 switch (name[1]) { 4190 case VFS_MAXTYPENUM: 4191 if (namelen != 2) 4192 return (ENOTDIR); 4193 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4194 case VFS_CONF: 4195 if (namelen != 3) 4196 return (ENOTDIR); /* overloaded */ 4197 vfsconf_slock(); 4198 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4199 if (vfsp->vfc_typenum == name[2]) 4200 break; 4201 } 4202 vfsconf_sunlock(); 4203 if (vfsp == NULL) 4204 return (EOPNOTSUPP); 4205 #ifdef COMPAT_FREEBSD32 4206 if (req->flags & SCTL_MASK32) 4207 return (vfsconf2x32(req, vfsp)); 4208 else 4209 #endif 4210 return (vfsconf2x(req, vfsp)); 4211 } 4212 return (EOPNOTSUPP); 4213 } 4214 4215 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4216 CTLFLAG_MPSAFE, vfs_sysctl, 4217 "Generic filesystem"); 4218 4219 #if 1 || defined(COMPAT_PRELITE2) 4220 4221 static int 4222 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4223 { 4224 int error; 4225 struct vfsconf *vfsp; 4226 struct ovfsconf ovfs; 4227 4228 vfsconf_slock(); 4229 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4230 bzero(&ovfs, sizeof(ovfs)); 4231 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4232 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4233 ovfs.vfc_index = vfsp->vfc_typenum; 4234 ovfs.vfc_refcount = vfsp->vfc_refcount; 4235 ovfs.vfc_flags = vfsp->vfc_flags; 4236 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4237 if (error != 0) { 4238 vfsconf_sunlock(); 4239 return (error); 4240 } 4241 } 4242 vfsconf_sunlock(); 4243 return (0); 4244 } 4245 4246 #endif /* 1 || COMPAT_PRELITE2 */ 4247 #endif /* !BURN_BRIDGES */ 4248 4249 #define KINFO_VNODESLOP 10 4250 #ifdef notyet 4251 /* 4252 * Dump vnode list (via sysctl). 4253 */ 4254 /* ARGSUSED */ 4255 static int 4256 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4257 { 4258 struct xvnode *xvn; 4259 struct mount *mp; 4260 struct vnode *vp; 4261 int error, len, n; 4262 4263 /* 4264 * Stale numvnodes access is not fatal here. 4265 */ 4266 req->lock = 0; 4267 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4268 if (!req->oldptr) 4269 /* Make an estimate */ 4270 return (SYSCTL_OUT(req, 0, len)); 4271 4272 error = sysctl_wire_old_buffer(req, 0); 4273 if (error != 0) 4274 return (error); 4275 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4276 n = 0; 4277 mtx_lock(&mountlist_mtx); 4278 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4279 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4280 continue; 4281 MNT_ILOCK(mp); 4282 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4283 if (n == len) 4284 break; 4285 vref(vp); 4286 xvn[n].xv_size = sizeof *xvn; 4287 xvn[n].xv_vnode = vp; 4288 xvn[n].xv_id = 0; /* XXX compat */ 4289 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4290 XV_COPY(usecount); 4291 XV_COPY(writecount); 4292 XV_COPY(holdcnt); 4293 XV_COPY(mount); 4294 XV_COPY(numoutput); 4295 XV_COPY(type); 4296 #undef XV_COPY 4297 xvn[n].xv_flag = vp->v_vflag; 4298 4299 switch (vp->v_type) { 4300 case VREG: 4301 case VDIR: 4302 case VLNK: 4303 break; 4304 case VBLK: 4305 case VCHR: 4306 if (vp->v_rdev == NULL) { 4307 vrele(vp); 4308 continue; 4309 } 4310 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4311 break; 4312 case VSOCK: 4313 xvn[n].xv_socket = vp->v_socket; 4314 break; 4315 case VFIFO: 4316 xvn[n].xv_fifo = vp->v_fifoinfo; 4317 break; 4318 case VNON: 4319 case VBAD: 4320 default: 4321 /* shouldn't happen? 
*/ 4322 vrele(vp); 4323 continue; 4324 } 4325 vrele(vp); 4326 ++n; 4327 } 4328 MNT_IUNLOCK(mp); 4329 mtx_lock(&mountlist_mtx); 4330 vfs_unbusy(mp); 4331 if (n == len) 4332 break; 4333 } 4334 mtx_unlock(&mountlist_mtx); 4335 4336 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4337 free(xvn, M_TEMP); 4338 return (error); 4339 } 4340 4341 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4342 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4343 ""); 4344 #endif 4345 4346 static void 4347 unmount_or_warn(struct mount *mp) 4348 { 4349 int error; 4350 4351 error = dounmount(mp, MNT_FORCE, curthread); 4352 if (error != 0) { 4353 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4354 if (error == EBUSY) 4355 printf("BUSY)\n"); 4356 else 4357 printf("%d)\n", error); 4358 } 4359 } 4360 4361 /* 4362 * Unmount all filesystems. The list is traversed in reverse order 4363 * of mounting to avoid dependencies. 4364 */ 4365 void 4366 vfs_unmountall(void) 4367 { 4368 struct mount *mp, *tmp; 4369 4370 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4371 4372 /* 4373 * Since this only runs when rebooting, it is not interlocked. 4374 */ 4375 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4376 vfs_ref(mp); 4377 4378 /* 4379 * Forcibly unmounting "/dev" before "/" would prevent clean 4380 * unmount of the latter. 4381 */ 4382 if (mp == rootdevmp) 4383 continue; 4384 4385 unmount_or_warn(mp); 4386 } 4387 4388 if (rootdevmp != NULL) 4389 unmount_or_warn(rootdevmp); 4390 } 4391 4392 /* 4393 * perform msync on all vnodes under a mount point 4394 * the mount point must be locked. 4395 */ 4396 void 4397 vfs_msync(struct mount *mp, int flags) 4398 { 4399 struct vnode *vp, *mvp; 4400 struct vm_object *obj; 4401 4402 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4403 4404 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4405 return; 4406 4407 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4408 obj = vp->v_object; 4409 if (obj != NULL && vm_object_mightbedirty(obj) && 4410 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4411 if (!vget(vp, 4412 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4413 curthread)) { 4414 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4415 vput(vp); 4416 continue; 4417 } 4418 4419 obj = vp->v_object; 4420 if (obj != NULL) { 4421 VM_OBJECT_WLOCK(obj); 4422 vm_object_page_clean(obj, 0, 0, 4423 flags == MNT_WAIT ? 4424 OBJPC_SYNC : OBJPC_NOSYNC); 4425 VM_OBJECT_WUNLOCK(obj); 4426 } 4427 vput(vp); 4428 } 4429 } else 4430 VI_UNLOCK(vp); 4431 } 4432 } 4433 4434 static void 4435 destroy_vpollinfo_free(struct vpollinfo *vi) 4436 { 4437 4438 knlist_destroy(&vi->vpi_selinfo.si_note); 4439 mtx_destroy(&vi->vpi_lock); 4440 uma_zfree(vnodepoll_zone, vi); 4441 } 4442 4443 static void 4444 destroy_vpollinfo(struct vpollinfo *vi) 4445 { 4446 4447 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4448 seldrain(&vi->vpi_selinfo); 4449 destroy_vpollinfo_free(vi); 4450 } 4451 4452 /* 4453 * Initialize per-vnode helper structure to hold poll-related state. 
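 *
 * Illustrative usage sketch (an assumption about typical callers, not a
 * contract stated in this file): dereference vp->v_pollinfo only after
 * calling v_addpollinfo(), the way vn_pollrecord() and vfs_kqfilter()
 * below do:
 *
 *	v_addpollinfo(vp);
 *	if (vp->v_pollinfo == NULL)
 *		return (ENOMEM);
 *	mtx_lock(&vp->v_pollinfo->vpi_lock);
 *	...
 *	mtx_unlock(&vp->v_pollinfo->vpi_lock);
 *
 * The function is idempotent: a thread that loses the VI_LOCK race frees
 * its redundant allocation and returns, keeping the winner's pointer.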
4454 */ 4455 void 4456 v_addpollinfo(struct vnode *vp) 4457 { 4458 struct vpollinfo *vi; 4459 4460 if (vp->v_pollinfo != NULL) 4461 return; 4462 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4463 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4464 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4465 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4466 VI_LOCK(vp); 4467 if (vp->v_pollinfo != NULL) { 4468 VI_UNLOCK(vp); 4469 destroy_vpollinfo_free(vi); 4470 return; 4471 } 4472 vp->v_pollinfo = vi; 4473 VI_UNLOCK(vp); 4474 } 4475 4476 /* 4477 * Record a process's interest in events which might happen to 4478 * a vnode. Because poll uses the historic select-style interface 4479 * internally, this routine serves as both the ``check for any 4480 * pending events'' and the ``record my interest in future events'' 4481 * functions. (These are done together, while the lock is held, 4482 * to avoid race conditions.) 4483 */ 4484 int 4485 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4486 { 4487 4488 v_addpollinfo(vp); 4489 mtx_lock(&vp->v_pollinfo->vpi_lock); 4490 if (vp->v_pollinfo->vpi_revents & events) { 4491 /* 4492 * This leaves events we are not interested 4493 * in available for the other process which 4494 * presumably had requested them 4495 * (otherwise they would never have been 4496 * recorded). 4497 */ 4498 events &= vp->v_pollinfo->vpi_revents; 4499 vp->v_pollinfo->vpi_revents &= ~events; 4500 4501 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4502 return (events); 4503 } 4504 vp->v_pollinfo->vpi_events |= events; 4505 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4506 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4507 return (0); 4508 } 4509 4510 /* 4511 * Routine to create and manage a filesystem syncer vnode. 4512 */ 4513 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4514 static int sync_fsync(struct vop_fsync_args *); 4515 static int sync_inactive(struct vop_inactive_args *); 4516 static int sync_reclaim(struct vop_reclaim_args *); 4517 4518 static struct vop_vector sync_vnodeops = { 4519 .vop_bypass = VOP_EOPNOTSUPP, 4520 .vop_close = sync_close, /* close */ 4521 .vop_fsync = sync_fsync, /* fsync */ 4522 .vop_inactive = sync_inactive, /* inactive */ 4523 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4524 .vop_reclaim = sync_reclaim, /* reclaim */ 4525 .vop_lock1 = vop_stdlock, /* lock */ 4526 .vop_unlock = vop_stdunlock, /* unlock */ 4527 .vop_islocked = vop_stdislocked, /* islocked */ 4528 }; 4529 4530 /* 4531 * Create a new filesystem syncer vnode for the specified mount point. 4532 */ 4533 void 4534 vfs_allocate_syncvnode(struct mount *mp) 4535 { 4536 struct vnode *vp; 4537 struct bufobj *bo; 4538 static long start, incr, next; 4539 int error; 4540 4541 /* Allocate a new vnode */ 4542 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4543 if (error != 0) 4544 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4545 vp->v_type = VNON; 4546 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4547 vp->v_vflag |= VV_FORCEINSMQ; 4548 error = insmntque(vp, mp); 4549 if (error != 0) 4550 panic("vfs_allocate_syncvnode: insmntque() failed"); 4551 vp->v_vflag &= ~VV_FORCEINSMQ; 4552 VOP_UNLOCK(vp, 0); 4553 /* 4554 * Place the vnode onto the syncer worklist. We attempt to 4555 * scatter them about on the list so that they will go off 4556 * at evenly distributed times even if all the filesystems 4557 * are mounted at once.
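 *
 * Worked example (illustrative, assuming syncer_maxdelay == 32): the
 * first call resets the static state to start = 16, incr = 32 and picks
 * next = 16; the overflow on the second call halves both, giving 8, then
 * 24, 4, 12, 20, 28, 2, ... so the syncer vnodes land on the wheel in a
 * bit-reversal-like pattern (the actual slot is next % syncdelay).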
4558 */ 4559 next += incr; 4560 if (next == 0 || next > syncer_maxdelay) { 4561 start /= 2; 4562 incr /= 2; 4563 if (start == 0) { 4564 start = syncer_maxdelay / 2; 4565 incr = syncer_maxdelay; 4566 } 4567 next = start; 4568 } 4569 bo = &vp->v_bufobj; 4570 BO_LOCK(bo); 4571 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4572 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 4573 mtx_lock(&sync_mtx); 4574 sync_vnode_count++; 4575 if (mp->mnt_syncer == NULL) { 4576 mp->mnt_syncer = vp; 4577 vp = NULL; 4578 } 4579 mtx_unlock(&sync_mtx); 4580 BO_UNLOCK(bo); 4581 if (vp != NULL) { 4582 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4583 vgone(vp); 4584 vput(vp); 4585 } 4586 } 4587 4588 void 4589 vfs_deallocate_syncvnode(struct mount *mp) 4590 { 4591 struct vnode *vp; 4592 4593 mtx_lock(&sync_mtx); 4594 vp = mp->mnt_syncer; 4595 if (vp != NULL) 4596 mp->mnt_syncer = NULL; 4597 mtx_unlock(&sync_mtx); 4598 if (vp != NULL) 4599 vrele(vp); 4600 } 4601 4602 /* 4603 * Do a lazy sync of the filesystem. 4604 */ 4605 static int 4606 sync_fsync(struct vop_fsync_args *ap) 4607 { 4608 struct vnode *syncvp = ap->a_vp; 4609 struct mount *mp = syncvp->v_mount; 4610 int error, save; 4611 struct bufobj *bo; 4612 4613 /* 4614 * We only need to do something if this is a lazy evaluation. 4615 */ 4616 if (ap->a_waitfor != MNT_LAZY) 4617 return (0); 4618 4619 /* 4620 * Move ourselves to the back of the sync list. 4621 */ 4622 bo = &syncvp->v_bufobj; 4623 BO_LOCK(bo); 4624 vn_syncer_add_to_worklist(bo, syncdelay); 4625 BO_UNLOCK(bo); 4626 4627 /* 4628 * Walk the list of vnodes pushing all that are dirty and 4629 * not already on the sync list. 4630 */ 4631 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4632 return (0); 4633 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4634 vfs_unbusy(mp); 4635 return (0); 4636 } 4637 save = curthread_pflags_set(TDP_SYNCIO); 4638 /* 4639 * The filesystem at hand may be idle with free vnodes stored in the 4640 * batch. Return them instead of letting them stay there indefinitely. 4641 */ 4642 vnlru_return_batch(mp); 4643 vfs_msync(mp, MNT_NOWAIT); 4644 error = VFS_SYNC(mp, MNT_LAZY); 4645 curthread_pflags_restore(save); 4646 vn_finished_write(mp); 4647 vfs_unbusy(mp); 4648 return (error); 4649 } 4650 4651 /* 4652 * The syncer vnode is no longer referenced. 4653 */ 4654 static int 4655 sync_inactive(struct vop_inactive_args *ap) 4656 { 4657 4658 vgone(ap->a_vp); 4659 return (0); 4660 } 4661 4662 /* 4663 * The syncer vnode is no longer needed and is being decommissioned. 4664 * 4665 * Modifications to the worklist must be protected by sync_mtx.
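 * sync_reclaim() below accordingly takes sync_mtx, nested inside the
 * bufobj lock, before unhooking the bufobj from the syncer worklist.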
4666 */ 4667 static int 4668 sync_reclaim(struct vop_reclaim_args *ap) 4669 { 4670 struct vnode *vp = ap->a_vp; 4671 struct bufobj *bo; 4672 4673 bo = &vp->v_bufobj; 4674 BO_LOCK(bo); 4675 mtx_lock(&sync_mtx); 4676 if (vp->v_mount->mnt_syncer == vp) 4677 vp->v_mount->mnt_syncer = NULL; 4678 if (bo->bo_flag & BO_ONWORKLST) { 4679 LIST_REMOVE(bo, bo_synclist); 4680 syncer_worklist_len--; 4681 sync_vnode_count--; 4682 bo->bo_flag &= ~BO_ONWORKLST; 4683 } 4684 mtx_unlock(&sync_mtx); 4685 BO_UNLOCK(bo); 4686 4687 return (0); 4688 } 4689 4690 int 4691 vn_need_pageq_flush(struct vnode *vp) 4692 { 4693 struct vm_object *obj; 4694 int need; 4695 4696 MPASS(mtx_owned(VI_MTX(vp))); 4697 need = 0; 4698 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4699 vm_object_mightbedirty(obj)) 4700 need = 1; 4701 return (need); 4702 } 4703 4704 /* 4705 * Check if vnode represents a disk device 4706 */ 4707 int 4708 vn_isdisk(struct vnode *vp, int *errp) 4709 { 4710 int error; 4711 4712 if (vp->v_type != VCHR) { 4713 error = ENOTBLK; 4714 goto out; 4715 } 4716 error = 0; 4717 dev_lock(); 4718 if (vp->v_rdev == NULL) 4719 error = ENXIO; 4720 else if (vp->v_rdev->si_devsw == NULL) 4721 error = ENXIO; 4722 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4723 error = ENOTBLK; 4724 dev_unlock(); 4725 out: 4726 if (errp != NULL) 4727 *errp = error; 4728 return (error == 0); 4729 } 4730 4731 /* 4732 * Common filesystem object access control check routine. Accepts a 4733 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4734 * and optional call-by-reference privused argument allowing vaccess() 4735 * to indicate to the caller whether privilege was used to satisfy the 4736 * request (obsoleted). Returns 0 on success, or an errno on failure. 4737 */ 4738 int 4739 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4740 accmode_t accmode, struct ucred *cred, int *privused) 4741 { 4742 accmode_t dac_granted; 4743 accmode_t priv_granted; 4744 4745 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4746 ("invalid bit in accmode")); 4747 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4748 ("VAPPEND without VWRITE")); 4749 4750 /* 4751 * Look for a normal, non-privileged way to access the file/directory 4752 * as requested. If it exists, go with that. 4753 */ 4754 4755 if (privused != NULL) 4756 *privused = 0; 4757 4758 dac_granted = 0; 4759 4760 /* Check the owner. */ 4761 if (cred->cr_uid == file_uid) { 4762 dac_granted |= VADMIN; 4763 if (file_mode & S_IXUSR) 4764 dac_granted |= VEXEC; 4765 if (file_mode & S_IRUSR) 4766 dac_granted |= VREAD; 4767 if (file_mode & S_IWUSR) 4768 dac_granted |= (VWRITE | VAPPEND); 4769 4770 if ((accmode & dac_granted) == accmode) 4771 return (0); 4772 4773 goto privcheck; 4774 } 4775 4776 /* Otherwise, check the groups (first match) */ 4777 if (groupmember(file_gid, cred)) { 4778 if (file_mode & S_IXGRP) 4779 dac_granted |= VEXEC; 4780 if (file_mode & S_IRGRP) 4781 dac_granted |= VREAD; 4782 if (file_mode & S_IWGRP) 4783 dac_granted |= (VWRITE | VAPPEND); 4784 4785 if ((accmode & dac_granted) == accmode) 4786 return (0); 4787 4788 goto privcheck; 4789 } 4790 4791 /* Otherwise, check everyone else. 
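 * For example (illustrative): with a mode-0644 file and a credential that
 * is neither the owner nor a group member, only S_IROTH matches, so
 * dac_granted becomes VREAD; a VREAD request succeeds right here, while a
 * VWRITE request falls through to the privilege check below.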
*/ 4792 if (file_mode & S_IXOTH) 4793 dac_granted |= VEXEC; 4794 if (file_mode & S_IROTH) 4795 dac_granted |= VREAD; 4796 if (file_mode & S_IWOTH) 4797 dac_granted |= (VWRITE | VAPPEND); 4798 if ((accmode & dac_granted) == accmode) 4799 return (0); 4800 4801 privcheck: 4802 /* 4803 * Build a privilege mask to determine if the set of privileges 4804 * satisfies the requirements when combined with the granted mask 4805 * from above. For each privilege, if the privilege is required, 4806 * bitwise or the request type onto the priv_granted mask. 4807 */ 4808 priv_granted = 0; 4809 4810 if (type == VDIR) { 4811 /* 4812 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4813 * requests, instead of PRIV_VFS_EXEC. 4814 */ 4815 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4816 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4817 priv_granted |= VEXEC; 4818 } else { 4819 /* 4820 * Ensure that at least one execute bit is on. Otherwise, 4821 * a privileged user will always succeed, and we don't want 4822 * this to happen unless the file really is executable. 4823 */ 4824 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4825 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4826 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4827 priv_granted |= VEXEC; 4828 } 4829 4830 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4831 !priv_check_cred(cred, PRIV_VFS_READ)) 4832 priv_granted |= VREAD; 4833 4834 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4835 !priv_check_cred(cred, PRIV_VFS_WRITE)) 4836 priv_granted |= (VWRITE | VAPPEND); 4837 4838 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4839 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 4840 priv_granted |= VADMIN; 4841 4842 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4843 /* XXX audit: privilege used */ 4844 if (privused != NULL) 4845 *privused = 1; 4846 return (0); 4847 } 4848 4849 return ((accmode & VADMIN) ? EPERM : EACCES); 4850 } 4851 4852 /* 4853 * Credential check based on process requesting service, and per-attribute 4854 * permissions. 4855 */ 4856 int 4857 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4858 struct thread *td, accmode_t accmode) 4859 { 4860 4861 /* 4862 * Kernel-invoked always succeeds. 4863 */ 4864 if (cred == NOCRED) 4865 return (0); 4866 4867 /* 4868 * Do not allow privileged processes in jail to directly manipulate 4869 * system attributes. 4870 */ 4871 switch (attrnamespace) { 4872 case EXTATTR_NAMESPACE_SYSTEM: 4873 /* Potentially should be: return (EPERM); */ 4874 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 4875 case EXTATTR_NAMESPACE_USER: 4876 return (VOP_ACCESS(vp, accmode, cred, td)); 4877 default: 4878 return (EPERM); 4879 } 4880 } 4881 4882 #ifdef DEBUG_VFS_LOCKS 4883 /* 4884 * This only exists to suppress warnings from unlocked specfs accesses. It is 4885 * no longer ok to have an unlocked VFS. 4886 */ 4887 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4888 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4889 4890 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4891 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4892 "Drop into debugger on lock violation"); 4893 4894 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4895 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4896 0, "Check for interlock across VOPs"); 4897 4898 int vfs_badlock_print = 1; /* Print lock violations. 
*/ 4899 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4900 0, "Print lock violations"); 4901 4902 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4903 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4904 0, "Print vnode details on lock violations"); 4905 4906 #ifdef KDB 4907 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 4908 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4909 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4910 #endif 4911 4912 static void 4913 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4914 { 4915 4916 #ifdef KDB 4917 if (vfs_badlock_backtrace) 4918 kdb_backtrace(); 4919 #endif 4920 if (vfs_badlock_vnode) 4921 vn_printf(vp, "vnode "); 4922 if (vfs_badlock_print) 4923 printf("%s: %p %s\n", str, (void *)vp, msg); 4924 if (vfs_badlock_ddb) 4925 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4926 } 4927 4928 void 4929 assert_vi_locked(struct vnode *vp, const char *str) 4930 { 4931 4932 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4933 vfs_badlock("interlock is not locked but should be", str, vp); 4934 } 4935 4936 void 4937 assert_vi_unlocked(struct vnode *vp, const char *str) 4938 { 4939 4940 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4941 vfs_badlock("interlock is locked but should not be", str, vp); 4942 } 4943 4944 void 4945 assert_vop_locked(struct vnode *vp, const char *str) 4946 { 4947 int locked; 4948 4949 if (!IGNORE_LOCK(vp)) { 4950 locked = VOP_ISLOCKED(vp); 4951 if (locked == 0 || locked == LK_EXCLOTHER) 4952 vfs_badlock("is not locked but should be", str, vp); 4953 } 4954 } 4955 4956 void 4957 assert_vop_unlocked(struct vnode *vp, const char *str) 4958 { 4959 4960 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4961 vfs_badlock("is locked but should not be", str, vp); 4962 } 4963 4964 void 4965 assert_vop_elocked(struct vnode *vp, const char *str) 4966 { 4967 4968 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4969 vfs_badlock("is not exclusive locked but should be", str, vp); 4970 } 4971 #endif /* DEBUG_VFS_LOCKS */ 4972 4973 void 4974 vop_rename_fail(struct vop_rename_args *ap) 4975 { 4976 4977 if (ap->a_tvp != NULL) 4978 vput(ap->a_tvp); 4979 if (ap->a_tdvp == ap->a_tvp) 4980 vrele(ap->a_tdvp); 4981 else 4982 vput(ap->a_tdvp); 4983 vrele(ap->a_fdvp); 4984 vrele(ap->a_fvp); 4985 } 4986 4987 void 4988 vop_rename_pre(void *ap) 4989 { 4990 struct vop_rename_args *a = ap; 4991 4992 #ifdef DEBUG_VFS_LOCKS 4993 if (a->a_tvp) 4994 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4995 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4996 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4997 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4998 4999 /* Check the source (from). */ 5000 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5001 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5002 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5003 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5004 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5005 5006 /* Check the target. 
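 * Both the target directory vnode and, when it exists, the target vnode
 * itself must already be locked by the caller, as asserted below.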
*/ 5007 if (a->a_tvp) 5008 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5009 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5010 #endif 5011 if (a->a_tdvp != a->a_fdvp) 5012 vhold(a->a_fdvp); 5013 if (a->a_tvp != a->a_fvp) 5014 vhold(a->a_fvp); 5015 vhold(a->a_tdvp); 5016 if (a->a_tvp) 5017 vhold(a->a_tvp); 5018 } 5019 5020 #ifdef DEBUG_VFS_LOCKS 5021 void 5022 vop_strategy_pre(void *ap) 5023 { 5024 struct vop_strategy_args *a; 5025 struct buf *bp; 5026 5027 a = ap; 5028 bp = a->a_bp; 5029 5030 /* 5031 * Cluster ops lock their component buffers but not the IO container. 5032 */ 5033 if ((bp->b_flags & B_CLUSTER) != 0) 5034 return; 5035 5036 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 5037 if (vfs_badlock_print) 5038 printf( 5039 "VOP_STRATEGY: bp is not locked but should be\n"); 5040 if (vfs_badlock_ddb) 5041 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5042 } 5043 } 5044 5045 void 5046 vop_lock_pre(void *ap) 5047 { 5048 struct vop_lock1_args *a = ap; 5049 5050 if ((a->a_flags & LK_INTERLOCK) == 0) 5051 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5052 else 5053 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5054 } 5055 5056 void 5057 vop_lock_post(void *ap, int rc) 5058 { 5059 struct vop_lock1_args *a = ap; 5060 5061 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5062 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5063 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5064 } 5065 5066 void 5067 vop_unlock_pre(void *ap) 5068 { 5069 struct vop_unlock_args *a = ap; 5070 5071 if (a->a_flags & LK_INTERLOCK) 5072 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 5073 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5074 } 5075 5076 void 5077 vop_unlock_post(void *ap, int rc) 5078 { 5079 struct vop_unlock_args *a = ap; 5080 5081 if (a->a_flags & LK_INTERLOCK) 5082 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 5083 } 5084 5085 void 5086 vop_need_inactive_pre(void *ap) 5087 { 5088 struct vop_need_inactive_args *a = ap; 5089 5090 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5091 } 5092 5093 void 5094 vop_need_inactive_post(void *ap, int rc) 5095 { 5096 struct vop_need_inactive_args *a = ap; 5097 5098 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5099 } 5100 #endif 5101 5102 void 5103 vop_create_post(void *ap, int rc) 5104 { 5105 struct vop_create_args *a = ap; 5106 5107 if (!rc) 5108 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5109 } 5110 5111 void 5112 vop_deleteextattr_post(void *ap, int rc) 5113 { 5114 struct vop_deleteextattr_args *a = ap; 5115 5116 if (!rc) 5117 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5118 } 5119 5120 void 5121 vop_link_post(void *ap, int rc) 5122 { 5123 struct vop_link_args *a = ap; 5124 5125 if (!rc) { 5126 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5127 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5128 } 5129 } 5130 5131 void 5132 vop_mkdir_post(void *ap, int rc) 5133 { 5134 struct vop_mkdir_args *a = ap; 5135 5136 if (!rc) 5137 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5138 } 5139 5140 void 5141 vop_mknod_post(void *ap, int rc) 5142 { 5143 struct vop_mknod_args *a = ap; 5144 5145 if (!rc) 5146 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5147 } 5148 5149 void 5150 vop_reclaim_post(void *ap, int rc) 5151 { 5152 struct vop_reclaim_args *a = ap; 5153 5154 if (!rc) 5155 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5156 } 5157 5158 void 5159 vop_remove_post(void *ap, int rc) 5160 { 5161 struct vop_remove_args *a = ap; 5162 5163 if (!rc) { 5164 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5165 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5166 } 5167 } 5168 5169 void 5170 vop_rename_post(void *ap, int rc) 5171 { 
5172 struct vop_rename_args *a = ap; 5173 long hint; 5174 5175 if (!rc) { 5176 hint = NOTE_WRITE; 5177 if (a->a_fdvp == a->a_tdvp) { 5178 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5179 hint |= NOTE_LINK; 5180 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5181 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5182 } else { 5183 hint |= NOTE_EXTEND; 5184 if (a->a_fvp->v_type == VDIR) 5185 hint |= NOTE_LINK; 5186 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5187 5188 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5189 a->a_tvp->v_type == VDIR) 5190 hint &= ~NOTE_LINK; 5191 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5192 } 5193 5194 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5195 if (a->a_tvp) 5196 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5197 } 5198 if (a->a_tdvp != a->a_fdvp) 5199 vdrop(a->a_fdvp); 5200 if (a->a_tvp != a->a_fvp) 5201 vdrop(a->a_fvp); 5202 vdrop(a->a_tdvp); 5203 if (a->a_tvp) 5204 vdrop(a->a_tvp); 5205 } 5206 5207 void 5208 vop_rmdir_post(void *ap, int rc) 5209 { 5210 struct vop_rmdir_args *a = ap; 5211 5212 if (!rc) { 5213 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5214 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5215 } 5216 } 5217 5218 void 5219 vop_setattr_post(void *ap, int rc) 5220 { 5221 struct vop_setattr_args *a = ap; 5222 5223 if (!rc) 5224 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5225 } 5226 5227 void 5228 vop_setextattr_post(void *ap, int rc) 5229 { 5230 struct vop_setextattr_args *a = ap; 5231 5232 if (!rc) 5233 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5234 } 5235 5236 void 5237 vop_symlink_post(void *ap, int rc) 5238 { 5239 struct vop_symlink_args *a = ap; 5240 5241 if (!rc) 5242 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5243 } 5244 5245 void 5246 vop_open_post(void *ap, int rc) 5247 { 5248 struct vop_open_args *a = ap; 5249 5250 if (!rc) 5251 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5252 } 5253 5254 void 5255 vop_close_post(void *ap, int rc) 5256 { 5257 struct vop_close_args *a = ap; 5258 5259 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5260 (a->a_vp->v_iflag & VI_DOOMED) == 0)) { 5261 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5262 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5263 } 5264 } 5265 5266 void 5267 vop_read_post(void *ap, int rc) 5268 { 5269 struct vop_read_args *a = ap; 5270 5271 if (!rc) 5272 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5273 } 5274 5275 void 5276 vop_readdir_post(void *ap, int rc) 5277 { 5278 struct vop_readdir_args *a = ap; 5279 5280 if (!rc) 5281 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5282 } 5283 5284 static struct knlist fs_knlist; 5285 5286 static void 5287 vfs_event_init(void *arg) 5288 { 5289 knlist_init_mtx(&fs_knlist, NULL); 5290 } 5291 /* XXX - correct order? 
*/ 5292 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5293 5294 void 5295 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5296 { 5297 5298 KNOTE_UNLOCKED(&fs_knlist, event); 5299 } 5300 5301 static int filt_fsattach(struct knote *kn); 5302 static void filt_fsdetach(struct knote *kn); 5303 static int filt_fsevent(struct knote *kn, long hint); 5304 5305 struct filterops fs_filtops = { 5306 .f_isfd = 0, 5307 .f_attach = filt_fsattach, 5308 .f_detach = filt_fsdetach, 5309 .f_event = filt_fsevent 5310 }; 5311 5312 static int 5313 filt_fsattach(struct knote *kn) 5314 { 5315 5316 kn->kn_flags |= EV_CLEAR; 5317 knlist_add(&fs_knlist, kn, 0); 5318 return (0); 5319 } 5320 5321 static void 5322 filt_fsdetach(struct knote *kn) 5323 { 5324 5325 knlist_remove(&fs_knlist, kn, 0); 5326 } 5327 5328 static int 5329 filt_fsevent(struct knote *kn, long hint) 5330 { 5331 5332 kn->kn_fflags |= hint; 5333 return (kn->kn_fflags != 0); 5334 } 5335 5336 static int 5337 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5338 { 5339 struct vfsidctl vc; 5340 int error; 5341 struct mount *mp; 5342 5343 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5344 if (error) 5345 return (error); 5346 if (vc.vc_vers != VFS_CTL_VERS1) 5347 return (EINVAL); 5348 mp = vfs_getvfs(&vc.vc_fsid); 5349 if (mp == NULL) 5350 return (ENOENT); 5351 /* ensure that a specific sysctl goes to the right filesystem. */ 5352 if (strcmp(vc.vc_fstypename, "*") != 0 && 5353 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5354 vfs_rel(mp); 5355 return (EINVAL); 5356 } 5357 VCTLTOREQ(&vc, req); 5358 error = VFS_SYSCTL(mp, vc.vc_op, req); 5359 vfs_rel(mp); 5360 return (error); 5361 } 5362 5363 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 5364 NULL, 0, sysctl_vfs_ctl, "", 5365 "Sysctl by fsid"); 5366 5367 /* 5368 * Function to initialize a va_filerev field sensibly. 5369 * XXX: Wouldn't a random number make a lot more sense ?? 
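 *
 * Worked example (illustrative): at an uptime of 5.25 seconds bt.sec == 5
 * and bt.frac == 0x4000000000000000, so the result is
 * (5 << 32) | 0x40000000 == 0x540000000, a value that increases
 * monotonically within a single boot at roughly 2^-32 second granularity.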
5370 */ 5371 u_quad_t 5372 init_va_filerev(void) 5373 { 5374 struct bintime bt; 5375 5376 getbinuptime(&bt); 5377 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5378 } 5379 5380 static int filt_vfsread(struct knote *kn, long hint); 5381 static int filt_vfswrite(struct knote *kn, long hint); 5382 static int filt_vfsvnode(struct knote *kn, long hint); 5383 static void filt_vfsdetach(struct knote *kn); 5384 static struct filterops vfsread_filtops = { 5385 .f_isfd = 1, 5386 .f_detach = filt_vfsdetach, 5387 .f_event = filt_vfsread 5388 }; 5389 static struct filterops vfswrite_filtops = { 5390 .f_isfd = 1, 5391 .f_detach = filt_vfsdetach, 5392 .f_event = filt_vfswrite 5393 }; 5394 static struct filterops vfsvnode_filtops = { 5395 .f_isfd = 1, 5396 .f_detach = filt_vfsdetach, 5397 .f_event = filt_vfsvnode 5398 }; 5399 5400 static void 5401 vfs_knllock(void *arg) 5402 { 5403 struct vnode *vp = arg; 5404 5405 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5406 } 5407 5408 static void 5409 vfs_knlunlock(void *arg) 5410 { 5411 struct vnode *vp = arg; 5412 5413 VOP_UNLOCK(vp, 0); 5414 } 5415 5416 static void 5417 vfs_knl_assert_locked(void *arg) 5418 { 5419 #ifdef DEBUG_VFS_LOCKS 5420 struct vnode *vp = arg; 5421 5422 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5423 #endif 5424 } 5425 5426 static void 5427 vfs_knl_assert_unlocked(void *arg) 5428 { 5429 #ifdef DEBUG_VFS_LOCKS 5430 struct vnode *vp = arg; 5431 5432 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5433 #endif 5434 } 5435 5436 int 5437 vfs_kqfilter(struct vop_kqfilter_args *ap) 5438 { 5439 struct vnode *vp = ap->a_vp; 5440 struct knote *kn = ap->a_kn; 5441 struct knlist *knl; 5442 5443 switch (kn->kn_filter) { 5444 case EVFILT_READ: 5445 kn->kn_fop = &vfsread_filtops; 5446 break; 5447 case EVFILT_WRITE: 5448 kn->kn_fop = &vfswrite_filtops; 5449 break; 5450 case EVFILT_VNODE: 5451 kn->kn_fop = &vfsvnode_filtops; 5452 break; 5453 default: 5454 return (EINVAL); 5455 } 5456 5457 kn->kn_hook = (caddr_t)vp; 5458 5459 v_addpollinfo(vp); 5460 if (vp->v_pollinfo == NULL) 5461 return (ENOMEM); 5462 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5463 vhold(vp); 5464 knlist_add(knl, kn, 0); 5465 5466 return (0); 5467 } 5468 5469 /* 5470 * Detach knote from vnode 5471 */ 5472 static void 5473 filt_vfsdetach(struct knote *kn) 5474 { 5475 struct vnode *vp = (struct vnode *)kn->kn_hook; 5476 5477 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5478 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5479 vdrop(vp); 5480 } 5481 5482 /*ARGSUSED*/ 5483 static int 5484 filt_vfsread(struct knote *kn, long hint) 5485 { 5486 struct vnode *vp = (struct vnode *)kn->kn_hook; 5487 struct vattr va; 5488 int res; 5489 5490 /* 5491 * filesystem is gone, so set the EOF flag and schedule 5492 * the knote for deletion. 5493 */ 5494 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5495 VI_LOCK(vp); 5496 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5497 VI_UNLOCK(vp); 5498 return (1); 5499 } 5500 5501 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5502 return (0); 5503 5504 VI_LOCK(vp); 5505 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5506 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5507 VI_UNLOCK(vp); 5508 return (res); 5509 } 5510 5511 /*ARGSUSED*/ 5512 static int 5513 filt_vfswrite(struct knote *kn, long hint) 5514 { 5515 struct vnode *vp = (struct vnode *)kn->kn_hook; 5516 5517 VI_LOCK(vp); 5518 5519 /* 5520 * filesystem is gone, so set the EOF flag and schedule 5521 * the knote for deletion. 
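 * (EV_ONESHOT makes kqueue delete the knote once the event has been
 * retrieved; EV_EOF tells the consumer that no further data is coming.)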
5522 */ 5523 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5524 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5525 5526 kn->kn_data = 0; 5527 VI_UNLOCK(vp); 5528 return (1); 5529 } 5530 5531 static int 5532 filt_vfsvnode(struct knote *kn, long hint) 5533 { 5534 struct vnode *vp = (struct vnode *)kn->kn_hook; 5535 int res; 5536 5537 VI_LOCK(vp); 5538 if (kn->kn_sfflags & hint) 5539 kn->kn_fflags |= hint; 5540 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5541 kn->kn_flags |= EV_EOF; 5542 VI_UNLOCK(vp); 5543 return (1); 5544 } 5545 res = (kn->kn_fflags != 0); 5546 VI_UNLOCK(vp); 5547 return (res); 5548 } 5549 5550 /* 5551 * Returns whether the directory is empty or not. 5552 * If it is empty, the return value is 0; otherwise 5553 * the return value is an error value (which may 5554 * be ENOTEMPTY). 5555 */ 5556 int 5557 vfs_emptydir(struct vnode *vp) 5558 { 5559 struct uio uio; 5560 struct iovec iov; 5561 struct dirent *dirent, *dp, *endp; 5562 int error, eof; 5563 5564 error = 0; 5565 eof = 0; 5566 5567 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 5568 5569 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 5570 iov.iov_base = dirent; 5571 iov.iov_len = sizeof(struct dirent); 5572 5573 uio.uio_iov = &iov; 5574 uio.uio_iovcnt = 1; 5575 uio.uio_offset = 0; 5576 uio.uio_resid = sizeof(struct dirent); 5577 uio.uio_segflg = UIO_SYSSPACE; 5578 uio.uio_rw = UIO_READ; 5579 uio.uio_td = curthread; 5580 5581 while (eof == 0 && error == 0) { 5582 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 5583 NULL, NULL); 5584 if (error != 0) 5585 break; 5586 endp = (void *)((uint8_t *)dirent + 5587 sizeof(struct dirent) - uio.uio_resid); 5588 for (dp = dirent; dp < endp; 5589 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 5590 if (dp->d_type == DT_WHT) 5591 continue; 5592 if (dp->d_namlen == 0) 5593 continue; 5594 if (dp->d_type != DT_DIR && 5595 dp->d_type != DT_UNKNOWN) { 5596 error = ENOTEMPTY; 5597 break; 5598 } 5599 if (dp->d_namlen > 2) { 5600 error = ENOTEMPTY; 5601 break; 5602 } 5603 if (dp->d_namlen == 1 && 5604 dp->d_name[0] != '.') { 5605 error = ENOTEMPTY; 5606 break; 5607 } 5608 if (dp->d_namlen == 2 && 5609 dp->d_name[1] != '.') { 5610 error = ENOTEMPTY; 5611 break; 5612 } 5613 uio.uio_resid = sizeof(struct dirent); 5614 } 5615 } 5616 free(dirent, M_TEMP); 5617 return (error); 5618 } 5619 5620 int 5621 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5622 { 5623 int error; 5624 5625 if (dp->d_reclen > ap->a_uio->uio_resid) 5626 return (ENAMETOOLONG); 5627 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5628 if (error) { 5629 if (ap->a_ncookies != NULL) { 5630 if (ap->a_cookies != NULL) 5631 free(ap->a_cookies, M_TEMP); 5632 ap->a_cookies = NULL; 5633 *ap->a_ncookies = 0; 5634 } 5635 return (error); 5636 } 5637 if (ap->a_ncookies == NULL) 5638 return (0); 5639 5640 KASSERT(ap->a_cookies, 5641 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5642 5643 *ap->a_cookies = realloc(*ap->a_cookies, 5644 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5645 (*ap->a_cookies)[*ap->a_ncookies] = off; 5646 *ap->a_ncookies += 1; 5647 return (0); 5648 } 5649 5650 /* 5651 * Mark for update the access time of the file if the filesystem 5652 * supports VOP_MARKATIME. This functionality is used by execve and 5653 * mmap, so we want to avoid the I/O implied by directly setting 5654 * va_atime for the sake of efficiency. 
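 *
 * Illustrative call-site sketch (an assumption, not a contract from this
 * file): the vnode must be locked, per the assertion below, so a caller
 * would look roughly like:
 *
 *	vn_lock(vp, LK_SHARED | LK_RETRY);
 *	vfs_mark_atime(vp, cred);
 *	VOP_UNLOCK(vp, 0);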
5655 */ 5656 void 5657 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5658 { 5659 struct mount *mp; 5660 5661 mp = vp->v_mount; 5662 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5663 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5664 (void)VOP_MARKATIME(vp); 5665 } 5666 5667 /* 5668 * The purpose of this routine is to remove granularity from accmode_t, 5669 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5670 * VADMIN and VAPPEND. 5671 * 5672 * If it returns 0, the caller is supposed to continue with the usual 5673 * access checks using 'accmode' as modified by this routine. If it 5674 * returns a nonzero value, the caller is supposed to return that value 5675 * as errno. 5676 * 5677 * Note that after this routine runs, accmode may be zero. 5678 */ 5679 int 5680 vfs_unixify_accmode(accmode_t *accmode) 5681 { 5682 /* 5683 * There is no way to specify an explicit "deny" rule using 5684 * file mode or POSIX.1e ACLs. 5685 */ 5686 if (*accmode & VEXPLICIT_DENY) { 5687 *accmode = 0; 5688 return (0); 5689 } 5690 5691 /* 5692 * None of these can be translated into usual access bits. 5693 * Also, the common case for NFSv4 ACLs is to not contain 5694 * either of these bits. Caller should check for VWRITE 5695 * on the containing directory instead. 5696 */ 5697 if (*accmode & (VDELETE_CHILD | VDELETE)) 5698 return (EPERM); 5699 5700 if (*accmode & VADMIN_PERMS) { 5701 *accmode &= ~VADMIN_PERMS; 5702 *accmode |= VADMIN; 5703 } 5704 5705 /* 5706 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5707 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5708 */ 5709 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5710 5711 return (0); 5712 } 5713 5714 /* 5715 * Clear out a doomed vnode (if any) and replace it with a new one as long 5716 * as the fs is not being unmounted. Return the root vnode to the caller. 5717 */ 5718 static int __noinline 5719 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 5720 { 5721 struct vnode *vp; 5722 int error; 5723 5724 restart: 5725 if (mp->mnt_rootvnode != NULL) { 5726 MNT_ILOCK(mp); 5727 vp = mp->mnt_rootvnode; 5728 if (vp != NULL) { 5729 if ((vp->v_iflag & VI_DOOMED) == 0) { 5730 vrefact(vp); 5731 MNT_IUNLOCK(mp); 5732 error = vn_lock(vp, flags); 5733 if (error == 0) { 5734 *vpp = vp; 5735 return (0); 5736 } 5737 vrele(vp); 5738 goto restart; 5739 } 5740 /* 5741 * Clear the old one. 5742 */ 5743 mp->mnt_rootvnode = NULL; 5744 } 5745 MNT_IUNLOCK(mp); 5746 if (vp != NULL) { 5747 /* 5748 * Paired with a fence in vfs_op_thread_exit().
5749 */ 5750 atomic_thread_fence_acq(); 5751 vfs_op_barrier_wait(mp); 5752 vrele(vp); 5753 } 5754 } 5755 error = VFS_CACHEDROOT(mp, flags, vpp); 5756 if (error != 0) 5757 return (error); 5758 if (mp->mnt_vfs_ops == 0) { 5759 MNT_ILOCK(mp); 5760 if (mp->mnt_vfs_ops != 0) { 5761 MNT_IUNLOCK(mp); 5762 return (0); 5763 } 5764 if (mp->mnt_rootvnode == NULL) { 5765 vrefact(*vpp); 5766 mp->mnt_rootvnode = *vpp; 5767 } else { 5768 if (mp->mnt_rootvnode != *vpp) { 5769 if ((mp->mnt_rootvnode->v_iflag & VI_DOOMED) == 0) { 5770 panic("%s: mismatch between vnode returned" 5771 " by VFS_CACHEDROOT and the one cached" 5772 " (%p != %p)", 5773 __func__, *vpp, mp->mnt_rootvnode); 5774 } 5775 } 5776 } 5777 MNT_IUNLOCK(mp); 5778 } 5779 return (0); 5780 } 5781 5782 int 5783 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 5784 { 5785 struct vnode *vp; 5786 int error; 5787 5788 if (!vfs_op_thread_enter(mp)) 5789 return (vfs_cache_root_fallback(mp, flags, vpp)); 5790 vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); 5791 if (vp == NULL || (vp->v_iflag & VI_DOOMED)) { 5792 vfs_op_thread_exit(mp); 5793 return (vfs_cache_root_fallback(mp, flags, vpp)); 5794 } 5795 vrefact(vp); 5796 vfs_op_thread_exit(mp); 5797 error = vn_lock(vp, flags); 5798 if (error != 0) { 5799 vrele(vp); 5800 return (vfs_cache_root_fallback(mp, flags, vpp)); 5801 } 5802 *vpp = vp; 5803 return (0); 5804 } 5805 5806 struct vnode * 5807 vfs_cache_root_clear(struct mount *mp) 5808 { 5809 struct vnode *vp; 5810 5811 /* 5812 * ops > 0 guarantees there is nobody who can see this vnode 5813 */ 5814 MPASS(mp->mnt_vfs_ops > 0); 5815 vp = mp->mnt_rootvnode; 5816 mp->mnt_rootvnode = NULL; 5817 return (vp); 5818 } 5819 5820 void 5821 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 5822 { 5823 5824 MPASS(mp->mnt_vfs_ops > 0); 5825 vrefact(vp); 5826 mp->mnt_rootvnode = vp; 5827 } 5828 5829 /* 5830 * These are helper functions for filesystems to traverse all 5831 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5832 * 5833 * This interface replaces MNT_VNODE_FOREACH. 5834 */ 5835 5836 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5837 5838 struct vnode * 5839 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5840 { 5841 struct vnode *vp; 5842 5843 if (should_yield()) 5844 kern_yield(PRI_USER); 5845 MNT_ILOCK(mp); 5846 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5847 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5848 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5849 /* Allow a racy peek at VI_DOOMED to save a lock acquisition.
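 * The flag is re-checked under the vnode interlock just below, so a
 * stale read here costs at most one wasted iteration.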
*/ 5850 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5851 continue; 5852 VI_LOCK(vp); 5853 if ((vp->v_iflag & VI_DOOMED) != 0) { 5854 VI_UNLOCK(vp); 5855 continue; 5856 } 5857 break; 5858 } 5859 if (vp == NULL) { 5860 __mnt_vnode_markerfree_all(mvp, mp); 5861 /* MNT_IUNLOCK(mp); -- done in above function */ 5862 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5863 return (NULL); 5864 } 5865 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5866 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5867 MNT_IUNLOCK(mp); 5868 return (vp); 5869 } 5870 5871 struct vnode * 5872 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5873 { 5874 struct vnode *vp; 5875 5876 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5877 MNT_ILOCK(mp); 5878 MNT_REF(mp); 5879 (*mvp)->v_mount = mp; 5880 (*mvp)->v_type = VMARKER; 5881 5882 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5883 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5884 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5885 continue; 5886 VI_LOCK(vp); 5887 if ((vp->v_iflag & VI_DOOMED) != 0) { 5888 VI_UNLOCK(vp); 5889 continue; 5890 } 5891 break; 5892 } 5893 if (vp == NULL) { 5894 MNT_REL(mp); 5895 MNT_IUNLOCK(mp); 5896 free(*mvp, M_VNODE_MARKER); 5897 *mvp = NULL; 5898 return (NULL); 5899 } 5900 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5901 MNT_IUNLOCK(mp); 5902 return (vp); 5903 } 5904 5905 void 5906 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5907 { 5908 5909 if (*mvp == NULL) { 5910 MNT_IUNLOCK(mp); 5911 return; 5912 } 5913 5914 mtx_assert(MNT_MTX(mp), MA_OWNED); 5915 5916 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5917 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5918 MNT_REL(mp); 5919 MNT_IUNLOCK(mp); 5920 free(*mvp, M_VNODE_MARKER); 5921 *mvp = NULL; 5922 } 5923 5924 /* 5925 * These are helper functions for filesystems to traverse their 5926 * active vnodes. See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5927 */ 5928 static void 5929 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5930 { 5931 5932 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5933 5934 MNT_ILOCK(mp); 5935 MNT_REL(mp); 5936 MNT_IUNLOCK(mp); 5937 free(*mvp, M_VNODE_MARKER); 5938 *mvp = NULL; 5939 } 5940 5941 /* 5942 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5943 * conventional lock order during mnt_vnode_next_active iteration. 5944 * 5945 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5946 * The list lock is dropped and reacquired. On success, both locks are held. 5947 * On failure, the mount vnode list lock is held but the vnode interlock is 5948 * not, and the procedure may have yielded. 
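 *
 * Illustrative outline of the dance (a restatement of the code below, for
 * orientation): take a hold reference while the list lock is still held,
 * drop the list lock, acquire the vnode interlock, retake the list lock,
 * and then verify that vp still immediately follows the marker; any
 * failure aborts with the list lock held and the interlock released.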
5949 */ 5950 static bool 5951 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5952 struct vnode *vp) 5953 { 5954 const struct vnode *tmp; 5955 bool held, ret; 5956 5957 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5958 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5959 ("%s: bad marker", __func__)); 5960 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5961 ("%s: inappropriate vnode", __func__)); 5962 ASSERT_VI_UNLOCKED(vp, __func__); 5963 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5964 5965 ret = false; 5966 5967 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5968 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5969 5970 /* 5971 * Use a hold to prevent vp from disappearing while the mount vnode 5972 * list lock is dropped and reacquired. Normally a hold would be 5973 * acquired with vhold(), but that might try to acquire the vnode 5974 * interlock, which would be a LOR with the mount vnode list lock. 5975 */ 5976 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5977 mtx_unlock(&mp->mnt_listmtx); 5978 if (!held) 5979 goto abort; 5980 VI_LOCK(vp); 5981 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5982 vdropl(vp); 5983 goto abort; 5984 } 5985 mtx_lock(&mp->mnt_listmtx); 5986 5987 /* 5988 * Determine whether the vnode is still the next one after the marker, 5989 * excepting any other markers. If the vnode has not been doomed by 5990 * vgone() then the hold should have ensured that it remained on the 5991 * active list. If it has been doomed but is still on the active list, 5992 * don't abort, but rather skip over it (avoid spinning on doomed 5993 * vnodes). 5994 */ 5995 tmp = mvp; 5996 do { 5997 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5998 } while (tmp != NULL && tmp->v_type == VMARKER); 5999 if (tmp != vp) { 6000 mtx_unlock(&mp->mnt_listmtx); 6001 VI_UNLOCK(vp); 6002 goto abort; 6003 } 6004 6005 ret = true; 6006 goto out; 6007 abort: 6008 maybe_yield(); 6009 mtx_lock(&mp->mnt_listmtx); 6010 out: 6011 if (ret) 6012 ASSERT_VI_LOCKED(vp, __func__); 6013 else 6014 ASSERT_VI_UNLOCKED(vp, __func__); 6015 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6016 return (ret); 6017 } 6018 6019 static struct vnode * 6020 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6021 { 6022 struct vnode *vp, *nvp; 6023 6024 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6025 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6026 restart: 6027 vp = TAILQ_NEXT(*mvp, v_actfreelist); 6028 while (vp != NULL) { 6029 if (vp->v_type == VMARKER) { 6030 vp = TAILQ_NEXT(vp, v_actfreelist); 6031 continue; 6032 } 6033 /* 6034 * Try-lock because this is the wrong lock order. If that does 6035 * not succeed, drop the mount vnode list lock and try to 6036 * reacquire it and the vnode interlock in the right order. 
6037 */ 6038 if (!VI_TRYLOCK(vp) && 6039 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 6040 goto restart; 6041 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6042 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6043 ("alien vnode on the active list %p %p", vp, mp)); 6044 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 6045 break; 6046 nvp = TAILQ_NEXT(vp, v_actfreelist); 6047 VI_UNLOCK(vp); 6048 vp = nvp; 6049 } 6050 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6051 6052 /* Check if we are done */ 6053 if (vp == NULL) { 6054 mtx_unlock(&mp->mnt_listmtx); 6055 mnt_vnode_markerfree_active(mvp, mp); 6056 return (NULL); 6057 } 6058 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 6059 mtx_unlock(&mp->mnt_listmtx); 6060 ASSERT_VI_LOCKED(vp, "active iter"); 6061 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 6062 return (vp); 6063 } 6064 6065 struct vnode * 6066 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6067 { 6068 6069 if (should_yield()) 6070 kern_yield(PRI_USER); 6071 mtx_lock(&mp->mnt_listmtx); 6072 return (mnt_vnode_next_active(mvp, mp)); 6073 } 6074 6075 struct vnode * 6076 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 6077 { 6078 struct vnode *vp; 6079 6080 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 6081 MNT_ILOCK(mp); 6082 MNT_REF(mp); 6083 MNT_IUNLOCK(mp); 6084 (*mvp)->v_type = VMARKER; 6085 (*mvp)->v_mount = mp; 6086 6087 mtx_lock(&mp->mnt_listmtx); 6088 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 6089 if (vp == NULL) { 6090 mtx_unlock(&mp->mnt_listmtx); 6091 mnt_vnode_markerfree_active(mvp, mp); 6092 return (NULL); 6093 } 6094 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 6095 return (mnt_vnode_next_active(mvp, mp)); 6096 } 6097 6098 void 6099 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 6100 { 6101 6102 if (*mvp == NULL) 6103 return; 6104 6105 mtx_lock(&mp->mnt_listmtx); 6106 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6107 mtx_unlock(&mp->mnt_listmtx); 6108 mnt_vnode_markerfree_active(mvp, mp); 6109 } 6110
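
/*
 * Illustrative sketch (hypothetical filesystem-side code, not part of this
 * file): a consumer of the iterators above receives each vnode with its
 * interlock held and must either vget() it with LK_INTERLOCK or VI_UNLOCK()
 * it before continuing; an early exit must free the marker with
 * MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp):
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) != 0)
 *			continue;
 *		... per-vnode work ...
 *		vput(vp);
 *	}
 */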