1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/capsicum.h> 54 #include <sys/condvar.h> 55 #include <sys/conf.h> 56 #include <sys/counter.h> 57 #include <sys/dirent.h> 58 #include <sys/event.h> 59 #include <sys/eventhandler.h> 60 #include <sys/extattr.h> 61 #include <sys/file.h> 62 #include <sys/fcntl.h> 63 #include <sys/jail.h> 64 #include <sys/kdb.h> 65 #include <sys/kernel.h> 66 #include <sys/kthread.h> 67 #include <sys/ktr.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smp.h> 80 #include <sys/stat.h> 81 #include <sys/sysctl.h> 82 #include <sys/syslog.h> 83 #include <sys/vmmeter.h> 84 #include <sys/vnode.h> 85 #include <sys/watchdog.h> 86 87 #include <machine/stdarg.h> 88 89 #include <security/mac/mac_framework.h> 90 91 #include <vm/vm.h> 92 #include <vm/vm_object.h> 93 #include <vm/vm_extern.h> 94 #include <vm/pmap.h> 95 #include <vm/vm_map.h> 96 #include <vm/vm_page.h> 97 #include <vm/vm_kern.h> 98 #include <vm/uma.h> 99 100 #ifdef DDB 101 #include <ddb/ddb.h> 102 #endif 103 104 static void delmntque(struct vnode *vp); 105 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 106 int slpflag, int slptimeo); 107 static void syncer_shutdown(void *arg, int howto); 108 static int vtryrecycle(struct vnode *vp); 109 static void v_init_counters(struct vnode *); 110 static void v_incr_devcount(struct vnode *); 111 static void v_decr_devcount(struct vnode *); 112 static void vgonel(struct vnode *); 113 static void vfs_knllock(void *arg); 114 static void vfs_knlunlock(void *arg); 115 static void vfs_knl_assert_locked(void *arg); 116 static void vfs_knl_assert_unlocked(void *arg); 117 static void vnlru_return_batches(struct vfsops *mnt_op); 118 static void destroy_vpollinfo(struct vpollinfo *vi); 119 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 120 daddr_t startlbn, daddr_t endlbn); 121 122 /* 123 * These fences are intended for cases where some synchronization is 124 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt 125 * and v_usecount) updates. Access to v_iflags is generally synchronized 126 * by the interlock, but we have some internal assertions that check vnode 127 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only 128 * for now. 129 */ 130 #ifdef INVARIANTS 131 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq() 132 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() 133 #else 134 #define VNODE_REFCOUNT_FENCE_ACQ() 135 #define VNODE_REFCOUNT_FENCE_REL() 136 #endif 137 138 /* 139 * Number of vnodes in existence. Increased whenever getnewvnode() 140 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. 
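 *
 * (Illustrative aside; the snippet below is a sketch, not part of this
 * file: the counter is exported read-only below as vfs.numvnodes, so a
 * minimal userland reader might look like
 *
 *	unsigned long n;
 *	size_t len = sizeof(n);
 *	if (sysctlbyname("vfs.numvnodes", &n, &len, NULL, 0) == 0)
 *		printf("%lu vnodes\n", n);
 *
 * assuming <sys/types.h>, <sys/sysctl.h> and <stdio.h> are included.)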
 */
static unsigned long numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

static u_long mnt_free_list_batch = 128;
SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
    &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");
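/*
 * Illustrative arithmetic (an aside, not part of the original source):
 * with the defaults described above and, say, desiredvnodes = 100000,
 * vspace() below works the targets out as
 *
 *	wantfreevnodes = desiredvnodes / 4         = 25000
 *	gapvnodes = desiredvnodes - wantfreevnodes = 75000
 *	vhiwat = gapvnodes / 11                    =  6818	(~9%)
 *	vlowat = vhiwat / 2                        =  3409	(~4.5%)
 *
 * vnlru_proc() starts reclaiming when the spare space reported by
 * vspace() drops below vlowat and keeps working until it is back above
 * vhiwat.
 */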
/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS,
    &reassignbufcalls, 0, "Number of calls to reassignbuf");

static counter_u64_t free_owe_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
    "Number of times free vnodes kept on active list due to VFS "
    "owing inactivation");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds. Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data is
 * delayed. Similarly, directory updates are more critical, so are only
 * delayed about a third of the time that file data is delayed. Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at a
 * rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.
256 * Items that need to be processed soon are placed in this queue: 257 * 258 * syncer_workitem_pending[syncer_delayno] 259 * 260 * A delay of fifteen seconds is done by placing the request fifteen 261 * entries later in the queue: 262 * 263 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 264 * 265 */ 266 static int syncer_delayno; 267 static long syncer_mask; 268 LIST_HEAD(synclist, bufobj); 269 static struct synclist *syncer_workitem_pending; 270 /* 271 * The sync_mtx protects: 272 * bo->bo_synclist 273 * sync_vnode_count 274 * syncer_delayno 275 * syncer_state 276 * syncer_workitem_pending 277 * syncer_worklist_len 278 * rushjob 279 */ 280 static struct mtx sync_mtx; 281 static struct cv sync_wakeup; 282 283 #define SYNCER_MAXDELAY 32 284 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 285 static int syncdelay = 30; /* max time to delay syncing data */ 286 static int filedelay = 30; /* time to delay syncing files */ 287 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 288 "Time to delay syncing files (in seconds)"); 289 static int dirdelay = 29; /* time to delay syncing directories */ 290 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 291 "Time to delay syncing directories (in seconds)"); 292 static int metadelay = 28; /* time to delay syncing metadata */ 293 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 294 "Time to delay syncing metadata (in seconds)"); 295 static int rushjob; /* number of slots to run ASAP */ 296 static int stat_rush_requests; /* number of times I/O speeded up */ 297 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 298 "Number of times I/O speeded up (rush requests)"); 299 300 /* 301 * When shutting down the syncer, run it at four times normal speed. 302 */ 303 #define SYNCER_SHUTDOWN_SPEEDUP 4 304 static int sync_vnode_count; 305 static int syncer_worklist_len; 306 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 307 syncer_state; 308 309 /* Target for maximum number of vnodes. */ 310 int desiredvnodes; 311 static int gapvnodes; /* gap between wanted and desired */ 312 static int vhiwat; /* enough extras after expansion */ 313 static int vlowat; /* minimal extras before expansion */ 314 static int vstir; /* nonzero to stir non-free vnodes */ 315 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 316 317 static int 318 sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) 319 { 320 int error, old_desiredvnodes; 321 322 old_desiredvnodes = desiredvnodes; 323 if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0) 324 return (error); 325 if (old_desiredvnodes != desiredvnodes) { 326 wantfreevnodes = desiredvnodes / 4; 327 /* XXX locking seems to be incomplete. 
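 *
 * (Illustrative aside, not part of the original source: this handler
 * backs the kern.maxvnodes sysctl registered just below, so e.g.
 * "sysctl kern.maxvnodes=400000" ends up here, rescales wantfreevnodes
 * to desiredvnodes / 4 and resizes the VFS hash and the namecache via
 * vfs_hash_changesize() and cache_changesize().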
*/ 328 vfs_hash_changesize(desiredvnodes); 329 cache_changesize(desiredvnodes); 330 } 331 return (0); 332 } 333 334 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 335 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, 336 sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes"); 337 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 338 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 339 static int vnlru_nowhere; 340 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 341 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 342 343 static int 344 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 345 { 346 struct vnode *vp; 347 struct nameidata nd; 348 char *buf; 349 unsigned long ndflags; 350 int error; 351 352 if (req->newptr == NULL) 353 return (EINVAL); 354 if (req->newlen >= PATH_MAX) 355 return (E2BIG); 356 357 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 358 error = SYSCTL_IN(req, buf, req->newlen); 359 if (error != 0) 360 goto out; 361 362 buf[req->newlen] = '\0'; 363 364 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME; 365 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 366 if ((error = namei(&nd)) != 0) 367 goto out; 368 vp = nd.ni_vp; 369 370 if ((vp->v_iflag & VI_DOOMED) != 0) { 371 /* 372 * This vnode is being recycled. Return != 0 to let the caller 373 * know that the sysctl had no effect. Return EAGAIN because a 374 * subsequent call will likely succeed (since namei will create 375 * a new vnode if necessary) 376 */ 377 error = EAGAIN; 378 goto putvnode; 379 } 380 381 counter_u64_add(recycles_count, 1); 382 vgone(vp); 383 putvnode: 384 NDFREE(&nd, 0); 385 out: 386 free(buf, M_TEMP); 387 return (error); 388 } 389 390 static int 391 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 392 { 393 struct thread *td = curthread; 394 struct vnode *vp; 395 struct file *fp; 396 int error; 397 int fd; 398 399 if (req->newptr == NULL) 400 return (EBADF); 401 402 error = sysctl_handle_int(oidp, &fd, 0, req); 403 if (error != 0) 404 return (error); 405 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 406 if (error != 0) 407 return (error); 408 vp = fp->f_vnode; 409 410 error = vn_lock(vp, LK_EXCLUSIVE); 411 if (error != 0) 412 goto drop; 413 414 counter_u64_add(recycles_count, 1); 415 vgone(vp); 416 VOP_UNLOCK(vp, 0); 417 drop: 418 fdrop(fp, td); 419 return (error); 420 } 421 422 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 423 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 424 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 425 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 426 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 427 sysctl_ftry_reclaim_vnode, "I", 428 "Try to reclaim a vnode by its file descriptor"); 429 430 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 431 static int vnsz2log; 432 433 /* 434 * Support for the bufobj clean & dirty pctrie. 435 */ 436 static void * 437 buf_trie_alloc(struct pctrie *ptree) 438 { 439 440 return uma_zalloc(buf_trie_zone, M_NOWAIT); 441 } 442 443 static void 444 buf_trie_free(struct pctrie *ptree, void *node) 445 { 446 447 uma_zfree(buf_trie_zone, node); 448 } 449 PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 450 451 /* 452 * Initialize the vnode management data structures. 453 * 454 * Reevaluate the following cap on the number of vnodes after the physical 455 * memory size exceeds 512GB. 
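 * (Illustrative arithmetic, an aside not in the original text: 512GB is
 * 512 * 1024 * 1024 KB = 536870912 KB, and at the 64:1 KB-to-vnode ratio
 * mentioned below that is 8388608 vnodes, exactly the 8M value used for
 * MAXVNODES_MAX.)
 *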
In the limit, as the physical memory size 456 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 457 */ 458 #ifndef MAXVNODES_MAX 459 #define MAXVNODES_MAX (512 * 1024 * 1024 / 64) /* 8M */ 460 #endif 461 462 /* 463 * Initialize a vnode as it first enters the zone. 464 */ 465 static int 466 vnode_init(void *mem, int size, int flags) 467 { 468 struct vnode *vp; 469 470 vp = mem; 471 bzero(vp, size); 472 /* 473 * Setup locks. 474 */ 475 vp->v_vnlock = &vp->v_lock; 476 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 477 /* 478 * By default, don't allow shared locks unless filesystems opt-in. 479 */ 480 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 481 LK_NOSHARE | LK_IS_VNODE); 482 /* 483 * Initialize bufobj. 484 */ 485 bufobj_init(&vp->v_bufobj, vp); 486 /* 487 * Initialize namecache. 488 */ 489 LIST_INIT(&vp->v_cache_src); 490 TAILQ_INIT(&vp->v_cache_dst); 491 /* 492 * Initialize rangelocks. 493 */ 494 rangelock_init(&vp->v_rl); 495 return (0); 496 } 497 498 /* 499 * Free a vnode when it is cleared from the zone. 500 */ 501 static void 502 vnode_fini(void *mem, int size) 503 { 504 struct vnode *vp; 505 struct bufobj *bo; 506 507 vp = mem; 508 rangelock_destroy(&vp->v_rl); 509 lockdestroy(vp->v_vnlock); 510 mtx_destroy(&vp->v_interlock); 511 bo = &vp->v_bufobj; 512 rw_destroy(BO_LOCKPTR(bo)); 513 } 514 515 /* 516 * Provide the size of NFS nclnode and NFS fh for calculation of the 517 * vnode memory consumption. The size is specified directly to 518 * eliminate dependency on NFS-private header. 519 * 520 * Other filesystems may use bigger or smaller (like UFS and ZFS) 521 * private inode data, but the NFS-based estimation is ample enough. 522 * Still, we care about differences in the size between 64- and 32-bit 523 * platforms. 524 * 525 * Namecache structure size is heuristically 526 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 527 */ 528 #ifdef _LP64 529 #define NFS_NCLNODE_SZ (528 + 64) 530 #define NC_SZ 148 531 #else 532 #define NFS_NCLNODE_SZ (360 + 32) 533 #define NC_SZ 92 534 #endif 535 536 static void 537 vntblinit(void *dummy __unused) 538 { 539 u_int i; 540 int physvnodes, virtvnodes; 541 542 /* 543 * Desiredvnodes is a function of the physical memory size and the 544 * kernel's heap size. Generally speaking, it scales with the 545 * physical memory size. The ratio of desiredvnodes to the physical 546 * memory size is 1:16 until desiredvnodes exceeds 98,304. 547 * Thereafter, the 548 * marginal ratio of desiredvnodes to the physical memory size is 549 * 1:64. However, desiredvnodes is limited by the kernel's heap 550 * size. The memory required by desiredvnodes vnodes and vm objects 551 * must not exceed 1/10th of the kernel's heap size. 
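 *
 * (Illustrative arithmetic, an aside not in the original text: on a
 * machine with roughly 4GB of page memory, pgtok(vm_cnt.v_page_count)
 * is about 4194304 KB, so the physvnodes expression below comes to
 * roughly maxproc + 4194304 / 64 + 3 * 1572864 / 64, i.e.
 * maxproc + 65536 + 73728.  That is the marginal 1:64 ratio, since 4GB
 * is beyond the 1:16 region, which ends at 98,304 vnodes (about 1.5GB
 * of memory).)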
552 */ 553 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 554 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 555 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 556 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 557 desiredvnodes = min(physvnodes, virtvnodes); 558 if (desiredvnodes > MAXVNODES_MAX) { 559 if (bootverbose) 560 printf("Reducing kern.maxvnodes %d -> %d\n", 561 desiredvnodes, MAXVNODES_MAX); 562 desiredvnodes = MAXVNODES_MAX; 563 } 564 wantfreevnodes = desiredvnodes / 4; 565 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 566 TAILQ_INIT(&vnode_free_list); 567 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 568 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 569 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 570 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 571 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 572 /* 573 * Preallocate enough nodes to support one-per buf so that 574 * we can not fail an insert. reassignbuf() callers can not 575 * tolerate the insertion failure. 576 */ 577 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 578 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 579 UMA_ZONE_NOFREE | UMA_ZONE_VM); 580 uma_prealloc(buf_trie_zone, nbuf); 581 582 vnodes_created = counter_u64_alloc(M_WAITOK); 583 recycles_count = counter_u64_alloc(M_WAITOK); 584 free_owe_inact = counter_u64_alloc(M_WAITOK); 585 586 /* 587 * Initialize the filesystem syncer. 588 */ 589 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 590 &syncer_mask); 591 syncer_maxdelay = syncer_mask + 1; 592 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 593 cv_init(&sync_wakeup, "syncer"); 594 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 595 vnsz2log++; 596 vnsz2log--; 597 } 598 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 599 600 601 /* 602 * Mark a mount point as busy. Used to synchronize access and to delay 603 * unmounting. Eventually, mountlist_mtx is not released on failure. 604 * 605 * vfs_busy() is a custom lock, it can block the caller. 606 * vfs_busy() only sleeps if the unmount is active on the mount point. 607 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 608 * vnode belonging to mp. 609 * 610 * Lookup uses vfs_busy() to traverse mount points. 611 * root fs var fs 612 * / vnode lock A / vnode lock (/var) D 613 * /var vnode lock B /log vnode lock(/var/log) E 614 * vfs_busy lock C vfs_busy lock F 615 * 616 * Within each file system, the lock order is C->A->B and F->D->E. 617 * 618 * When traversing across mounts, the system follows that lock order: 619 * 620 * C->A->B 621 * | 622 * +->F->D->E 623 * 624 * The lookup() process for namei("/var") illustrates the process: 625 * VOP_LOOKUP() obtains B while A is held 626 * vfs_busy() obtains a shared lock on F while A and B are held 627 * vput() releases lock on B 628 * vput() releases lock on A 629 * VFS_ROOT() obtains lock on D while shared lock on F is held 630 * vfs_unbusy() releases shared lock on F 631 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 632 * Attempt to lock A (instead of vp_crossmp) while D is held would 633 * violate the global order, causing deadlocks. 634 * 635 * dounmount() locks B while F is drained. 
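 *
 * (Illustrative sketch, not part of the original comment: the mountlist
 * traversal pattern used by vnlru_proc() later in this file is
 *
 *	mtx_lock(&mountlist_mtx);
 *	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 *		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
 *			nmp = TAILQ_NEXT(mp, mnt_list);
 *			continue;
 *		}
 *		... work on mp while it is busied ...
 *		mtx_lock(&mountlist_mtx);
 *		nmp = TAILQ_NEXT(mp, mnt_list);
 *		vfs_unbusy(mp);
 *	}
 *	mtx_unlock(&mountlist_mtx);
 *
 * MBF_MNTLSTLOCK makes a successful vfs_busy() drop mountlist_mtx, so
 * the lock is re-taken before stepping to the next mount; on failure
 * with MBF_NOWAIT the lock is still held and iteration continues.)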
636 */ 637 int 638 vfs_busy(struct mount *mp, int flags) 639 { 640 641 MPASS((flags & ~MBF_MASK) == 0); 642 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 643 644 if (vfs_op_thread_enter(mp)) { 645 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 646 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 647 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 648 vfs_mp_count_add_pcpu(mp, ref, 1); 649 vfs_mp_count_add_pcpu(mp, lockref, 1); 650 vfs_op_thread_exit(mp); 651 if (flags & MBF_MNTLSTLOCK) 652 mtx_unlock(&mountlist_mtx); 653 return (0); 654 } 655 656 MNT_ILOCK(mp); 657 vfs_assert_mount_counters(mp); 658 MNT_REF(mp); 659 /* 660 * If mount point is currently being unmounted, sleep until the 661 * mount point fate is decided. If thread doing the unmounting fails, 662 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 663 * that this mount point has survived the unmount attempt and vfs_busy 664 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 665 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 666 * about to be really destroyed. vfs_busy needs to release its 667 * reference on the mount point in this case and return with ENOENT, 668 * telling the caller that mount mount it tried to busy is no longer 669 * valid. 670 */ 671 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 672 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 673 MNT_REL(mp); 674 MNT_IUNLOCK(mp); 675 CTR1(KTR_VFS, "%s: failed busying before sleeping", 676 __func__); 677 return (ENOENT); 678 } 679 if (flags & MBF_MNTLSTLOCK) 680 mtx_unlock(&mountlist_mtx); 681 mp->mnt_kern_flag |= MNTK_MWAIT; 682 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 683 if (flags & MBF_MNTLSTLOCK) 684 mtx_lock(&mountlist_mtx); 685 MNT_ILOCK(mp); 686 } 687 if (flags & MBF_MNTLSTLOCK) 688 mtx_unlock(&mountlist_mtx); 689 mp->mnt_lockref++; 690 MNT_IUNLOCK(mp); 691 return (0); 692 } 693 694 /* 695 * Free a busy filesystem. 696 */ 697 void 698 vfs_unbusy(struct mount *mp) 699 { 700 int c; 701 702 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 703 704 if (vfs_op_thread_enter(mp)) { 705 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 706 vfs_mp_count_sub_pcpu(mp, lockref, 1); 707 vfs_mp_count_sub_pcpu(mp, ref, 1); 708 vfs_op_thread_exit(mp); 709 return; 710 } 711 712 MNT_ILOCK(mp); 713 vfs_assert_mount_counters(mp); 714 MNT_REL(mp); 715 c = --mp->mnt_lockref; 716 if (mp->mnt_vfs_ops == 0) { 717 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 718 MNT_IUNLOCK(mp); 719 return; 720 } 721 if (c < 0) 722 vfs_dump_mount_counters(mp); 723 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 724 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 725 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 726 mp->mnt_kern_flag &= ~MNTK_DRAINING; 727 wakeup(&mp->mnt_lockref); 728 } 729 MNT_IUNLOCK(mp); 730 } 731 732 /* 733 * Lookup a mount point by filesystem identifier. 734 */ 735 struct mount * 736 vfs_getvfs(fsid_t *fsid) 737 { 738 struct mount *mp; 739 740 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 741 mtx_lock(&mountlist_mtx); 742 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 743 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 744 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 745 vfs_ref(mp); 746 mtx_unlock(&mountlist_mtx); 747 return (mp); 748 } 749 } 750 mtx_unlock(&mountlist_mtx); 751 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 752 return ((struct mount *) 0); 753 } 754 755 /* 756 * Lookup a mount point by filesystem identifier, busying it before 757 * returning. 
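 *
 * (Clarifying aside, not part of the original comment: plain vfs_getvfs()
 * above only takes a reference with vfs_ref(), which callers such as
 * vfs_getnewfsid() later drop with vfs_rel(); this variant instead
 * returns the mount busied, so callers pair it with vfs_unbusy().)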
758 * 759 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 760 * cache for popular filesystem identifiers. The cache is lockess, using 761 * the fact that struct mount's are never freed. In worst case we may 762 * get pointer to unmounted or even different filesystem, so we have to 763 * check what we got, and go slow way if so. 764 */ 765 struct mount * 766 vfs_busyfs(fsid_t *fsid) 767 { 768 #define FSID_CACHE_SIZE 256 769 typedef struct mount * volatile vmp_t; 770 static vmp_t cache[FSID_CACHE_SIZE]; 771 struct mount *mp; 772 int error; 773 uint32_t hash; 774 775 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 776 hash = fsid->val[0] ^ fsid->val[1]; 777 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 778 mp = cache[hash]; 779 if (mp == NULL || 780 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 781 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 782 goto slow; 783 if (vfs_busy(mp, 0) != 0) { 784 cache[hash] = NULL; 785 goto slow; 786 } 787 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 788 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 789 return (mp); 790 else 791 vfs_unbusy(mp); 792 793 slow: 794 mtx_lock(&mountlist_mtx); 795 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 796 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 797 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 798 error = vfs_busy(mp, MBF_MNTLSTLOCK); 799 if (error) { 800 cache[hash] = NULL; 801 mtx_unlock(&mountlist_mtx); 802 return (NULL); 803 } 804 cache[hash] = mp; 805 return (mp); 806 } 807 } 808 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 809 mtx_unlock(&mountlist_mtx); 810 return ((struct mount *) 0); 811 } 812 813 /* 814 * Check if a user can access privileged mount options. 815 */ 816 int 817 vfs_suser(struct mount *mp, struct thread *td) 818 { 819 int error; 820 821 if (jailed(td->td_ucred)) { 822 /* 823 * If the jail of the calling thread lacks permission for 824 * this type of file system, deny immediately. 825 */ 826 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 827 return (EPERM); 828 829 /* 830 * If the file system was mounted outside the jail of the 831 * calling thread, deny immediately. 832 */ 833 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 834 return (EPERM); 835 } 836 837 /* 838 * If file system supports delegated administration, we don't check 839 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 840 * by the file system itself. 841 * If this is not the user that did original mount, we check for 842 * the PRIV_VFS_MOUNT_OWNER privilege. 843 */ 844 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 845 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 846 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 847 return (error); 848 } 849 return (0); 850 } 851 852 /* 853 * Get a new unique fsid. Try to make its val[0] unique, since this value 854 * will be used to create fake device numbers for stat(). Also try (but 855 * not so hard) make its val[0] unique mod 2^16, since some emulators only 856 * support 16-bit device numbers. We end up with unique val[0]'s for the 857 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 858 * 859 * Keep in mind that several mounts may be running in parallel. Starting 860 * the search one past where the previous search terminated is both a 861 * micro-optimization and a defense against returning the same fsid to 862 * different mounts. 
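 *
 * (Illustrative aside, not part of the original comment: in the loop
 * below each candidate val[0] is makedev(255, x), where x packs the
 * low byte of the filesystem type into bits 24..31 and the 16-bit
 * mntid_base counter into bits 16..23 (high byte) and 0..7 (low byte),
 * while val[1] is simply the filesystem type number.)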
863 */ 864 void 865 vfs_getnewfsid(struct mount *mp) 866 { 867 static uint16_t mntid_base; 868 struct mount *nmp; 869 fsid_t tfsid; 870 int mtype; 871 872 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 873 mtx_lock(&mntid_mtx); 874 mtype = mp->mnt_vfc->vfc_typenum; 875 tfsid.val[1] = mtype; 876 mtype = (mtype & 0xFF) << 24; 877 for (;;) { 878 tfsid.val[0] = makedev(255, 879 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 880 mntid_base++; 881 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 882 break; 883 vfs_rel(nmp); 884 } 885 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 886 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 887 mtx_unlock(&mntid_mtx); 888 } 889 890 /* 891 * Knob to control the precision of file timestamps: 892 * 893 * 0 = seconds only; nanoseconds zeroed. 894 * 1 = seconds and nanoseconds, accurate within 1/HZ. 895 * 2 = seconds and nanoseconds, truncated to microseconds. 896 * >=3 = seconds and nanoseconds, maximum precision. 897 */ 898 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 899 900 static int timestamp_precision = TSP_USEC; 901 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 902 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 903 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 904 "3+: sec + ns (max. precision))"); 905 906 /* 907 * Get a current timestamp. 908 */ 909 void 910 vfs_timestamp(struct timespec *tsp) 911 { 912 struct timeval tv; 913 914 switch (timestamp_precision) { 915 case TSP_SEC: 916 tsp->tv_sec = time_second; 917 tsp->tv_nsec = 0; 918 break; 919 case TSP_HZ: 920 getnanotime(tsp); 921 break; 922 case TSP_USEC: 923 microtime(&tv); 924 TIMEVAL_TO_TIMESPEC(&tv, tsp); 925 break; 926 case TSP_NSEC: 927 default: 928 nanotime(tsp); 929 break; 930 } 931 } 932 933 /* 934 * Set vnode attributes to VNOVAL 935 */ 936 void 937 vattr_null(struct vattr *vap) 938 { 939 940 vap->va_type = VNON; 941 vap->va_size = VNOVAL; 942 vap->va_bytes = VNOVAL; 943 vap->va_mode = VNOVAL; 944 vap->va_nlink = VNOVAL; 945 vap->va_uid = VNOVAL; 946 vap->va_gid = VNOVAL; 947 vap->va_fsid = VNOVAL; 948 vap->va_fileid = VNOVAL; 949 vap->va_blocksize = VNOVAL; 950 vap->va_rdev = VNOVAL; 951 vap->va_atime.tv_sec = VNOVAL; 952 vap->va_atime.tv_nsec = VNOVAL; 953 vap->va_mtime.tv_sec = VNOVAL; 954 vap->va_mtime.tv_nsec = VNOVAL; 955 vap->va_ctime.tv_sec = VNOVAL; 956 vap->va_ctime.tv_nsec = VNOVAL; 957 vap->va_birthtime.tv_sec = VNOVAL; 958 vap->va_birthtime.tv_nsec = VNOVAL; 959 vap->va_flags = VNOVAL; 960 vap->va_gen = VNOVAL; 961 vap->va_vaflags = 0; 962 } 963 964 /* 965 * This routine is called when we have too many vnodes. It attempts 966 * to free <count> vnodes and will potentially free vnodes that still 967 * have VM backing store (VM backing store is typically the cause 968 * of a vnode blowout so we want to do this). Therefore, this operation 969 * is not considered cheap. 970 * 971 * A number of conditions may prevent a vnode from being reclaimed. 972 * the buffer cache may have references on the vnode, a directory 973 * vnode may still have references due to the namei cache representing 974 * underlying files, or the vnode may be in active use. It is not 975 * desirable to reuse such vnodes. These conditions may cause the 976 * number of vnodes to reach some minimum value regardless of what 977 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
 *
 * @param mp		 Try to reclaim vnodes from this mountpoint
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
{
	struct vnode *vp;
	int count, done, target;

	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize;
	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
	target = target / 10 + 1;
	while (count != 0 && done < target) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		/*
		 * XXX LRU is completely broken for non-free vnodes.  First
		 * by calling here in mountpoint order, then by moving
		 * unselected vnodes to the end here, and most grossly by
		 * removing the vlruvp() function that was supposed to
		 * maintain the order.  (This function was born broken
		 * since syncer problems prevented it doing anything.)  The
		 * order is closer to LRC (C = Created).
		 *
		 * LRU reclaiming of vnodes seems to have last worked in
		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
		 * Then there was no hold count, and inactive vnodes were
		 * simply put on the free list in LRU order.  The separate
		 * lists also break LRU.  We prefer to reclaim from the
		 * free list for technical reasons.  This tends to thrash
		 * the free list to keep very unrecently used held vnodes.
		 * The problem is mitigated by keeping the free list large.
		 */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    ((vp->v_iflag & VI_FREE) != 0) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
1059 */ 1060 if (vp->v_usecount || 1061 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1062 (vp->v_iflag & VI_FREE) != 0 || 1063 (vp->v_object != NULL && 1064 vp->v_object->resident_page_count > trigger)) { 1065 VOP_UNLOCK(vp, 0); 1066 vdropl(vp); 1067 goto next_iter_mntunlocked; 1068 } 1069 KASSERT((vp->v_iflag & VI_DOOMED) == 0, 1070 ("VI_DOOMED unexpectedly detected in vlrureclaim()")); 1071 counter_u64_add(recycles_count, 1); 1072 vgonel(vp); 1073 VOP_UNLOCK(vp, 0); 1074 vdropl(vp); 1075 done++; 1076 next_iter_mntunlocked: 1077 if (!should_yield()) 1078 goto relock_mnt; 1079 goto yield; 1080 next_iter: 1081 if (!should_yield()) 1082 continue; 1083 MNT_IUNLOCK(mp); 1084 yield: 1085 kern_yield(PRI_USER); 1086 relock_mnt: 1087 MNT_ILOCK(mp); 1088 } 1089 MNT_IUNLOCK(mp); 1090 vn_finished_write(mp); 1091 return done; 1092 } 1093 1094 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1095 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1096 0, 1097 "limit on vnode free requests per call to the vnlru_free routine"); 1098 1099 /* 1100 * Attempt to reduce the free list by the requested amount. 1101 */ 1102 static void 1103 vnlru_free_locked(int count, struct vfsops *mnt_op) 1104 { 1105 struct vnode *vp; 1106 struct mount *mp; 1107 bool tried_batches; 1108 1109 tried_batches = false; 1110 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1111 if (count > max_vnlru_free) 1112 count = max_vnlru_free; 1113 for (; count > 0; count--) { 1114 vp = TAILQ_FIRST(&vnode_free_list); 1115 /* 1116 * The list can be modified while the free_list_mtx 1117 * has been dropped and vp could be NULL here. 1118 */ 1119 if (vp == NULL) { 1120 if (tried_batches) 1121 break; 1122 mtx_unlock(&vnode_free_list_mtx); 1123 vnlru_return_batches(mnt_op); 1124 tried_batches = true; 1125 mtx_lock(&vnode_free_list_mtx); 1126 continue; 1127 } 1128 1129 VNASSERT(vp->v_op != NULL, vp, 1130 ("vnlru_free: vnode already reclaimed.")); 1131 KASSERT((vp->v_iflag & VI_FREE) != 0, 1132 ("Removing vnode not on freelist")); 1133 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1134 ("Mangling active vnode")); 1135 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 1136 1137 /* 1138 * Don't recycle if our vnode is from different type 1139 * of mount point. Note that mp is type-safe, the 1140 * check does not reach unmapped address even if 1141 * vnode is reclaimed. 1142 * Don't recycle if we can't get the interlock without 1143 * blocking. 1144 */ 1145 if ((mnt_op != NULL && (mp = vp->v_mount) != NULL && 1146 mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) { 1147 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 1148 continue; 1149 } 1150 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 1151 vp, ("vp inconsistent on freelist")); 1152 1153 /* 1154 * The clear of VI_FREE prevents activation of the 1155 * vnode. There is no sense in putting the vnode on 1156 * the mount point active list, only to remove it 1157 * later during recycling. Inline the relevant part 1158 * of vholdl(), to avoid triggering assertions or 1159 * activating. 1160 */ 1161 freevnodes--; 1162 vp->v_iflag &= ~VI_FREE; 1163 VNODE_REFCOUNT_FENCE_REL(); 1164 refcount_acquire(&vp->v_holdcnt); 1165 1166 mtx_unlock(&vnode_free_list_mtx); 1167 VI_UNLOCK(vp); 1168 vtryrecycle(vp); 1169 /* 1170 * If the recycled succeeded this vdrop will actually free 1171 * the vnode. If not it will simply place it back on 1172 * the free list. 
1173 */ 1174 vdrop(vp); 1175 mtx_lock(&vnode_free_list_mtx); 1176 } 1177 } 1178 1179 void 1180 vnlru_free(int count, struct vfsops *mnt_op) 1181 { 1182 1183 mtx_lock(&vnode_free_list_mtx); 1184 vnlru_free_locked(count, mnt_op); 1185 mtx_unlock(&vnode_free_list_mtx); 1186 } 1187 1188 1189 /* XXX some names and initialization are bad for limits and watermarks. */ 1190 static int 1191 vspace(void) 1192 { 1193 int space; 1194 1195 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1196 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1197 vlowat = vhiwat / 2; 1198 if (numvnodes > desiredvnodes) 1199 return (0); 1200 space = desiredvnodes - numvnodes; 1201 if (freevnodes > wantfreevnodes) 1202 space += freevnodes - wantfreevnodes; 1203 return (space); 1204 } 1205 1206 static void 1207 vnlru_return_batch_locked(struct mount *mp) 1208 { 1209 struct vnode *vp; 1210 1211 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 1212 1213 if (mp->mnt_tmpfreevnodelistsize == 0) 1214 return; 1215 1216 TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) { 1217 VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp, 1218 ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist")); 1219 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 1220 } 1221 mtx_lock(&vnode_free_list_mtx); 1222 TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist); 1223 freevnodes += mp->mnt_tmpfreevnodelistsize; 1224 mtx_unlock(&vnode_free_list_mtx); 1225 mp->mnt_tmpfreevnodelistsize = 0; 1226 } 1227 1228 static void 1229 vnlru_return_batch(struct mount *mp) 1230 { 1231 1232 mtx_lock(&mp->mnt_listmtx); 1233 vnlru_return_batch_locked(mp); 1234 mtx_unlock(&mp->mnt_listmtx); 1235 } 1236 1237 static void 1238 vnlru_return_batches(struct vfsops *mnt_op) 1239 { 1240 struct mount *mp, *nmp; 1241 bool need_unbusy; 1242 1243 mtx_lock(&mountlist_mtx); 1244 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1245 need_unbusy = false; 1246 if (mnt_op != NULL && mp->mnt_op != mnt_op) 1247 goto next; 1248 if (mp->mnt_tmpfreevnodelistsize == 0) 1249 goto next; 1250 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) { 1251 vnlru_return_batch(mp); 1252 need_unbusy = true; 1253 mtx_lock(&mountlist_mtx); 1254 } 1255 next: 1256 nmp = TAILQ_NEXT(mp, mnt_list); 1257 if (need_unbusy) 1258 vfs_unbusy(mp); 1259 } 1260 mtx_unlock(&mountlist_mtx); 1261 } 1262 1263 /* 1264 * Attempt to recycle vnodes in a context that is always safe to block. 1265 * Calling vlrurecycle() from the bowels of filesystem code has some 1266 * interesting deadlock problems. 1267 */ 1268 static struct proc *vnlruproc; 1269 static int vnlruproc_sig; 1270 1271 static void 1272 vnlru_proc(void) 1273 { 1274 struct mount *mp, *nmp; 1275 unsigned long onumvnodes; 1276 int done, force, trigger, usevnodes; 1277 bool reclaim_nc_src; 1278 1279 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1280 SHUTDOWN_PRI_FIRST); 1281 1282 force = 0; 1283 for (;;) { 1284 kproc_suspend_check(vnlruproc); 1285 mtx_lock(&vnode_free_list_mtx); 1286 /* 1287 * If numvnodes is too large (due to desiredvnodes being 1288 * adjusted using its sysctl, or emergency growth), first 1289 * try to reduce it by discarding from the free list. 1290 */ 1291 if (numvnodes > desiredvnodes) 1292 vnlru_free_locked(numvnodes - desiredvnodes, NULL); 1293 /* 1294 * Sleep if the vnode cache is in a good state. This is 1295 * when it is not over-full and has space for about a 4% 1296 * or 9% expansion (by growing its size or inexcessively 1297 * reducing its free list). 
Otherwise, try to reclaim 1298 * space for a 10% expansion. 1299 */ 1300 if (vstir && force == 0) { 1301 force = 1; 1302 vstir = 0; 1303 } 1304 if (vspace() >= vlowat && force == 0) { 1305 vnlruproc_sig = 0; 1306 wakeup(&vnlruproc_sig); 1307 msleep(vnlruproc, &vnode_free_list_mtx, 1308 PVFS|PDROP, "vlruwt", hz); 1309 continue; 1310 } 1311 mtx_unlock(&vnode_free_list_mtx); 1312 done = 0; 1313 onumvnodes = numvnodes; 1314 /* 1315 * Calculate parameters for recycling. These are the same 1316 * throughout the loop to give some semblance of fairness. 1317 * The trigger point is to avoid recycling vnodes with lots 1318 * of resident pages. We aren't trying to free memory; we 1319 * are trying to recycle or at least free vnodes. 1320 */ 1321 if (numvnodes <= desiredvnodes) 1322 usevnodes = numvnodes - freevnodes; 1323 else 1324 usevnodes = numvnodes; 1325 if (usevnodes <= 0) 1326 usevnodes = 1; 1327 /* 1328 * The trigger value is is chosen to give a conservatively 1329 * large value to ensure that it alone doesn't prevent 1330 * making progress. The value can easily be so large that 1331 * it is effectively infinite in some congested and 1332 * misconfigured cases, and this is necessary. Normally 1333 * it is about 8 to 100 (pages), which is quite large. 1334 */ 1335 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1336 if (force < 2) 1337 trigger = vsmalltrigger; 1338 reclaim_nc_src = force >= 3; 1339 mtx_lock(&mountlist_mtx); 1340 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1341 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 1342 nmp = TAILQ_NEXT(mp, mnt_list); 1343 continue; 1344 } 1345 done += vlrureclaim(mp, reclaim_nc_src, trigger); 1346 mtx_lock(&mountlist_mtx); 1347 nmp = TAILQ_NEXT(mp, mnt_list); 1348 vfs_unbusy(mp); 1349 } 1350 mtx_unlock(&mountlist_mtx); 1351 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1352 uma_reclaim(UMA_RECLAIM_DRAIN); 1353 if (done == 0) { 1354 if (force == 0 || force == 1) { 1355 force = 2; 1356 continue; 1357 } 1358 if (force == 2) { 1359 force = 3; 1360 continue; 1361 } 1362 force = 0; 1363 vnlru_nowhere++; 1364 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1365 } else 1366 kern_yield(PRI_USER); 1367 /* 1368 * After becoming active to expand above low water, keep 1369 * active until above high water. 1370 */ 1371 force = vspace() < vhiwat; 1372 } 1373 } 1374 1375 static struct kproc_desc vnlru_kp = { 1376 "vnlru", 1377 vnlru_proc, 1378 &vnlruproc 1379 }; 1380 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1381 &vnlru_kp); 1382 1383 /* 1384 * Routines having to do with the management of the vnode table. 1385 */ 1386 1387 /* 1388 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1389 * before we actually vgone(). This function must be called with the vnode 1390 * held to prevent the vnode from being returned to the free list midway 1391 * through vgone(). 1392 */ 1393 static int 1394 vtryrecycle(struct vnode *vp) 1395 { 1396 struct mount *vnmp; 1397 1398 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1399 VNASSERT(vp->v_holdcnt, vp, 1400 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1401 /* 1402 * This vnode may found and locked via some other list, if so we 1403 * can't recycle it yet. 1404 */ 1405 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1406 CTR2(KTR_VFS, 1407 "%s: impossible to recycle, vp %p lock is already held", 1408 __func__, vp); 1409 return (EWOULDBLOCK); 1410 } 1411 /* 1412 * Don't recycle if its filesystem is being suspended. 
1413 */ 1414 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1415 VOP_UNLOCK(vp, 0); 1416 CTR2(KTR_VFS, 1417 "%s: impossible to recycle, cannot start the write for %p", 1418 __func__, vp); 1419 return (EBUSY); 1420 } 1421 /* 1422 * If we got this far, we need to acquire the interlock and see if 1423 * anyone picked up this vnode from another list. If not, we will 1424 * mark it with DOOMED via vgonel() so that anyone who does find it 1425 * will skip over it. 1426 */ 1427 VI_LOCK(vp); 1428 if (vp->v_usecount) { 1429 VOP_UNLOCK(vp, 0); 1430 VI_UNLOCK(vp); 1431 vn_finished_write(vnmp); 1432 CTR2(KTR_VFS, 1433 "%s: impossible to recycle, %p is already referenced", 1434 __func__, vp); 1435 return (EBUSY); 1436 } 1437 if ((vp->v_iflag & VI_DOOMED) == 0) { 1438 counter_u64_add(recycles_count, 1); 1439 vgonel(vp); 1440 } 1441 VOP_UNLOCK(vp, 0); 1442 VI_UNLOCK(vp); 1443 vn_finished_write(vnmp); 1444 return (0); 1445 } 1446 1447 static void 1448 vcheckspace(void) 1449 { 1450 1451 if (vspace() < vlowat && vnlruproc_sig == 0) { 1452 vnlruproc_sig = 1; 1453 wakeup(vnlruproc); 1454 } 1455 } 1456 1457 /* 1458 * Wait if necessary for space for a new vnode. 1459 */ 1460 static int 1461 getnewvnode_wait(int suspended) 1462 { 1463 1464 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1465 if (numvnodes >= desiredvnodes) { 1466 if (suspended) { 1467 /* 1468 * The file system is being suspended. We cannot 1469 * risk a deadlock here, so allow allocation of 1470 * another vnode even if this would give too many. 1471 */ 1472 return (0); 1473 } 1474 if (vnlruproc_sig == 0) { 1475 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1476 wakeup(vnlruproc); 1477 } 1478 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1479 "vlruwk", hz); 1480 } 1481 /* Post-adjust like the pre-adjust in getnewvnode(). */ 1482 if (numvnodes + 1 > desiredvnodes && freevnodes > 1) 1483 vnlru_free_locked(1, NULL); 1484 return (numvnodes >= desiredvnodes ? ENFILE : 0); 1485 } 1486 1487 /* 1488 * This hack is fragile, and probably not needed any more now that the 1489 * watermark handling works. 1490 */ 1491 void 1492 getnewvnode_reserve(u_int count) 1493 { 1494 struct thread *td; 1495 1496 /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */ 1497 /* XXX no longer so quick, but this part is not racy. */ 1498 mtx_lock(&vnode_free_list_mtx); 1499 if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes) 1500 vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes, 1501 freevnodes - wantfreevnodes), NULL); 1502 mtx_unlock(&vnode_free_list_mtx); 1503 1504 td = curthread; 1505 /* First try to be quick and racy. */ 1506 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1507 td->td_vp_reserv += count; 1508 vcheckspace(); /* XXX no longer so quick, but more racy */ 1509 return; 1510 } else 1511 atomic_subtract_long(&numvnodes, count); 1512 1513 mtx_lock(&vnode_free_list_mtx); 1514 while (count > 0) { 1515 if (getnewvnode_wait(0) == 0) { 1516 count--; 1517 td->td_vp_reserv++; 1518 atomic_add_long(&numvnodes, 1); 1519 } 1520 } 1521 vcheckspace(); 1522 mtx_unlock(&vnode_free_list_mtx); 1523 } 1524 1525 /* 1526 * This hack is fragile, especially if desiredvnodes or wantvnodes are 1527 * misconfgured or changed significantly. Reducing desiredvnodes below 1528 * the reserved amount should cause bizarre behaviour like reducing it 1529 * below the number of active vnodes -- the system will try to reduce 1530 * numvnodes to match, but should fail, so the subtraction below should 1531 * not overflow. 
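 *
 * (Illustrative sketch, not part of the original source; the "myfs"
 * names below are placeholders for a caller's own code:
 *
 *	getnewvnode_reserve(1);
 *	... enter a context where sleeping in getnewvnode() is unwanted ...
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	...
 *	getnewvnode_drop_reserve();
 *
 * getnewvnode() consumes the reservation from curthread->td_vp_reserv,
 * and getnewvnode_drop_reserve() below returns any unused remainder.)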
 */
void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;
	static int cyclecount;
	int error __unused;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	if (numvnodes < desiredvnodes)
		cyclecount = 0;
	else if (cyclecount++ >= freevnodes) {
		cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (numvnodes + 1 <= desiredvnodes)
		;
	else if (freevnodes > 0)
		vnlru_free_locked(1, NULL);
	else {
		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
		    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (error != 0) {
			mtx_unlock(&vnode_free_list_mtx);
			return (error);
		}
#endif
	}
	vcheckspace();
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	counter_u64_add(vnodes_created, 1);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name. Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another. We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
	if (lo->lo_name != tag) {
		lo->lo_name = tag;
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
1626 */ 1627 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1628 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1629 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1630 vp->v_type = VNON; 1631 vp->v_tag = tag; 1632 vp->v_op = vops; 1633 v_init_counters(vp); 1634 vp->v_bufobj.bo_ops = &buf_ops_bio; 1635 #ifdef DIAGNOSTIC 1636 if (mp == NULL && vops != &dead_vnodeops) 1637 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1638 #endif 1639 #ifdef MAC 1640 mac_vnode_init(vp); 1641 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1642 mac_vnode_associate_singlelabel(mp, vp); 1643 #endif 1644 if (mp != NULL) { 1645 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1646 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1647 vp->v_vflag |= VV_NOKNOTE; 1648 } 1649 1650 /* 1651 * For the filesystems which do not use vfs_hash_insert(), 1652 * still initialize v_hash to have vfs_hash_index() useful. 1653 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1654 * its own hashing. 1655 */ 1656 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1657 1658 *vpp = vp; 1659 return (0); 1660 } 1661 1662 /* 1663 * Delete from old mount point vnode list, if on one. 1664 */ 1665 static void 1666 delmntque(struct vnode *vp) 1667 { 1668 struct mount *mp; 1669 int active; 1670 1671 mp = vp->v_mount; 1672 if (mp == NULL) 1673 return; 1674 MNT_ILOCK(mp); 1675 VI_LOCK(vp); 1676 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1677 ("Active vnode list size %d > Vnode list size %d", 1678 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1679 active = vp->v_iflag & VI_ACTIVE; 1680 vp->v_iflag &= ~VI_ACTIVE; 1681 if (active) { 1682 mtx_lock(&mp->mnt_listmtx); 1683 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1684 mp->mnt_activevnodelistsize--; 1685 mtx_unlock(&mp->mnt_listmtx); 1686 } 1687 vp->v_mount = NULL; 1688 VI_UNLOCK(vp); 1689 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1690 ("bad mount point vnode list size")); 1691 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1692 mp->mnt_nvnodelistsize--; 1693 MNT_REL(mp); 1694 MNT_IUNLOCK(mp); 1695 } 1696 1697 static void 1698 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1699 { 1700 1701 vp->v_data = NULL; 1702 vp->v_op = &dead_vnodeops; 1703 vgone(vp); 1704 vput(vp); 1705 } 1706 1707 /* 1708 * Insert into list of vnodes for the new mount point, if available. 1709 */ 1710 int 1711 insmntque1(struct vnode *vp, struct mount *mp, 1712 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1713 { 1714 1715 KASSERT(vp->v_mount == NULL, 1716 ("insmntque: vnode already on per mount vnode list")); 1717 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1718 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1719 1720 /* 1721 * We acquire the vnode interlock early to ensure that the 1722 * vnode cannot be recycled by another process releasing a 1723 * holdcnt on it before we get it on both the vnode list 1724 * and the active vnode list. The mount mutex protects only 1725 * manipulation of the vnode list and the vnode freelist 1726 * mutex protects only manipulation of the active vnode list. 1727 * Hence the need to hold the vnode interlock throughout. 
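 *
 * (Illustrative sketch, not part of the original comment; the "myfs"
 * names are placeholders for a caller's own code.  The usual caller-side
 * pattern is
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error == 0) {
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *		vp->v_data = myfs_node;
 *		error = insmntque(vp, mp);
 *		if (error != 0)
 *			vp = NULL;
 *	}
 *
 * If insmntque() fails it has already run insmntque_stddtr(), which
 * clears v_data, switches the vnode to dead_vnodeops and vgone()s and
 * vput()s it, so the caller must not touch vp again.)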
1728 */ 1729 MNT_ILOCK(mp); 1730 VI_LOCK(vp); 1731 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1732 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1733 mp->mnt_nvnodelistsize == 0)) && 1734 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1735 VI_UNLOCK(vp); 1736 MNT_IUNLOCK(mp); 1737 if (dtr != NULL) 1738 dtr(vp, dtr_arg); 1739 return (EBUSY); 1740 } 1741 vp->v_mount = mp; 1742 MNT_REF(mp); 1743 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1744 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1745 ("neg mount point vnode list size")); 1746 mp->mnt_nvnodelistsize++; 1747 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1748 ("Activating already active vnode")); 1749 vp->v_iflag |= VI_ACTIVE; 1750 mtx_lock(&mp->mnt_listmtx); 1751 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1752 mp->mnt_activevnodelistsize++; 1753 mtx_unlock(&mp->mnt_listmtx); 1754 VI_UNLOCK(vp); 1755 MNT_IUNLOCK(mp); 1756 return (0); 1757 } 1758 1759 int 1760 insmntque(struct vnode *vp, struct mount *mp) 1761 { 1762 1763 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1764 } 1765 1766 /* 1767 * Flush out and invalidate all buffers associated with a bufobj 1768 * Called with the underlying object locked. 1769 */ 1770 int 1771 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1772 { 1773 int error; 1774 1775 BO_LOCK(bo); 1776 if (flags & V_SAVE) { 1777 error = bufobj_wwait(bo, slpflag, slptimeo); 1778 if (error) { 1779 BO_UNLOCK(bo); 1780 return (error); 1781 } 1782 if (bo->bo_dirty.bv_cnt > 0) { 1783 BO_UNLOCK(bo); 1784 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1785 return (error); 1786 /* 1787 * XXX We could save a lock/unlock if this was only 1788 * enabled under INVARIANTS 1789 */ 1790 BO_LOCK(bo); 1791 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1792 panic("vinvalbuf: dirty bufs"); 1793 } 1794 } 1795 /* 1796 * If you alter this loop please notice that interlock is dropped and 1797 * reacquired in flushbuflist. Special care is needed to ensure that 1798 * no race conditions occur from this. 1799 */ 1800 do { 1801 error = flushbuflist(&bo->bo_clean, 1802 flags, bo, slpflag, slptimeo); 1803 if (error == 0 && !(flags & V_CLEANONLY)) 1804 error = flushbuflist(&bo->bo_dirty, 1805 flags, bo, slpflag, slptimeo); 1806 if (error != 0 && error != EAGAIN) { 1807 BO_UNLOCK(bo); 1808 return (error); 1809 } 1810 } while (error != 0); 1811 1812 /* 1813 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1814 * have write I/O in-progress but if there is a VM object then the 1815 * VM object can also have read-I/O in-progress. 1816 */ 1817 do { 1818 bufobj_wwait(bo, 0, 0); 1819 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1820 BO_UNLOCK(bo); 1821 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1822 BO_LOCK(bo); 1823 } 1824 } while (bo->bo_numoutput > 0); 1825 BO_UNLOCK(bo); 1826 1827 /* 1828 * Destroy the copy in the VM cache, too. 1829 */ 1830 if (bo->bo_object != NULL && 1831 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1832 VM_OBJECT_WLOCK(bo->bo_object); 1833 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 
1834 OBJPR_CLEANONLY : 0); 1835 VM_OBJECT_WUNLOCK(bo->bo_object); 1836 } 1837 1838 #ifdef INVARIANTS 1839 BO_LOCK(bo); 1840 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1841 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1842 bo->bo_clean.bv_cnt > 0)) 1843 panic("vinvalbuf: flush failed"); 1844 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1845 bo->bo_dirty.bv_cnt > 0) 1846 panic("vinvalbuf: flush dirty failed"); 1847 BO_UNLOCK(bo); 1848 #endif 1849 return (0); 1850 } 1851 1852 /* 1853 * Flush out and invalidate all buffers associated with a vnode. 1854 * Called with the underlying object locked. 1855 */ 1856 int 1857 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1858 { 1859 1860 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1861 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1862 if (vp->v_object != NULL && vp->v_object->handle != vp) 1863 return (0); 1864 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1865 } 1866 1867 /* 1868 * Flush out buffers on the specified list. 1869 * 1870 */ 1871 static int 1872 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1873 int slptimeo) 1874 { 1875 struct buf *bp, *nbp; 1876 int retval, error; 1877 daddr_t lblkno; 1878 b_xflags_t xflags; 1879 1880 ASSERT_BO_WLOCKED(bo); 1881 1882 retval = 0; 1883 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1884 /* 1885 * If we are flushing both V_NORMAL and V_ALT buffers then 1886 * do not skip any buffers. If we are flushing only V_NORMAL 1887 * buffers then skip buffers marked as BX_ALTDATA. If we are 1888 * flushing only V_ALT buffers then skip buffers not marked 1889 * as BX_ALTDATA. 1890 */ 1891 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 1892 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 1893 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 1894 continue; 1895 } 1896 if (nbp != NULL) { 1897 lblkno = nbp->b_lblkno; 1898 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1899 } 1900 retval = EAGAIN; 1901 error = BUF_TIMELOCK(bp, 1902 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1903 "flushbuf", slpflag, slptimeo); 1904 if (error) { 1905 BO_LOCK(bo); 1906 return (error != ENOLCK ? error : EAGAIN); 1907 } 1908 KASSERT(bp->b_bufobj == bo, 1909 ("bp %p wrong b_bufobj %p should be %p", 1910 bp, bp->b_bufobj, bo)); 1911 /* 1912 * XXX Since there are no node locks for NFS, I 1913 * believe there is a slight chance that a delayed 1914 * write will occur while sleeping just above, so 1915 * check for it. 1916 */ 1917 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1918 (flags & V_SAVE)) { 1919 bremfree(bp); 1920 bp->b_flags |= B_ASYNC; 1921 bwrite(bp); 1922 BO_LOCK(bo); 1923 return (EAGAIN); /* XXX: why not loop ? 
*/ 1924 } 1925 bremfree(bp); 1926 bp->b_flags |= (B_INVAL | B_RELBUF); 1927 bp->b_flags &= ~B_ASYNC; 1928 brelse(bp); 1929 BO_LOCK(bo); 1930 if (nbp == NULL) 1931 break; 1932 nbp = gbincore(bo, lblkno); 1933 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1934 != xflags) 1935 break; /* nbp invalid */ 1936 } 1937 return (retval); 1938 } 1939 1940 int 1941 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 1942 { 1943 struct buf *bp; 1944 int error; 1945 daddr_t lblkno; 1946 1947 ASSERT_BO_LOCKED(bo); 1948 1949 for (lblkno = startn;;) { 1950 again: 1951 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 1952 if (bp == NULL || bp->b_lblkno >= endn || 1953 bp->b_lblkno < startn) 1954 break; 1955 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 1956 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 1957 if (error != 0) { 1958 BO_RLOCK(bo); 1959 if (error == ENOLCK) 1960 goto again; 1961 return (error); 1962 } 1963 KASSERT(bp->b_bufobj == bo, 1964 ("bp %p wrong b_bufobj %p should be %p", 1965 bp, bp->b_bufobj, bo)); 1966 lblkno = bp->b_lblkno + 1; 1967 if ((bp->b_flags & B_MANAGED) == 0) 1968 bremfree(bp); 1969 bp->b_flags |= B_RELBUF; 1970 /* 1971 * In the VMIO case, use the B_NOREUSE flag to hint that the 1972 * pages backing each buffer in the range are unlikely to be 1973 * reused. Dirty buffers will have the hint applied once 1974 * they've been written. 1975 */ 1976 if ((bp->b_flags & B_VMIO) != 0) 1977 bp->b_flags |= B_NOREUSE; 1978 brelse(bp); 1979 BO_RLOCK(bo); 1980 } 1981 return (0); 1982 } 1983 1984 /* 1985 * Truncate a file's buffer and pages to a specified length. This 1986 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1987 * sync activity. 1988 */ 1989 int 1990 vtruncbuf(struct vnode *vp, off_t length, int blksize) 1991 { 1992 struct buf *bp, *nbp; 1993 struct bufobj *bo; 1994 daddr_t startlbn; 1995 1996 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 1997 vp, blksize, (uintmax_t)length); 1998 1999 /* 2000 * Round up to the *next* lbn. 2001 */ 2002 startlbn = howmany(length, blksize); 2003 2004 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2005 2006 bo = &vp->v_bufobj; 2007 restart_unlocked: 2008 BO_LOCK(bo); 2009 2010 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2011 ; 2012 2013 if (length > 0) { 2014 restartsync: 2015 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2016 if (bp->b_lblkno > 0) 2017 continue; 2018 /* 2019 * Since we hold the vnode lock this should only 2020 * fail if we're racing with the buf daemon. 2021 */ 2022 if (BUF_LOCK(bp, 2023 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2024 BO_LOCKPTR(bo)) == ENOLCK) 2025 goto restart_unlocked; 2026 2027 VNASSERT((bp->b_flags & B_DELWRI), vp, 2028 ("buf(%p) on dirty queue without DELWRI", bp)); 2029 2030 bremfree(bp); 2031 bawrite(bp); 2032 BO_LOCK(bo); 2033 goto restartsync; 2034 } 2035 } 2036 2037 bufobj_wwait(bo, 0, 0); 2038 BO_UNLOCK(bo); 2039 vnode_pager_setsize(vp, length); 2040 2041 return (0); 2042 } 2043 2044 /* 2045 * Invalidate the cached pages of a file's buffer within the range of block 2046 * numbers [startlbn, endlbn). 
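 *
 * A worked example of the mapping performed below (start = blksize *
 * startlbn, end = blksize * endlbn): with blksize 16384, the block range
 * [2, 5) covers byte offsets [32768, 81920), and the pages backing that
 * range are removed via vn_pages_remove() as well.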
2047 */ 2048 void 2049 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2050 int blksize) 2051 { 2052 struct bufobj *bo; 2053 off_t start, end; 2054 2055 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2056 2057 start = blksize * startlbn; 2058 end = blksize * endlbn; 2059 2060 bo = &vp->v_bufobj; 2061 BO_LOCK(bo); 2062 MPASS(blksize == bo->bo_bsize); 2063 2064 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2065 ; 2066 2067 BO_UNLOCK(bo); 2068 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2069 } 2070 2071 static int 2072 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2073 daddr_t startlbn, daddr_t endlbn) 2074 { 2075 struct buf *bp, *nbp; 2076 bool anyfreed; 2077 2078 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2079 ASSERT_BO_LOCKED(bo); 2080 2081 do { 2082 anyfreed = false; 2083 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2084 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2085 continue; 2086 if (BUF_LOCK(bp, 2087 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2088 BO_LOCKPTR(bo)) == ENOLCK) { 2089 BO_LOCK(bo); 2090 return (EAGAIN); 2091 } 2092 2093 bremfree(bp); 2094 bp->b_flags |= B_INVAL | B_RELBUF; 2095 bp->b_flags &= ~B_ASYNC; 2096 brelse(bp); 2097 anyfreed = true; 2098 2099 BO_LOCK(bo); 2100 if (nbp != NULL && 2101 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2102 nbp->b_vp != vp || 2103 (nbp->b_flags & B_DELWRI) != 0)) 2104 return (EAGAIN); 2105 } 2106 2107 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2108 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2109 continue; 2110 if (BUF_LOCK(bp, 2111 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2112 BO_LOCKPTR(bo)) == ENOLCK) { 2113 BO_LOCK(bo); 2114 return (EAGAIN); 2115 } 2116 bremfree(bp); 2117 bp->b_flags |= B_INVAL | B_RELBUF; 2118 bp->b_flags &= ~B_ASYNC; 2119 brelse(bp); 2120 anyfreed = true; 2121 2122 BO_LOCK(bo); 2123 if (nbp != NULL && 2124 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2125 (nbp->b_vp != vp) || 2126 (nbp->b_flags & B_DELWRI) == 0)) 2127 return (EAGAIN); 2128 } 2129 } while (anyfreed); 2130 return (0); 2131 } 2132 2133 static void 2134 buf_vlist_remove(struct buf *bp) 2135 { 2136 struct bufv *bv; 2137 2138 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2139 ASSERT_BO_WLOCKED(bp->b_bufobj); 2140 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2141 (BX_VNDIRTY|BX_VNCLEAN), 2142 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2143 if (bp->b_xflags & BX_VNDIRTY) 2144 bv = &bp->b_bufobj->bo_dirty; 2145 else 2146 bv = &bp->b_bufobj->bo_clean; 2147 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2148 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2149 bv->bv_cnt--; 2150 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2151 } 2152 2153 /* 2154 * Add the buffer to the sorted clean or dirty block list. 2155 * 2156 * NOTE: xflags is passed as a constant, optimizing this inline function! 2157 */ 2158 static void 2159 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2160 { 2161 struct bufv *bv; 2162 struct buf *n; 2163 int error; 2164 2165 ASSERT_BO_WLOCKED(bo); 2166 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2167 ("dead bo %p", bo)); 2168 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2169 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2170 bp->b_xflags |= xflags; 2171 if (xflags & BX_VNDIRTY) 2172 bv = &bo->bo_dirty; 2173 else 2174 bv = &bo->bo_clean; 2175 2176 /* 2177 * Keep the list ordered. Optimize empty list insertion. 
Assume 2178 * we tend to grow at the tail so lookup_le should usually be cheaper 2179 * than _ge. 2180 */ 2181 if (bv->bv_cnt == 0 || 2182 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2183 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2184 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2185 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2186 else 2187 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2188 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2189 if (error) 2190 panic("buf_vlist_add: Preallocated nodes insufficient."); 2191 bv->bv_cnt++; 2192 } 2193 2194 /* 2195 * Look up a buffer using the buffer tries. 2196 */ 2197 struct buf * 2198 gbincore(struct bufobj *bo, daddr_t lblkno) 2199 { 2200 struct buf *bp; 2201 2202 ASSERT_BO_LOCKED(bo); 2203 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2204 if (bp != NULL) 2205 return (bp); 2206 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2207 } 2208 2209 /* 2210 * Associate a buffer with a vnode. 2211 */ 2212 void 2213 bgetvp(struct vnode *vp, struct buf *bp) 2214 { 2215 struct bufobj *bo; 2216 2217 bo = &vp->v_bufobj; 2218 ASSERT_BO_WLOCKED(bo); 2219 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2220 2221 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2222 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2223 ("bgetvp: bp already attached! %p", bp)); 2224 2225 vhold(vp); 2226 bp->b_vp = vp; 2227 bp->b_bufobj = bo; 2228 /* 2229 * Insert onto list for new vnode. 2230 */ 2231 buf_vlist_add(bp, bo, BX_VNCLEAN); 2232 } 2233 2234 /* 2235 * Disassociate a buffer from a vnode. 2236 */ 2237 void 2238 brelvp(struct buf *bp) 2239 { 2240 struct bufobj *bo; 2241 struct vnode *vp; 2242 2243 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2244 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2245 2246 /* 2247 * Delete from old vnode list, if on one. 2248 */ 2249 vp = bp->b_vp; /* XXX */ 2250 bo = bp->b_bufobj; 2251 BO_LOCK(bo); 2252 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2253 buf_vlist_remove(bp); 2254 else 2255 panic("brelvp: Buffer %p not on queue.", bp); 2256 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2257 bo->bo_flag &= ~BO_ONWORKLST; 2258 mtx_lock(&sync_mtx); 2259 LIST_REMOVE(bo, bo_synclist); 2260 syncer_worklist_len--; 2261 mtx_unlock(&sync_mtx); 2262 } 2263 bp->b_vp = NULL; 2264 bp->b_bufobj = NULL; 2265 BO_UNLOCK(bo); 2266 vdrop(vp); 2267 } 2268 2269 /* 2270 * Add an item to the syncer work queue. 
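 *
 * The worklist is a wheel of syncer_maxdelay one-second buckets.  The
 * bufobj is (re)inserted roughly "delay" seconds ahead of the syncer's
 * current position, as computed below:
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *
 * so the syncer picks it up when its sweep reaches that slot.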
2271 */ 2272 static void 2273 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2274 { 2275 int slot; 2276 2277 ASSERT_BO_WLOCKED(bo); 2278 2279 mtx_lock(&sync_mtx); 2280 if (bo->bo_flag & BO_ONWORKLST) 2281 LIST_REMOVE(bo, bo_synclist); 2282 else { 2283 bo->bo_flag |= BO_ONWORKLST; 2284 syncer_worklist_len++; 2285 } 2286 2287 if (delay > syncer_maxdelay - 2) 2288 delay = syncer_maxdelay - 2; 2289 slot = (syncer_delayno + delay) & syncer_mask; 2290 2291 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2292 mtx_unlock(&sync_mtx); 2293 } 2294 2295 static int 2296 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2297 { 2298 int error, len; 2299 2300 mtx_lock(&sync_mtx); 2301 len = syncer_worklist_len - sync_vnode_count; 2302 mtx_unlock(&sync_mtx); 2303 error = SYSCTL_OUT(req, &len, sizeof(len)); 2304 return (error); 2305 } 2306 2307 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 2308 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2309 2310 static struct proc *updateproc; 2311 static void sched_sync(void); 2312 static struct kproc_desc up_kp = { 2313 "syncer", 2314 sched_sync, 2315 &updateproc 2316 }; 2317 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2318 2319 static int 2320 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2321 { 2322 struct vnode *vp; 2323 struct mount *mp; 2324 2325 *bo = LIST_FIRST(slp); 2326 if (*bo == NULL) 2327 return (0); 2328 vp = bo2vnode(*bo); 2329 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2330 return (1); 2331 /* 2332 * We use vhold in case the vnode does not 2333 * successfully sync. vhold prevents the vnode from 2334 * going away when we unlock the sync_mtx so that 2335 * we can acquire the vnode interlock. 2336 */ 2337 vholdl(vp); 2338 mtx_unlock(&sync_mtx); 2339 VI_UNLOCK(vp); 2340 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2341 vdrop(vp); 2342 mtx_lock(&sync_mtx); 2343 return (*bo == LIST_FIRST(slp)); 2344 } 2345 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2346 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2347 VOP_UNLOCK(vp, 0); 2348 vn_finished_write(mp); 2349 BO_LOCK(*bo); 2350 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2351 /* 2352 * Put us back on the worklist. The worklist 2353 * routine will remove us from our current 2354 * position and then add us back in at a later 2355 * position. 2356 */ 2357 vn_syncer_add_to_worklist(*bo, syncdelay); 2358 } 2359 BO_UNLOCK(*bo); 2360 vdrop(vp); 2361 mtx_lock(&sync_mtx); 2362 return (0); 2363 } 2364 2365 static int first_printf = 1; 2366 2367 /* 2368 * System filesystem synchronizer daemon. 
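 *
 * In rough terms, sched_sync() below advances syncer_delayno about once
 * per second, lazily fsyncs every vnode hanging off that slot of the
 * wheel, and during shutdown keeps sweeping until only the syncer
 * vnodes themselves remain on the worklist.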
2369 */ 2370 static void 2371 sched_sync(void) 2372 { 2373 struct synclist *next, *slp; 2374 struct bufobj *bo; 2375 long starttime; 2376 struct thread *td = curthread; 2377 int last_work_seen; 2378 int net_worklist_len; 2379 int syncer_final_iter; 2380 int error; 2381 2382 last_work_seen = 0; 2383 syncer_final_iter = 0; 2384 syncer_state = SYNCER_RUNNING; 2385 starttime = time_uptime; 2386 td->td_pflags |= TDP_NORUNNINGBUF; 2387 2388 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2389 SHUTDOWN_PRI_LAST); 2390 2391 mtx_lock(&sync_mtx); 2392 for (;;) { 2393 if (syncer_state == SYNCER_FINAL_DELAY && 2394 syncer_final_iter == 0) { 2395 mtx_unlock(&sync_mtx); 2396 kproc_suspend_check(td->td_proc); 2397 mtx_lock(&sync_mtx); 2398 } 2399 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2400 if (syncer_state != SYNCER_RUNNING && 2401 starttime != time_uptime) { 2402 if (first_printf) { 2403 printf("\nSyncing disks, vnodes remaining... "); 2404 first_printf = 0; 2405 } 2406 printf("%d ", net_worklist_len); 2407 } 2408 starttime = time_uptime; 2409 2410 /* 2411 * Push files whose dirty time has expired. Be careful 2412 * of interrupt race on slp queue. 2413 * 2414 * Skip over empty worklist slots when shutting down. 2415 */ 2416 do { 2417 slp = &syncer_workitem_pending[syncer_delayno]; 2418 syncer_delayno += 1; 2419 if (syncer_delayno == syncer_maxdelay) 2420 syncer_delayno = 0; 2421 next = &syncer_workitem_pending[syncer_delayno]; 2422 /* 2423 * If the worklist has wrapped since the 2424 * it was emptied of all but syncer vnodes, 2425 * switch to the FINAL_DELAY state and run 2426 * for one more second. 2427 */ 2428 if (syncer_state == SYNCER_SHUTTING_DOWN && 2429 net_worklist_len == 0 && 2430 last_work_seen == syncer_delayno) { 2431 syncer_state = SYNCER_FINAL_DELAY; 2432 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2433 } 2434 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2435 syncer_worklist_len > 0); 2436 2437 /* 2438 * Keep track of the last time there was anything 2439 * on the worklist other than syncer vnodes. 2440 * Return to the SHUTTING_DOWN state if any 2441 * new work appears. 2442 */ 2443 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2444 last_work_seen = syncer_delayno; 2445 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2446 syncer_state = SYNCER_SHUTTING_DOWN; 2447 while (!LIST_EMPTY(slp)) { 2448 error = sync_vnode(slp, &bo, td); 2449 if (error == 1) { 2450 LIST_REMOVE(bo, bo_synclist); 2451 LIST_INSERT_HEAD(next, bo, bo_synclist); 2452 continue; 2453 } 2454 2455 if (first_printf == 0) { 2456 /* 2457 * Drop the sync mutex, because some watchdog 2458 * drivers need to sleep while patting 2459 */ 2460 mtx_unlock(&sync_mtx); 2461 wdog_kern_pat(WD_LASTVAL); 2462 mtx_lock(&sync_mtx); 2463 } 2464 2465 } 2466 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2467 syncer_final_iter--; 2468 /* 2469 * The variable rushjob allows the kernel to speed up the 2470 * processing of the filesystem syncer process. A rushjob 2471 * value of N tells the filesystem syncer to process the next 2472 * N seconds worth of work on its queue ASAP. Currently rushjob 2473 * is used by the soft update code to speed up the filesystem 2474 * syncer process when the incore state is getting so far 2475 * ahead of the disk that the kernel memory pool is being 2476 * threatened with exhaustion. 
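 *
 * rushjob is bumped via speedup_syncer() below, which refuses to raise
 * it above syncdelay / 2 so that the syncer cannot be pushed into
 * monopolizing the CPU.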
2477 */ 2478 if (rushjob > 0) { 2479 rushjob -= 1; 2480 continue; 2481 } 2482 /* 2483 * Just sleep for a short period of time between 2484 * iterations when shutting down to allow some I/O 2485 * to happen. 2486 * 2487 * If it has taken us less than a second to process the 2488 * current work, then wait. Otherwise start right over 2489 * again. We can still lose time if any single round 2490 * takes more than two seconds, but it does not really 2491 * matter as we are just trying to generally pace the 2492 * filesystem activity. 2493 */ 2494 if (syncer_state != SYNCER_RUNNING || 2495 time_uptime == starttime) { 2496 thread_lock(td); 2497 sched_prio(td, PPAUSE); 2498 thread_unlock(td); 2499 } 2500 if (syncer_state != SYNCER_RUNNING) 2501 cv_timedwait(&sync_wakeup, &sync_mtx, 2502 hz / SYNCER_SHUTDOWN_SPEEDUP); 2503 else if (time_uptime == starttime) 2504 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2505 } 2506 } 2507 2508 /* 2509 * Request the syncer daemon to speed up its work. 2510 * We never push it to speed up more than half of its 2511 * normal turn time, otherwise it could take over the cpu. 2512 */ 2513 int 2514 speedup_syncer(void) 2515 { 2516 int ret = 0; 2517 2518 mtx_lock(&sync_mtx); 2519 if (rushjob < syncdelay / 2) { 2520 rushjob += 1; 2521 stat_rush_requests += 1; 2522 ret = 1; 2523 } 2524 mtx_unlock(&sync_mtx); 2525 cv_broadcast(&sync_wakeup); 2526 return (ret); 2527 } 2528 2529 /* 2530 * Tell the syncer to speed up its work and run though its work 2531 * list several times, then tell it to shut down. 2532 */ 2533 static void 2534 syncer_shutdown(void *arg, int howto) 2535 { 2536 2537 if (howto & RB_NOSYNC) 2538 return; 2539 mtx_lock(&sync_mtx); 2540 syncer_state = SYNCER_SHUTTING_DOWN; 2541 rushjob = 0; 2542 mtx_unlock(&sync_mtx); 2543 cv_broadcast(&sync_wakeup); 2544 kproc_shutdown(arg, howto); 2545 } 2546 2547 void 2548 syncer_suspend(void) 2549 { 2550 2551 syncer_shutdown(updateproc, 0); 2552 } 2553 2554 void 2555 syncer_resume(void) 2556 { 2557 2558 mtx_lock(&sync_mtx); 2559 first_printf = 1; 2560 syncer_state = SYNCER_RUNNING; 2561 mtx_unlock(&sync_mtx); 2562 cv_broadcast(&sync_wakeup); 2563 kproc_resume(updateproc); 2564 } 2565 2566 /* 2567 * Reassign a buffer from one vnode to another. 2568 * Used to assign file specific control information 2569 * (indirect blocks) to the vnode to which they belong. 2570 */ 2571 void 2572 reassignbuf(struct buf *bp) 2573 { 2574 struct vnode *vp; 2575 struct bufobj *bo; 2576 int delay; 2577 #ifdef INVARIANTS 2578 struct bufv *bv; 2579 #endif 2580 2581 vp = bp->b_vp; 2582 bo = bp->b_bufobj; 2583 ++reassignbufcalls; 2584 2585 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2586 bp, bp->b_vp, bp->b_flags); 2587 /* 2588 * B_PAGING flagged buffers cannot be reassigned because their vp 2589 * is not fully linked in. 2590 */ 2591 if (bp->b_flags & B_PAGING) 2592 panic("cannot reassign paging buffer"); 2593 2594 /* 2595 * Delete from old vnode list, if on one. 2596 */ 2597 BO_LOCK(bo); 2598 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2599 buf_vlist_remove(bp); 2600 else 2601 panic("reassignbuf: Buffer %p not on queue.", bp); 2602 /* 2603 * If dirty, put on list of dirty buffers; otherwise insert onto list 2604 * of clean buffers. 
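 *
 * For dirty buffers whose bufobj is not yet on the syncer worklist, the
 * flush delay is chosen by vnode type below: dirdelay for directories,
 * metadelay for devices (VCHR), and filedelay for everything else.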
2605 */ 2606 if (bp->b_flags & B_DELWRI) { 2607 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2608 switch (vp->v_type) { 2609 case VDIR: 2610 delay = dirdelay; 2611 break; 2612 case VCHR: 2613 delay = metadelay; 2614 break; 2615 default: 2616 delay = filedelay; 2617 } 2618 vn_syncer_add_to_worklist(bo, delay); 2619 } 2620 buf_vlist_add(bp, bo, BX_VNDIRTY); 2621 } else { 2622 buf_vlist_add(bp, bo, BX_VNCLEAN); 2623 2624 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2625 mtx_lock(&sync_mtx); 2626 LIST_REMOVE(bo, bo_synclist); 2627 syncer_worklist_len--; 2628 mtx_unlock(&sync_mtx); 2629 bo->bo_flag &= ~BO_ONWORKLST; 2630 } 2631 } 2632 #ifdef INVARIANTS 2633 bv = &bo->bo_clean; 2634 bp = TAILQ_FIRST(&bv->bv_hd); 2635 KASSERT(bp == NULL || bp->b_bufobj == bo, 2636 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2637 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2638 KASSERT(bp == NULL || bp->b_bufobj == bo, 2639 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2640 bv = &bo->bo_dirty; 2641 bp = TAILQ_FIRST(&bv->bv_hd); 2642 KASSERT(bp == NULL || bp->b_bufobj == bo, 2643 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2644 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2645 KASSERT(bp == NULL || bp->b_bufobj == bo, 2646 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2647 #endif 2648 BO_UNLOCK(bo); 2649 } 2650 2651 static void 2652 v_init_counters(struct vnode *vp) 2653 { 2654 2655 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2656 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2657 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2658 2659 refcount_init(&vp->v_holdcnt, 1); 2660 refcount_init(&vp->v_usecount, 1); 2661 } 2662 2663 /* 2664 * Increment si_usecount of the associated device, if any. 2665 */ 2666 static void 2667 v_incr_devcount(struct vnode *vp) 2668 { 2669 2670 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2671 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2672 dev_lock(); 2673 vp->v_rdev->si_usecount++; 2674 dev_unlock(); 2675 } 2676 } 2677 2678 /* 2679 * Decrement si_usecount of the associated device, if any. 2680 */ 2681 static void 2682 v_decr_devcount(struct vnode *vp) 2683 { 2684 2685 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2686 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2687 dev_lock(); 2688 vp->v_rdev->si_usecount--; 2689 dev_unlock(); 2690 } 2691 } 2692 2693 /* 2694 * Grab a particular vnode from the free list, increment its 2695 * reference count and lock it. VI_DOOMED is set if the vnode 2696 * is being destroyed. Only callers who specify LK_RETRY will 2697 * see doomed vnodes. If inactive processing was delayed in 2698 * vput try to do it here. 2699 * 2700 * Both holdcnt and usecount can be manipulated using atomics without holding 2701 * any locks except in these cases which require the vnode interlock: 2702 * holdcnt: 1->0 and 0->1 2703 * usecount: 0->1 2704 * 2705 * usecount is permitted to transition 1->0 without the interlock because 2706 * vnode is kept live by holdcnt. 
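 *
 * A minimal usage sketch (illustrative only) for a consumer that finds
 * vp through some shared structure it has locked:
 *
 *	vs = vget_prep(vp);
 *	...drop the lock that kept vp visible...
 *	error = vget_finish(vp, LK_SHARED, vs);
 *
 * On failure vget_finish() releases the reference taken by vget_prep(),
 * so the caller only has to check the error.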
2707 */ 2708 static enum vgetstate 2709 _vget_prep(struct vnode *vp, bool interlock) 2710 { 2711 enum vgetstate vs; 2712 2713 if (__predict_true(vp->v_type != VCHR)) { 2714 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2715 vs = VGET_USECOUNT; 2716 } else { 2717 _vhold(vp, interlock); 2718 vs = VGET_HOLDCNT; 2719 } 2720 } else { 2721 if (!interlock) 2722 VI_LOCK(vp); 2723 if (vp->v_usecount == 0) { 2724 vholdl(vp); 2725 vs = VGET_HOLDCNT; 2726 } else { 2727 v_incr_devcount(vp); 2728 refcount_acquire(&vp->v_usecount); 2729 vs = VGET_USECOUNT; 2730 } 2731 if (!interlock) 2732 VI_UNLOCK(vp); 2733 } 2734 return (vs); 2735 } 2736 2737 enum vgetstate 2738 vget_prep(struct vnode *vp) 2739 { 2740 2741 return (_vget_prep(vp, false)); 2742 } 2743 2744 int 2745 vget(struct vnode *vp, int flags, struct thread *td) 2746 { 2747 enum vgetstate vs; 2748 2749 MPASS(td == curthread); 2750 2751 vs = _vget_prep(vp, (flags & LK_INTERLOCK) != 0); 2752 return (vget_finish(vp, flags, vs)); 2753 } 2754 2755 int 2756 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2757 { 2758 int error, oweinact; 2759 2760 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2761 ("%s: invalid lock operation", __func__)); 2762 2763 if ((flags & LK_INTERLOCK) != 0) 2764 ASSERT_VI_LOCKED(vp, __func__); 2765 else 2766 ASSERT_VI_UNLOCKED(vp, __func__); 2767 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2768 if (vs == VGET_USECOUNT) { 2769 VNASSERT(vp->v_usecount > 0, vp, 2770 ("%s: vnode without usecount when VGET_USECOUNT was passed", 2771 __func__)); 2772 } 2773 2774 if ((error = vn_lock(vp, flags)) != 0) { 2775 if (vs == VGET_USECOUNT) 2776 vrele(vp); 2777 else 2778 vdrop(vp); 2779 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2780 vp); 2781 return (error); 2782 } 2783 2784 if (vs == VGET_USECOUNT) { 2785 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2786 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2787 return (0); 2788 } 2789 2790 /* 2791 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2792 * the vnode around. Otherwise someone else lended their hold count and 2793 * we have to drop ours. 2794 */ 2795 if (vp->v_type != VCHR && 2796 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2797 #ifdef INVARIANTS 2798 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1; 2799 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2800 #else 2801 refcount_release(&vp->v_holdcnt); 2802 #endif 2803 VNODE_REFCOUNT_FENCE_ACQ(); 2804 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2805 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2806 return (0); 2807 } 2808 2809 /* 2810 * We don't guarantee that any particular close will 2811 * trigger inactive processing so just make a best effort 2812 * here at preventing a reference to a removed file. If 2813 * we don't succeed no harm is done. 2814 * 2815 * Upgrade our holdcnt to a usecount. 2816 */ 2817 VI_LOCK(vp); 2818 /* 2819 * See the previous section. By the time we get here we may find 2820 * ourselves in the same spot. 
2821 */ 2822 if (vp->v_type != VCHR) { 2823 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2824 #ifdef INVARIANTS 2825 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1) - 1; 2826 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 2827 #else 2828 refcount_release(&vp->v_holdcnt); 2829 #endif 2830 VNODE_REFCOUNT_FENCE_ACQ(); 2831 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2832 ("%s: vnode with usecount and VI_OWEINACT set", 2833 __func__)); 2834 VI_UNLOCK(vp); 2835 return (0); 2836 } 2837 } else { 2838 if (vp->v_usecount > 0) 2839 refcount_release(&vp->v_holdcnt); 2840 } 2841 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2842 oweinact = 0; 2843 } else { 2844 oweinact = 1; 2845 vp->v_iflag &= ~VI_OWEINACT; 2846 VNODE_REFCOUNT_FENCE_REL(); 2847 } 2848 v_incr_devcount(vp); 2849 refcount_acquire(&vp->v_usecount); 2850 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2851 (flags & LK_NOWAIT) == 0) 2852 vinactive(vp, curthread); 2853 VI_UNLOCK(vp); 2854 return (0); 2855 } 2856 2857 /* 2858 * Increase the reference (use) and hold count of a vnode. 2859 * This will also remove the vnode from the free list if it is presently free. 2860 */ 2861 void 2862 vref(struct vnode *vp) 2863 { 2864 2865 ASSERT_VI_UNLOCKED(vp, __func__); 2866 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2867 if (vp->v_type != VCHR && 2868 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2869 VNODE_REFCOUNT_FENCE_ACQ(); 2870 VNASSERT(vp->v_holdcnt > 0, vp, 2871 ("%s: active vnode not held", __func__)); 2872 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2873 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2874 return; 2875 } 2876 VI_LOCK(vp); 2877 vrefl(vp); 2878 VI_UNLOCK(vp); 2879 } 2880 2881 void 2882 vrefl(struct vnode *vp) 2883 { 2884 2885 ASSERT_VI_LOCKED(vp, __func__); 2886 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2887 if (vp->v_type != VCHR && 2888 refcount_acquire_if_not_zero(&vp->v_usecount)) { 2889 VNODE_REFCOUNT_FENCE_ACQ(); 2890 VNASSERT(vp->v_holdcnt > 0, vp, 2891 ("%s: active vnode not held", __func__)); 2892 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2893 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2894 return; 2895 } 2896 if (vp->v_usecount == 0) 2897 vholdl(vp); 2898 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2899 vp->v_iflag &= ~VI_OWEINACT; 2900 VNODE_REFCOUNT_FENCE_REL(); 2901 } 2902 v_incr_devcount(vp); 2903 refcount_acquire(&vp->v_usecount); 2904 } 2905 2906 void 2907 vrefact(struct vnode *vp) 2908 { 2909 2910 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2911 if (__predict_false(vp->v_type == VCHR)) { 2912 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2913 ("%s: wrong ref counts", __func__)); 2914 vref(vp); 2915 return; 2916 } 2917 #ifdef INVARIANTS 2918 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2919 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__)); 2920 #else 2921 refcount_acquire(&vp->v_usecount); 2922 #endif 2923 } 2924 2925 /* 2926 * Return reference count of a vnode. 2927 * 2928 * The results of this call are only guaranteed when some mechanism is used to 2929 * stop other processes from gaining references to the vnode. This may be the 2930 * case if the caller holds the only reference. This is also useful when stale 2931 * data is acceptable as race conditions may be accounted for by some other 2932 * means. 
2933 */ 2934 int 2935 vrefcnt(struct vnode *vp) 2936 { 2937 2938 return (vp->v_usecount); 2939 } 2940 2941 #define VPUTX_VRELE 1 2942 #define VPUTX_VPUT 2 2943 #define VPUTX_VUNREF 3 2944 2945 /* 2946 * Decrement the use and hold counts for a vnode. 2947 * 2948 * See an explanation near vget() as to why atomic operation is safe. 2949 */ 2950 static void 2951 vputx(struct vnode *vp, int func) 2952 { 2953 int error; 2954 2955 KASSERT(vp != NULL, ("vputx: null vp")); 2956 if (func == VPUTX_VUNREF) 2957 ASSERT_VOP_LOCKED(vp, "vunref"); 2958 else if (func == VPUTX_VPUT) 2959 ASSERT_VOP_LOCKED(vp, "vput"); 2960 else 2961 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2962 ASSERT_VI_UNLOCKED(vp, __func__); 2963 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 2964 ("%s: wrong ref counts", __func__)); 2965 2966 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2967 2968 /* 2969 * It is an invariant that all VOP_* calls operate on a held vnode. 2970 * We may be only having an implicit hold stemming from our usecount, 2971 * which we are about to release. If we unlock the vnode afterwards we 2972 * open a time window where someone else dropped the last usecount and 2973 * proceeded to free the vnode before our unlock finished. For this 2974 * reason we unlock the vnode early. This is a little bit wasteful as 2975 * it may be the vnode is exclusively locked and inactive processing is 2976 * needed, in which case we are adding work. 2977 */ 2978 if (func == VPUTX_VPUT) 2979 VOP_UNLOCK(vp, 0); 2980 2981 /* 2982 * We want to hold the vnode until the inactive finishes to 2983 * prevent vgone() races. We drop the use count here and the 2984 * hold count below when we're done. 2985 */ 2986 if (vp->v_type != VCHR) { 2987 /* 2988 * If we release the last usecount we take ownership of the hold 2989 * count which provides liveness of the vnode, in which case we 2990 * have to vdrop. 2991 */ 2992 if (!refcount_release(&vp->v_usecount)) 2993 return; 2994 VI_LOCK(vp); 2995 /* 2996 * By the time we got here someone else might have transitioned 2997 * the count back to > 0. 2998 */ 2999 if (vp->v_usecount > 0) { 3000 vdropl(vp); 3001 return; 3002 } 3003 } else { 3004 VI_LOCK(vp); 3005 v_decr_devcount(vp); 3006 if (!refcount_release(&vp->v_usecount)) { 3007 VI_UNLOCK(vp); 3008 return; 3009 } 3010 } 3011 if (vp->v_iflag & VI_DOINGINACT) { 3012 vdropl(vp); 3013 return; 3014 } 3015 3016 error = 0; 3017 3018 if (vp->v_usecount != 0) { 3019 vn_printf(vp, "vputx: usecount not zero for vnode "); 3020 panic("vputx: usecount not zero"); 3021 } 3022 3023 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 3024 3025 /* 3026 * Check if the fs wants to perform inactive processing. Note we 3027 * may be only holding the interlock, in which case it is possible 3028 * someone else called vgone on the vnode and ->v_data is now NULL. 3029 * Since vgone performs inactive on its own there is nothing to do 3030 * here but to drop our hold count. 3031 */ 3032 if (__predict_false(vp->v_iflag & VI_DOOMED) || 3033 VOP_NEED_INACTIVE(vp) == 0) { 3034 vdropl(vp); 3035 return; 3036 } 3037 3038 /* 3039 * We must call VOP_INACTIVE with the node locked. Mark 3040 * as VI_DOINGINACT to avoid recursion. 
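 *
 * VI_OWEINACT is set before the lock attempt below so that, should the
 * lock not be obtained (e.g. LK_NOWAIT for vput, or a failed upgrade for
 * vunref), the missed inactive call is remembered and carried out later
 * by vget() or vgone().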
3041 */ 3042 vp->v_iflag |= VI_OWEINACT; 3043 switch (func) { 3044 case VPUTX_VRELE: 3045 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3046 VI_LOCK(vp); 3047 break; 3048 case VPUTX_VPUT: 3049 error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT); 3050 VI_LOCK(vp); 3051 break; 3052 case VPUTX_VUNREF: 3053 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3054 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3055 VI_LOCK(vp); 3056 } 3057 break; 3058 } 3059 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 3060 ("vnode with usecount and VI_OWEINACT set")); 3061 if (error == 0) { 3062 if (vp->v_iflag & VI_OWEINACT) 3063 vinactive(vp, curthread); 3064 if (func != VPUTX_VUNREF) 3065 VOP_UNLOCK(vp, 0); 3066 } 3067 vdropl(vp); 3068 } 3069 3070 /* 3071 * Vnode put/release. 3072 * If count drops to zero, call inactive routine and return to freelist. 3073 */ 3074 void 3075 vrele(struct vnode *vp) 3076 { 3077 3078 vputx(vp, VPUTX_VRELE); 3079 } 3080 3081 /* 3082 * Release an already locked vnode. This give the same effects as 3083 * unlock+vrele(), but takes less time and avoids releasing and 3084 * re-aquiring the lock (as vrele() acquires the lock internally.) 3085 */ 3086 void 3087 vput(struct vnode *vp) 3088 { 3089 3090 vputx(vp, VPUTX_VPUT); 3091 } 3092 3093 /* 3094 * Release an exclusively locked vnode. Do not unlock the vnode lock. 3095 */ 3096 void 3097 vunref(struct vnode *vp) 3098 { 3099 3100 vputx(vp, VPUTX_VUNREF); 3101 } 3102 3103 /* 3104 * Increase the hold count and activate if this is the first reference. 3105 */ 3106 void 3107 _vhold(struct vnode *vp, bool locked) 3108 { 3109 struct mount *mp; 3110 3111 if (locked) 3112 ASSERT_VI_LOCKED(vp, __func__); 3113 else 3114 ASSERT_VI_UNLOCKED(vp, __func__); 3115 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3116 if (!locked) { 3117 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 3118 VNODE_REFCOUNT_FENCE_ACQ(); 3119 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3120 ("_vhold: vnode with holdcnt is free")); 3121 return; 3122 } 3123 VI_LOCK(vp); 3124 } 3125 if ((vp->v_iflag & VI_FREE) == 0) { 3126 refcount_acquire(&vp->v_holdcnt); 3127 if (!locked) 3128 VI_UNLOCK(vp); 3129 return; 3130 } 3131 VNASSERT(vp->v_holdcnt == 0, vp, 3132 ("%s: wrong hold count", __func__)); 3133 VNASSERT(vp->v_op != NULL, vp, 3134 ("%s: vnode already reclaimed.", __func__)); 3135 /* 3136 * Remove a vnode from the free list, mark it as in use, 3137 * and put it on the active list. 
3138 */ 3139 VNASSERT(vp->v_mount != NULL, vp, 3140 ("_vhold: vnode not on per mount vnode list")); 3141 mp = vp->v_mount; 3142 mtx_lock(&mp->mnt_listmtx); 3143 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 3144 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3145 mp->mnt_tmpfreevnodelistsize--; 3146 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 3147 } else { 3148 mtx_lock(&vnode_free_list_mtx); 3149 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 3150 freevnodes--; 3151 mtx_unlock(&vnode_free_list_mtx); 3152 } 3153 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 3154 ("Activating already active vnode")); 3155 vp->v_iflag &= ~VI_FREE; 3156 vp->v_iflag |= VI_ACTIVE; 3157 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 3158 mp->mnt_activevnodelistsize++; 3159 mtx_unlock(&mp->mnt_listmtx); 3160 refcount_acquire(&vp->v_holdcnt); 3161 if (!locked) 3162 VI_UNLOCK(vp); 3163 } 3164 3165 void 3166 vholdnz(struct vnode *vp) 3167 { 3168 3169 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3170 #ifdef INVARIANTS 3171 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3172 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__)); 3173 #else 3174 atomic_add_int(&vp->v_holdcnt, 1); 3175 #endif 3176 } 3177 3178 /* 3179 * Drop the hold count of the vnode. If this is the last reference to 3180 * the vnode we place it on the free list unless it has been vgone'd 3181 * (marked VI_DOOMED) in which case we will free it. 3182 * 3183 * Because the vnode vm object keeps a hold reference on the vnode if 3184 * there is at least one resident non-cached page, the vnode cannot 3185 * leave the active list without the page cleanup done. 3186 */ 3187 void 3188 _vdrop(struct vnode *vp, bool locked) 3189 { 3190 struct bufobj *bo; 3191 struct mount *mp; 3192 int active; 3193 3194 if (locked) 3195 ASSERT_VI_LOCKED(vp, __func__); 3196 else 3197 ASSERT_VI_UNLOCKED(vp, __func__); 3198 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3199 if (__predict_false((int)vp->v_holdcnt <= 0)) { 3200 vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt); 3201 panic("vdrop: wrong holdcnt"); 3202 } 3203 if (!locked) { 3204 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3205 return; 3206 VI_LOCK(vp); 3207 } 3208 if (refcount_release(&vp->v_holdcnt) == 0) { 3209 VI_UNLOCK(vp); 3210 return; 3211 } 3212 if ((vp->v_iflag & VI_DOOMED) == 0) { 3213 /* 3214 * Mark a vnode as free: remove it from its active list 3215 * and put it up for recycling on the freelist. 
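 *
 * Vnodes with a mount point are parked on the per-mount
 * mnt_tmpfreevnodelist and only migrated to the global free list in
 * batches (via vnlru_return_batch_locked() below), keeping
 * vnode_free_list_mtx out of the common path.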
3216 */ 3217 VNASSERT(vp->v_op != NULL, vp, 3218 ("vdropl: vnode already reclaimed.")); 3219 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3220 ("vnode already free")); 3221 VNASSERT(vp->v_holdcnt == 0, vp, 3222 ("vdropl: freeing when we shouldn't")); 3223 active = vp->v_iflag & VI_ACTIVE; 3224 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3225 vp->v_iflag &= ~VI_ACTIVE; 3226 mp = vp->v_mount; 3227 if (mp != NULL) { 3228 mtx_lock(&mp->mnt_listmtx); 3229 if (active) { 3230 TAILQ_REMOVE(&mp->mnt_activevnodelist, 3231 vp, v_actfreelist); 3232 mp->mnt_activevnodelistsize--; 3233 } 3234 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, 3235 vp, v_actfreelist); 3236 mp->mnt_tmpfreevnodelistsize++; 3237 vp->v_iflag |= VI_FREE; 3238 vp->v_mflag |= VMP_TMPMNTFREELIST; 3239 VI_UNLOCK(vp); 3240 if (mp->mnt_tmpfreevnodelistsize >= 3241 mnt_free_list_batch) 3242 vnlru_return_batch_locked(mp); 3243 mtx_unlock(&mp->mnt_listmtx); 3244 } else { 3245 VNASSERT(active == 0, vp, 3246 ("vdropl: active vnode not on per mount " 3247 "vnode list")); 3248 mtx_lock(&vnode_free_list_mtx); 3249 TAILQ_INSERT_TAIL(&vnode_free_list, vp, 3250 v_actfreelist); 3251 freevnodes++; 3252 vp->v_iflag |= VI_FREE; 3253 VI_UNLOCK(vp); 3254 mtx_unlock(&vnode_free_list_mtx); 3255 } 3256 } else { 3257 VI_UNLOCK(vp); 3258 counter_u64_add(free_owe_inact, 1); 3259 } 3260 return; 3261 } 3262 /* 3263 * The vnode has been marked for destruction, so free it. 3264 * 3265 * The vnode will be returned to the zone where it will 3266 * normally remain until it is needed for another vnode. We 3267 * need to cleanup (or verify that the cleanup has already 3268 * been done) any residual data left from its current use 3269 * so as not to contaminate the freshly allocated vnode. 3270 */ 3271 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 3272 atomic_subtract_long(&numvnodes, 1); 3273 bo = &vp->v_bufobj; 3274 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3275 ("cleaned vnode still on the free list.")); 3276 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 3277 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 3278 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 3279 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 3280 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 3281 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 3282 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 3283 ("clean blk trie not empty")); 3284 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 3285 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 3286 ("dirty blk trie not empty")); 3287 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 3288 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 3289 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 3290 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 3291 ("Dangling rangelock waiters")); 3292 VI_UNLOCK(vp); 3293 #ifdef MAC 3294 mac_vnode_destroy(vp); 3295 #endif 3296 if (vp->v_pollinfo != NULL) { 3297 destroy_vpollinfo(vp->v_pollinfo); 3298 vp->v_pollinfo = NULL; 3299 } 3300 #ifdef INVARIANTS 3301 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. 
*/ 3302 vp->v_op = NULL; 3303 #endif 3304 vp->v_mountedhere = NULL; 3305 vp->v_unpcb = NULL; 3306 vp->v_rdev = NULL; 3307 vp->v_fifoinfo = NULL; 3308 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 3309 vp->v_iflag = 0; 3310 vp->v_vflag = 0; 3311 bo->bo_flag = 0; 3312 uma_zfree(vnode_zone, vp); 3313 } 3314 3315 /* 3316 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3317 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3318 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3319 * failed lock upgrade. 3320 */ 3321 void 3322 vinactive(struct vnode *vp, struct thread *td) 3323 { 3324 struct vm_object *obj; 3325 3326 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3327 ASSERT_VI_LOCKED(vp, "vinactive"); 3328 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3329 ("vinactive: recursed on VI_DOINGINACT")); 3330 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3331 vp->v_iflag |= VI_DOINGINACT; 3332 vp->v_iflag &= ~VI_OWEINACT; 3333 VI_UNLOCK(vp); 3334 /* 3335 * Before moving off the active list, we must be sure that any 3336 * modified pages are converted into the vnode's dirty 3337 * buffers, since these will no longer be checked once the 3338 * vnode is on the inactive list. 3339 * 3340 * The write-out of the dirty pages is asynchronous. At the 3341 * point that VOP_INACTIVE() is called, there could still be 3342 * pending I/O and dirty pages in the object. 3343 */ 3344 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3345 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 3346 VM_OBJECT_WLOCK(obj); 3347 vm_object_page_clean(obj, 0, 0, 0); 3348 VM_OBJECT_WUNLOCK(obj); 3349 } 3350 VOP_INACTIVE(vp, td); 3351 VI_LOCK(vp); 3352 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3353 ("vinactive: lost VI_DOINGINACT")); 3354 vp->v_iflag &= ~VI_DOINGINACT; 3355 } 3356 3357 /* 3358 * Remove any vnodes in the vnode table belonging to mount point mp. 3359 * 3360 * If FORCECLOSE is not specified, there should not be any active ones, 3361 * return error if any are found (nb: this is a user error, not a 3362 * system error). If FORCECLOSE is specified, detach any active vnodes 3363 * that are found. 3364 * 3365 * If WRITECLOSE is set, only flush out regular file vnodes open for 3366 * writing. 3367 * 3368 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3369 * 3370 * `rootrefs' specifies the base reference count for the root vnode 3371 * of this filesystem. The root vnode is considered busy if its 3372 * v_usecount exceeds this value. On a successful return, vflush(, td) 3373 * will call vrele() on the root vnode exactly rootrefs times. 3374 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3375 * be zero. 3376 */ 3377 #ifdef DIAGNOSTIC 3378 static int busyprt = 0; /* print out busy vnodes */ 3379 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3380 #endif 3381 3382 int 3383 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3384 { 3385 struct vnode *vp, *mvp, *rootvp = NULL; 3386 struct vattr vattr; 3387 int busy = 0, error; 3388 3389 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3390 rootrefs, flags); 3391 if (rootrefs > 0) { 3392 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3393 ("vflush: bad args")); 3394 /* 3395 * Get the filesystem root vnode. We can vput() it 3396 * immediately, since with rootrefs > 0, it won't go away. 
3397 */ 3398 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3399 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3400 __func__, error); 3401 return (error); 3402 } 3403 vput(rootvp); 3404 } 3405 loop: 3406 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3407 vholdl(vp); 3408 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3409 if (error) { 3410 vdrop(vp); 3411 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3412 goto loop; 3413 } 3414 /* 3415 * Skip over a vnodes marked VV_SYSTEM. 3416 */ 3417 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3418 VOP_UNLOCK(vp, 0); 3419 vdrop(vp); 3420 continue; 3421 } 3422 /* 3423 * If WRITECLOSE is set, flush out unlinked but still open 3424 * files (even if open only for reading) and regular file 3425 * vnodes open for writing. 3426 */ 3427 if (flags & WRITECLOSE) { 3428 if (vp->v_object != NULL) { 3429 VM_OBJECT_WLOCK(vp->v_object); 3430 vm_object_page_clean(vp->v_object, 0, 0, 0); 3431 VM_OBJECT_WUNLOCK(vp->v_object); 3432 } 3433 error = VOP_FSYNC(vp, MNT_WAIT, td); 3434 if (error != 0) { 3435 VOP_UNLOCK(vp, 0); 3436 vdrop(vp); 3437 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3438 return (error); 3439 } 3440 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3441 VI_LOCK(vp); 3442 3443 if ((vp->v_type == VNON || 3444 (error == 0 && vattr.va_nlink > 0)) && 3445 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3446 VOP_UNLOCK(vp, 0); 3447 vdropl(vp); 3448 continue; 3449 } 3450 } else 3451 VI_LOCK(vp); 3452 /* 3453 * With v_usecount == 0, all we need to do is clear out the 3454 * vnode data structures and we are done. 3455 * 3456 * If FORCECLOSE is set, forcibly close the vnode. 3457 */ 3458 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3459 vgonel(vp); 3460 } else { 3461 busy++; 3462 #ifdef DIAGNOSTIC 3463 if (busyprt) 3464 vn_printf(vp, "vflush: busy vnode "); 3465 #endif 3466 } 3467 VOP_UNLOCK(vp, 0); 3468 vdropl(vp); 3469 } 3470 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3471 /* 3472 * If just the root vnode is busy, and if its refcount 3473 * is equal to `rootrefs', then go ahead and kill it. 3474 */ 3475 VI_LOCK(rootvp); 3476 KASSERT(busy > 0, ("vflush: not busy")); 3477 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3478 ("vflush: usecount %d < rootrefs %d", 3479 rootvp->v_usecount, rootrefs)); 3480 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3481 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3482 vgone(rootvp); 3483 VOP_UNLOCK(rootvp, 0); 3484 busy = 0; 3485 } else 3486 VI_UNLOCK(rootvp); 3487 } 3488 if (busy) { 3489 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3490 busy); 3491 return (EBUSY); 3492 } 3493 for (; rootrefs > 0; rootrefs--) 3494 vrele(rootvp); 3495 return (0); 3496 } 3497 3498 /* 3499 * Recycle an unused vnode to the front of the free list. 3500 */ 3501 int 3502 vrecycle(struct vnode *vp) 3503 { 3504 int recycled; 3505 3506 VI_LOCK(vp); 3507 recycled = vrecyclel(vp); 3508 VI_UNLOCK(vp); 3509 return (recycled); 3510 } 3511 3512 /* 3513 * vrecycle, with the vp interlock held. 3514 */ 3515 int 3516 vrecyclel(struct vnode *vp) 3517 { 3518 int recycled; 3519 3520 ASSERT_VOP_ELOCKED(vp, __func__); 3521 ASSERT_VI_LOCKED(vp, __func__); 3522 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3523 recycled = 0; 3524 if (vp->v_usecount == 0) { 3525 recycled = 1; 3526 vgonel(vp); 3527 } 3528 return (recycled); 3529 } 3530 3531 /* 3532 * Eliminate all activity associated with a vnode 3533 * in preparation for reuse. 
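 *
 * Callers are expected to hold the vnode lock and a reference; within
 * this file the typical users are vflush() (notably with FORCECLOSE)
 * and the recycling path via vrecyclel().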
3534 */ 3535 void 3536 vgone(struct vnode *vp) 3537 { 3538 VI_LOCK(vp); 3539 vgonel(vp); 3540 VI_UNLOCK(vp); 3541 } 3542 3543 static void 3544 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3545 struct vnode *lowervp __unused) 3546 { 3547 } 3548 3549 /* 3550 * Notify upper mounts about reclaimed or unlinked vnode. 3551 */ 3552 void 3553 vfs_notify_upper(struct vnode *vp, int event) 3554 { 3555 static struct vfsops vgonel_vfsops = { 3556 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3557 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3558 }; 3559 struct mount *mp, *ump, *mmp; 3560 3561 mp = vp->v_mount; 3562 if (mp == NULL) 3563 return; 3564 3565 MNT_ILOCK(mp); 3566 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3567 goto unlock; 3568 MNT_IUNLOCK(mp); 3569 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3570 mmp->mnt_op = &vgonel_vfsops; 3571 mmp->mnt_kern_flag |= MNTK_MARKER; 3572 MNT_ILOCK(mp); 3573 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3574 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3575 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3576 ump = TAILQ_NEXT(ump, mnt_upper_link); 3577 continue; 3578 } 3579 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3580 MNT_IUNLOCK(mp); 3581 switch (event) { 3582 case VFS_NOTIFY_UPPER_RECLAIM: 3583 VFS_RECLAIM_LOWERVP(ump, vp); 3584 break; 3585 case VFS_NOTIFY_UPPER_UNLINK: 3586 VFS_UNLINK_LOWERVP(ump, vp); 3587 break; 3588 default: 3589 KASSERT(0, ("invalid event %d", event)); 3590 break; 3591 } 3592 MNT_ILOCK(mp); 3593 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3594 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3595 } 3596 free(mmp, M_TEMP); 3597 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3598 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3599 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3600 wakeup(&mp->mnt_uppers); 3601 } 3602 unlock: 3603 MNT_IUNLOCK(mp); 3604 } 3605 3606 /* 3607 * vgone, with the vp interlock held. 3608 */ 3609 static void 3610 vgonel(struct vnode *vp) 3611 { 3612 struct thread *td; 3613 struct mount *mp; 3614 vm_object_t object; 3615 bool active, oweinact; 3616 3617 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3618 ASSERT_VI_LOCKED(vp, "vgonel"); 3619 VNASSERT(vp->v_holdcnt, vp, 3620 ("vgonel: vp %p has no reference.", vp)); 3621 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3622 td = curthread; 3623 3624 /* 3625 * Don't vgonel if we're already doomed. 3626 */ 3627 if (vp->v_iflag & VI_DOOMED) 3628 return; 3629 vp->v_iflag |= VI_DOOMED; 3630 3631 /* 3632 * Check to see if the vnode is in use. If so, we have to call 3633 * VOP_CLOSE() and VOP_INACTIVE(). 3634 */ 3635 active = vp->v_usecount > 0; 3636 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3637 VI_UNLOCK(vp); 3638 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3639 3640 /* 3641 * If purging an active vnode, it must be closed and 3642 * deactivated before being reclaimed. 3643 */ 3644 if (active) 3645 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3646 if (oweinact || active) { 3647 VI_LOCK(vp); 3648 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3649 vinactive(vp, td); 3650 VI_UNLOCK(vp); 3651 } 3652 if (vp->v_type == VSOCK) 3653 vfs_unp_reclaim(vp); 3654 3655 /* 3656 * Clean out any buffers associated with the vnode. 3657 * If the flush fails, just toss the buffers. 
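 *
 * The first vinvalbuf() call below uses V_SAVE to attempt to write the
 * dirty buffers back; if that fails the data cannot be saved, so the
 * loop retries with flags 0 and simply discards whatever remains.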
3658 */ 3659 mp = NULL; 3660 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3661 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3662 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3663 while (vinvalbuf(vp, 0, 0, 0) != 0) 3664 ; 3665 } 3666 3667 BO_LOCK(&vp->v_bufobj); 3668 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3669 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3670 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3671 vp->v_bufobj.bo_clean.bv_cnt == 0, 3672 ("vp %p bufobj not invalidated", vp)); 3673 3674 /* 3675 * For VMIO bufobj, BO_DEAD is set later, or in 3676 * vm_object_terminate() after the object's page queue is 3677 * flushed. 3678 */ 3679 object = vp->v_bufobj.bo_object; 3680 if (object == NULL) 3681 vp->v_bufobj.bo_flag |= BO_DEAD; 3682 BO_UNLOCK(&vp->v_bufobj); 3683 3684 /* 3685 * Handle the VM part. Tmpfs handles v_object on its own (the 3686 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3687 * should not touch the object borrowed from the lower vnode 3688 * (the handle check). 3689 */ 3690 if (object != NULL && object->type == OBJT_VNODE && 3691 object->handle == vp) 3692 vnode_destroy_vobject(vp); 3693 3694 /* 3695 * Reclaim the vnode. 3696 */ 3697 if (VOP_RECLAIM(vp, td)) 3698 panic("vgone: cannot reclaim"); 3699 if (mp != NULL) 3700 vn_finished_secondary_write(mp); 3701 VNASSERT(vp->v_object == NULL, vp, 3702 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 3703 /* 3704 * Clear the advisory locks and wake up waiting threads. 3705 */ 3706 (void)VOP_ADVLOCKPURGE(vp); 3707 vp->v_lockf = NULL; 3708 /* 3709 * Delete from old mount point vnode list. 3710 */ 3711 delmntque(vp); 3712 cache_purge(vp); 3713 /* 3714 * Done with purge, reset to the standard lock and invalidate 3715 * the vnode. 3716 */ 3717 VI_LOCK(vp); 3718 vp->v_vnlock = &vp->v_lock; 3719 vp->v_op = &dead_vnodeops; 3720 vp->v_tag = "none"; 3721 vp->v_type = VBAD; 3722 } 3723 3724 /* 3725 * Calculate the total number of references to a special device. 3726 */ 3727 int 3728 vcount(struct vnode *vp) 3729 { 3730 int count; 3731 3732 dev_lock(); 3733 count = vp->v_rdev->si_usecount; 3734 dev_unlock(); 3735 return (count); 3736 } 3737 3738 /* 3739 * Same as above, but using the struct cdev *as argument 3740 */ 3741 int 3742 count_dev(struct cdev *dev) 3743 { 3744 int count; 3745 3746 dev_lock(); 3747 count = dev->si_usecount; 3748 dev_unlock(); 3749 return(count); 3750 } 3751 3752 /* 3753 * Print out a description of a vnode. 3754 */ 3755 static char *typename[] = 3756 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3757 "VMARKER"}; 3758 3759 void 3760 vn_printf(struct vnode *vp, const char *fmt, ...) 
3761 { 3762 va_list ap; 3763 char buf[256], buf2[16]; 3764 u_long flags; 3765 3766 va_start(ap, fmt); 3767 vprintf(fmt, ap); 3768 va_end(ap); 3769 printf("%p: ", (void *)vp); 3770 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 3771 printf(" usecount %d, writecount %d, refcount %d", 3772 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3773 switch (vp->v_type) { 3774 case VDIR: 3775 printf(" mountedhere %p\n", vp->v_mountedhere); 3776 break; 3777 case VCHR: 3778 printf(" rdev %p\n", vp->v_rdev); 3779 break; 3780 case VSOCK: 3781 printf(" socket %p\n", vp->v_unpcb); 3782 break; 3783 case VFIFO: 3784 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3785 break; 3786 default: 3787 printf("\n"); 3788 break; 3789 } 3790 buf[0] = '\0'; 3791 buf[1] = '\0'; 3792 if (vp->v_vflag & VV_ROOT) 3793 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3794 if (vp->v_vflag & VV_ISTTY) 3795 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3796 if (vp->v_vflag & VV_NOSYNC) 3797 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3798 if (vp->v_vflag & VV_ETERNALDEV) 3799 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3800 if (vp->v_vflag & VV_CACHEDLABEL) 3801 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3802 if (vp->v_vflag & VV_COPYONWRITE) 3803 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3804 if (vp->v_vflag & VV_SYSTEM) 3805 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3806 if (vp->v_vflag & VV_PROCDEP) 3807 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3808 if (vp->v_vflag & VV_NOKNOTE) 3809 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3810 if (vp->v_vflag & VV_DELETED) 3811 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3812 if (vp->v_vflag & VV_MD) 3813 strlcat(buf, "|VV_MD", sizeof(buf)); 3814 if (vp->v_vflag & VV_FORCEINSMQ) 3815 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3816 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3817 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3818 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3819 if (flags != 0) { 3820 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3821 strlcat(buf, buf2, sizeof(buf)); 3822 } 3823 if (vp->v_iflag & VI_MOUNT) 3824 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3825 if (vp->v_iflag & VI_DOOMED) 3826 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 3827 if (vp->v_iflag & VI_FREE) 3828 strlcat(buf, "|VI_FREE", sizeof(buf)); 3829 if (vp->v_iflag & VI_ACTIVE) 3830 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3831 if (vp->v_iflag & VI_DOINGINACT) 3832 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3833 if (vp->v_iflag & VI_OWEINACT) 3834 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3835 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE | 3836 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 3837 if (flags != 0) { 3838 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3839 strlcat(buf, buf2, sizeof(buf)); 3840 } 3841 printf(" flags (%s)\n", buf + 1); 3842 if (mtx_owned(VI_MTX(vp))) 3843 printf(" VI_LOCKed"); 3844 if (vp->v_object != NULL) 3845 printf(" v_object %p ref %d pages %d " 3846 "cleanbuf %d dirtybuf %d\n", 3847 vp->v_object, vp->v_object->ref_count, 3848 vp->v_object->resident_page_count, 3849 vp->v_bufobj.bo_clean.bv_cnt, 3850 vp->v_bufobj.bo_dirty.bv_cnt); 3851 printf(" "); 3852 lockmgr_printinfo(vp->v_vnlock); 3853 if (vp->v_data != NULL) 3854 VOP_PRINT(vp); 3855 } 3856 3857 #ifdef DDB 3858 /* 3859 * List all of the locked vnodes in the system. 3860 * Called when debugging the kernel. 
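 *
 * Available from the ddb prompt as, e.g.:
 *
 *	db> show lockedvnods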
3861 */ 3862 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3863 { 3864 struct mount *mp; 3865 struct vnode *vp; 3866 3867 /* 3868 * Note: because this is DDB, we can't obey the locking semantics 3869 * for these structures, which means we could catch an inconsistent 3870 * state and dereference a nasty pointer. Not much to be done 3871 * about that. 3872 */ 3873 db_printf("Locked vnodes\n"); 3874 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3875 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3876 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3877 vn_printf(vp, "vnode "); 3878 } 3879 } 3880 } 3881 3882 /* 3883 * Show details about the given vnode. 3884 */ 3885 DB_SHOW_COMMAND(vnode, db_show_vnode) 3886 { 3887 struct vnode *vp; 3888 3889 if (!have_addr) 3890 return; 3891 vp = (struct vnode *)addr; 3892 vn_printf(vp, "vnode "); 3893 } 3894 3895 /* 3896 * Show details about the given mount point. 3897 */ 3898 DB_SHOW_COMMAND(mount, db_show_mount) 3899 { 3900 struct mount *mp; 3901 struct vfsopt *opt; 3902 struct statfs *sp; 3903 struct vnode *vp; 3904 char buf[512]; 3905 uint64_t mflags; 3906 u_int flags; 3907 3908 if (!have_addr) { 3909 /* No address given, print short info about all mount points. */ 3910 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3911 db_printf("%p %s on %s (%s)\n", mp, 3912 mp->mnt_stat.f_mntfromname, 3913 mp->mnt_stat.f_mntonname, 3914 mp->mnt_stat.f_fstypename); 3915 if (db_pager_quit) 3916 break; 3917 } 3918 db_printf("\nMore info: show mount <addr>\n"); 3919 return; 3920 } 3921 3922 mp = (struct mount *)addr; 3923 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3924 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3925 3926 buf[0] = '\0'; 3927 mflags = mp->mnt_flag; 3928 #define MNT_FLAG(flag) do { \ 3929 if (mflags & (flag)) { \ 3930 if (buf[0] != '\0') \ 3931 strlcat(buf, ", ", sizeof(buf)); \ 3932 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3933 mflags &= ~(flag); \ 3934 } \ 3935 } while (0) 3936 MNT_FLAG(MNT_RDONLY); 3937 MNT_FLAG(MNT_SYNCHRONOUS); 3938 MNT_FLAG(MNT_NOEXEC); 3939 MNT_FLAG(MNT_NOSUID); 3940 MNT_FLAG(MNT_NFS4ACLS); 3941 MNT_FLAG(MNT_UNION); 3942 MNT_FLAG(MNT_ASYNC); 3943 MNT_FLAG(MNT_SUIDDIR); 3944 MNT_FLAG(MNT_SOFTDEP); 3945 MNT_FLAG(MNT_NOSYMFOLLOW); 3946 MNT_FLAG(MNT_GJOURNAL); 3947 MNT_FLAG(MNT_MULTILABEL); 3948 MNT_FLAG(MNT_ACLS); 3949 MNT_FLAG(MNT_NOATIME); 3950 MNT_FLAG(MNT_NOCLUSTERR); 3951 MNT_FLAG(MNT_NOCLUSTERW); 3952 MNT_FLAG(MNT_SUJ); 3953 MNT_FLAG(MNT_EXRDONLY); 3954 MNT_FLAG(MNT_EXPORTED); 3955 MNT_FLAG(MNT_DEFEXPORTED); 3956 MNT_FLAG(MNT_EXPORTANON); 3957 MNT_FLAG(MNT_EXKERB); 3958 MNT_FLAG(MNT_EXPUBLIC); 3959 MNT_FLAG(MNT_LOCAL); 3960 MNT_FLAG(MNT_QUOTA); 3961 MNT_FLAG(MNT_ROOTFS); 3962 MNT_FLAG(MNT_USER); 3963 MNT_FLAG(MNT_IGNORE); 3964 MNT_FLAG(MNT_UPDATE); 3965 MNT_FLAG(MNT_DELEXPORT); 3966 MNT_FLAG(MNT_RELOAD); 3967 MNT_FLAG(MNT_FORCE); 3968 MNT_FLAG(MNT_SNAPSHOT); 3969 MNT_FLAG(MNT_BYFSID); 3970 #undef MNT_FLAG 3971 if (mflags != 0) { 3972 if (buf[0] != '\0') 3973 strlcat(buf, ", ", sizeof(buf)); 3974 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3975 "0x%016jx", mflags); 3976 } 3977 db_printf(" mnt_flag = %s\n", buf); 3978 3979 buf[0] = '\0'; 3980 flags = mp->mnt_kern_flag; 3981 #define MNT_KERN_FLAG(flag) do { \ 3982 if (flags & (flag)) { \ 3983 if (buf[0] != '\0') \ 3984 strlcat(buf, ", ", sizeof(buf)); \ 3985 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3986 flags &= ~(flag); \ 3987 } \ 3988 } while (0) 3989 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3990 MNT_KERN_FLAG(MNTK_ASYNC); 3991 
MNT_KERN_FLAG(MNTK_SOFTDEP); 3992 MNT_KERN_FLAG(MNTK_DRAINING); 3993 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3994 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3995 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3996 MNT_KERN_FLAG(MNTK_NO_IOPF); 3997 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3998 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3999 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4000 MNT_KERN_FLAG(MNTK_MARKER); 4001 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4002 MNT_KERN_FLAG(MNTK_NOASYNC); 4003 MNT_KERN_FLAG(MNTK_UNMOUNT); 4004 MNT_KERN_FLAG(MNTK_MWAIT); 4005 MNT_KERN_FLAG(MNTK_SUSPEND); 4006 MNT_KERN_FLAG(MNTK_SUSPEND2); 4007 MNT_KERN_FLAG(MNTK_SUSPENDED); 4008 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4009 MNT_KERN_FLAG(MNTK_NOKNOTE); 4010 #undef MNT_KERN_FLAG 4011 if (flags != 0) { 4012 if (buf[0] != '\0') 4013 strlcat(buf, ", ", sizeof(buf)); 4014 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4015 "0x%08x", flags); 4016 } 4017 db_printf(" mnt_kern_flag = %s\n", buf); 4018 4019 db_printf(" mnt_opt = "); 4020 opt = TAILQ_FIRST(mp->mnt_opt); 4021 if (opt != NULL) { 4022 db_printf("%s", opt->name); 4023 opt = TAILQ_NEXT(opt, link); 4024 while (opt != NULL) { 4025 db_printf(", %s", opt->name); 4026 opt = TAILQ_NEXT(opt, link); 4027 } 4028 } 4029 db_printf("\n"); 4030 4031 sp = &mp->mnt_stat; 4032 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4033 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4034 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4035 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4036 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4037 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4038 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4039 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4040 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4041 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4042 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4043 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4044 4045 db_printf(" mnt_cred = { uid=%u ruid=%u", 4046 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4047 if (jailed(mp->mnt_cred)) 4048 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4049 db_printf(" }\n"); 4050 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4051 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4052 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4053 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4054 db_printf(" mnt_activevnodelistsize = %d\n", 4055 mp->mnt_activevnodelistsize); 4056 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4057 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4058 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4059 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4060 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4061 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4062 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4063 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4064 db_printf(" mnt_secondary_accwrites = %d\n", 4065 mp->mnt_secondary_accwrites); 4066 db_printf(" mnt_gjprovider = %s\n", 4067 mp->mnt_gjprovider != NULL ? 
mp->mnt_gjprovider : "NULL"); 4068 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4069 4070 db_printf("\n\nList of active vnodes\n"); 4071 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 4072 if (vp->v_type != VMARKER) { 4073 vn_printf(vp, "vnode "); 4074 if (db_pager_quit) 4075 break; 4076 } 4077 } 4078 db_printf("\n\nList of inactive vnodes\n"); 4079 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4080 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 4081 vn_printf(vp, "vnode "); 4082 if (db_pager_quit) 4083 break; 4084 } 4085 } 4086 } 4087 #endif /* DDB */ 4088 4089 /* 4090 * Fill in a struct xvfsconf based on a struct vfsconf. 4091 */ 4092 static int 4093 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4094 { 4095 struct xvfsconf xvfsp; 4096 4097 bzero(&xvfsp, sizeof(xvfsp)); 4098 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4099 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4100 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4101 xvfsp.vfc_flags = vfsp->vfc_flags; 4102 /* 4103 * These are unused in userland, we keep them 4104 * to not break binary compatibility. 4105 */ 4106 xvfsp.vfc_vfsops = NULL; 4107 xvfsp.vfc_next = NULL; 4108 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4109 } 4110 4111 #ifdef COMPAT_FREEBSD32 4112 struct xvfsconf32 { 4113 uint32_t vfc_vfsops; 4114 char vfc_name[MFSNAMELEN]; 4115 int32_t vfc_typenum; 4116 int32_t vfc_refcount; 4117 int32_t vfc_flags; 4118 uint32_t vfc_next; 4119 }; 4120 4121 static int 4122 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4123 { 4124 struct xvfsconf32 xvfsp; 4125 4126 bzero(&xvfsp, sizeof(xvfsp)); 4127 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4128 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4129 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4130 xvfsp.vfc_flags = vfsp->vfc_flags; 4131 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4132 } 4133 #endif 4134 4135 /* 4136 * Top level filesystem related information gathering. 4137 */ 4138 static int 4139 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4140 { 4141 struct vfsconf *vfsp; 4142 int error; 4143 4144 error = 0; 4145 vfsconf_slock(); 4146 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4147 #ifdef COMPAT_FREEBSD32 4148 if (req->flags & SCTL_MASK32) 4149 error = vfsconf2x32(req, vfsp); 4150 else 4151 #endif 4152 error = vfsconf2x(req, vfsp); 4153 if (error) 4154 break; 4155 } 4156 vfsconf_sunlock(); 4157 return (error); 4158 } 4159 4160 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4161 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4162 "S,xvfsconf", "List of all configured filesystems"); 4163 4164 #ifndef BURN_BRIDGES 4165 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4166 4167 static int 4168 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4169 { 4170 int *name = (int *)arg1 - 1; /* XXX */ 4171 u_int namelen = arg2 + 1; /* XXX */ 4172 struct vfsconf *vfsp; 4173 4174 log(LOG_WARNING, "userland calling deprecated sysctl, " 4175 "please rebuild world\n"); 4176 4177 #if 1 || defined(COMPAT_PRELITE2) 4178 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
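 * Both selectors have traditionally been defined to the same value (0),
 * so a one-element name cannot distinguish the two requests; fall back
 * to the old-style vfsconf dump in that case.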
*/ 4179 if (namelen == 1) 4180 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4181 #endif 4182 4183 switch (name[1]) { 4184 case VFS_MAXTYPENUM: 4185 if (namelen != 2) 4186 return (ENOTDIR); 4187 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4188 case VFS_CONF: 4189 if (namelen != 3) 4190 return (ENOTDIR); /* overloaded */ 4191 vfsconf_slock(); 4192 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4193 if (vfsp->vfc_typenum == name[2]) 4194 break; 4195 } 4196 vfsconf_sunlock(); 4197 if (vfsp == NULL) 4198 return (EOPNOTSUPP); 4199 #ifdef COMPAT_FREEBSD32 4200 if (req->flags & SCTL_MASK32) 4201 return (vfsconf2x32(req, vfsp)); 4202 else 4203 #endif 4204 return (vfsconf2x(req, vfsp)); 4205 } 4206 return (EOPNOTSUPP); 4207 } 4208 4209 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4210 CTLFLAG_MPSAFE, vfs_sysctl, 4211 "Generic filesystem"); 4212 4213 #if 1 || defined(COMPAT_PRELITE2) 4214 4215 static int 4216 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4217 { 4218 int error; 4219 struct vfsconf *vfsp; 4220 struct ovfsconf ovfs; 4221 4222 vfsconf_slock(); 4223 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4224 bzero(&ovfs, sizeof(ovfs)); 4225 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4226 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4227 ovfs.vfc_index = vfsp->vfc_typenum; 4228 ovfs.vfc_refcount = vfsp->vfc_refcount; 4229 ovfs.vfc_flags = vfsp->vfc_flags; 4230 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4231 if (error != 0) { 4232 vfsconf_sunlock(); 4233 return (error); 4234 } 4235 } 4236 vfsconf_sunlock(); 4237 return (0); 4238 } 4239 4240 #endif /* 1 || COMPAT_PRELITE2 */ 4241 #endif /* !BURN_BRIDGES */ 4242 4243 #define KINFO_VNODESLOP 10 4244 #ifdef notyet 4245 /* 4246 * Dump vnode list (via sysctl). 4247 */ 4248 /* ARGSUSED */ 4249 static int 4250 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4251 { 4252 struct xvnode *xvn; 4253 struct mount *mp; 4254 struct vnode *vp; 4255 int error, len, n; 4256 4257 /* 4258 * Stale numvnodes access is not fatal here. 4259 */ 4260 req->lock = 0; 4261 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4262 if (!req->oldptr) 4263 /* Make an estimate */ 4264 return (SYSCTL_OUT(req, 0, len)); 4265 4266 error = sysctl_wire_old_buffer(req, 0); 4267 if (error != 0) 4268 return (error); 4269 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4270 n = 0; 4271 mtx_lock(&mountlist_mtx); 4272 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4273 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4274 continue; 4275 MNT_ILOCK(mp); 4276 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4277 if (n == len) 4278 break; 4279 vref(vp); 4280 xvn[n].xv_size = sizeof *xvn; 4281 xvn[n].xv_vnode = vp; 4282 xvn[n].xv_id = 0; /* XXX compat */ 4283 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4284 XV_COPY(usecount); 4285 XV_COPY(writecount); 4286 XV_COPY(holdcnt); 4287 XV_COPY(mount); 4288 XV_COPY(numoutput); 4289 XV_COPY(type); 4290 #undef XV_COPY 4291 xvn[n].xv_flag = vp->v_vflag; 4292 4293 switch (vp->v_type) { 4294 case VREG: 4295 case VDIR: 4296 case VLNK: 4297 break; 4298 case VBLK: 4299 case VCHR: 4300 if (vp->v_rdev == NULL) { 4301 vrele(vp); 4302 continue; 4303 } 4304 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4305 break; 4306 case VSOCK: 4307 xvn[n].xv_socket = vp->v_socket; 4308 break; 4309 case VFIFO: 4310 xvn[n].xv_fifo = vp->v_fifoinfo; 4311 break; 4312 case VNON: 4313 case VBAD: 4314 default: 4315 /* shouldn't happen? 
*/ 4316 vrele(vp); 4317 continue; 4318 } 4319 vrele(vp); 4320 ++n; 4321 } 4322 MNT_IUNLOCK(mp); 4323 mtx_lock(&mountlist_mtx); 4324 vfs_unbusy(mp); 4325 if (n == len) 4326 break; 4327 } 4328 mtx_unlock(&mountlist_mtx); 4329 4330 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4331 free(xvn, M_TEMP); 4332 return (error); 4333 } 4334 4335 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4336 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4337 ""); 4338 #endif 4339 4340 static void 4341 unmount_or_warn(struct mount *mp) 4342 { 4343 int error; 4344 4345 error = dounmount(mp, MNT_FORCE, curthread); 4346 if (error != 0) { 4347 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4348 if (error == EBUSY) 4349 printf("BUSY)\n"); 4350 else 4351 printf("%d)\n", error); 4352 } 4353 } 4354 4355 /* 4356 * Unmount all filesystems. The list is traversed in reverse order 4357 * of mounting to avoid dependencies. 4358 */ 4359 void 4360 vfs_unmountall(void) 4361 { 4362 struct mount *mp, *tmp; 4363 4364 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4365 4366 /* 4367 * Since this only runs when rebooting, it is not interlocked. 4368 */ 4369 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4370 vfs_ref(mp); 4371 4372 /* 4373 * Forcibly unmounting "/dev" before "/" would prevent clean 4374 * unmount of the latter. 4375 */ 4376 if (mp == rootdevmp) 4377 continue; 4378 4379 unmount_or_warn(mp); 4380 } 4381 4382 if (rootdevmp != NULL) 4383 unmount_or_warn(rootdevmp); 4384 } 4385 4386 /* 4387 * perform msync on all vnodes under a mount point 4388 * the mount point must be locked. 4389 */ 4390 void 4391 vfs_msync(struct mount *mp, int flags) 4392 { 4393 struct vnode *vp, *mvp; 4394 struct vm_object *obj; 4395 4396 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4397 4398 vnlru_return_batch(mp); 4399 4400 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4401 obj = vp->v_object; 4402 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 4403 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 4404 if (!vget(vp, 4405 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 4406 curthread)) { 4407 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 4408 vput(vp); 4409 continue; 4410 } 4411 4412 obj = vp->v_object; 4413 if (obj != NULL) { 4414 VM_OBJECT_WLOCK(obj); 4415 vm_object_page_clean(obj, 0, 0, 4416 flags == MNT_WAIT ? 4417 OBJPC_SYNC : OBJPC_NOSYNC); 4418 VM_OBJECT_WUNLOCK(obj); 4419 } 4420 vput(vp); 4421 } 4422 } else 4423 VI_UNLOCK(vp); 4424 } 4425 } 4426 4427 static void 4428 destroy_vpollinfo_free(struct vpollinfo *vi) 4429 { 4430 4431 knlist_destroy(&vi->vpi_selinfo.si_note); 4432 mtx_destroy(&vi->vpi_lock); 4433 uma_zfree(vnodepoll_zone, vi); 4434 } 4435 4436 static void 4437 destroy_vpollinfo(struct vpollinfo *vi) 4438 { 4439 4440 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4441 seldrain(&vi->vpi_selinfo); 4442 destroy_vpollinfo_free(vi); 4443 } 4444 4445 /* 4446 * Initialize per-vnode helper structure to hold poll-related state. 
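 *
 * The routine is idempotent and may sleep for the allocation.  A caller
 * that needs the structure typically follows the pattern used by
 * vfs_kqfilter() later in this file:
 *
 *     v_addpollinfo(vp);
 *     if (vp->v_pollinfo == NULL)
 *             return (ENOMEM);
 *     knl = &vp->v_pollinfo->vpi_selinfo.si_note;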
4447 */ 4448 void 4449 v_addpollinfo(struct vnode *vp) 4450 { 4451 struct vpollinfo *vi; 4452 4453 if (vp->v_pollinfo != NULL) 4454 return; 4455 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4456 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4457 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4458 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4459 VI_LOCK(vp); 4460 if (vp->v_pollinfo != NULL) { 4461 VI_UNLOCK(vp); 4462 destroy_vpollinfo_free(vi); 4463 return; 4464 } 4465 vp->v_pollinfo = vi; 4466 VI_UNLOCK(vp); 4467 } 4468 4469 /* 4470 * Record a process's interest in events which might happen to 4471 * a vnode. Because poll uses the historic select-style interface 4472 * internally, this routine serves as both the ``check for any 4473 * pending events'' and the ``record my interest in future events'' 4474 * functions. (These are done together, while the lock is held, 4475 * to avoid race conditions.) 4476 */ 4477 int 4478 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4479 { 4480 4481 v_addpollinfo(vp); 4482 mtx_lock(&vp->v_pollinfo->vpi_lock); 4483 if (vp->v_pollinfo->vpi_revents & events) { 4484 /* 4485 * This leaves events we are not interested 4486 * in available for the other process which 4487 * presumably had requested them 4488 * (otherwise they would never have been 4489 * recorded). 4490 */ 4491 events &= vp->v_pollinfo->vpi_revents; 4492 vp->v_pollinfo->vpi_revents &= ~events; 4493 4494 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4495 return (events); 4496 } 4497 vp->v_pollinfo->vpi_events |= events; 4498 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4499 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4500 return (0); 4501 } 4502 4503 /* 4504 * Routine to create and manage a filesystem syncer vnode. 4505 */ 4506 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4507 static int sync_fsync(struct vop_fsync_args *); 4508 static int sync_inactive(struct vop_inactive_args *); 4509 static int sync_reclaim(struct vop_reclaim_args *); 4510 4511 static struct vop_vector sync_vnodeops = { 4512 .vop_bypass = VOP_EOPNOTSUPP, 4513 .vop_close = sync_close, /* close */ 4514 .vop_fsync = sync_fsync, /* fsync */ 4515 .vop_inactive = sync_inactive, /* inactive */ 4516 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4517 .vop_reclaim = sync_reclaim, /* reclaim */ 4518 .vop_lock1 = vop_stdlock, /* lock */ 4519 .vop_unlock = vop_stdunlock, /* unlock */ 4520 .vop_islocked = vop_stdislocked, /* islocked */ 4521 }; 4522 4523 /* 4524 * Create a new filesystem syncer vnode for the specified mount point. 4525 */ 4526 void 4527 vfs_allocate_syncvnode(struct mount *mp) 4528 { 4529 struct vnode *vp; 4530 struct bufobj *bo; 4531 static long start, incr, next; 4532 int error; 4533 4534 /* Allocate a new vnode */ 4535 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4536 if (error != 0) 4537 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4538 vp->v_type = VNON; 4539 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4540 vp->v_vflag |= VV_FORCEINSMQ; 4541 error = insmntque(vp, mp); 4542 if (error != 0) 4543 panic("vfs_allocate_syncvnode: insmntque() failed"); 4544 vp->v_vflag &= ~VV_FORCEINSMQ; 4545 VOP_UNLOCK(vp, 0); 4546 /* 4547 * Place the vnode onto the syncer worklist. We attempt to 4548 * scatter them about on the list so that they will go off 4549 * at evenly distributed times even if all the filesystems 4550 * are mounted at once.
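 *
 * As an illustration (assuming the default syncer_maxdelay of 32), the
 * static start/incr/next state below yields the sequence 16, 8, 24, 4,
 * 12, 20, 28, 2, ..., halving the stride each time the range is
 * exhausted; the value is then folded onto the worklist as
 * next % syncdelay.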
4551 */ 4552 next += incr; 4553 if (next == 0 || next > syncer_maxdelay) { 4554 start /= 2; 4555 incr /= 2; 4556 if (start == 0) { 4557 start = syncer_maxdelay / 2; 4558 incr = syncer_maxdelay; 4559 } 4560 next = start; 4561 } 4562 bo = &vp->v_bufobj; 4563 BO_LOCK(bo); 4564 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4565 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 4566 mtx_lock(&sync_mtx); 4567 sync_vnode_count++; 4568 if (mp->mnt_syncer == NULL) { 4569 mp->mnt_syncer = vp; 4570 vp = NULL; 4571 } 4572 mtx_unlock(&sync_mtx); 4573 BO_UNLOCK(bo); 4574 if (vp != NULL) { 4575 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4576 vgone(vp); 4577 vput(vp); 4578 } 4579 } 4580 4581 void 4582 vfs_deallocate_syncvnode(struct mount *mp) 4583 { 4584 struct vnode *vp; 4585 4586 mtx_lock(&sync_mtx); 4587 vp = mp->mnt_syncer; 4588 if (vp != NULL) 4589 mp->mnt_syncer = NULL; 4590 mtx_unlock(&sync_mtx); 4591 if (vp != NULL) 4592 vrele(vp); 4593 } 4594 4595 /* 4596 * Do a lazy sync of the filesystem. 4597 */ 4598 static int 4599 sync_fsync(struct vop_fsync_args *ap) 4600 { 4601 struct vnode *syncvp = ap->a_vp; 4602 struct mount *mp = syncvp->v_mount; 4603 int error, save; 4604 struct bufobj *bo; 4605 4606 /* 4607 * We only need to do something if this is a lazy evaluation. 4608 */ 4609 if (ap->a_waitfor != MNT_LAZY) 4610 return (0); 4611 4612 /* 4613 * Move ourselves to the back of the sync list. 4614 */ 4615 bo = &syncvp->v_bufobj; 4616 BO_LOCK(bo); 4617 vn_syncer_add_to_worklist(bo, syncdelay); 4618 BO_UNLOCK(bo); 4619 4620 /* 4621 * Walk the list of vnodes pushing all that are dirty and 4622 * not already on the sync list. 4623 */ 4624 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4625 return (0); 4626 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4627 vfs_unbusy(mp); 4628 return (0); 4629 } 4630 save = curthread_pflags_set(TDP_SYNCIO); 4631 vfs_msync(mp, MNT_NOWAIT); 4632 error = VFS_SYNC(mp, MNT_LAZY); 4633 curthread_pflags_restore(save); 4634 vn_finished_write(mp); 4635 vfs_unbusy(mp); 4636 return (error); 4637 } 4638 4639 /* 4640 * The syncer vnode is no longer referenced. 4641 */ 4642 static int 4643 sync_inactive(struct vop_inactive_args *ap) 4644 { 4645 4646 vgone(ap->a_vp); 4647 return (0); 4648 } 4649 4650 /* 4651 * The syncer vnode is no longer needed and is being decommissioned. 4652 * 4653 * Modifications to the worklist must be protected by sync_mtx.
4654 */ 4655 static int 4656 sync_reclaim(struct vop_reclaim_args *ap) 4657 { 4658 struct vnode *vp = ap->a_vp; 4659 struct bufobj *bo; 4660 4661 bo = &vp->v_bufobj; 4662 BO_LOCK(bo); 4663 mtx_lock(&sync_mtx); 4664 if (vp->v_mount->mnt_syncer == vp) 4665 vp->v_mount->mnt_syncer = NULL; 4666 if (bo->bo_flag & BO_ONWORKLST) { 4667 LIST_REMOVE(bo, bo_synclist); 4668 syncer_worklist_len--; 4669 sync_vnode_count--; 4670 bo->bo_flag &= ~BO_ONWORKLST; 4671 } 4672 mtx_unlock(&sync_mtx); 4673 BO_UNLOCK(bo); 4674 4675 return (0); 4676 } 4677 4678 int 4679 vn_need_pageq_flush(struct vnode *vp) 4680 { 4681 struct vm_object *obj; 4682 int need; 4683 4684 MPASS(mtx_owned(VI_MTX(vp))); 4685 need = 0; 4686 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4687 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) 4688 need = 1; 4689 return (need); 4690 } 4691 4692 /* 4693 * Check if vnode represents a disk device 4694 */ 4695 int 4696 vn_isdisk(struct vnode *vp, int *errp) 4697 { 4698 int error; 4699 4700 if (vp->v_type != VCHR) { 4701 error = ENOTBLK; 4702 goto out; 4703 } 4704 error = 0; 4705 dev_lock(); 4706 if (vp->v_rdev == NULL) 4707 error = ENXIO; 4708 else if (vp->v_rdev->si_devsw == NULL) 4709 error = ENXIO; 4710 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4711 error = ENOTBLK; 4712 dev_unlock(); 4713 out: 4714 if (errp != NULL) 4715 *errp = error; 4716 return (error == 0); 4717 } 4718 4719 /* 4720 * Common filesystem object access control check routine. Accepts a 4721 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4722 * and optional call-by-reference privused argument allowing vaccess() 4723 * to indicate to the caller whether privilege was used to satisfy the 4724 * request (obsoleted). Returns 0 on success, or an errno on failure. 4725 */ 4726 int 4727 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4728 accmode_t accmode, struct ucred *cred, int *privused) 4729 { 4730 accmode_t dac_granted; 4731 accmode_t priv_granted; 4732 4733 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4734 ("invalid bit in accmode")); 4735 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4736 ("VAPPEND without VWRITE")); 4737 4738 /* 4739 * Look for a normal, non-privileged way to access the file/directory 4740 * as requested. If it exists, go with that. 4741 */ 4742 4743 if (privused != NULL) 4744 *privused = 0; 4745 4746 dac_granted = 0; 4747 4748 /* Check the owner. */ 4749 if (cred->cr_uid == file_uid) { 4750 dac_granted |= VADMIN; 4751 if (file_mode & S_IXUSR) 4752 dac_granted |= VEXEC; 4753 if (file_mode & S_IRUSR) 4754 dac_granted |= VREAD; 4755 if (file_mode & S_IWUSR) 4756 dac_granted |= (VWRITE | VAPPEND); 4757 4758 if ((accmode & dac_granted) == accmode) 4759 return (0); 4760 4761 goto privcheck; 4762 } 4763 4764 /* Otherwise, check the groups (first match) */ 4765 if (groupmember(file_gid, cred)) { 4766 if (file_mode & S_IXGRP) 4767 dac_granted |= VEXEC; 4768 if (file_mode & S_IRGRP) 4769 dac_granted |= VREAD; 4770 if (file_mode & S_IWGRP) 4771 dac_granted |= (VWRITE | VAPPEND); 4772 4773 if ((accmode & dac_granted) == accmode) 4774 return (0); 4775 4776 goto privcheck; 4777 } 4778 4779 /* Otherwise, check everyone else. 
*/ 4780 if (file_mode & S_IXOTH) 4781 dac_granted |= VEXEC; 4782 if (file_mode & S_IROTH) 4783 dac_granted |= VREAD; 4784 if (file_mode & S_IWOTH) 4785 dac_granted |= (VWRITE | VAPPEND); 4786 if ((accmode & dac_granted) == accmode) 4787 return (0); 4788 4789 privcheck: 4790 /* 4791 * Build a privilege mask to determine if the set of privileges 4792 * satisfies the requirements when combined with the granted mask 4793 * from above. For each privilege, if the privilege is required, 4794 * bitwise or the request type onto the priv_granted mask. 4795 */ 4796 priv_granted = 0; 4797 4798 if (type == VDIR) { 4799 /* 4800 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4801 * requests, instead of PRIV_VFS_EXEC. 4802 */ 4803 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4804 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4805 priv_granted |= VEXEC; 4806 } else { 4807 /* 4808 * Ensure that at least one execute bit is on. Otherwise, 4809 * a privileged user will always succeed, and we don't want 4810 * this to happen unless the file really is executable. 4811 */ 4812 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4813 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4814 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4815 priv_granted |= VEXEC; 4816 } 4817 4818 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4819 !priv_check_cred(cred, PRIV_VFS_READ)) 4820 priv_granted |= VREAD; 4821 4822 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4823 !priv_check_cred(cred, PRIV_VFS_WRITE)) 4824 priv_granted |= (VWRITE | VAPPEND); 4825 4826 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4827 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 4828 priv_granted |= VADMIN; 4829 4830 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4831 /* XXX audit: privilege used */ 4832 if (privused != NULL) 4833 *privused = 1; 4834 return (0); 4835 } 4836 4837 return ((accmode & VADMIN) ? EPERM : EACCES); 4838 } 4839 4840 /* 4841 * Credential check based on process requesting service, and per-attribute 4842 * permissions. 4843 */ 4844 int 4845 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4846 struct thread *td, accmode_t accmode) 4847 { 4848 4849 /* 4850 * Kernel-invoked always succeeds. 4851 */ 4852 if (cred == NOCRED) 4853 return (0); 4854 4855 /* 4856 * Do not allow privileged processes in jail to directly manipulate 4857 * system attributes. 4858 */ 4859 switch (attrnamespace) { 4860 case EXTATTR_NAMESPACE_SYSTEM: 4861 /* Potentially should be: return (EPERM); */ 4862 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 4863 case EXTATTR_NAMESPACE_USER: 4864 return (VOP_ACCESS(vp, accmode, cred, td)); 4865 default: 4866 return (EPERM); 4867 } 4868 } 4869 4870 #ifdef DEBUG_VFS_LOCKS 4871 /* 4872 * This only exists to suppress warnings from unlocked specfs accesses. It is 4873 * no longer ok to have an unlocked VFS. 4874 */ 4875 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 4876 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 4877 4878 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 4879 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 4880 "Drop into debugger on lock violation"); 4881 4882 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 4883 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 4884 0, "Check for interlock across VOPs"); 4885 4886 int vfs_badlock_print = 1; /* Print lock violations. 
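 * Like the other vfs_badlock_* knobs this one is read-write and can be
 * toggled at run time, e.g. "sysctl debug.vfs_badlock_print=0" (an
 * illustrative invocation).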
*/ 4887 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 4888 0, "Print lock violations"); 4889 4890 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 4891 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 4892 0, "Print vnode details on lock violations"); 4893 4894 #ifdef KDB 4895 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 4896 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4897 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4898 #endif 4899 4900 static void 4901 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4902 { 4903 4904 #ifdef KDB 4905 if (vfs_badlock_backtrace) 4906 kdb_backtrace(); 4907 #endif 4908 if (vfs_badlock_vnode) 4909 vn_printf(vp, "vnode "); 4910 if (vfs_badlock_print) 4911 printf("%s: %p %s\n", str, (void *)vp, msg); 4912 if (vfs_badlock_ddb) 4913 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4914 } 4915 4916 void 4917 assert_vi_locked(struct vnode *vp, const char *str) 4918 { 4919 4920 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4921 vfs_badlock("interlock is not locked but should be", str, vp); 4922 } 4923 4924 void 4925 assert_vi_unlocked(struct vnode *vp, const char *str) 4926 { 4927 4928 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4929 vfs_badlock("interlock is locked but should not be", str, vp); 4930 } 4931 4932 void 4933 assert_vop_locked(struct vnode *vp, const char *str) 4934 { 4935 int locked; 4936 4937 if (!IGNORE_LOCK(vp)) { 4938 locked = VOP_ISLOCKED(vp); 4939 if (locked == 0 || locked == LK_EXCLOTHER) 4940 vfs_badlock("is not locked but should be", str, vp); 4941 } 4942 } 4943 4944 void 4945 assert_vop_unlocked(struct vnode *vp, const char *str) 4946 { 4947 4948 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4949 vfs_badlock("is locked but should not be", str, vp); 4950 } 4951 4952 void 4953 assert_vop_elocked(struct vnode *vp, const char *str) 4954 { 4955 4956 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4957 vfs_badlock("is not exclusive locked but should be", str, vp); 4958 } 4959 #endif /* DEBUG_VFS_LOCKS */ 4960 4961 void 4962 vop_rename_fail(struct vop_rename_args *ap) 4963 { 4964 4965 if (ap->a_tvp != NULL) 4966 vput(ap->a_tvp); 4967 if (ap->a_tdvp == ap->a_tvp) 4968 vrele(ap->a_tdvp); 4969 else 4970 vput(ap->a_tdvp); 4971 vrele(ap->a_fdvp); 4972 vrele(ap->a_fvp); 4973 } 4974 4975 void 4976 vop_rename_pre(void *ap) 4977 { 4978 struct vop_rename_args *a = ap; 4979 4980 #ifdef DEBUG_VFS_LOCKS 4981 if (a->a_tvp) 4982 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4983 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4984 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4985 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4986 4987 /* Check the source (from). */ 4988 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4989 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4990 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4991 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4992 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4993 4994 /* Check the target. 
*/ 4995 if (a->a_tvp) 4996 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4997 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4998 #endif 4999 if (a->a_tdvp != a->a_fdvp) 5000 vhold(a->a_fdvp); 5001 if (a->a_tvp != a->a_fvp) 5002 vhold(a->a_fvp); 5003 vhold(a->a_tdvp); 5004 if (a->a_tvp) 5005 vhold(a->a_tvp); 5006 } 5007 5008 #ifdef DEBUG_VFS_LOCKS 5009 void 5010 vop_strategy_pre(void *ap) 5011 { 5012 struct vop_strategy_args *a; 5013 struct buf *bp; 5014 5015 a = ap; 5016 bp = a->a_bp; 5017 5018 /* 5019 * Cluster ops lock their component buffers but not the IO container. 5020 */ 5021 if ((bp->b_flags & B_CLUSTER) != 0) 5022 return; 5023 5024 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 5025 if (vfs_badlock_print) 5026 printf( 5027 "VOP_STRATEGY: bp is not locked but should be\n"); 5028 if (vfs_badlock_ddb) 5029 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5030 } 5031 } 5032 5033 void 5034 vop_lock_pre(void *ap) 5035 { 5036 struct vop_lock1_args *a = ap; 5037 5038 if ((a->a_flags & LK_INTERLOCK) == 0) 5039 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5040 else 5041 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5042 } 5043 5044 void 5045 vop_lock_post(void *ap, int rc) 5046 { 5047 struct vop_lock1_args *a = ap; 5048 5049 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5050 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5051 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5052 } 5053 5054 void 5055 vop_unlock_pre(void *ap) 5056 { 5057 struct vop_unlock_args *a = ap; 5058 5059 if (a->a_flags & LK_INTERLOCK) 5060 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 5061 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5062 } 5063 5064 void 5065 vop_unlock_post(void *ap, int rc) 5066 { 5067 struct vop_unlock_args *a = ap; 5068 5069 if (a->a_flags & LK_INTERLOCK) 5070 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 5071 } 5072 5073 void 5074 vop_need_inactive_pre(void *ap) 5075 { 5076 struct vop_need_inactive_args *a = ap; 5077 5078 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5079 } 5080 5081 void 5082 vop_need_inactive_post(void *ap, int rc) 5083 { 5084 struct vop_need_inactive_args *a = ap; 5085 5086 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5087 } 5088 #endif 5089 5090 void 5091 vop_create_post(void *ap, int rc) 5092 { 5093 struct vop_create_args *a = ap; 5094 5095 if (!rc) 5096 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5097 } 5098 5099 void 5100 vop_deleteextattr_post(void *ap, int rc) 5101 { 5102 struct vop_deleteextattr_args *a = ap; 5103 5104 if (!rc) 5105 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5106 } 5107 5108 void 5109 vop_link_post(void *ap, int rc) 5110 { 5111 struct vop_link_args *a = ap; 5112 5113 if (!rc) { 5114 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5115 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5116 } 5117 } 5118 5119 void 5120 vop_mkdir_post(void *ap, int rc) 5121 { 5122 struct vop_mkdir_args *a = ap; 5123 5124 if (!rc) 5125 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5126 } 5127 5128 void 5129 vop_mknod_post(void *ap, int rc) 5130 { 5131 struct vop_mknod_args *a = ap; 5132 5133 if (!rc) 5134 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5135 } 5136 5137 void 5138 vop_reclaim_post(void *ap, int rc) 5139 { 5140 struct vop_reclaim_args *a = ap; 5141 5142 if (!rc) 5143 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5144 } 5145 5146 void 5147 vop_remove_post(void *ap, int rc) 5148 { 5149 struct vop_remove_args *a = ap; 5150 5151 if (!rc) { 5152 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5153 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5154 } 5155 } 5156 5157 void 5158 vop_rename_post(void *ap, int rc) 5159 { 
5160 struct vop_rename_args *a = ap; 5161 long hint; 5162 5163 if (!rc) { 5164 hint = NOTE_WRITE; 5165 if (a->a_fdvp == a->a_tdvp) { 5166 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5167 hint |= NOTE_LINK; 5168 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5169 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5170 } else { 5171 hint |= NOTE_EXTEND; 5172 if (a->a_fvp->v_type == VDIR) 5173 hint |= NOTE_LINK; 5174 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5175 5176 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5177 a->a_tvp->v_type == VDIR) 5178 hint &= ~NOTE_LINK; 5179 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5180 } 5181 5182 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5183 if (a->a_tvp) 5184 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5185 } 5186 if (a->a_tdvp != a->a_fdvp) 5187 vdrop(a->a_fdvp); 5188 if (a->a_tvp != a->a_fvp) 5189 vdrop(a->a_fvp); 5190 vdrop(a->a_tdvp); 5191 if (a->a_tvp) 5192 vdrop(a->a_tvp); 5193 } 5194 5195 void 5196 vop_rmdir_post(void *ap, int rc) 5197 { 5198 struct vop_rmdir_args *a = ap; 5199 5200 if (!rc) { 5201 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5202 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5203 } 5204 } 5205 5206 void 5207 vop_setattr_post(void *ap, int rc) 5208 { 5209 struct vop_setattr_args *a = ap; 5210 5211 if (!rc) 5212 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5213 } 5214 5215 void 5216 vop_setextattr_post(void *ap, int rc) 5217 { 5218 struct vop_setextattr_args *a = ap; 5219 5220 if (!rc) 5221 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5222 } 5223 5224 void 5225 vop_symlink_post(void *ap, int rc) 5226 { 5227 struct vop_symlink_args *a = ap; 5228 5229 if (!rc) 5230 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5231 } 5232 5233 void 5234 vop_open_post(void *ap, int rc) 5235 { 5236 struct vop_open_args *a = ap; 5237 5238 if (!rc) 5239 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5240 } 5241 5242 void 5243 vop_close_post(void *ap, int rc) 5244 { 5245 struct vop_close_args *a = ap; 5246 5247 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5248 (a->a_vp->v_iflag & VI_DOOMED) == 0)) { 5249 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5250 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5251 } 5252 } 5253 5254 void 5255 vop_read_post(void *ap, int rc) 5256 { 5257 struct vop_read_args *a = ap; 5258 5259 if (!rc) 5260 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5261 } 5262 5263 void 5264 vop_readdir_post(void *ap, int rc) 5265 { 5266 struct vop_readdir_args *a = ap; 5267 5268 if (!rc) 5269 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5270 } 5271 5272 static struct knlist fs_knlist; 5273 5274 static void 5275 vfs_event_init(void *arg) 5276 { 5277 knlist_init_mtx(&fs_knlist, NULL); 5278 } 5279 /* XXX - correct order? 
*/ 5280 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5281 5282 void 5283 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5284 { 5285 5286 KNOTE_UNLOCKED(&fs_knlist, event); 5287 } 5288 5289 static int filt_fsattach(struct knote *kn); 5290 static void filt_fsdetach(struct knote *kn); 5291 static int filt_fsevent(struct knote *kn, long hint); 5292 5293 struct filterops fs_filtops = { 5294 .f_isfd = 0, 5295 .f_attach = filt_fsattach, 5296 .f_detach = filt_fsdetach, 5297 .f_event = filt_fsevent 5298 }; 5299 5300 static int 5301 filt_fsattach(struct knote *kn) 5302 { 5303 5304 kn->kn_flags |= EV_CLEAR; 5305 knlist_add(&fs_knlist, kn, 0); 5306 return (0); 5307 } 5308 5309 static void 5310 filt_fsdetach(struct knote *kn) 5311 { 5312 5313 knlist_remove(&fs_knlist, kn, 0); 5314 } 5315 5316 static int 5317 filt_fsevent(struct knote *kn, long hint) 5318 { 5319 5320 kn->kn_fflags |= hint; 5321 return (kn->kn_fflags != 0); 5322 } 5323 5324 static int 5325 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5326 { 5327 struct vfsidctl vc; 5328 int error; 5329 struct mount *mp; 5330 5331 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5332 if (error) 5333 return (error); 5334 if (vc.vc_vers != VFS_CTL_VERS1) 5335 return (EINVAL); 5336 mp = vfs_getvfs(&vc.vc_fsid); 5337 if (mp == NULL) 5338 return (ENOENT); 5339 /* ensure that a specific sysctl goes to the right filesystem. */ 5340 if (strcmp(vc.vc_fstypename, "*") != 0 && 5341 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5342 vfs_rel(mp); 5343 return (EINVAL); 5344 } 5345 VCTLTOREQ(&vc, req); 5346 error = VFS_SYSCTL(mp, vc.vc_op, req); 5347 vfs_rel(mp); 5348 return (error); 5349 } 5350 5351 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 5352 NULL, 0, sysctl_vfs_ctl, "", 5353 "Sysctl by fsid"); 5354 5355 /* 5356 * Function to initialize a va_filerev field sensibly. 5357 * XXX: Wouldn't a random number make a lot more sense ?? 
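 *
 * A filesystem typically seeds a per-inode revision counter with this
 * value once, at inode allocation time, for example (hypothetical
 * field name):
 *
 *     ip->i_modrev = init_va_filerev();
 *
 * and later copies it into va_filerev from VOP_GETATTR().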
5358 */ 5359 u_quad_t 5360 init_va_filerev(void) 5361 { 5362 struct bintime bt; 5363 5364 getbinuptime(&bt); 5365 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5366 } 5367 5368 static int filt_vfsread(struct knote *kn, long hint); 5369 static int filt_vfswrite(struct knote *kn, long hint); 5370 static int filt_vfsvnode(struct knote *kn, long hint); 5371 static void filt_vfsdetach(struct knote *kn); 5372 static struct filterops vfsread_filtops = { 5373 .f_isfd = 1, 5374 .f_detach = filt_vfsdetach, 5375 .f_event = filt_vfsread 5376 }; 5377 static struct filterops vfswrite_filtops = { 5378 .f_isfd = 1, 5379 .f_detach = filt_vfsdetach, 5380 .f_event = filt_vfswrite 5381 }; 5382 static struct filterops vfsvnode_filtops = { 5383 .f_isfd = 1, 5384 .f_detach = filt_vfsdetach, 5385 .f_event = filt_vfsvnode 5386 }; 5387 5388 static void 5389 vfs_knllock(void *arg) 5390 { 5391 struct vnode *vp = arg; 5392 5393 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5394 } 5395 5396 static void 5397 vfs_knlunlock(void *arg) 5398 { 5399 struct vnode *vp = arg; 5400 5401 VOP_UNLOCK(vp, 0); 5402 } 5403 5404 static void 5405 vfs_knl_assert_locked(void *arg) 5406 { 5407 #ifdef DEBUG_VFS_LOCKS 5408 struct vnode *vp = arg; 5409 5410 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5411 #endif 5412 } 5413 5414 static void 5415 vfs_knl_assert_unlocked(void *arg) 5416 { 5417 #ifdef DEBUG_VFS_LOCKS 5418 struct vnode *vp = arg; 5419 5420 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5421 #endif 5422 } 5423 5424 int 5425 vfs_kqfilter(struct vop_kqfilter_args *ap) 5426 { 5427 struct vnode *vp = ap->a_vp; 5428 struct knote *kn = ap->a_kn; 5429 struct knlist *knl; 5430 5431 switch (kn->kn_filter) { 5432 case EVFILT_READ: 5433 kn->kn_fop = &vfsread_filtops; 5434 break; 5435 case EVFILT_WRITE: 5436 kn->kn_fop = &vfswrite_filtops; 5437 break; 5438 case EVFILT_VNODE: 5439 kn->kn_fop = &vfsvnode_filtops; 5440 break; 5441 default: 5442 return (EINVAL); 5443 } 5444 5445 kn->kn_hook = (caddr_t)vp; 5446 5447 v_addpollinfo(vp); 5448 if (vp->v_pollinfo == NULL) 5449 return (ENOMEM); 5450 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5451 vhold(vp); 5452 knlist_add(knl, kn, 0); 5453 5454 return (0); 5455 } 5456 5457 /* 5458 * Detach knote from vnode 5459 */ 5460 static void 5461 filt_vfsdetach(struct knote *kn) 5462 { 5463 struct vnode *vp = (struct vnode *)kn->kn_hook; 5464 5465 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5466 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5467 vdrop(vp); 5468 } 5469 5470 /*ARGSUSED*/ 5471 static int 5472 filt_vfsread(struct knote *kn, long hint) 5473 { 5474 struct vnode *vp = (struct vnode *)kn->kn_hook; 5475 struct vattr va; 5476 int res; 5477 5478 /* 5479 * filesystem is gone, so set the EOF flag and schedule 5480 * the knote for deletion. 5481 */ 5482 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5483 VI_LOCK(vp); 5484 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5485 VI_UNLOCK(vp); 5486 return (1); 5487 } 5488 5489 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5490 return (0); 5491 5492 VI_LOCK(vp); 5493 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5494 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5495 VI_UNLOCK(vp); 5496 return (res); 5497 } 5498 5499 /*ARGSUSED*/ 5500 static int 5501 filt_vfswrite(struct knote *kn, long hint) 5502 { 5503 struct vnode *vp = (struct vnode *)kn->kn_hook; 5504 5505 VI_LOCK(vp); 5506 5507 /* 5508 * filesystem is gone, so set the EOF flag and schedule 5509 * the knote for deletion. 
5510 */ 5511 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5512 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5513 5514 kn->kn_data = 0; 5515 VI_UNLOCK(vp); 5516 return (1); 5517 } 5518 5519 static int 5520 filt_vfsvnode(struct knote *kn, long hint) 5521 { 5522 struct vnode *vp = (struct vnode *)kn->kn_hook; 5523 int res; 5524 5525 VI_LOCK(vp); 5526 if (kn->kn_sfflags & hint) 5527 kn->kn_fflags |= hint; 5528 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5529 kn->kn_flags |= EV_EOF; 5530 VI_UNLOCK(vp); 5531 return (1); 5532 } 5533 res = (kn->kn_fflags != 0); 5534 VI_UNLOCK(vp); 5535 return (res); 5536 } 5537 5538 /* 5539 * Returns whether the directory is empty or not. 5540 * If it is empty, the return value is 0; otherwise 5541 * the return value is an error value (which may 5542 * be ENOTEMPTY). 5543 */ 5544 int 5545 vfs_emptydir(struct vnode *vp) 5546 { 5547 struct uio uio; 5548 struct iovec iov; 5549 struct dirent *dirent, *dp, *endp; 5550 int error, eof; 5551 5552 error = 0; 5553 eof = 0; 5554 5555 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 5556 5557 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 5558 iov.iov_base = dirent; 5559 iov.iov_len = sizeof(struct dirent); 5560 5561 uio.uio_iov = &iov; 5562 uio.uio_iovcnt = 1; 5563 uio.uio_offset = 0; 5564 uio.uio_resid = sizeof(struct dirent); 5565 uio.uio_segflg = UIO_SYSSPACE; 5566 uio.uio_rw = UIO_READ; 5567 uio.uio_td = curthread; 5568 5569 while (eof == 0 && error == 0) { 5570 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 5571 NULL, NULL); 5572 if (error != 0) 5573 break; 5574 endp = (void *)((uint8_t *)dirent + 5575 sizeof(struct dirent) - uio.uio_resid); 5576 for (dp = dirent; dp < endp; 5577 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 5578 if (dp->d_type == DT_WHT) 5579 continue; 5580 if (dp->d_namlen == 0) 5581 continue; 5582 if (dp->d_type != DT_DIR && 5583 dp->d_type != DT_UNKNOWN) { 5584 error = ENOTEMPTY; 5585 break; 5586 } 5587 if (dp->d_namlen > 2) { 5588 error = ENOTEMPTY; 5589 break; 5590 } 5591 if (dp->d_namlen == 1 && 5592 dp->d_name[0] != '.') { 5593 error = ENOTEMPTY; 5594 break; 5595 } 5596 if (dp->d_namlen == 2 && 5597 dp->d_name[1] != '.') { 5598 error = ENOTEMPTY; 5599 break; 5600 } 5601 uio.uio_resid = sizeof(struct dirent); 5602 } 5603 } 5604 free(dirent, M_TEMP); 5605 return (error); 5606 } 5607 5608 int 5609 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5610 { 5611 int error; 5612 5613 if (dp->d_reclen > ap->a_uio->uio_resid) 5614 return (ENAMETOOLONG); 5615 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5616 if (error) { 5617 if (ap->a_ncookies != NULL) { 5618 if (ap->a_cookies != NULL) 5619 free(ap->a_cookies, M_TEMP); 5620 ap->a_cookies = NULL; 5621 *ap->a_ncookies = 0; 5622 } 5623 return (error); 5624 } 5625 if (ap->a_ncookies == NULL) 5626 return (0); 5627 5628 KASSERT(ap->a_cookies, 5629 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5630 5631 *ap->a_cookies = realloc(*ap->a_cookies, 5632 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5633 (*ap->a_cookies)[*ap->a_ncookies] = off; 5634 *ap->a_ncookies += 1; 5635 return (0); 5636 } 5637 5638 /* 5639 * Mark for update the access time of the file if the filesystem 5640 * supports VOP_MARKATIME. This functionality is used by execve and 5641 * mmap, so we want to avoid the I/O implied by directly setting 5642 * va_atime for the sake of efficiency. 
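 *
 * A call site is expected to look roughly like the following sketch
 * (not a quote of the execve or mmap code), with the vnode lock held
 * as asserted below:
 *
 *     if (error == 0)
 *             vfs_mark_atime(vp, cred);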
5643 */ 5644 void 5645 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5646 { 5647 struct mount *mp; 5648 5649 mp = vp->v_mount; 5650 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5651 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5652 (void)VOP_MARKATIME(vp); 5653 } 5654 5655 /* 5656 * The purpose of this routine is to remove granularity from accmode_t, 5657 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5658 * VADMIN and VAPPEND. 5659 * 5660 * If it returns 0, the caller is supposed to continue with the usual 5661 * access checks using 'accmode' as modified by this routine. If it 5662 * returns nonzero value, the caller is supposed to return that value 5663 * as errno. 5664 * 5665 * Note that after this routine runs, accmode may be zero. 5666 */ 5667 int 5668 vfs_unixify_accmode(accmode_t *accmode) 5669 { 5670 /* 5671 * There is no way to specify explicit "deny" rule using 5672 * file mode or POSIX.1e ACLs. 5673 */ 5674 if (*accmode & VEXPLICIT_DENY) { 5675 *accmode = 0; 5676 return (0); 5677 } 5678 5679 /* 5680 * None of these can be translated into usual access bits. 5681 * Also, the common case for NFSv4 ACLs is to not contain 5682 * either of these bits. Caller should check for VWRITE 5683 * on the containing directory instead. 5684 */ 5685 if (*accmode & (VDELETE_CHILD | VDELETE)) 5686 return (EPERM); 5687 5688 if (*accmode & VADMIN_PERMS) { 5689 *accmode &= ~VADMIN_PERMS; 5690 *accmode |= VADMIN; 5691 } 5692 5693 /* 5694 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5695 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5696 */ 5697 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5698 5699 return (0); 5700 } 5701 5702 /* 5703 * Clear out a doomed vnode (if any) and replace it with a new one as long 5704 * as the fs is not being unmounted. Return the root vnode to the caller. 5705 */ 5706 static int __noinline 5707 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 5708 { 5709 struct vnode *vp; 5710 int error; 5711 5712 restart: 5713 if (mp->mnt_rootvnode != NULL) { 5714 MNT_ILOCK(mp); 5715 vp = mp->mnt_rootvnode; 5716 if (vp != NULL) { 5717 if ((vp->v_iflag & VI_DOOMED) == 0) { 5718 vrefact(vp); 5719 MNT_IUNLOCK(mp); 5720 error = vn_lock(vp, flags); 5721 if (error == 0) { 5722 *vpp = vp; 5723 return (0); 5724 } 5725 vrele(vp); 5726 goto restart; 5727 } 5728 /* 5729 * Clear the old one. 5730 */ 5731 mp->mnt_rootvnode = NULL; 5732 } 5733 MNT_IUNLOCK(mp); 5734 if (vp != NULL) { 5735 /* 5736 * Paired with a fence in vfs_op_thread_exit(). 
5737 */ 5738 atomic_thread_fence_acq(); 5739 vfs_op_barrier_wait(mp); 5740 vrele(vp); 5741 } 5742 } 5743 error = VFS_CACHEDROOT(mp, flags, vpp); 5744 if (error != 0) 5745 return (error); 5746 if (mp->mnt_vfs_ops == 0) { 5747 MNT_ILOCK(mp); 5748 if (mp->mnt_vfs_ops != 0) { 5749 MNT_IUNLOCK(mp); 5750 return (0); 5751 } 5752 if (mp->mnt_rootvnode == NULL) { 5753 vrefact(*vpp); 5754 mp->mnt_rootvnode = *vpp; 5755 } else { 5756 if (mp->mnt_rootvnode != *vpp) { 5757 if ((mp->mnt_rootvnode->v_iflag & VI_DOOMED) == 0) { 5758 panic("%s: mismatch between vnode returned " 5759 " by VFS_CACHEDROOT and the one cached " 5760 " (%p != %p)", 5761 __func__, *vpp, mp->mnt_rootvnode); 5762 } 5763 } 5764 } 5765 MNT_IUNLOCK(mp); 5766 } 5767 return (0); 5768 } 5769 5770 int 5771 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 5772 { 5773 struct vnode *vp; 5774 int error; 5775 5776 if (!vfs_op_thread_enter(mp)) 5777 return (vfs_cache_root_fallback(mp, flags, vpp)); 5778 vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); 5779 if (vp == NULL || (vp->v_iflag & VI_DOOMED)) { 5780 vfs_op_thread_exit(mp); 5781 return (vfs_cache_root_fallback(mp, flags, vpp)); 5782 } 5783 vrefact(vp); 5784 vfs_op_thread_exit(mp); 5785 error = vn_lock(vp, flags); 5786 if (error != 0) { 5787 vrele(vp); 5788 return (vfs_cache_root_fallback(mp, flags, vpp)); 5789 } 5790 *vpp = vp; 5791 return (0); 5792 } 5793 5794 struct vnode * 5795 vfs_cache_root_clear(struct mount *mp) 5796 { 5797 struct vnode *vp; 5798 5799 /* 5800 * ops > 0 guarantees there is nobody who can see this vnode 5801 */ 5802 MPASS(mp->mnt_vfs_ops > 0); 5803 vp = mp->mnt_rootvnode; 5804 mp->mnt_rootvnode = NULL; 5805 return (vp); 5806 } 5807 5808 void 5809 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 5810 { 5811 5812 MPASS(mp->mnt_vfs_ops > 0); 5813 vrefact(vp); 5814 mp->mnt_rootvnode = vp; 5815 } 5816 5817 /* 5818 * These are helper functions for filesystems to traverse all 5819 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5820 * 5821 * This interface replaces MNT_VNODE_FOREACH. 5822 */ 5823 5824 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 5825 5826 struct vnode * 5827 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5828 { 5829 struct vnode *vp; 5830 5831 if (should_yield()) 5832 kern_yield(PRI_USER); 5833 MNT_ILOCK(mp); 5834 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5835 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5836 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5837 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. 
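 * A stale read is harmless: the flag is re-checked under the vnode
 * interlock immediately below, so the worst case is an avoidable
 * VI_LOCK()/VI_UNLOCK() round trip.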
*/ 5838 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5839 continue; 5840 VI_LOCK(vp); 5841 if ((vp->v_iflag & VI_DOOMED) != 0) { 5842 VI_UNLOCK(vp); 5843 continue; 5844 } 5845 break; 5846 } 5847 if (vp == NULL) { 5848 __mnt_vnode_markerfree_all(mvp, mp); 5849 /* MNT_IUNLOCK(mp); -- done in above function */ 5850 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5851 return (NULL); 5852 } 5853 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5854 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5855 MNT_IUNLOCK(mp); 5856 return (vp); 5857 } 5858 5859 struct vnode * 5860 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5861 { 5862 struct vnode *vp; 5863 5864 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 5865 MNT_ILOCK(mp); 5866 MNT_REF(mp); 5867 (*mvp)->v_mount = mp; 5868 (*mvp)->v_type = VMARKER; 5869 5870 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5871 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ 5872 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) 5873 continue; 5874 VI_LOCK(vp); 5875 if ((vp->v_iflag & VI_DOOMED) != 0) { 5876 VI_UNLOCK(vp); 5877 continue; 5878 } 5879 break; 5880 } 5881 if (vp == NULL) { 5882 MNT_REL(mp); 5883 MNT_IUNLOCK(mp); 5884 free(*mvp, M_VNODE_MARKER); 5885 *mvp = NULL; 5886 return (NULL); 5887 } 5888 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5889 MNT_IUNLOCK(mp); 5890 return (vp); 5891 } 5892 5893 void 5894 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 5895 { 5896 5897 if (*mvp == NULL) { 5898 MNT_IUNLOCK(mp); 5899 return; 5900 } 5901 5902 mtx_assert(MNT_MTX(mp), MA_OWNED); 5903 5904 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5905 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5906 MNT_REL(mp); 5907 MNT_IUNLOCK(mp); 5908 free(*mvp, M_VNODE_MARKER); 5909 *mvp = NULL; 5910 } 5911 5912 /* 5913 * These are helper functions for filesystems to traverse their 5914 * active vnodes. See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 5915 */ 5916 static void 5917 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 5918 { 5919 5920 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5921 5922 MNT_ILOCK(mp); 5923 MNT_REL(mp); 5924 MNT_IUNLOCK(mp); 5925 free(*mvp, M_VNODE_MARKER); 5926 *mvp = NULL; 5927 } 5928 5929 /* 5930 * Relock the mp mount vnode list lock with the vp vnode interlock in the 5931 * conventional lock order during mnt_vnode_next_active iteration. 5932 * 5933 * On entry, the mount vnode list lock is held and the vnode interlock is not. 5934 * The list lock is dropped and reacquired. On success, both locks are held. 5935 * On failure, the mount vnode list lock is held but the vnode interlock is 5936 * not, and the procedure may have yielded. 
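 * On a false return the caller, mnt_vnode_next_active(), restarts its
 * scan from the marker, which this function has already re-inserted
 * just before the vnode it was trying to lock.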
5937 */ 5938 static bool 5939 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 5940 struct vnode *vp) 5941 { 5942 const struct vnode *tmp; 5943 bool held, ret; 5944 5945 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 5946 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 5947 ("%s: bad marker", __func__)); 5948 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 5949 ("%s: inappropriate vnode", __func__)); 5950 ASSERT_VI_UNLOCKED(vp, __func__); 5951 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 5952 5953 ret = false; 5954 5955 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 5956 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 5957 5958 /* 5959 * Use a hold to prevent vp from disappearing while the mount vnode 5960 * list lock is dropped and reacquired. Normally a hold would be 5961 * acquired with vhold(), but that might try to acquire the vnode 5962 * interlock, which would be a LOR with the mount vnode list lock. 5963 */ 5964 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 5965 mtx_unlock(&mp->mnt_listmtx); 5966 if (!held) 5967 goto abort; 5968 VI_LOCK(vp); 5969 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 5970 vdropl(vp); 5971 goto abort; 5972 } 5973 mtx_lock(&mp->mnt_listmtx); 5974 5975 /* 5976 * Determine whether the vnode is still the next one after the marker, 5977 * excepting any other markers. If the vnode has not been doomed by 5978 * vgone() then the hold should have ensured that it remained on the 5979 * active list. If it has been doomed but is still on the active list, 5980 * don't abort, but rather skip over it (avoid spinning on doomed 5981 * vnodes). 5982 */ 5983 tmp = mvp; 5984 do { 5985 tmp = TAILQ_NEXT(tmp, v_actfreelist); 5986 } while (tmp != NULL && tmp->v_type == VMARKER); 5987 if (tmp != vp) { 5988 mtx_unlock(&mp->mnt_listmtx); 5989 VI_UNLOCK(vp); 5990 goto abort; 5991 } 5992 5993 ret = true; 5994 goto out; 5995 abort: 5996 maybe_yield(); 5997 mtx_lock(&mp->mnt_listmtx); 5998 out: 5999 if (ret) 6000 ASSERT_VI_LOCKED(vp, __func__); 6001 else 6002 ASSERT_VI_UNLOCKED(vp, __func__); 6003 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6004 return (ret); 6005 } 6006 6007 static struct vnode * 6008 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6009 { 6010 struct vnode *vp, *nvp; 6011 6012 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6013 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6014 restart: 6015 vp = TAILQ_NEXT(*mvp, v_actfreelist); 6016 while (vp != NULL) { 6017 if (vp->v_type == VMARKER) { 6018 vp = TAILQ_NEXT(vp, v_actfreelist); 6019 continue; 6020 } 6021 /* 6022 * Try-lock because this is the wrong lock order. If that does 6023 * not succeed, drop the mount vnode list lock and try to 6024 * reacquire it and the vnode interlock in the right order. 
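 * (The conventional order takes the vnode interlock before the mount
 * vnode list lock; see mnt_vnode_next_active_relock() above.)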
6025 */ 6026 if (!VI_TRYLOCK(vp) && 6027 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 6028 goto restart; 6029 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6030 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6031 ("alien vnode on the active list %p %p", vp, mp)); 6032 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 6033 break; 6034 nvp = TAILQ_NEXT(vp, v_actfreelist); 6035 VI_UNLOCK(vp); 6036 vp = nvp; 6037 } 6038 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6039 6040 /* Check if we are done */ 6041 if (vp == NULL) { 6042 mtx_unlock(&mp->mnt_listmtx); 6043 mnt_vnode_markerfree_active(mvp, mp); 6044 return (NULL); 6045 } 6046 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 6047 mtx_unlock(&mp->mnt_listmtx); 6048 ASSERT_VI_LOCKED(vp, "active iter"); 6049 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 6050 return (vp); 6051 } 6052 6053 struct vnode * 6054 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6055 { 6056 6057 if (should_yield()) 6058 kern_yield(PRI_USER); 6059 mtx_lock(&mp->mnt_listmtx); 6060 return (mnt_vnode_next_active(mvp, mp)); 6061 } 6062 6063 struct vnode * 6064 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 6065 { 6066 struct vnode *vp; 6067 6068 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 6069 MNT_ILOCK(mp); 6070 MNT_REF(mp); 6071 MNT_IUNLOCK(mp); 6072 (*mvp)->v_type = VMARKER; 6073 (*mvp)->v_mount = mp; 6074 6075 mtx_lock(&mp->mnt_listmtx); 6076 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 6077 if (vp == NULL) { 6078 mtx_unlock(&mp->mnt_listmtx); 6079 mnt_vnode_markerfree_active(mvp, mp); 6080 return (NULL); 6081 } 6082 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 6083 return (mnt_vnode_next_active(mvp, mp)); 6084 } 6085 6086 void 6087 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 6088 { 6089 6090 if (*mvp == NULL) 6091 return; 6092 6093 mtx_lock(&mp->mnt_listmtx); 6094 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6095 mtx_unlock(&mp->mnt_listmtx); 6096 mnt_vnode_markerfree_active(mvp, mp); 6097 } 6098
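/*
 * For reference: filesystems normally consume the active-vnode iterator
 * above through the MNT_VNODE_FOREACH_ACTIVE() macro from sys/mount.h.
 * A sketch of the usual pattern, mirroring vfs_msync() earlier in this
 * file, where wanted() stands for whatever per-vnode predicate the
 * caller applies:
 *
 *     struct vnode *vp, *mvp;
 *
 *     MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
 *             if (!wanted(vp)) {
 *                     VI_UNLOCK(vp);
 *                     continue;
 *             }
 *             ... take a reference or lock, do the work, release ...
 *     }
 *
 * Each iteration is entered with the vnode interlock held and the body is
 * responsible for releasing it; MNT_VNODE_FOREACH_ACTIVE_ABORT() may be
 * used to terminate the walk early and free the marker.
 */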