1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 */ 36 37 /* 38 * External virtual filesystem routines 39 */ 40 41 #include <sys/cdefs.h> 42 #include "opt_ddb.h" 43 #include "opt_watchdog.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/asan.h> 48 #include <sys/bio.h> 49 #include <sys/buf.h> 50 #include <sys/capsicum.h> 51 #include <sys/condvar.h> 52 #include <sys/conf.h> 53 #include <sys/counter.h> 54 #include <sys/dirent.h> 55 #include <sys/event.h> 56 #include <sys/eventhandler.h> 57 #include <sys/extattr.h> 58 #include <sys/file.h> 59 #include <sys/fcntl.h> 60 #include <sys/jail.h> 61 #include <sys/kdb.h> 62 #include <sys/kernel.h> 63 #include <sys/kthread.h> 64 #include <sys/ktr.h> 65 #include <sys/limits.h> 66 #include <sys/lockf.h> 67 #include <sys/malloc.h> 68 #include <sys/mount.h> 69 #include <sys/namei.h> 70 #include <sys/pctrie.h> 71 #include <sys/priv.h> 72 #include <sys/reboot.h> 73 #include <sys/refcount.h> 74 #include <sys/rwlock.h> 75 #include <sys/sched.h> 76 #include <sys/sleepqueue.h> 77 #include <sys/smr.h> 78 #include <sys/smp.h> 79 #include <sys/stat.h> 80 #include <sys/sysctl.h> 81 #include <sys/syslog.h> 82 #include <sys/vmmeter.h> 83 #include <sys/vnode.h> 84 #include <sys/watchdog.h> 85 86 #include <machine/stdarg.h> 87 88 #include <security/mac/mac_framework.h> 89 90 #include <vm/vm.h> 91 #include <vm/vm_object.h> 92 #include <vm/vm_extern.h> 93 #include <vm/pmap.h> 94 #include <vm/vm_map.h> 95 #include <vm/vm_page.h> 96 #include <vm/vm_kern.h> 97 #include <vm/vnode_pager.h> 98 #include <vm/uma.h> 99 100 #if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS)) 101 #error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS 102 #endif 103 104 #ifdef DDB 105 #include <ddb/ddb.h> 106 #endif 107 108 static void delmntque(struct vnode *vp); 109 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 110 int slpflag, int slptimeo); 111 static void syncer_shutdown(void *arg, int howto); 112 static int vtryrecycle(struct vnode *vp, bool isvnlru); 113 static void v_init_counters(struct vnode *); 114 static void vn_seqc_init(struct vnode *); 115 static void vn_seqc_write_end_free(struct vnode *vp); 116 static void vgonel(struct vnode *); 117 static bool vhold_recycle_free(struct vnode *); 118 static void vdropl_recycle(struct vnode *vp); 119 static void vdrop_recycle(struct vnode *vp); 120 static void vfs_knllock(void *arg); 121 static void vfs_knlunlock(void *arg); 122 static void vfs_knl_assert_lock(void *arg, int what); 123 static void destroy_vpollinfo(struct vpollinfo *vi); 124 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 125 daddr_t startlbn, daddr_t endlbn); 126 static void vnlru_recalc(void); 127 128 static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 129 "vnode configuration and statistics"); 130 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 131 "vnode configuration"); 132 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 133 "vnode statistics"); 134 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 135 "vnode recycling"); 136 137 /* 138 * Number of vnodes in existence. Increased whenever getnewvnode() 139 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 140 */ 141 static u_long __exclusive_cache_line numvnodes; 142 143 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 144 "Number of vnodes in existence (legacy)"); 145 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0, 146 "Number of vnodes in existence"); 147 148 static counter_u64_t vnodes_created; 149 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 150 "Number of vnodes created by getnewvnode (legacy)"); 151 SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created, 152 "Number of vnodes created by getnewvnode"); 153 154 /* 155 * Conversion tables for conversion from vnode types to inode formats 156 * and back. 157 */ 158 __enum_uint8(vtype) iftovt_tab[16] = { 159 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 160 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 161 }; 162 int vttoif_tab[10] = { 163 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 164 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 165 }; 166 167 /* 168 * List of allocates vnodes in the system. 169 */ 170 static TAILQ_HEAD(freelst, vnode) vnode_list; 171 static struct vnode *vnode_list_free_marker; 172 static struct vnode *vnode_list_reclaim_marker; 173 174 /* 175 * "Free" vnode target. Free vnodes are rarely completely free, but are 176 * just ones that are cheap to recycle. Usually they are for files which 177 * have been stat'd but not read; these usually have inode and namecache 178 * data attached to them. This target is the preferred minimum size of a 179 * sub-cache consisting mostly of such files. The system balances the size 180 * of this sub-cache with its complement to try to prevent either from 181 * thrashing while the other is relatively inactive. The targets express 182 * a preference for the best balance. 183 * 184 * "Above" this target there are 2 further targets (watermarks) related 185 * to recyling of free vnodes. In the best-operating case, the cache is 186 * exactly full, the free list has size between vlowat and vhiwat above the 187 * free target, and recycling from it and normal use maintains this state. 188 * Sometimes the free list is below vlowat or even empty, but this state 189 * is even better for immediate use provided the cache is not full. 190 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 191 * ones) to reach one of these states. The watermarks are currently hard- 192 * coded as 4% and 9% of the available space higher. These and the default 193 * of 25% for wantfreevnodes are too large if the memory size is large. 194 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 195 * whenever vnlru_proc() becomes active. 196 */ 197 static long wantfreevnodes; 198 static long __exclusive_cache_line freevnodes; 199 static long freevnodes_old; 200 201 static u_long recycles_count; 202 SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0, 203 "Number of vnodes recycled to meet vnode cache targets (legacy)"); 204 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, 205 &recycles_count, 0, 206 "Number of vnodes recycled to meet vnode cache targets"); 207 208 static u_long recycles_free_count; 209 SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS, 210 &recycles_free_count, 0, 211 "Number of free vnodes recycled to meet vnode cache targets (legacy)"); 212 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS, 213 &recycles_free_count, 0, 214 "Number of free vnodes recycled to meet vnode cache targets"); 215 216 static counter_u64_t direct_recycles_free_count; 217 SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD, 218 &direct_recycles_free_count, 219 "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets"); 220 221 static counter_u64_t vnode_skipped_requeues; 222 SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues, 223 "Number of times LRU requeue was skipped due to lock contention"); 224 225 static __read_mostly bool vnode_can_skip_requeue; 226 SYSCTL_BOOL(_vfs_vnode_param, OID_AUTO, can_skip_requeue, CTLFLAG_RW, 227 &vnode_can_skip_requeue, 0, "Is LRU requeue skippable"); 228 229 static u_long deferred_inact; 230 SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, 231 &deferred_inact, 0, "Number of times inactive processing was deferred"); 232 233 /* To keep more than one thread at a time from running vfs_getnewfsid */ 234 static struct mtx mntid_mtx; 235 236 /* 237 * Lock for any access to the following: 238 * vnode_list 239 * numvnodes 240 * freevnodes 241 */ 242 static struct mtx __exclusive_cache_line vnode_list_mtx; 243 244 /* Publicly exported FS */ 245 struct nfs_public nfs_pub; 246 247 static uma_zone_t buf_trie_zone; 248 static smr_t buf_trie_smr; 249 250 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 251 static uma_zone_t vnode_zone; 252 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 253 254 __read_frequently smr_t vfs_smr; 255 256 /* 257 * The workitem queue. 258 * 259 * It is useful to delay writes of file data and filesystem metadata 260 * for tens of seconds so that quickly created and deleted files need 261 * not waste disk bandwidth being created and removed. To realize this, 262 * we append vnodes to a "workitem" queue. When running with a soft 263 * updates implementation, most pending metadata dependencies should 264 * not wait for more than a few seconds. Thus, mounted on block devices 265 * are delayed only about a half the time that file data is delayed. 266 * Similarly, directory updates are more critical, so are only delayed 267 * about a third the time that file data is delayed. Thus, there are 268 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 269 * one each second (driven off the filesystem syncer process). The 270 * syncer_delayno variable indicates the next queue that is to be processed. 271 * Items that need to be processed soon are placed in this queue: 272 * 273 * syncer_workitem_pending[syncer_delayno] 274 * 275 * A delay of fifteen seconds is done by placing the request fifteen 276 * entries later in the queue: 277 * 278 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 279 * 280 */ 281 static int syncer_delayno; 282 static long syncer_mask; 283 LIST_HEAD(synclist, bufobj); 284 static struct synclist *syncer_workitem_pending; 285 /* 286 * The sync_mtx protects: 287 * bo->bo_synclist 288 * sync_vnode_count 289 * syncer_delayno 290 * syncer_state 291 * syncer_workitem_pending 292 * syncer_worklist_len 293 * rushjob 294 */ 295 static struct mtx sync_mtx; 296 static struct cv sync_wakeup; 297 298 #define SYNCER_MAXDELAY 32 299 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 300 static int syncdelay = 30; /* max time to delay syncing data */ 301 static int filedelay = 30; /* time to delay syncing files */ 302 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 303 "Time to delay syncing files (in seconds)"); 304 static int dirdelay = 29; /* time to delay syncing directories */ 305 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 306 "Time to delay syncing directories (in seconds)"); 307 static int metadelay = 28; /* time to delay syncing metadata */ 308 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 309 "Time to delay syncing metadata (in seconds)"); 310 static int rushjob; /* number of slots to run ASAP */ 311 static int stat_rush_requests; /* number of times I/O speeded up */ 312 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 313 "Number of times I/O speeded up (rush requests)"); 314 315 #define VDBATCH_SIZE 8 316 struct vdbatch { 317 u_int index; 318 struct mtx lock; 319 struct vnode *tab[VDBATCH_SIZE]; 320 }; 321 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 322 323 static void vdbatch_dequeue(struct vnode *vp); 324 325 /* 326 * The syncer will require at least SYNCER_MAXDELAY iterations to shutdown; 327 * we probably don't want to pause for the whole second each time. 328 */ 329 #define SYNCER_SHUTDOWN_SPEEDUP 32 330 static int sync_vnode_count; 331 static int syncer_worklist_len; 332 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 333 syncer_state; 334 335 /* Target for maximum number of vnodes. */ 336 u_long desiredvnodes; 337 static u_long gapvnodes; /* gap between wanted and desired */ 338 static u_long vhiwat; /* enough extras after expansion */ 339 static u_long vlowat; /* minimal extras before expansion */ 340 static bool vstir; /* nonzero to stir non-free vnodes */ 341 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 342 343 static u_long vnlru_read_freevnodes(void); 344 345 /* 346 * Note that no attempt is made to sanitize these parameters. 347 */ 348 static int 349 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 350 { 351 u_long val; 352 int error; 353 354 val = desiredvnodes; 355 error = sysctl_handle_long(oidp, &val, 0, req); 356 if (error != 0 || req->newptr == NULL) 357 return (error); 358 359 if (val == desiredvnodes) 360 return (0); 361 mtx_lock(&vnode_list_mtx); 362 desiredvnodes = val; 363 wantfreevnodes = desiredvnodes / 4; 364 vnlru_recalc(); 365 mtx_unlock(&vnode_list_mtx); 366 /* 367 * XXX There is no protection against multiple threads changing 368 * desiredvnodes at the same time. Locking above only helps vnlru and 369 * getnewvnode. 370 */ 371 vfs_hash_changesize(desiredvnodes); 372 cache_changesize(desiredvnodes); 373 return (0); 374 } 375 376 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 377 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 378 "LU", "Target for maximum number of vnodes (legacy)"); 379 SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit, 380 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 381 "LU", "Target for maximum number of vnodes"); 382 383 static int 384 sysctl_freevnodes(SYSCTL_HANDLER_ARGS) 385 { 386 u_long rfreevnodes; 387 388 rfreevnodes = vnlru_read_freevnodes(); 389 return (sysctl_handle_long(oidp, &rfreevnodes, 0, req)); 390 } 391 392 SYSCTL_PROC(_vfs, OID_AUTO, freevnodes, 393 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes, 394 "LU", "Number of \"free\" vnodes (legacy)"); 395 SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free, 396 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes, 397 "LU", "Number of \"free\" vnodes"); 398 399 static int 400 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 401 { 402 u_long val; 403 int error; 404 405 val = wantfreevnodes; 406 error = sysctl_handle_long(oidp, &val, 0, req); 407 if (error != 0 || req->newptr == NULL) 408 return (error); 409 410 if (val == wantfreevnodes) 411 return (0); 412 mtx_lock(&vnode_list_mtx); 413 wantfreevnodes = val; 414 vnlru_recalc(); 415 mtx_unlock(&vnode_list_mtx); 416 return (0); 417 } 418 419 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 420 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 421 "LU", "Target for minimum number of \"free\" vnodes (legacy)"); 422 SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree, 423 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 424 "LU", "Target for minimum number of \"free\" vnodes"); 425 426 static int vnlru_nowhere; 427 SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS, 428 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 429 430 static int 431 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 432 { 433 struct vnode *vp; 434 struct nameidata nd; 435 char *buf; 436 unsigned long ndflags; 437 int error; 438 439 if (req->newptr == NULL) 440 return (EINVAL); 441 if (req->newlen >= PATH_MAX) 442 return (E2BIG); 443 444 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 445 error = SYSCTL_IN(req, buf, req->newlen); 446 if (error != 0) 447 goto out; 448 449 buf[req->newlen] = '\0'; 450 451 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1; 452 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf); 453 if ((error = namei(&nd)) != 0) 454 goto out; 455 vp = nd.ni_vp; 456 457 if (VN_IS_DOOMED(vp)) { 458 /* 459 * This vnode is being recycled. Return != 0 to let the caller 460 * know that the sysctl had no effect. Return EAGAIN because a 461 * subsequent call will likely succeed (since namei will create 462 * a new vnode if necessary) 463 */ 464 error = EAGAIN; 465 goto putvnode; 466 } 467 468 vgone(vp); 469 putvnode: 470 vput(vp); 471 NDFREE_PNBUF(&nd); 472 out: 473 free(buf, M_TEMP); 474 return (error); 475 } 476 477 static int 478 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 479 { 480 struct thread *td = curthread; 481 struct vnode *vp; 482 struct file *fp; 483 int error; 484 int fd; 485 486 if (req->newptr == NULL) 487 return (EBADF); 488 489 error = sysctl_handle_int(oidp, &fd, 0, req); 490 if (error != 0) 491 return (error); 492 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 493 if (error != 0) 494 return (error); 495 vp = fp->f_vnode; 496 497 error = vn_lock(vp, LK_EXCLUSIVE); 498 if (error != 0) 499 goto drop; 500 501 vgone(vp); 502 VOP_UNLOCK(vp); 503 drop: 504 fdrop(fp, td); 505 return (error); 506 } 507 508 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 509 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 510 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 511 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 512 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 513 sysctl_ftry_reclaim_vnode, "I", 514 "Try to reclaim a vnode by its file descriptor"); 515 516 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 517 #define vnsz2log 8 518 #ifndef DEBUG_LOCKS 519 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log && 520 sizeof(struct vnode) < 1UL << (vnsz2log + 1), 521 "vnsz2log needs to be updated"); 522 #endif 523 524 /* 525 * Support for the bufobj clean & dirty pctrie. 526 */ 527 static void * 528 buf_trie_alloc(struct pctrie *ptree) 529 { 530 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 531 } 532 533 static void 534 buf_trie_free(struct pctrie *ptree, void *node) 535 { 536 uma_zfree_smr(buf_trie_zone, node); 537 } 538 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 539 buf_trie_smr); 540 541 /* 542 * Lookup the next element greater than or equal to lblkno, accounting for the 543 * fact that, for pctries, negative values are greater than nonnegative ones. 544 */ 545 static struct buf * 546 buf_lookup_ge(struct bufv *bv, daddr_t lblkno) 547 { 548 struct buf *bp; 549 550 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, lblkno); 551 if (bp == NULL && lblkno < 0) 552 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, 0); 553 if (bp != NULL && bp->b_lblkno < lblkno) 554 bp = NULL; 555 return (bp); 556 } 557 558 /* 559 * Insert bp, and find the next element smaller than bp, accounting for the fact 560 * that, for pctries, negative values are greater than nonnegative ones. 561 */ 562 static int 563 buf_insert_lookup_le(struct bufv *bv, struct buf *bp, struct buf **n) 564 { 565 int error; 566 567 error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, n); 568 if (error != EEXIST) { 569 if (*n == NULL && bp->b_lblkno >= 0) 570 *n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, ~0L); 571 if (*n != NULL && (*n)->b_lblkno >= bp->b_lblkno) 572 *n = NULL; 573 } 574 return (error); 575 } 576 577 /* 578 * Initialize the vnode management data structures. 579 * 580 * Reevaluate the following cap on the number of vnodes after the physical 581 * memory size exceeds 512GB. In the limit, as the physical memory size 582 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 583 */ 584 #ifndef MAXVNODES_MAX 585 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 586 #endif 587 588 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 589 590 static struct vnode * 591 vn_alloc_marker(struct mount *mp) 592 { 593 struct vnode *vp; 594 595 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 596 vp->v_type = VMARKER; 597 vp->v_mount = mp; 598 599 return (vp); 600 } 601 602 static void 603 vn_free_marker(struct vnode *vp) 604 { 605 606 MPASS(vp->v_type == VMARKER); 607 free(vp, M_VNODE_MARKER); 608 } 609 610 #ifdef KASAN 611 static int 612 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 613 { 614 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 615 return (0); 616 } 617 618 static void 619 vnode_dtor(void *mem, int size, void *arg __unused) 620 { 621 size_t end1, end2, off1, off2; 622 623 _Static_assert(offsetof(struct vnode, v_vnodelist) < 624 offsetof(struct vnode, v_dbatchcpu), 625 "KASAN marks require updating"); 626 627 off1 = offsetof(struct vnode, v_vnodelist); 628 off2 = offsetof(struct vnode, v_dbatchcpu); 629 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 630 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 631 632 /* 633 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 634 * after the vnode has been freed. Try to get some KASAN coverage by 635 * marking everything except those two fields as invalid. Because 636 * KASAN's tracking is not byte-granular, any preceding fields sharing 637 * the same 8-byte aligned word must also be marked valid. 638 */ 639 640 /* Handle the area from the start until v_vnodelist... */ 641 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 642 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 643 644 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 645 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 646 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 647 if (off2 > off1) 648 kasan_mark((void *)((char *)mem + off1), off2 - off1, 649 off2 - off1, KASAN_UMA_FREED); 650 651 /* ... and finally the area from v_dbatchcpu to the end. */ 652 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 653 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 654 KASAN_UMA_FREED); 655 } 656 #endif /* KASAN */ 657 658 /* 659 * Initialize a vnode as it first enters the zone. 660 */ 661 static int 662 vnode_init(void *mem, int size, int flags) 663 { 664 struct vnode *vp; 665 666 vp = mem; 667 bzero(vp, size); 668 /* 669 * Setup locks. 670 */ 671 vp->v_vnlock = &vp->v_lock; 672 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 673 /* 674 * By default, don't allow shared locks unless filesystems opt-in. 675 */ 676 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 677 LK_NOSHARE | LK_IS_VNODE); 678 /* 679 * Initialize bufobj. 680 */ 681 bufobj_init(&vp->v_bufobj, vp); 682 /* 683 * Initialize namecache. 684 */ 685 cache_vnode_init(vp); 686 /* 687 * Initialize rangelocks. 688 */ 689 rangelock_init(&vp->v_rl); 690 691 vp->v_dbatchcpu = NOCPU; 692 693 vp->v_state = VSTATE_DEAD; 694 695 /* 696 * Check vhold_recycle_free for an explanation. 697 */ 698 vp->v_holdcnt = VHOLD_NO_SMR; 699 vp->v_type = VNON; 700 mtx_lock(&vnode_list_mtx); 701 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 702 mtx_unlock(&vnode_list_mtx); 703 return (0); 704 } 705 706 /* 707 * Free a vnode when it is cleared from the zone. 708 */ 709 static void 710 vnode_fini(void *mem, int size) 711 { 712 struct vnode *vp; 713 struct bufobj *bo; 714 715 vp = mem; 716 vdbatch_dequeue(vp); 717 mtx_lock(&vnode_list_mtx); 718 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 719 mtx_unlock(&vnode_list_mtx); 720 rangelock_destroy(&vp->v_rl); 721 lockdestroy(vp->v_vnlock); 722 mtx_destroy(&vp->v_interlock); 723 bo = &vp->v_bufobj; 724 rw_destroy(BO_LOCKPTR(bo)); 725 726 kasan_mark(mem, size, size, 0); 727 } 728 729 /* 730 * Provide the size of NFS nclnode and NFS fh for calculation of the 731 * vnode memory consumption. The size is specified directly to 732 * eliminate dependency on NFS-private header. 733 * 734 * Other filesystems may use bigger or smaller (like UFS and ZFS) 735 * private inode data, but the NFS-based estimation is ample enough. 736 * Still, we care about differences in the size between 64- and 32-bit 737 * platforms. 738 * 739 * Namecache structure size is heuristically 740 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 741 */ 742 #ifdef _LP64 743 #define NFS_NCLNODE_SZ (528 + 64) 744 #define NC_SZ 148 745 #else 746 #define NFS_NCLNODE_SZ (360 + 32) 747 #define NC_SZ 92 748 #endif 749 750 static void 751 vntblinit(void *dummy __unused) 752 { 753 struct vdbatch *vd; 754 uma_ctor ctor; 755 uma_dtor dtor; 756 int cpu, physvnodes, virtvnodes; 757 758 /* 759 * Desiredvnodes is a function of the physical memory size and the 760 * kernel's heap size. Generally speaking, it scales with the 761 * physical memory size. The ratio of desiredvnodes to the physical 762 * memory size is 1:16 until desiredvnodes exceeds 98,304. 763 * Thereafter, the 764 * marginal ratio of desiredvnodes to the physical memory size is 765 * 1:64. However, desiredvnodes is limited by the kernel's heap 766 * size. The memory required by desiredvnodes vnodes and vm objects 767 * must not exceed 1/10th of the kernel's heap size. 768 */ 769 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 770 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 771 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 772 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 773 desiredvnodes = min(physvnodes, virtvnodes); 774 if (desiredvnodes > MAXVNODES_MAX) { 775 if (bootverbose) 776 printf("Reducing kern.maxvnodes %lu -> %lu\n", 777 desiredvnodes, MAXVNODES_MAX); 778 desiredvnodes = MAXVNODES_MAX; 779 } 780 wantfreevnodes = desiredvnodes / 4; 781 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 782 TAILQ_INIT(&vnode_list); 783 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 784 /* 785 * The lock is taken to appease WITNESS. 786 */ 787 mtx_lock(&vnode_list_mtx); 788 vnlru_recalc(); 789 mtx_unlock(&vnode_list_mtx); 790 vnode_list_free_marker = vn_alloc_marker(NULL); 791 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 792 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 793 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 794 795 #ifdef KASAN 796 ctor = vnode_ctor; 797 dtor = vnode_dtor; 798 #else 799 ctor = NULL; 800 dtor = NULL; 801 #endif 802 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 803 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 804 uma_zone_set_smr(vnode_zone, vfs_smr); 805 806 /* 807 * Preallocate enough nodes to support one-per buf so that 808 * we can not fail an insert. reassignbuf() callers can not 809 * tolerate the insertion failure. 810 */ 811 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 812 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 813 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 814 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 815 uma_prealloc(buf_trie_zone, nbuf); 816 817 vnodes_created = counter_u64_alloc(M_WAITOK); 818 direct_recycles_free_count = counter_u64_alloc(M_WAITOK); 819 vnode_skipped_requeues = counter_u64_alloc(M_WAITOK); 820 821 /* 822 * Initialize the filesystem syncer. 823 */ 824 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 825 &syncer_mask); 826 syncer_maxdelay = syncer_mask + 1; 827 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 828 cv_init(&sync_wakeup, "syncer"); 829 830 CPU_FOREACH(cpu) { 831 vd = DPCPU_ID_PTR((cpu), vd); 832 bzero(vd, sizeof(*vd)); 833 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 834 } 835 } 836 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 837 838 /* 839 * Mark a mount point as busy. Used to synchronize access and to delay 840 * unmounting. Eventually, mountlist_mtx is not released on failure. 841 * 842 * vfs_busy() is a custom lock, it can block the caller. 843 * vfs_busy() only sleeps if the unmount is active on the mount point. 844 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 845 * vnode belonging to mp. 846 * 847 * Lookup uses vfs_busy() to traverse mount points. 848 * root fs var fs 849 * / vnode lock A / vnode lock (/var) D 850 * /var vnode lock B /log vnode lock(/var/log) E 851 * vfs_busy lock C vfs_busy lock F 852 * 853 * Within each file system, the lock order is C->A->B and F->D->E. 854 * 855 * When traversing across mounts, the system follows that lock order: 856 * 857 * C->A->B 858 * | 859 * +->F->D->E 860 * 861 * The lookup() process for namei("/var") illustrates the process: 862 * 1. VOP_LOOKUP() obtains B while A is held 863 * 2. vfs_busy() obtains a shared lock on F while A and B are held 864 * 3. vput() releases lock on B 865 * 4. vput() releases lock on A 866 * 5. VFS_ROOT() obtains lock on D while shared lock on F is held 867 * 6. vfs_unbusy() releases shared lock on F 868 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 869 * Attempt to lock A (instead of vp_crossmp) while D is held would 870 * violate the global order, causing deadlocks. 871 * 872 * dounmount() locks B while F is drained. Note that for stacked 873 * filesystems, D and B in the example above may be the same lock, 874 * which introdues potential lock order reversal deadlock between 875 * dounmount() and step 5 above. These filesystems may avoid the LOR 876 * by setting VV_CROSSLOCK on the covered vnode so that lock B will 877 * remain held until after step 5. 878 */ 879 int 880 vfs_busy(struct mount *mp, int flags) 881 { 882 struct mount_pcpu *mpcpu; 883 884 MPASS((flags & ~MBF_MASK) == 0); 885 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 886 887 if (vfs_op_thread_enter(mp, mpcpu)) { 888 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 889 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 890 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 891 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 892 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 893 vfs_op_thread_exit(mp, mpcpu); 894 if (flags & MBF_MNTLSTLOCK) 895 mtx_unlock(&mountlist_mtx); 896 return (0); 897 } 898 899 MNT_ILOCK(mp); 900 vfs_assert_mount_counters(mp); 901 MNT_REF(mp); 902 /* 903 * If mount point is currently being unmounted, sleep until the 904 * mount point fate is decided. If thread doing the unmounting fails, 905 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 906 * that this mount point has survived the unmount attempt and vfs_busy 907 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 908 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 909 * about to be really destroyed. vfs_busy needs to release its 910 * reference on the mount point in this case and return with ENOENT, 911 * telling the caller the mount it tried to busy is no longer valid. 912 */ 913 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 914 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), 915 ("%s: non-empty upper mount list with pending unmount", 916 __func__)); 917 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 918 MNT_REL(mp); 919 MNT_IUNLOCK(mp); 920 CTR1(KTR_VFS, "%s: failed busying before sleeping", 921 __func__); 922 return (ENOENT); 923 } 924 if (flags & MBF_MNTLSTLOCK) 925 mtx_unlock(&mountlist_mtx); 926 mp->mnt_kern_flag |= MNTK_MWAIT; 927 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 928 if (flags & MBF_MNTLSTLOCK) 929 mtx_lock(&mountlist_mtx); 930 MNT_ILOCK(mp); 931 } 932 if (flags & MBF_MNTLSTLOCK) 933 mtx_unlock(&mountlist_mtx); 934 mp->mnt_lockref++; 935 MNT_IUNLOCK(mp); 936 return (0); 937 } 938 939 /* 940 * Free a busy filesystem. 941 */ 942 void 943 vfs_unbusy(struct mount *mp) 944 { 945 struct mount_pcpu *mpcpu; 946 int c; 947 948 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 949 950 if (vfs_op_thread_enter(mp, mpcpu)) { 951 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 952 vfs_mp_count_sub_pcpu(mpcpu, lockref, 1); 953 vfs_mp_count_sub_pcpu(mpcpu, ref, 1); 954 vfs_op_thread_exit(mp, mpcpu); 955 return; 956 } 957 958 MNT_ILOCK(mp); 959 vfs_assert_mount_counters(mp); 960 MNT_REL(mp); 961 c = --mp->mnt_lockref; 962 if (mp->mnt_vfs_ops == 0) { 963 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 964 MNT_IUNLOCK(mp); 965 return; 966 } 967 if (c < 0) 968 vfs_dump_mount_counters(mp); 969 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 970 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 971 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 972 mp->mnt_kern_flag &= ~MNTK_DRAINING; 973 wakeup(&mp->mnt_lockref); 974 } 975 MNT_IUNLOCK(mp); 976 } 977 978 /* 979 * Lookup a mount point by filesystem identifier. 980 */ 981 struct mount * 982 vfs_getvfs(fsid_t *fsid) 983 { 984 struct mount *mp; 985 986 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 987 mtx_lock(&mountlist_mtx); 988 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 989 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 990 vfs_ref(mp); 991 mtx_unlock(&mountlist_mtx); 992 return (mp); 993 } 994 } 995 mtx_unlock(&mountlist_mtx); 996 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 997 return ((struct mount *) 0); 998 } 999 1000 /* 1001 * Lookup a mount point by filesystem identifier, busying it before 1002 * returning. 1003 * 1004 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 1005 * cache for popular filesystem identifiers. The cache is lockess, using 1006 * the fact that struct mount's are never freed. In worst case we may 1007 * get pointer to unmounted or even different filesystem, so we have to 1008 * check what we got, and go slow way if so. 1009 */ 1010 struct mount * 1011 vfs_busyfs(fsid_t *fsid) 1012 { 1013 #define FSID_CACHE_SIZE 256 1014 typedef struct mount * volatile vmp_t; 1015 static vmp_t cache[FSID_CACHE_SIZE]; 1016 struct mount *mp; 1017 int error; 1018 uint32_t hash; 1019 1020 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 1021 hash = fsid->val[0] ^ fsid->val[1]; 1022 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 1023 mp = cache[hash]; 1024 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 1025 goto slow; 1026 if (vfs_busy(mp, 0) != 0) { 1027 cache[hash] = NULL; 1028 goto slow; 1029 } 1030 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 1031 return (mp); 1032 else 1033 vfs_unbusy(mp); 1034 1035 slow: 1036 mtx_lock(&mountlist_mtx); 1037 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 1038 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 1039 error = vfs_busy(mp, MBF_MNTLSTLOCK); 1040 if (error) { 1041 cache[hash] = NULL; 1042 mtx_unlock(&mountlist_mtx); 1043 return (NULL); 1044 } 1045 cache[hash] = mp; 1046 return (mp); 1047 } 1048 } 1049 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 1050 mtx_unlock(&mountlist_mtx); 1051 return ((struct mount *) 0); 1052 } 1053 1054 /* 1055 * Check if a user can access privileged mount options. 1056 */ 1057 int 1058 vfs_suser(struct mount *mp, struct thread *td) 1059 { 1060 int error; 1061 1062 if (jailed(td->td_ucred)) { 1063 /* 1064 * If the jail of the calling thread lacks permission for 1065 * this type of file system, deny immediately. 1066 */ 1067 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 1068 return (EPERM); 1069 1070 /* 1071 * If the file system was mounted outside the jail of the 1072 * calling thread, deny immediately. 1073 */ 1074 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 1075 return (EPERM); 1076 } 1077 1078 /* 1079 * If file system supports delegated administration, we don't check 1080 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 1081 * by the file system itself. 1082 * If this is not the user that did original mount, we check for 1083 * the PRIV_VFS_MOUNT_OWNER privilege. 1084 */ 1085 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 1086 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 1087 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 1088 return (error); 1089 } 1090 return (0); 1091 } 1092 1093 /* 1094 * Get a new unique fsid. Try to make its val[0] unique, since this value 1095 * will be used to create fake device numbers for stat(). Also try (but 1096 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1097 * support 16-bit device numbers. We end up with unique val[0]'s for the 1098 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1099 * 1100 * Keep in mind that several mounts may be running in parallel. Starting 1101 * the search one past where the previous search terminated is both a 1102 * micro-optimization and a defense against returning the same fsid to 1103 * different mounts. 1104 */ 1105 void 1106 vfs_getnewfsid(struct mount *mp) 1107 { 1108 static uint16_t mntid_base; 1109 struct mount *nmp; 1110 fsid_t tfsid; 1111 int mtype; 1112 1113 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1114 mtx_lock(&mntid_mtx); 1115 mtype = mp->mnt_vfc->vfc_typenum; 1116 tfsid.val[1] = mtype; 1117 mtype = (mtype & 0xFF) << 24; 1118 for (;;) { 1119 tfsid.val[0] = makedev(255, 1120 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1121 mntid_base++; 1122 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1123 break; 1124 vfs_rel(nmp); 1125 } 1126 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1127 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1128 mtx_unlock(&mntid_mtx); 1129 } 1130 1131 /* 1132 * Knob to control the precision of file timestamps: 1133 * 1134 * 0 = seconds only; nanoseconds zeroed. 1135 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1136 * 2 = seconds and nanoseconds, truncated to microseconds. 1137 * >=3 = seconds and nanoseconds, maximum precision. 1138 */ 1139 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1140 1141 static int timestamp_precision = TSP_USEC; 1142 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1143 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1144 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1145 "3+: sec + ns (max. precision))"); 1146 1147 /* 1148 * Get a current timestamp. 1149 */ 1150 void 1151 vfs_timestamp(struct timespec *tsp) 1152 { 1153 struct timeval tv; 1154 1155 switch (timestamp_precision) { 1156 case TSP_SEC: 1157 tsp->tv_sec = time_second; 1158 tsp->tv_nsec = 0; 1159 break; 1160 case TSP_HZ: 1161 getnanotime(tsp); 1162 break; 1163 case TSP_USEC: 1164 microtime(&tv); 1165 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1166 break; 1167 case TSP_NSEC: 1168 default: 1169 nanotime(tsp); 1170 break; 1171 } 1172 } 1173 1174 /* 1175 * Set vnode attributes to VNOVAL 1176 */ 1177 void 1178 vattr_null(struct vattr *vap) 1179 { 1180 1181 vap->va_type = VNON; 1182 vap->va_size = VNOVAL; 1183 vap->va_bytes = VNOVAL; 1184 vap->va_mode = VNOVAL; 1185 vap->va_nlink = VNOVAL; 1186 vap->va_uid = VNOVAL; 1187 vap->va_gid = VNOVAL; 1188 vap->va_fsid = VNOVAL; 1189 vap->va_fileid = VNOVAL; 1190 vap->va_blocksize = VNOVAL; 1191 vap->va_rdev = VNOVAL; 1192 vap->va_atime.tv_sec = VNOVAL; 1193 vap->va_atime.tv_nsec = VNOVAL; 1194 vap->va_mtime.tv_sec = VNOVAL; 1195 vap->va_mtime.tv_nsec = VNOVAL; 1196 vap->va_ctime.tv_sec = VNOVAL; 1197 vap->va_ctime.tv_nsec = VNOVAL; 1198 vap->va_birthtime.tv_sec = VNOVAL; 1199 vap->va_birthtime.tv_nsec = VNOVAL; 1200 vap->va_flags = VNOVAL; 1201 vap->va_gen = VNOVAL; 1202 vap->va_vaflags = 0; 1203 } 1204 1205 /* 1206 * Try to reduce the total number of vnodes. 1207 * 1208 * This routine (and its user) are buggy in at least the following ways: 1209 * - all parameters were picked years ago when RAM sizes were significantly 1210 * smaller 1211 * - it can pick vnodes based on pages used by the vm object, but filesystems 1212 * like ZFS don't use it making the pick broken 1213 * - since ZFS has its own aging policy it gets partially combated by this one 1214 * - a dedicated method should be provided for filesystems to let them decide 1215 * whether the vnode should be recycled 1216 * 1217 * This routine is called when we have too many vnodes. It attempts 1218 * to free <count> vnodes and will potentially free vnodes that still 1219 * have VM backing store (VM backing store is typically the cause 1220 * of a vnode blowout so we want to do this). Therefore, this operation 1221 * is not considered cheap. 1222 * 1223 * A number of conditions may prevent a vnode from being reclaimed. 1224 * the buffer cache may have references on the vnode, a directory 1225 * vnode may still have references due to the namei cache representing 1226 * underlying files, or the vnode may be in active use. It is not 1227 * desirable to reuse such vnodes. These conditions may cause the 1228 * number of vnodes to reach some minimum value regardless of what 1229 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 1230 * 1231 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1232 * entries if this argument is strue 1233 * @param trigger Only reclaim vnodes with fewer than this many resident 1234 * pages. 1235 * @param target How many vnodes to reclaim. 1236 * @return The number of vnodes that were reclaimed. 1237 */ 1238 static int 1239 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1240 { 1241 struct vnode *vp, *mvp; 1242 struct mount *mp; 1243 struct vm_object *object; 1244 u_long done; 1245 bool retried; 1246 1247 mtx_assert(&vnode_list_mtx, MA_OWNED); 1248 1249 retried = false; 1250 done = 0; 1251 1252 mvp = vnode_list_reclaim_marker; 1253 restart: 1254 vp = mvp; 1255 while (done < target) { 1256 vp = TAILQ_NEXT(vp, v_vnodelist); 1257 if (__predict_false(vp == NULL)) 1258 break; 1259 1260 if (__predict_false(vp->v_type == VMARKER)) 1261 continue; 1262 1263 /* 1264 * If it's been deconstructed already, it's still 1265 * referenced, or it exceeds the trigger, skip it. 1266 * Also skip free vnodes. We are trying to make space 1267 * for more free vnodes, not reduce their count. 1268 */ 1269 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1270 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1271 goto next_iter; 1272 1273 if (vp->v_type == VBAD || vp->v_type == VNON) 1274 goto next_iter; 1275 1276 object = atomic_load_ptr(&vp->v_object); 1277 if (object == NULL || object->resident_page_count > trigger) { 1278 goto next_iter; 1279 } 1280 1281 /* 1282 * Handle races against vnode allocation. Filesystems lock the 1283 * vnode some time after it gets returned from getnewvnode, 1284 * despite type and hold count being manipulated earlier. 1285 * Resorting to checking v_mount restores guarantees present 1286 * before the global list was reworked to contain all vnodes. 1287 */ 1288 if (!VI_TRYLOCK(vp)) 1289 goto next_iter; 1290 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1291 VI_UNLOCK(vp); 1292 goto next_iter; 1293 } 1294 if (vp->v_mount == NULL) { 1295 VI_UNLOCK(vp); 1296 goto next_iter; 1297 } 1298 vholdl(vp); 1299 VI_UNLOCK(vp); 1300 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1301 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1302 mtx_unlock(&vnode_list_mtx); 1303 1304 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1305 vdrop_recycle(vp); 1306 goto next_iter_unlocked; 1307 } 1308 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1309 vdrop_recycle(vp); 1310 vn_finished_write(mp); 1311 goto next_iter_unlocked; 1312 } 1313 1314 VI_LOCK(vp); 1315 if (vp->v_usecount > 0 || 1316 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1317 (vp->v_object != NULL && vp->v_object->handle == vp && 1318 vp->v_object->resident_page_count > trigger)) { 1319 VOP_UNLOCK(vp); 1320 vdropl_recycle(vp); 1321 vn_finished_write(mp); 1322 goto next_iter_unlocked; 1323 } 1324 recycles_count++; 1325 vgonel(vp); 1326 VOP_UNLOCK(vp); 1327 vdropl_recycle(vp); 1328 vn_finished_write(mp); 1329 done++; 1330 next_iter_unlocked: 1331 maybe_yield(); 1332 mtx_lock(&vnode_list_mtx); 1333 goto restart; 1334 next_iter: 1335 MPASS(vp->v_type != VMARKER); 1336 if (!should_yield()) 1337 continue; 1338 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1339 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1340 mtx_unlock(&vnode_list_mtx); 1341 kern_yield(PRI_USER); 1342 mtx_lock(&vnode_list_mtx); 1343 goto restart; 1344 } 1345 if (done == 0 && !retried) { 1346 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1347 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1348 retried = true; 1349 goto restart; 1350 } 1351 return (done); 1352 } 1353 1354 static int max_free_per_call = 10000; 1355 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0, 1356 "limit on vnode free requests per call to the vnlru_free routine (legacy)"); 1357 SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW, 1358 &max_free_per_call, 0, 1359 "limit on vnode free requests per call to the vnlru_free routine"); 1360 1361 /* 1362 * Attempt to recycle requested amount of free vnodes. 1363 */ 1364 static int 1365 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru) 1366 { 1367 struct vnode *vp; 1368 struct mount *mp; 1369 int ocount; 1370 bool retried; 1371 1372 mtx_assert(&vnode_list_mtx, MA_OWNED); 1373 if (count > max_free_per_call) 1374 count = max_free_per_call; 1375 if (count == 0) { 1376 mtx_unlock(&vnode_list_mtx); 1377 return (0); 1378 } 1379 ocount = count; 1380 retried = false; 1381 vp = mvp; 1382 for (;;) { 1383 vp = TAILQ_NEXT(vp, v_vnodelist); 1384 if (__predict_false(vp == NULL)) { 1385 /* 1386 * The free vnode marker can be past eligible vnodes: 1387 * 1. if vdbatch_process trylock failed 1388 * 2. if vtryrecycle failed 1389 * 1390 * If so, start the scan from scratch. 1391 */ 1392 if (!retried && vnlru_read_freevnodes() > 0) { 1393 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1394 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1395 vp = mvp; 1396 retried = true; 1397 continue; 1398 } 1399 1400 /* 1401 * Give up 1402 */ 1403 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1404 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1405 mtx_unlock(&vnode_list_mtx); 1406 break; 1407 } 1408 if (__predict_false(vp->v_type == VMARKER)) 1409 continue; 1410 if (vp->v_holdcnt > 0) 1411 continue; 1412 /* 1413 * Don't recycle if our vnode is from different type 1414 * of mount point. Note that mp is type-safe, the 1415 * check does not reach unmapped address even if 1416 * vnode is reclaimed. 1417 */ 1418 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1419 mp->mnt_op != mnt_op) { 1420 continue; 1421 } 1422 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1423 continue; 1424 } 1425 if (!vhold_recycle_free(vp)) 1426 continue; 1427 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1428 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1429 mtx_unlock(&vnode_list_mtx); 1430 /* 1431 * FIXME: ignores the return value, meaning it may be nothing 1432 * got recycled but it claims otherwise to the caller. 1433 * 1434 * Originally the value started being ignored in 2005 with 1435 * 114a1006a8204aa156e1f9ad6476cdff89cada7f . 1436 * 1437 * Respecting the value can run into significant stalls if most 1438 * vnodes belong to one file system and it has writes 1439 * suspended. In presence of many threads and millions of 1440 * vnodes they keep contending on the vnode_list_mtx lock only 1441 * to find vnodes they can't recycle. 1442 * 1443 * The solution would be to pre-check if the vnode is likely to 1444 * be recycle-able, but it needs to happen with the 1445 * vnode_list_mtx lock held. This runs into a problem where 1446 * VOP_GETWRITEMOUNT (currently needed to find out about if 1447 * writes are frozen) can take locks which LOR against it. 1448 * 1449 * Check nullfs for one example (null_getwritemount). 1450 */ 1451 vtryrecycle(vp, isvnlru); 1452 count--; 1453 if (count == 0) { 1454 break; 1455 } 1456 mtx_lock(&vnode_list_mtx); 1457 vp = mvp; 1458 } 1459 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1460 return (ocount - count); 1461 } 1462 1463 /* 1464 * XXX: returns without vnode_list_mtx locked! 1465 */ 1466 static int 1467 vnlru_free_locked_direct(int count) 1468 { 1469 int ret; 1470 1471 mtx_assert(&vnode_list_mtx, MA_OWNED); 1472 ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false); 1473 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1474 return (ret); 1475 } 1476 1477 static int 1478 vnlru_free_locked_vnlru(int count) 1479 { 1480 int ret; 1481 1482 mtx_assert(&vnode_list_mtx, MA_OWNED); 1483 ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true); 1484 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1485 return (ret); 1486 } 1487 1488 static int 1489 vnlru_free_vnlru(int count) 1490 { 1491 1492 mtx_lock(&vnode_list_mtx); 1493 return (vnlru_free_locked_vnlru(count)); 1494 } 1495 1496 void 1497 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1498 { 1499 1500 MPASS(mnt_op != NULL); 1501 MPASS(mvp != NULL); 1502 VNPASS(mvp->v_type == VMARKER, mvp); 1503 mtx_lock(&vnode_list_mtx); 1504 vnlru_free_impl(count, mnt_op, mvp, true); 1505 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1506 } 1507 1508 struct vnode * 1509 vnlru_alloc_marker(void) 1510 { 1511 struct vnode *mvp; 1512 1513 mvp = vn_alloc_marker(NULL); 1514 mtx_lock(&vnode_list_mtx); 1515 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1516 mtx_unlock(&vnode_list_mtx); 1517 return (mvp); 1518 } 1519 1520 void 1521 vnlru_free_marker(struct vnode *mvp) 1522 { 1523 mtx_lock(&vnode_list_mtx); 1524 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1525 mtx_unlock(&vnode_list_mtx); 1526 vn_free_marker(mvp); 1527 } 1528 1529 static void 1530 vnlru_recalc(void) 1531 { 1532 1533 mtx_assert(&vnode_list_mtx, MA_OWNED); 1534 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1535 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1536 vlowat = vhiwat / 2; 1537 } 1538 1539 /* 1540 * Attempt to recycle vnodes in a context that is always safe to block. 1541 * Calling vlrurecycle() from the bowels of filesystem code has some 1542 * interesting deadlock problems. 1543 */ 1544 static struct proc *vnlruproc; 1545 static int vnlruproc_sig; 1546 static u_long vnlruproc_kicks; 1547 1548 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0, 1549 "Number of times vnlru awakened due to vnode shortage"); 1550 1551 #define VNLRU_COUNT_SLOP 100 1552 1553 /* 1554 * The main freevnodes counter is only updated when a counter local to CPU 1555 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP. CPUs are conditionally 1556 * walked to compute a more accurate total. 1557 * 1558 * Note: the actual value at any given moment can still exceed slop, but it 1559 * should not be by significant margin in practice. 1560 */ 1561 #define VNLRU_FREEVNODES_SLOP 126 1562 1563 static void __noinline 1564 vfs_freevnodes_rollup(int8_t *lfreevnodes) 1565 { 1566 1567 atomic_add_long(&freevnodes, *lfreevnodes); 1568 *lfreevnodes = 0; 1569 critical_exit(); 1570 } 1571 1572 static __inline void 1573 vfs_freevnodes_inc(void) 1574 { 1575 int8_t *lfreevnodes; 1576 1577 critical_enter(); 1578 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1579 (*lfreevnodes)++; 1580 if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP)) 1581 vfs_freevnodes_rollup(lfreevnodes); 1582 else 1583 critical_exit(); 1584 } 1585 1586 static __inline void 1587 vfs_freevnodes_dec(void) 1588 { 1589 int8_t *lfreevnodes; 1590 1591 critical_enter(); 1592 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1593 (*lfreevnodes)--; 1594 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP)) 1595 vfs_freevnodes_rollup(lfreevnodes); 1596 else 1597 critical_exit(); 1598 } 1599 1600 static u_long 1601 vnlru_read_freevnodes(void) 1602 { 1603 long slop, rfreevnodes, rfreevnodes_old; 1604 int cpu; 1605 1606 rfreevnodes = atomic_load_long(&freevnodes); 1607 rfreevnodes_old = atomic_load_long(&freevnodes_old); 1608 1609 if (rfreevnodes > rfreevnodes_old) 1610 slop = rfreevnodes - rfreevnodes_old; 1611 else 1612 slop = rfreevnodes_old - rfreevnodes; 1613 if (slop < VNLRU_FREEVNODES_SLOP) 1614 return (rfreevnodes >= 0 ? rfreevnodes : 0); 1615 CPU_FOREACH(cpu) { 1616 rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes; 1617 } 1618 atomic_store_long(&freevnodes_old, rfreevnodes); 1619 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1620 } 1621 1622 static bool 1623 vnlru_under(u_long rnumvnodes, u_long limit) 1624 { 1625 u_long rfreevnodes, space; 1626 1627 if (__predict_false(rnumvnodes > desiredvnodes)) 1628 return (true); 1629 1630 space = desiredvnodes - rnumvnodes; 1631 if (space < limit) { 1632 rfreevnodes = vnlru_read_freevnodes(); 1633 if (rfreevnodes > wantfreevnodes) 1634 space += rfreevnodes - wantfreevnodes; 1635 } 1636 return (space < limit); 1637 } 1638 1639 static void 1640 vnlru_kick_locked(void) 1641 { 1642 1643 mtx_assert(&vnode_list_mtx, MA_OWNED); 1644 if (vnlruproc_sig == 0) { 1645 vnlruproc_sig = 1; 1646 vnlruproc_kicks++; 1647 wakeup(vnlruproc); 1648 } 1649 } 1650 1651 static void 1652 vnlru_kick_cond(void) 1653 { 1654 1655 if (vnlru_read_freevnodes() > wantfreevnodes) 1656 return; 1657 1658 if (vnlruproc_sig) 1659 return; 1660 mtx_lock(&vnode_list_mtx); 1661 vnlru_kick_locked(); 1662 mtx_unlock(&vnode_list_mtx); 1663 } 1664 1665 static void 1666 vnlru_proc_sleep(void) 1667 { 1668 1669 if (vnlruproc_sig) { 1670 vnlruproc_sig = 0; 1671 wakeup(&vnlruproc_sig); 1672 } 1673 msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz); 1674 } 1675 1676 /* 1677 * A lighter version of the machinery below. 1678 * 1679 * Tries to reach goals only by recycling free vnodes and does not invoke 1680 * uma_reclaim(UMA_RECLAIM_DRAIN). 1681 * 1682 * This works around pathological behavior in vnlru in presence of tons of free 1683 * vnodes, but without having to rewrite the machinery at this time. Said 1684 * behavior boils down to continuously trying to reclaim all kinds of vnodes 1685 * (cycling through all levels of "force") when the count is transiently above 1686 * limit. This happens a lot when all vnodes are used up and vn_alloc 1687 * speculatively increments the counter. 1688 * 1689 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with 1690 * 1 million files in total and 20 find(1) processes stating them in parallel 1691 * (one per each tree). 1692 * 1693 * On a kernel with only stock machinery this needs anywhere between 60 and 120 1694 * seconds to execute (time varies *wildly* between runs). With the workaround 1695 * it consistently stays around 20 seconds [it got further down with later 1696 * changes]. 1697 * 1698 * That is to say the entire thing needs a fundamental redesign (most notably 1699 * to accommodate faster recycling), the above only tries to get it ouf the way. 1700 * 1701 * Return values are: 1702 * -1 -- fallback to regular vnlru loop 1703 * 0 -- do nothing, go to sleep 1704 * >0 -- recycle this many vnodes 1705 */ 1706 static long 1707 vnlru_proc_light_pick(void) 1708 { 1709 u_long rnumvnodes, rfreevnodes; 1710 1711 if (vstir || vnlruproc_sig == 1) 1712 return (-1); 1713 1714 rnumvnodes = atomic_load_long(&numvnodes); 1715 rfreevnodes = vnlru_read_freevnodes(); 1716 1717 /* 1718 * vnode limit might have changed and now we may be at a significant 1719 * excess. Bail if we can't sort it out with free vnodes. 1720 * 1721 * Due to atomic updates the count can legitimately go above 1722 * the limit for a short period, don't bother doing anything in 1723 * that case. 1724 */ 1725 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) { 1726 if (rnumvnodes - rfreevnodes >= desiredvnodes || 1727 rfreevnodes <= wantfreevnodes) { 1728 return (-1); 1729 } 1730 1731 return (rnumvnodes - desiredvnodes); 1732 } 1733 1734 /* 1735 * Don't try to reach wantfreevnodes target if there are too few vnodes 1736 * to begin with. 1737 */ 1738 if (rnumvnodes < wantfreevnodes) { 1739 return (0); 1740 } 1741 1742 if (rfreevnodes < wantfreevnodes) { 1743 return (-1); 1744 } 1745 1746 return (0); 1747 } 1748 1749 static bool 1750 vnlru_proc_light(void) 1751 { 1752 long freecount; 1753 1754 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1755 1756 freecount = vnlru_proc_light_pick(); 1757 if (freecount == -1) 1758 return (false); 1759 1760 if (freecount != 0) { 1761 vnlru_free_vnlru(freecount); 1762 } 1763 1764 mtx_lock(&vnode_list_mtx); 1765 vnlru_proc_sleep(); 1766 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1767 return (true); 1768 } 1769 1770 static u_long uma_reclaim_calls; 1771 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS, 1772 &uma_reclaim_calls, 0, "Number of calls to uma_reclaim"); 1773 1774 static void 1775 vnlru_proc(void) 1776 { 1777 u_long rnumvnodes, rfreevnodes, target; 1778 unsigned long onumvnodes; 1779 int done, force, trigger, usevnodes; 1780 bool reclaim_nc_src, want_reread; 1781 1782 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1783 SHUTDOWN_PRI_FIRST); 1784 1785 force = 0; 1786 want_reread = false; 1787 for (;;) { 1788 kproc_suspend_check(vnlruproc); 1789 1790 if (force == 0 && vnlru_proc_light()) 1791 continue; 1792 1793 mtx_lock(&vnode_list_mtx); 1794 rnumvnodes = atomic_load_long(&numvnodes); 1795 1796 if (want_reread) { 1797 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1798 want_reread = false; 1799 } 1800 1801 /* 1802 * If numvnodes is too large (due to desiredvnodes being 1803 * adjusted using its sysctl, or emergency growth), first 1804 * try to reduce it by discarding free vnodes. 1805 */ 1806 if (rnumvnodes > desiredvnodes + 10) { 1807 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes); 1808 mtx_lock(&vnode_list_mtx); 1809 rnumvnodes = atomic_load_long(&numvnodes); 1810 } 1811 /* 1812 * Sleep if the vnode cache is in a good state. This is 1813 * when it is not over-full and has space for about a 4% 1814 * or 9% expansion (by growing its size or inexcessively 1815 * reducing free vnode count). Otherwise, try to reclaim 1816 * space for a 10% expansion. 1817 */ 1818 if (vstir && force == 0) { 1819 force = 1; 1820 vstir = false; 1821 } 1822 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1823 vnlru_proc_sleep(); 1824 continue; 1825 } 1826 rfreevnodes = vnlru_read_freevnodes(); 1827 1828 onumvnodes = rnumvnodes; 1829 /* 1830 * Calculate parameters for recycling. These are the same 1831 * throughout the loop to give some semblance of fairness. 1832 * The trigger point is to avoid recycling vnodes with lots 1833 * of resident pages. We aren't trying to free memory; we 1834 * are trying to recycle or at least free vnodes. 1835 */ 1836 if (rnumvnodes <= desiredvnodes) 1837 usevnodes = rnumvnodes - rfreevnodes; 1838 else 1839 usevnodes = rnumvnodes; 1840 if (usevnodes <= 0) 1841 usevnodes = 1; 1842 /* 1843 * The trigger value is chosen to give a conservatively 1844 * large value to ensure that it alone doesn't prevent 1845 * making progress. The value can easily be so large that 1846 * it is effectively infinite in some congested and 1847 * misconfigured cases, and this is necessary. Normally 1848 * it is about 8 to 100 (pages), which is quite large. 1849 */ 1850 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1851 if (force < 2) 1852 trigger = vsmalltrigger; 1853 reclaim_nc_src = force >= 3; 1854 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1855 target = target / 10 + 1; 1856 done = vlrureclaim(reclaim_nc_src, trigger, target); 1857 mtx_unlock(&vnode_list_mtx); 1858 /* 1859 * Total number of vnodes can transiently go slightly above the 1860 * limit (see vn_alloc_hard), no need to call uma_reclaim if 1861 * this happens. 1862 */ 1863 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes && 1864 numvnodes <= desiredvnodes) { 1865 uma_reclaim_calls++; 1866 uma_reclaim(UMA_RECLAIM_DRAIN); 1867 } 1868 if (done == 0) { 1869 if (force == 0 || force == 1) { 1870 force = 2; 1871 continue; 1872 } 1873 if (force == 2) { 1874 force = 3; 1875 continue; 1876 } 1877 want_reread = true; 1878 force = 0; 1879 vnlru_nowhere++; 1880 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1881 } else { 1882 want_reread = true; 1883 kern_yield(PRI_USER); 1884 } 1885 } 1886 } 1887 1888 static struct kproc_desc vnlru_kp = { 1889 "vnlru", 1890 vnlru_proc, 1891 &vnlruproc 1892 }; 1893 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1894 &vnlru_kp); 1895 1896 /* 1897 * Routines having to do with the management of the vnode table. 1898 */ 1899 1900 /* 1901 * Try to recycle a freed vnode. 1902 */ 1903 static int 1904 vtryrecycle(struct vnode *vp, bool isvnlru) 1905 { 1906 struct mount *vnmp; 1907 1908 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1909 VNPASS(vp->v_holdcnt > 0, vp); 1910 /* 1911 * This vnode may found and locked via some other list, if so we 1912 * can't recycle it yet. 1913 */ 1914 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1915 CTR2(KTR_VFS, 1916 "%s: impossible to recycle, vp %p lock is already held", 1917 __func__, vp); 1918 vdrop_recycle(vp); 1919 return (EWOULDBLOCK); 1920 } 1921 /* 1922 * Don't recycle if its filesystem is being suspended. 1923 */ 1924 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1925 VOP_UNLOCK(vp); 1926 CTR2(KTR_VFS, 1927 "%s: impossible to recycle, cannot start the write for %p", 1928 __func__, vp); 1929 vdrop_recycle(vp); 1930 return (EBUSY); 1931 } 1932 /* 1933 * If we got this far, we need to acquire the interlock and see if 1934 * anyone picked up this vnode from another list. If not, we will 1935 * mark it with DOOMED via vgonel() so that anyone who does find it 1936 * will skip over it. 1937 */ 1938 VI_LOCK(vp); 1939 if (vp->v_usecount) { 1940 VOP_UNLOCK(vp); 1941 vdropl_recycle(vp); 1942 vn_finished_write(vnmp); 1943 CTR2(KTR_VFS, 1944 "%s: impossible to recycle, %p is already referenced", 1945 __func__, vp); 1946 return (EBUSY); 1947 } 1948 if (!VN_IS_DOOMED(vp)) { 1949 if (isvnlru) 1950 recycles_free_count++; 1951 else 1952 counter_u64_add(direct_recycles_free_count, 1); 1953 vgonel(vp); 1954 } 1955 VOP_UNLOCK(vp); 1956 vdropl_recycle(vp); 1957 vn_finished_write(vnmp); 1958 return (0); 1959 } 1960 1961 /* 1962 * Allocate a new vnode. 1963 * 1964 * The operation never returns an error. Returning an error was disabled 1965 * in r145385 (dated 2005) with the following comment: 1966 * 1967 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1968 * 1969 * Given the age of this commit (almost 15 years at the time of writing this 1970 * comment) restoring the ability to fail requires a significant audit of 1971 * all codepaths. 1972 * 1973 * The routine can try to free a vnode or stall for up to 1 second waiting for 1974 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1975 */ 1976 static u_long vn_alloc_cyclecount; 1977 static u_long vn_alloc_sleeps; 1978 1979 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1980 "Number of times vnode allocation blocked waiting on vnlru"); 1981 1982 static struct vnode * __noinline 1983 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped) 1984 { 1985 u_long rfreevnodes; 1986 1987 if (bumped) { 1988 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) { 1989 atomic_subtract_long(&numvnodes, 1); 1990 bumped = false; 1991 } 1992 } 1993 1994 mtx_lock(&vnode_list_mtx); 1995 1996 rfreevnodes = vnlru_read_freevnodes(); 1997 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1998 vn_alloc_cyclecount = 0; 1999 vstir = true; 2000 } 2001 /* 2002 * Grow the vnode cache if it will not be above its target max after 2003 * growing. Otherwise, if there is at least one free vnode, try to 2004 * reclaim 1 item from it before growing the cache (possibly above its 2005 * target max if the reclamation failed or is delayed). 2006 */ 2007 if (vnlru_free_locked_direct(1) > 0) 2008 goto alloc; 2009 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 2010 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 2011 /* 2012 * Wait for space for a new vnode. 2013 */ 2014 if (bumped) { 2015 atomic_subtract_long(&numvnodes, 1); 2016 bumped = false; 2017 } 2018 mtx_lock(&vnode_list_mtx); 2019 vnlru_kick_locked(); 2020 vn_alloc_sleeps++; 2021 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 2022 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 2023 vnlru_read_freevnodes() > 1) 2024 vnlru_free_locked_direct(1); 2025 else 2026 mtx_unlock(&vnode_list_mtx); 2027 } 2028 alloc: 2029 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 2030 if (!bumped) 2031 atomic_add_long(&numvnodes, 1); 2032 vnlru_kick_cond(); 2033 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2034 } 2035 2036 static struct vnode * 2037 vn_alloc(struct mount *mp) 2038 { 2039 u_long rnumvnodes; 2040 2041 if (__predict_false(vn_alloc_cyclecount != 0)) 2042 return (vn_alloc_hard(mp, 0, false)); 2043 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 2044 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 2045 return (vn_alloc_hard(mp, rnumvnodes, true)); 2046 } 2047 2048 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2049 } 2050 2051 static void 2052 vn_free(struct vnode *vp) 2053 { 2054 2055 atomic_subtract_long(&numvnodes, 1); 2056 uma_zfree_smr(vnode_zone, vp); 2057 } 2058 2059 /* 2060 * Allocate a new vnode. 2061 */ 2062 int 2063 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 2064 struct vnode **vpp) 2065 { 2066 struct vnode *vp; 2067 struct thread *td; 2068 struct lock_object *lo; 2069 2070 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 2071 2072 KASSERT(vops->registered, 2073 ("%s: not registered vector op %p\n", __func__, vops)); 2074 cache_validate_vop_vector(mp, vops); 2075 2076 td = curthread; 2077 if (td->td_vp_reserved != NULL) { 2078 vp = td->td_vp_reserved; 2079 td->td_vp_reserved = NULL; 2080 } else { 2081 vp = vn_alloc(mp); 2082 } 2083 counter_u64_add(vnodes_created, 1); 2084 2085 vn_set_state(vp, VSTATE_UNINITIALIZED); 2086 2087 /* 2088 * Locks are given the generic name "vnode" when created. 2089 * Follow the historic practice of using the filesystem 2090 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 2091 * 2092 * Locks live in a witness group keyed on their name. Thus, 2093 * when a lock is renamed, it must also move from the witness 2094 * group of its old name to the witness group of its new name. 2095 * 2096 * The change only needs to be made when the vnode moves 2097 * from one filesystem type to another. We ensure that each 2098 * filesystem use a single static name pointer for its tag so 2099 * that we can compare pointers rather than doing a strcmp(). 2100 */ 2101 lo = &vp->v_vnlock->lock_object; 2102 #ifdef WITNESS 2103 if (lo->lo_name != tag) { 2104 #endif 2105 lo->lo_name = tag; 2106 #ifdef WITNESS 2107 WITNESS_DESTROY(lo); 2108 WITNESS_INIT(lo, tag); 2109 } 2110 #endif 2111 /* 2112 * By default, don't allow shared locks unless filesystems opt-in. 2113 */ 2114 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 2115 /* 2116 * Finalize various vnode identity bits. 2117 */ 2118 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2119 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2120 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2121 vp->v_type = VNON; 2122 vp->v_op = vops; 2123 vp->v_irflag = 0; 2124 v_init_counters(vp); 2125 vn_seqc_init(vp); 2126 vp->v_bufobj.bo_ops = &buf_ops_bio; 2127 #ifdef DIAGNOSTIC 2128 if (mp == NULL && vops != &dead_vnodeops) 2129 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2130 #endif 2131 #ifdef MAC 2132 mac_vnode_init(vp); 2133 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2134 mac_vnode_associate_singlelabel(mp, vp); 2135 #endif 2136 if (mp != NULL) { 2137 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2138 } 2139 2140 /* 2141 * For the filesystems which do not use vfs_hash_insert(), 2142 * still initialize v_hash to have vfs_hash_index() useful. 2143 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2144 * its own hashing. 2145 */ 2146 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2147 2148 *vpp = vp; 2149 return (0); 2150 } 2151 2152 void 2153 getnewvnode_reserve(void) 2154 { 2155 struct thread *td; 2156 2157 td = curthread; 2158 MPASS(td->td_vp_reserved == NULL); 2159 td->td_vp_reserved = vn_alloc(NULL); 2160 } 2161 2162 void 2163 getnewvnode_drop_reserve(void) 2164 { 2165 struct thread *td; 2166 2167 td = curthread; 2168 if (td->td_vp_reserved != NULL) { 2169 vn_free(td->td_vp_reserved); 2170 td->td_vp_reserved = NULL; 2171 } 2172 } 2173 2174 static void __noinline 2175 freevnode(struct vnode *vp) 2176 { 2177 struct bufobj *bo; 2178 2179 /* 2180 * The vnode has been marked for destruction, so free it. 2181 * 2182 * The vnode will be returned to the zone where it will 2183 * normally remain until it is needed for another vnode. We 2184 * need to cleanup (or verify that the cleanup has already 2185 * been done) any residual data left from its current use 2186 * so as not to contaminate the freshly allocated vnode. 2187 */ 2188 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2189 /* 2190 * Paired with vgone. 2191 */ 2192 vn_seqc_write_end_free(vp); 2193 2194 bo = &vp->v_bufobj; 2195 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2196 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2197 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2198 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2199 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2200 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2201 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2202 ("clean blk trie not empty")); 2203 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2204 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2205 ("dirty blk trie not empty")); 2206 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2207 ("Leaked inactivation")); 2208 VI_UNLOCK(vp); 2209 cache_assert_no_entries(vp); 2210 2211 #ifdef MAC 2212 mac_vnode_destroy(vp); 2213 #endif 2214 if (vp->v_pollinfo != NULL) { 2215 /* 2216 * Use LK_NOWAIT to shut up witness about the lock. We may get 2217 * here while having another vnode locked when trying to 2218 * satisfy a lookup and needing to recycle. 2219 */ 2220 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2221 destroy_vpollinfo(vp->v_pollinfo); 2222 VOP_UNLOCK(vp); 2223 vp->v_pollinfo = NULL; 2224 } 2225 vp->v_mountedhere = NULL; 2226 vp->v_unpcb = NULL; 2227 vp->v_rdev = NULL; 2228 vp->v_fifoinfo = NULL; 2229 vp->v_iflag = 0; 2230 vp->v_vflag = 0; 2231 bo->bo_flag = 0; 2232 vn_free(vp); 2233 } 2234 2235 /* 2236 * Delete from old mount point vnode list, if on one. 2237 */ 2238 static void 2239 delmntque(struct vnode *vp) 2240 { 2241 struct mount *mp; 2242 2243 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2244 2245 mp = vp->v_mount; 2246 MNT_ILOCK(mp); 2247 VI_LOCK(vp); 2248 vp->v_mount = NULL; 2249 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2250 ("bad mount point vnode list size")); 2251 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2252 mp->mnt_nvnodelistsize--; 2253 MNT_REL(mp); 2254 MNT_IUNLOCK(mp); 2255 /* 2256 * The caller expects the interlock to be still held. 2257 */ 2258 ASSERT_VI_LOCKED(vp, __func__); 2259 } 2260 2261 static int 2262 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2263 { 2264 2265 KASSERT(vp->v_mount == NULL, 2266 ("insmntque: vnode already on per mount vnode list")); 2267 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2268 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2269 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2270 } else { 2271 KASSERT(!dtr, 2272 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2273 __func__)); 2274 } 2275 2276 /* 2277 * We acquire the vnode interlock early to ensure that the 2278 * vnode cannot be recycled by another process releasing a 2279 * holdcnt on it before we get it on both the vnode list 2280 * and the active vnode list. The mount mutex protects only 2281 * manipulation of the vnode list and the vnode freelist 2282 * mutex protects only manipulation of the active vnode list. 2283 * Hence the need to hold the vnode interlock throughout. 2284 */ 2285 MNT_ILOCK(mp); 2286 VI_LOCK(vp); 2287 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2288 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2289 mp->mnt_nvnodelistsize == 0)) && 2290 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2291 VI_UNLOCK(vp); 2292 MNT_IUNLOCK(mp); 2293 if (dtr) { 2294 vp->v_data = NULL; 2295 vp->v_op = &dead_vnodeops; 2296 vgone(vp); 2297 vput(vp); 2298 } 2299 return (EBUSY); 2300 } 2301 vp->v_mount = mp; 2302 MNT_REF(mp); 2303 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2304 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2305 ("neg mount point vnode list size")); 2306 mp->mnt_nvnodelistsize++; 2307 VI_UNLOCK(vp); 2308 MNT_IUNLOCK(mp); 2309 return (0); 2310 } 2311 2312 /* 2313 * Insert into list of vnodes for the new mount point, if available. 2314 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2315 * leaves handling of the vnode to the caller. 2316 */ 2317 int 2318 insmntque(struct vnode *vp, struct mount *mp) 2319 { 2320 return (insmntque1_int(vp, mp, true)); 2321 } 2322 2323 int 2324 insmntque1(struct vnode *vp, struct mount *mp) 2325 { 2326 return (insmntque1_int(vp, mp, false)); 2327 } 2328 2329 /* 2330 * Flush out and invalidate all buffers associated with a bufobj 2331 * Called with the underlying object locked. 2332 */ 2333 int 2334 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2335 { 2336 int error; 2337 2338 BO_LOCK(bo); 2339 if (flags & V_SAVE) { 2340 error = bufobj_wwait(bo, slpflag, slptimeo); 2341 if (error) { 2342 BO_UNLOCK(bo); 2343 return (error); 2344 } 2345 if (bo->bo_dirty.bv_cnt > 0) { 2346 BO_UNLOCK(bo); 2347 do { 2348 error = BO_SYNC(bo, MNT_WAIT); 2349 } while (error == ERELOOKUP); 2350 if (error != 0) 2351 return (error); 2352 BO_LOCK(bo); 2353 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2354 BO_UNLOCK(bo); 2355 return (EBUSY); 2356 } 2357 } 2358 } 2359 /* 2360 * If you alter this loop please notice that interlock is dropped and 2361 * reacquired in flushbuflist. Special care is needed to ensure that 2362 * no race conditions occur from this. 2363 */ 2364 do { 2365 error = flushbuflist(&bo->bo_clean, 2366 flags, bo, slpflag, slptimeo); 2367 if (error == 0 && !(flags & V_CLEANONLY)) 2368 error = flushbuflist(&bo->bo_dirty, 2369 flags, bo, slpflag, slptimeo); 2370 if (error != 0 && error != EAGAIN) { 2371 BO_UNLOCK(bo); 2372 return (error); 2373 } 2374 } while (error != 0); 2375 2376 /* 2377 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2378 * have write I/O in-progress but if there is a VM object then the 2379 * VM object can also have read-I/O in-progress. 2380 */ 2381 do { 2382 bufobj_wwait(bo, 0, 0); 2383 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2384 BO_UNLOCK(bo); 2385 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2386 BO_LOCK(bo); 2387 } 2388 } while (bo->bo_numoutput > 0); 2389 BO_UNLOCK(bo); 2390 2391 /* 2392 * Destroy the copy in the VM cache, too. 2393 */ 2394 if (bo->bo_object != NULL && 2395 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2396 VM_OBJECT_WLOCK(bo->bo_object); 2397 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2398 OBJPR_CLEANONLY : 0); 2399 VM_OBJECT_WUNLOCK(bo->bo_object); 2400 } 2401 2402 #ifdef INVARIANTS 2403 BO_LOCK(bo); 2404 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2405 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2406 bo->bo_clean.bv_cnt > 0)) 2407 panic("vinvalbuf: flush failed"); 2408 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2409 bo->bo_dirty.bv_cnt > 0) 2410 panic("vinvalbuf: flush dirty failed"); 2411 BO_UNLOCK(bo); 2412 #endif 2413 return (0); 2414 } 2415 2416 /* 2417 * Flush out and invalidate all buffers associated with a vnode. 2418 * Called with the underlying object locked. 2419 */ 2420 int 2421 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2422 { 2423 2424 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2425 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2426 if (vp->v_object != NULL && vp->v_object->handle != vp) 2427 return (0); 2428 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2429 } 2430 2431 /* 2432 * Flush out buffers on the specified list. 2433 * 2434 */ 2435 static int 2436 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2437 int slptimeo) 2438 { 2439 struct buf *bp, *nbp; 2440 int retval, error; 2441 daddr_t lblkno; 2442 b_xflags_t xflags; 2443 2444 ASSERT_BO_WLOCKED(bo); 2445 2446 retval = 0; 2447 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2448 /* 2449 * If we are flushing both V_NORMAL and V_ALT buffers then 2450 * do not skip any buffers. If we are flushing only V_NORMAL 2451 * buffers then skip buffers marked as BX_ALTDATA. If we are 2452 * flushing only V_ALT buffers then skip buffers not marked 2453 * as BX_ALTDATA. 2454 */ 2455 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2456 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2457 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2458 continue; 2459 } 2460 if (nbp != NULL) { 2461 lblkno = nbp->b_lblkno; 2462 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2463 } 2464 retval = EAGAIN; 2465 error = BUF_TIMELOCK(bp, 2466 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2467 "flushbuf", slpflag, slptimeo); 2468 if (error) { 2469 BO_LOCK(bo); 2470 return (error != ENOLCK ? error : EAGAIN); 2471 } 2472 KASSERT(bp->b_bufobj == bo, 2473 ("bp %p wrong b_bufobj %p should be %p", 2474 bp, bp->b_bufobj, bo)); 2475 /* 2476 * XXX Since there are no node locks for NFS, I 2477 * believe there is a slight chance that a delayed 2478 * write will occur while sleeping just above, so 2479 * check for it. 2480 */ 2481 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2482 (flags & V_SAVE)) { 2483 bremfree(bp); 2484 bp->b_flags |= B_ASYNC; 2485 bwrite(bp); 2486 BO_LOCK(bo); 2487 return (EAGAIN); /* XXX: why not loop ? */ 2488 } 2489 bremfree(bp); 2490 bp->b_flags |= (B_INVAL | B_RELBUF); 2491 bp->b_flags &= ~B_ASYNC; 2492 brelse(bp); 2493 BO_LOCK(bo); 2494 if (nbp == NULL) 2495 break; 2496 nbp = gbincore(bo, lblkno); 2497 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2498 != xflags) 2499 break; /* nbp invalid */ 2500 } 2501 return (retval); 2502 } 2503 2504 int 2505 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2506 { 2507 struct buf *bp; 2508 int error; 2509 daddr_t lblkno; 2510 2511 ASSERT_BO_LOCKED(bo); 2512 2513 for (lblkno = startn;;) { 2514 again: 2515 bp = buf_lookup_ge(bufv, lblkno); 2516 if (bp == NULL || bp->b_lblkno >= endn) 2517 break; 2518 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2519 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2520 if (error != 0) { 2521 BO_RLOCK(bo); 2522 if (error == ENOLCK) 2523 goto again; 2524 return (error); 2525 } 2526 KASSERT(bp->b_bufobj == bo, 2527 ("bp %p wrong b_bufobj %p should be %p", 2528 bp, bp->b_bufobj, bo)); 2529 lblkno = bp->b_lblkno + 1; 2530 if ((bp->b_flags & B_MANAGED) == 0) 2531 bremfree(bp); 2532 bp->b_flags |= B_RELBUF; 2533 /* 2534 * In the VMIO case, use the B_NOREUSE flag to hint that the 2535 * pages backing each buffer in the range are unlikely to be 2536 * reused. Dirty buffers will have the hint applied once 2537 * they've been written. 2538 */ 2539 if ((bp->b_flags & B_VMIO) != 0) 2540 bp->b_flags |= B_NOREUSE; 2541 brelse(bp); 2542 BO_RLOCK(bo); 2543 } 2544 return (0); 2545 } 2546 2547 /* 2548 * Truncate a file's buffer and pages to a specified length. This 2549 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2550 * sync activity. 2551 */ 2552 int 2553 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2554 { 2555 struct buf *bp, *nbp; 2556 struct bufobj *bo; 2557 daddr_t startlbn; 2558 2559 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2560 vp, blksize, (uintmax_t)length); 2561 2562 /* 2563 * Round up to the *next* lbn. 2564 */ 2565 startlbn = howmany(length, blksize); 2566 2567 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2568 2569 bo = &vp->v_bufobj; 2570 restart_unlocked: 2571 BO_LOCK(bo); 2572 2573 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2574 ; 2575 2576 if (length > 0) { 2577 /* 2578 * Write out vnode metadata, e.g. indirect blocks. 2579 */ 2580 restartsync: 2581 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2582 if (bp->b_lblkno >= 0) 2583 continue; 2584 /* 2585 * Since we hold the vnode lock this should only 2586 * fail if we're racing with the buf daemon. 2587 */ 2588 if (BUF_LOCK(bp, 2589 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2590 BO_LOCKPTR(bo)) == ENOLCK) 2591 goto restart_unlocked; 2592 2593 VNASSERT((bp->b_flags & B_DELWRI), vp, 2594 ("buf(%p) on dirty queue without DELWRI", bp)); 2595 2596 bremfree(bp); 2597 bawrite(bp); 2598 BO_LOCK(bo); 2599 goto restartsync; 2600 } 2601 } 2602 2603 bufobj_wwait(bo, 0, 0); 2604 BO_UNLOCK(bo); 2605 vnode_pager_setsize(vp, length); 2606 2607 return (0); 2608 } 2609 2610 /* 2611 * Invalidate the cached pages of a file's buffer within the range of block 2612 * numbers [startlbn, endlbn). 2613 */ 2614 void 2615 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2616 int blksize) 2617 { 2618 struct bufobj *bo; 2619 off_t start, end; 2620 2621 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2622 2623 start = blksize * startlbn; 2624 end = blksize * endlbn; 2625 2626 bo = &vp->v_bufobj; 2627 BO_LOCK(bo); 2628 MPASS(blksize == bo->bo_bsize); 2629 2630 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2631 ; 2632 2633 BO_UNLOCK(bo); 2634 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2635 } 2636 2637 static int 2638 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2639 daddr_t startlbn, daddr_t endlbn) 2640 { 2641 struct bufv *bv; 2642 struct buf *bp, *nbp; 2643 uint8_t anyfreed; 2644 bool clean; 2645 2646 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2647 ASSERT_BO_LOCKED(bo); 2648 2649 anyfreed = 1; 2650 clean = true; 2651 do { 2652 bv = clean ? &bo->bo_clean : &bo->bo_dirty; 2653 bp = buf_lookup_ge(bv, startlbn); 2654 if (bp == NULL) 2655 continue; 2656 TAILQ_FOREACH_FROM_SAFE(bp, &bv->bv_hd, b_bobufs, nbp) { 2657 if (bp->b_lblkno >= endlbn) 2658 break; 2659 if (BUF_LOCK(bp, 2660 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2661 BO_LOCKPTR(bo)) == ENOLCK) { 2662 BO_LOCK(bo); 2663 return (EAGAIN); 2664 } 2665 2666 bremfree(bp); 2667 bp->b_flags |= B_INVAL | B_RELBUF; 2668 bp->b_flags &= ~B_ASYNC; 2669 brelse(bp); 2670 anyfreed = 2; 2671 2672 BO_LOCK(bo); 2673 if (nbp != NULL && 2674 (((nbp->b_xflags & 2675 (clean ? BX_VNCLEAN : BX_VNDIRTY)) == 0) || 2676 nbp->b_vp != vp || 2677 (nbp->b_flags & B_DELWRI) == (clean? B_DELWRI: 0))) 2678 return (EAGAIN); 2679 } 2680 } while (clean = !clean, anyfreed-- > 0); 2681 return (0); 2682 } 2683 2684 static void 2685 buf_vlist_remove(struct buf *bp) 2686 { 2687 struct bufv *bv; 2688 b_xflags_t flags; 2689 2690 flags = bp->b_xflags; 2691 2692 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2693 ASSERT_BO_WLOCKED(bp->b_bufobj); 2694 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2695 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2696 ("%s: buffer %p has invalid queue state", __func__, bp)); 2697 2698 if ((flags & BX_VNDIRTY) != 0) 2699 bv = &bp->b_bufobj->bo_dirty; 2700 else 2701 bv = &bp->b_bufobj->bo_clean; 2702 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2703 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2704 bv->bv_cnt--; 2705 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2706 } 2707 2708 /* 2709 * Add the buffer to the sorted clean or dirty block list. Return zero on 2710 * success, EEXIST if a buffer with this identity already exists, or another 2711 * error on allocation failure. 2712 */ 2713 static inline int 2714 buf_vlist_find_or_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2715 { 2716 struct bufv *bv; 2717 struct buf *n; 2718 int error; 2719 2720 ASSERT_BO_WLOCKED(bo); 2721 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2722 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2723 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2724 ("dead bo %p", bo)); 2725 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags, 2726 ("buf_vlist_add: b_xflags %#x not set on bp %p", xflags, bp)); 2727 2728 if (xflags & BX_VNDIRTY) 2729 bv = &bo->bo_dirty; 2730 else 2731 bv = &bo->bo_clean; 2732 2733 error = buf_insert_lookup_le(bv, bp, &n); 2734 if (n == NULL) { 2735 KASSERT(error != EEXIST, 2736 ("buf_vlist_add: EEXIST but no existing buf found: bp %p", 2737 bp)); 2738 } else { 2739 KASSERT(n->b_lblkno <= bp->b_lblkno, 2740 ("buf_vlist_add: out of order insert/lookup: bp %p n %p", 2741 bp, n)); 2742 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST), 2743 ("buf_vlist_add: inconsistent result for existing buf: " 2744 "error %d bp %p n %p", error, bp, n)); 2745 } 2746 if (error != 0) 2747 return (error); 2748 2749 /* Keep the list ordered. */ 2750 if (n == NULL) { 2751 KASSERT(TAILQ_EMPTY(&bv->bv_hd) || 2752 bp->b_lblkno < TAILQ_FIRST(&bv->bv_hd)->b_lblkno, 2753 ("buf_vlist_add: queue order: " 2754 "%p should be before first %p", 2755 bp, TAILQ_FIRST(&bv->bv_hd))); 2756 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2757 } else { 2758 KASSERT(TAILQ_NEXT(n, b_bobufs) == NULL || 2759 bp->b_lblkno < TAILQ_NEXT(n, b_bobufs)->b_lblkno, 2760 ("buf_vlist_add: queue order: " 2761 "%p should be before next %p", 2762 bp, TAILQ_NEXT(n, b_bobufs))); 2763 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2764 } 2765 2766 bv->bv_cnt++; 2767 return (0); 2768 } 2769 2770 /* 2771 * Add the buffer to the sorted clean or dirty block list. 2772 * 2773 * NOTE: xflags is passed as a constant, optimizing this inline function! 2774 */ 2775 static void 2776 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2777 { 2778 int error; 2779 2780 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0, 2781 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2782 bp->b_xflags |= xflags; 2783 error = buf_vlist_find_or_add(bp, bo, xflags); 2784 if (error) 2785 panic("buf_vlist_add: error=%d", error); 2786 } 2787 2788 /* 2789 * Look up a buffer using the buffer tries. 2790 */ 2791 struct buf * 2792 gbincore(struct bufobj *bo, daddr_t lblkno) 2793 { 2794 struct buf *bp; 2795 2796 ASSERT_BO_LOCKED(bo); 2797 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2798 if (bp != NULL) 2799 return (bp); 2800 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2801 } 2802 2803 /* 2804 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2805 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2806 * stability of the result. Like other lockless lookups, the found buf may 2807 * already be invalid by the time this function returns. 2808 */ 2809 struct buf * 2810 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2811 { 2812 struct buf *bp; 2813 2814 ASSERT_BO_UNLOCKED(bo); 2815 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2816 if (bp != NULL) 2817 return (bp); 2818 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2819 } 2820 2821 /* 2822 * Associate a buffer with a vnode. 2823 */ 2824 int 2825 bgetvp(struct vnode *vp, struct buf *bp) 2826 { 2827 struct bufobj *bo; 2828 int error; 2829 2830 bo = &vp->v_bufobj; 2831 ASSERT_BO_UNLOCKED(bo); 2832 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2833 2834 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2835 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2836 ("bgetvp: bp already attached! %p", bp)); 2837 2838 /* 2839 * Add the buf to the vnode's clean list unless we lost a race and find 2840 * an existing buf in either dirty or clean. 2841 */ 2842 bp->b_vp = vp; 2843 bp->b_bufobj = bo; 2844 bp->b_xflags |= BX_VNCLEAN; 2845 error = EEXIST; 2846 BO_LOCK(bo); 2847 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL) 2848 error = buf_vlist_find_or_add(bp, bo, BX_VNCLEAN); 2849 BO_UNLOCK(bo); 2850 if (__predict_true(error == 0)) { 2851 vhold(vp); 2852 return (0); 2853 } 2854 if (error != EEXIST) 2855 panic("bgetvp: buf_vlist_add error: %d", error); 2856 bp->b_vp = NULL; 2857 bp->b_bufobj = NULL; 2858 bp->b_xflags &= ~BX_VNCLEAN; 2859 return (error); 2860 } 2861 2862 /* 2863 * Disassociate a buffer from a vnode. 2864 */ 2865 void 2866 brelvp(struct buf *bp) 2867 { 2868 struct bufobj *bo; 2869 struct vnode *vp; 2870 2871 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2872 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2873 2874 /* 2875 * Delete from old vnode list, if on one. 2876 */ 2877 vp = bp->b_vp; /* XXX */ 2878 bo = bp->b_bufobj; 2879 BO_LOCK(bo); 2880 buf_vlist_remove(bp); 2881 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2882 bo->bo_flag &= ~BO_ONWORKLST; 2883 mtx_lock(&sync_mtx); 2884 LIST_REMOVE(bo, bo_synclist); 2885 syncer_worklist_len--; 2886 mtx_unlock(&sync_mtx); 2887 } 2888 bp->b_vp = NULL; 2889 bp->b_bufobj = NULL; 2890 BO_UNLOCK(bo); 2891 vdrop(vp); 2892 } 2893 2894 /* 2895 * Add an item to the syncer work queue. 2896 */ 2897 static void 2898 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2899 { 2900 int slot; 2901 2902 ASSERT_BO_WLOCKED(bo); 2903 2904 mtx_lock(&sync_mtx); 2905 if (bo->bo_flag & BO_ONWORKLST) 2906 LIST_REMOVE(bo, bo_synclist); 2907 else { 2908 bo->bo_flag |= BO_ONWORKLST; 2909 syncer_worklist_len++; 2910 } 2911 2912 if (delay > syncer_maxdelay - 2) 2913 delay = syncer_maxdelay - 2; 2914 slot = (syncer_delayno + delay) & syncer_mask; 2915 2916 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2917 mtx_unlock(&sync_mtx); 2918 } 2919 2920 static int 2921 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2922 { 2923 int error, len; 2924 2925 mtx_lock(&sync_mtx); 2926 len = syncer_worklist_len - sync_vnode_count; 2927 mtx_unlock(&sync_mtx); 2928 error = SYSCTL_OUT(req, &len, sizeof(len)); 2929 return (error); 2930 } 2931 2932 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2933 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2934 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2935 2936 static struct proc *updateproc; 2937 static void sched_sync(void); 2938 static struct kproc_desc up_kp = { 2939 "syncer", 2940 sched_sync, 2941 &updateproc 2942 }; 2943 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2944 2945 static int 2946 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2947 { 2948 struct vnode *vp; 2949 struct mount *mp; 2950 2951 *bo = LIST_FIRST(slp); 2952 if (*bo == NULL) 2953 return (0); 2954 vp = bo2vnode(*bo); 2955 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2956 return (1); 2957 /* 2958 * We use vhold in case the vnode does not 2959 * successfully sync. vhold prevents the vnode from 2960 * going away when we unlock the sync_mtx so that 2961 * we can acquire the vnode interlock. 2962 */ 2963 vholdl(vp); 2964 mtx_unlock(&sync_mtx); 2965 VI_UNLOCK(vp); 2966 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2967 vdrop(vp); 2968 mtx_lock(&sync_mtx); 2969 return (*bo == LIST_FIRST(slp)); 2970 } 2971 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2972 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2973 ("suspended mp syncing vp %p", vp)); 2974 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2975 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2976 VOP_UNLOCK(vp); 2977 vn_finished_write(mp); 2978 BO_LOCK(*bo); 2979 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2980 /* 2981 * Put us back on the worklist. The worklist 2982 * routine will remove us from our current 2983 * position and then add us back in at a later 2984 * position. 2985 */ 2986 vn_syncer_add_to_worklist(*bo, syncdelay); 2987 } 2988 BO_UNLOCK(*bo); 2989 vdrop(vp); 2990 mtx_lock(&sync_mtx); 2991 return (0); 2992 } 2993 2994 static int first_printf = 1; 2995 2996 /* 2997 * System filesystem synchronizer daemon. 2998 */ 2999 static void 3000 sched_sync(void) 3001 { 3002 struct synclist *next, *slp; 3003 struct bufobj *bo; 3004 long starttime; 3005 struct thread *td = curthread; 3006 int last_work_seen; 3007 int net_worklist_len; 3008 int syncer_final_iter; 3009 int error; 3010 3011 last_work_seen = 0; 3012 syncer_final_iter = 0; 3013 syncer_state = SYNCER_RUNNING; 3014 starttime = time_uptime; 3015 td->td_pflags |= TDP_NORUNNINGBUF; 3016 3017 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 3018 SHUTDOWN_PRI_LAST); 3019 3020 mtx_lock(&sync_mtx); 3021 for (;;) { 3022 if (syncer_state == SYNCER_FINAL_DELAY && 3023 syncer_final_iter == 0) { 3024 mtx_unlock(&sync_mtx); 3025 kproc_suspend_check(td->td_proc); 3026 mtx_lock(&sync_mtx); 3027 } 3028 net_worklist_len = syncer_worklist_len - sync_vnode_count; 3029 if (syncer_state != SYNCER_RUNNING && 3030 starttime != time_uptime) { 3031 if (first_printf) { 3032 printf("\nSyncing disks, vnodes remaining... "); 3033 first_printf = 0; 3034 } 3035 printf("%d ", net_worklist_len); 3036 } 3037 starttime = time_uptime; 3038 3039 /* 3040 * Push files whose dirty time has expired. Be careful 3041 * of interrupt race on slp queue. 3042 * 3043 * Skip over empty worklist slots when shutting down. 3044 */ 3045 do { 3046 slp = &syncer_workitem_pending[syncer_delayno]; 3047 syncer_delayno += 1; 3048 if (syncer_delayno == syncer_maxdelay) 3049 syncer_delayno = 0; 3050 next = &syncer_workitem_pending[syncer_delayno]; 3051 /* 3052 * If the worklist has wrapped since the 3053 * it was emptied of all but syncer vnodes, 3054 * switch to the FINAL_DELAY state and run 3055 * for one more second. 3056 */ 3057 if (syncer_state == SYNCER_SHUTTING_DOWN && 3058 net_worklist_len == 0 && 3059 last_work_seen == syncer_delayno) { 3060 syncer_state = SYNCER_FINAL_DELAY; 3061 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 3062 } 3063 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 3064 syncer_worklist_len > 0); 3065 3066 /* 3067 * Keep track of the last time there was anything 3068 * on the worklist other than syncer vnodes. 3069 * Return to the SHUTTING_DOWN state if any 3070 * new work appears. 3071 */ 3072 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 3073 last_work_seen = syncer_delayno; 3074 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 3075 syncer_state = SYNCER_SHUTTING_DOWN; 3076 while (!LIST_EMPTY(slp)) { 3077 error = sync_vnode(slp, &bo, td); 3078 if (error == 1) { 3079 LIST_REMOVE(bo, bo_synclist); 3080 LIST_INSERT_HEAD(next, bo, bo_synclist); 3081 continue; 3082 } 3083 3084 if (first_printf == 0) { 3085 /* 3086 * Drop the sync mutex, because some watchdog 3087 * drivers need to sleep while patting 3088 */ 3089 mtx_unlock(&sync_mtx); 3090 wdog_kern_pat(WD_LASTVAL); 3091 mtx_lock(&sync_mtx); 3092 } 3093 } 3094 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 3095 syncer_final_iter--; 3096 /* 3097 * The variable rushjob allows the kernel to speed up the 3098 * processing of the filesystem syncer process. A rushjob 3099 * value of N tells the filesystem syncer to process the next 3100 * N seconds worth of work on its queue ASAP. Currently rushjob 3101 * is used by the soft update code to speed up the filesystem 3102 * syncer process when the incore state is getting so far 3103 * ahead of the disk that the kernel memory pool is being 3104 * threatened with exhaustion. 3105 */ 3106 if (rushjob > 0) { 3107 rushjob -= 1; 3108 continue; 3109 } 3110 /* 3111 * Just sleep for a short period of time between 3112 * iterations when shutting down to allow some I/O 3113 * to happen. 3114 * 3115 * If it has taken us less than a second to process the 3116 * current work, then wait. Otherwise start right over 3117 * again. We can still lose time if any single round 3118 * takes more than two seconds, but it does not really 3119 * matter as we are just trying to generally pace the 3120 * filesystem activity. 3121 */ 3122 if (syncer_state != SYNCER_RUNNING || 3123 time_uptime == starttime) { 3124 thread_lock(td); 3125 sched_prio(td, PPAUSE); 3126 thread_unlock(td); 3127 } 3128 if (syncer_state != SYNCER_RUNNING) 3129 cv_timedwait(&sync_wakeup, &sync_mtx, 3130 hz / SYNCER_SHUTDOWN_SPEEDUP); 3131 else if (time_uptime == starttime) 3132 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3133 } 3134 } 3135 3136 /* 3137 * Request the syncer daemon to speed up its work. 3138 * We never push it to speed up more than half of its 3139 * normal turn time, otherwise it could take over the cpu. 3140 */ 3141 int 3142 speedup_syncer(void) 3143 { 3144 int ret = 0; 3145 3146 mtx_lock(&sync_mtx); 3147 if (rushjob < syncdelay / 2) { 3148 rushjob += 1; 3149 stat_rush_requests += 1; 3150 ret = 1; 3151 } 3152 mtx_unlock(&sync_mtx); 3153 cv_broadcast(&sync_wakeup); 3154 return (ret); 3155 } 3156 3157 /* 3158 * Tell the syncer to speed up its work and run though its work 3159 * list several times, then tell it to shut down. 3160 */ 3161 static void 3162 syncer_shutdown(void *arg, int howto) 3163 { 3164 3165 if (howto & RB_NOSYNC) 3166 return; 3167 mtx_lock(&sync_mtx); 3168 syncer_state = SYNCER_SHUTTING_DOWN; 3169 rushjob = 0; 3170 mtx_unlock(&sync_mtx); 3171 cv_broadcast(&sync_wakeup); 3172 kproc_shutdown(arg, howto); 3173 } 3174 3175 void 3176 syncer_suspend(void) 3177 { 3178 3179 syncer_shutdown(updateproc, 0); 3180 } 3181 3182 void 3183 syncer_resume(void) 3184 { 3185 3186 mtx_lock(&sync_mtx); 3187 first_printf = 1; 3188 syncer_state = SYNCER_RUNNING; 3189 mtx_unlock(&sync_mtx); 3190 cv_broadcast(&sync_wakeup); 3191 kproc_resume(updateproc); 3192 } 3193 3194 /* 3195 * Move the buffer between the clean and dirty lists of its vnode. 3196 */ 3197 void 3198 reassignbuf(struct buf *bp) 3199 { 3200 struct vnode *vp; 3201 struct bufobj *bo; 3202 int delay; 3203 #ifdef INVARIANTS 3204 struct bufv *bv; 3205 #endif 3206 3207 vp = bp->b_vp; 3208 bo = bp->b_bufobj; 3209 3210 KASSERT((bp->b_flags & B_PAGING) == 0, 3211 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3212 3213 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3214 bp, bp->b_vp, bp->b_flags); 3215 3216 BO_LOCK(bo); 3217 if ((bo->bo_flag & BO_NONSTERILE) == 0) { 3218 /* 3219 * Coordinate with getblk's unlocked lookup. Make 3220 * BO_NONSTERILE visible before the first reassignbuf produces 3221 * any side effect. This could be outside the bo lock if we 3222 * used a separate atomic flag field. 3223 */ 3224 bo->bo_flag |= BO_NONSTERILE; 3225 atomic_thread_fence_rel(); 3226 } 3227 buf_vlist_remove(bp); 3228 3229 /* 3230 * If dirty, put on list of dirty buffers; otherwise insert onto list 3231 * of clean buffers. 3232 */ 3233 if (bp->b_flags & B_DELWRI) { 3234 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3235 switch (vp->v_type) { 3236 case VDIR: 3237 delay = dirdelay; 3238 break; 3239 case VCHR: 3240 delay = metadelay; 3241 break; 3242 default: 3243 delay = filedelay; 3244 } 3245 vn_syncer_add_to_worklist(bo, delay); 3246 } 3247 buf_vlist_add(bp, bo, BX_VNDIRTY); 3248 } else { 3249 buf_vlist_add(bp, bo, BX_VNCLEAN); 3250 3251 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3252 mtx_lock(&sync_mtx); 3253 LIST_REMOVE(bo, bo_synclist); 3254 syncer_worklist_len--; 3255 mtx_unlock(&sync_mtx); 3256 bo->bo_flag &= ~BO_ONWORKLST; 3257 } 3258 } 3259 #ifdef INVARIANTS 3260 bv = &bo->bo_clean; 3261 bp = TAILQ_FIRST(&bv->bv_hd); 3262 KASSERT(bp == NULL || bp->b_bufobj == bo, 3263 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3264 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3265 KASSERT(bp == NULL || bp->b_bufobj == bo, 3266 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3267 bv = &bo->bo_dirty; 3268 bp = TAILQ_FIRST(&bv->bv_hd); 3269 KASSERT(bp == NULL || bp->b_bufobj == bo, 3270 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3271 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3272 KASSERT(bp == NULL || bp->b_bufobj == bo, 3273 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3274 #endif 3275 BO_UNLOCK(bo); 3276 } 3277 3278 static void 3279 v_init_counters(struct vnode *vp) 3280 { 3281 3282 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3283 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3284 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3285 3286 refcount_init(&vp->v_holdcnt, 1); 3287 refcount_init(&vp->v_usecount, 1); 3288 } 3289 3290 /* 3291 * Get a usecount on a vnode. 3292 * 3293 * vget and vget_finish may fail to lock the vnode if they lose a race against 3294 * it being doomed. LK_RETRY can be passed in flags to lock it anyway. 3295 * 3296 * Consumers which don't guarantee liveness of the vnode can use SMR to 3297 * try to get a reference. Note this operation can fail since the vnode 3298 * may be awaiting getting freed by the time they get to it. 3299 */ 3300 enum vgetstate 3301 vget_prep_smr(struct vnode *vp) 3302 { 3303 enum vgetstate vs; 3304 3305 VFS_SMR_ASSERT_ENTERED(); 3306 3307 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3308 vs = VGET_USECOUNT; 3309 } else { 3310 if (vhold_smr(vp)) 3311 vs = VGET_HOLDCNT; 3312 else 3313 vs = VGET_NONE; 3314 } 3315 return (vs); 3316 } 3317 3318 enum vgetstate 3319 vget_prep(struct vnode *vp) 3320 { 3321 enum vgetstate vs; 3322 3323 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3324 vs = VGET_USECOUNT; 3325 } else { 3326 vhold(vp); 3327 vs = VGET_HOLDCNT; 3328 } 3329 return (vs); 3330 } 3331 3332 void 3333 vget_abort(struct vnode *vp, enum vgetstate vs) 3334 { 3335 3336 switch (vs) { 3337 case VGET_USECOUNT: 3338 vrele(vp); 3339 break; 3340 case VGET_HOLDCNT: 3341 vdrop(vp); 3342 break; 3343 default: 3344 __assert_unreachable(); 3345 } 3346 } 3347 3348 int 3349 vget(struct vnode *vp, int flags) 3350 { 3351 enum vgetstate vs; 3352 3353 vs = vget_prep(vp); 3354 return (vget_finish(vp, flags, vs)); 3355 } 3356 3357 int 3358 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3359 { 3360 int error; 3361 3362 if ((flags & LK_INTERLOCK) != 0) 3363 ASSERT_VI_LOCKED(vp, __func__); 3364 else 3365 ASSERT_VI_UNLOCKED(vp, __func__); 3366 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3367 VNPASS(vp->v_holdcnt > 0, vp); 3368 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3369 3370 error = vn_lock(vp, flags); 3371 if (__predict_false(error != 0)) { 3372 vget_abort(vp, vs); 3373 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3374 vp); 3375 return (error); 3376 } 3377 3378 vget_finish_ref(vp, vs); 3379 return (0); 3380 } 3381 3382 void 3383 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3384 { 3385 int old; 3386 3387 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3388 VNPASS(vp->v_holdcnt > 0, vp); 3389 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3390 3391 if (vs == VGET_USECOUNT) 3392 return; 3393 3394 /* 3395 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3396 * the vnode around. Otherwise someone else lended their hold count and 3397 * we have to drop ours. 3398 */ 3399 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3400 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3401 if (old != 0) { 3402 #ifdef INVARIANTS 3403 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3404 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3405 #else 3406 refcount_release(&vp->v_holdcnt); 3407 #endif 3408 } 3409 } 3410 3411 void 3412 vref(struct vnode *vp) 3413 { 3414 enum vgetstate vs; 3415 3416 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3417 vs = vget_prep(vp); 3418 vget_finish_ref(vp, vs); 3419 } 3420 3421 void 3422 vrefact(struct vnode *vp) 3423 { 3424 int old __diagused; 3425 3426 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3427 old = refcount_acquire(&vp->v_usecount); 3428 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3429 } 3430 3431 void 3432 vlazy(struct vnode *vp) 3433 { 3434 struct mount *mp; 3435 3436 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3437 3438 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3439 return; 3440 /* 3441 * We may get here for inactive routines after the vnode got doomed. 3442 */ 3443 if (VN_IS_DOOMED(vp)) 3444 return; 3445 mp = vp->v_mount; 3446 mtx_lock(&mp->mnt_listmtx); 3447 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3448 vp->v_mflag |= VMP_LAZYLIST; 3449 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3450 mp->mnt_lazyvnodelistsize++; 3451 } 3452 mtx_unlock(&mp->mnt_listmtx); 3453 } 3454 3455 static void 3456 vunlazy(struct vnode *vp) 3457 { 3458 struct mount *mp; 3459 3460 ASSERT_VI_LOCKED(vp, __func__); 3461 VNPASS(!VN_IS_DOOMED(vp), vp); 3462 3463 mp = vp->v_mount; 3464 mtx_lock(&mp->mnt_listmtx); 3465 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3466 /* 3467 * Don't remove the vnode from the lazy list if another thread 3468 * has increased the hold count. It may have re-enqueued the 3469 * vnode to the lazy list and is now responsible for its 3470 * removal. 3471 */ 3472 if (vp->v_holdcnt == 0) { 3473 vp->v_mflag &= ~VMP_LAZYLIST; 3474 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3475 mp->mnt_lazyvnodelistsize--; 3476 } 3477 mtx_unlock(&mp->mnt_listmtx); 3478 } 3479 3480 /* 3481 * This routine is only meant to be called from vgonel prior to dooming 3482 * the vnode. 3483 */ 3484 static void 3485 vunlazy_gone(struct vnode *vp) 3486 { 3487 struct mount *mp; 3488 3489 ASSERT_VOP_ELOCKED(vp, __func__); 3490 ASSERT_VI_LOCKED(vp, __func__); 3491 VNPASS(!VN_IS_DOOMED(vp), vp); 3492 3493 if (vp->v_mflag & VMP_LAZYLIST) { 3494 mp = vp->v_mount; 3495 mtx_lock(&mp->mnt_listmtx); 3496 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3497 vp->v_mflag &= ~VMP_LAZYLIST; 3498 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3499 mp->mnt_lazyvnodelistsize--; 3500 mtx_unlock(&mp->mnt_listmtx); 3501 } 3502 } 3503 3504 static void 3505 vdefer_inactive(struct vnode *vp) 3506 { 3507 3508 ASSERT_VI_LOCKED(vp, __func__); 3509 VNPASS(vp->v_holdcnt > 0, vp); 3510 if (VN_IS_DOOMED(vp)) { 3511 vdropl(vp); 3512 return; 3513 } 3514 if (vp->v_iflag & VI_DEFINACT) { 3515 VNPASS(vp->v_holdcnt > 1, vp); 3516 vdropl(vp); 3517 return; 3518 } 3519 if (vp->v_usecount > 0) { 3520 vp->v_iflag &= ~VI_OWEINACT; 3521 vdropl(vp); 3522 return; 3523 } 3524 vlazy(vp); 3525 vp->v_iflag |= VI_DEFINACT; 3526 VI_UNLOCK(vp); 3527 atomic_add_long(&deferred_inact, 1); 3528 } 3529 3530 static void 3531 vdefer_inactive_unlocked(struct vnode *vp) 3532 { 3533 3534 VI_LOCK(vp); 3535 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3536 vdropl(vp); 3537 return; 3538 } 3539 vdefer_inactive(vp); 3540 } 3541 3542 enum vput_op { VRELE, VPUT, VUNREF }; 3543 3544 /* 3545 * Handle ->v_usecount transitioning to 0. 3546 * 3547 * By releasing the last usecount we take ownership of the hold count which 3548 * provides liveness of the vnode, meaning we have to vdrop. 3549 * 3550 * For all vnodes we may need to perform inactive processing. It requires an 3551 * exclusive lock on the vnode, while it is legal to call here with only a 3552 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3553 * inactive processing gets deferred to the syncer. 3554 * 3555 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3556 * on the lock being held all the way until VOP_INACTIVE. This in particular 3557 * happens with UFS which adds half-constructed vnodes to the hash, where they 3558 * can be found by other code. 3559 */ 3560 static void 3561 vput_final(struct vnode *vp, enum vput_op func) 3562 { 3563 int error; 3564 bool want_unlock; 3565 3566 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3567 VNPASS(vp->v_holdcnt > 0, vp); 3568 3569 VI_LOCK(vp); 3570 3571 /* 3572 * By the time we got here someone else might have transitioned 3573 * the count back to > 0. 3574 */ 3575 if (vp->v_usecount > 0) 3576 goto out; 3577 3578 /* 3579 * If the vnode is doomed vgone already performed inactive processing 3580 * (if needed). 3581 */ 3582 if (VN_IS_DOOMED(vp)) 3583 goto out; 3584 3585 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3586 goto out; 3587 3588 if (vp->v_iflag & VI_DOINGINACT) 3589 goto out; 3590 3591 /* 3592 * Locking operations here will drop the interlock and possibly the 3593 * vnode lock, opening a window where the vnode can get doomed all the 3594 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3595 * perform inactive. 3596 */ 3597 vp->v_iflag |= VI_OWEINACT; 3598 want_unlock = false; 3599 error = 0; 3600 switch (func) { 3601 case VRELE: 3602 switch (VOP_ISLOCKED(vp)) { 3603 case LK_EXCLUSIVE: 3604 break; 3605 case LK_EXCLOTHER: 3606 case 0: 3607 want_unlock = true; 3608 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3609 VI_LOCK(vp); 3610 break; 3611 default: 3612 /* 3613 * The lock has at least one sharer, but we have no way 3614 * to conclude whether this is us. Play it safe and 3615 * defer processing. 3616 */ 3617 error = EAGAIN; 3618 break; 3619 } 3620 break; 3621 case VPUT: 3622 want_unlock = true; 3623 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3624 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3625 LK_NOWAIT); 3626 VI_LOCK(vp); 3627 } 3628 break; 3629 case VUNREF: 3630 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3631 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3632 VI_LOCK(vp); 3633 } 3634 break; 3635 } 3636 if (error == 0) { 3637 if (func == VUNREF) { 3638 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3639 ("recursive vunref")); 3640 vp->v_vflag |= VV_UNREF; 3641 } 3642 for (;;) { 3643 error = vinactive(vp); 3644 if (want_unlock) 3645 VOP_UNLOCK(vp); 3646 if (error != ERELOOKUP || !want_unlock) 3647 break; 3648 VOP_LOCK(vp, LK_EXCLUSIVE); 3649 } 3650 if (func == VUNREF) 3651 vp->v_vflag &= ~VV_UNREF; 3652 vdropl(vp); 3653 } else { 3654 vdefer_inactive(vp); 3655 } 3656 return; 3657 out: 3658 if (func == VPUT) 3659 VOP_UNLOCK(vp); 3660 vdropl(vp); 3661 } 3662 3663 /* 3664 * Decrement ->v_usecount for a vnode. 3665 * 3666 * Releasing the last use count requires additional processing, see vput_final 3667 * above for details. 3668 * 3669 * Comment above each variant denotes lock state on entry and exit. 3670 */ 3671 3672 /* 3673 * in: any 3674 * out: same as passed in 3675 */ 3676 void 3677 vrele(struct vnode *vp) 3678 { 3679 3680 ASSERT_VI_UNLOCKED(vp, __func__); 3681 if (!refcount_release(&vp->v_usecount)) 3682 return; 3683 vput_final(vp, VRELE); 3684 } 3685 3686 /* 3687 * in: locked 3688 * out: unlocked 3689 */ 3690 void 3691 vput(struct vnode *vp) 3692 { 3693 3694 ASSERT_VOP_LOCKED(vp, __func__); 3695 ASSERT_VI_UNLOCKED(vp, __func__); 3696 if (!refcount_release(&vp->v_usecount)) { 3697 VOP_UNLOCK(vp); 3698 return; 3699 } 3700 vput_final(vp, VPUT); 3701 } 3702 3703 /* 3704 * in: locked 3705 * out: locked 3706 */ 3707 void 3708 vunref(struct vnode *vp) 3709 { 3710 3711 ASSERT_VOP_LOCKED(vp, __func__); 3712 ASSERT_VI_UNLOCKED(vp, __func__); 3713 if (!refcount_release(&vp->v_usecount)) 3714 return; 3715 vput_final(vp, VUNREF); 3716 } 3717 3718 void 3719 vhold(struct vnode *vp) 3720 { 3721 int old; 3722 3723 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3724 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3725 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3726 ("%s: wrong hold count %d", __func__, old)); 3727 if (old == 0) 3728 vfs_freevnodes_dec(); 3729 } 3730 3731 void 3732 vholdnz(struct vnode *vp) 3733 { 3734 3735 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3736 #ifdef INVARIANTS 3737 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3738 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3739 ("%s: wrong hold count %d", __func__, old)); 3740 #else 3741 atomic_add_int(&vp->v_holdcnt, 1); 3742 #endif 3743 } 3744 3745 /* 3746 * Grab a hold count unless the vnode is freed. 3747 * 3748 * Only use this routine if vfs smr is the only protection you have against 3749 * freeing the vnode. 3750 * 3751 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3752 * is not set. After the flag is set the vnode becomes immutable to anyone but 3753 * the thread which managed to set the flag. 3754 * 3755 * It may be tempting to replace the loop with: 3756 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3757 * if (count & VHOLD_NO_SMR) { 3758 * backpedal and error out; 3759 * } 3760 * 3761 * However, while this is more performant, it hinders debugging by eliminating 3762 * the previously mentioned invariant. 3763 */ 3764 bool 3765 vhold_smr(struct vnode *vp) 3766 { 3767 int count; 3768 3769 VFS_SMR_ASSERT_ENTERED(); 3770 3771 count = atomic_load_int(&vp->v_holdcnt); 3772 for (;;) { 3773 if (count & VHOLD_NO_SMR) { 3774 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3775 ("non-zero hold count with flags %d\n", count)); 3776 return (false); 3777 } 3778 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3779 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3780 if (count == 0) 3781 vfs_freevnodes_dec(); 3782 return (true); 3783 } 3784 } 3785 } 3786 3787 /* 3788 * Hold a free vnode for recycling. 3789 * 3790 * Note: vnode_init references this comment. 3791 * 3792 * Attempts to recycle only need the global vnode list lock and have no use for 3793 * SMR. 3794 * 3795 * However, vnodes get inserted into the global list before they get fully 3796 * initialized and stay there until UMA decides to free the memory. This in 3797 * particular means the target can be found before it becomes usable and after 3798 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3799 * VHOLD_NO_SMR. 3800 * 3801 * Note: the vnode may gain more references after we transition the count 0->1. 3802 */ 3803 static bool 3804 vhold_recycle_free(struct vnode *vp) 3805 { 3806 int count; 3807 3808 mtx_assert(&vnode_list_mtx, MA_OWNED); 3809 3810 count = atomic_load_int(&vp->v_holdcnt); 3811 for (;;) { 3812 if (count & VHOLD_NO_SMR) { 3813 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3814 ("non-zero hold count with flags %d\n", count)); 3815 return (false); 3816 } 3817 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3818 if (count > 0) { 3819 return (false); 3820 } 3821 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3822 vfs_freevnodes_dec(); 3823 return (true); 3824 } 3825 } 3826 } 3827 3828 static void __noinline 3829 vdbatch_process(struct vdbatch *vd) 3830 { 3831 struct vnode *vp; 3832 int i; 3833 3834 mtx_assert(&vd->lock, MA_OWNED); 3835 MPASS(curthread->td_pinned > 0); 3836 MPASS(vd->index == VDBATCH_SIZE); 3837 3838 /* 3839 * Attempt to requeue the passed batch, but give up easily. 3840 * 3841 * Despite batching the mechanism is prone to transient *significant* 3842 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3843 * if multiple CPUs get here (one real-world example is highly parallel 3844 * do-nothing make , which will stat *tons* of vnodes). Since it is 3845 * quasi-LRU (read: not that great even if fully honoured) provide an 3846 * option to just dodge the problem. Parties which don't like it are 3847 * welcome to implement something better. 3848 */ 3849 if (vnode_can_skip_requeue) { 3850 if (!mtx_trylock(&vnode_list_mtx)) { 3851 counter_u64_add(vnode_skipped_requeues, 1); 3852 critical_enter(); 3853 for (i = 0; i < VDBATCH_SIZE; i++) { 3854 vp = vd->tab[i]; 3855 vd->tab[i] = NULL; 3856 MPASS(vp->v_dbatchcpu != NOCPU); 3857 vp->v_dbatchcpu = NOCPU; 3858 } 3859 vd->index = 0; 3860 critical_exit(); 3861 return; 3862 3863 } 3864 /* fallthrough to locked processing */ 3865 } else { 3866 mtx_lock(&vnode_list_mtx); 3867 } 3868 3869 mtx_assert(&vnode_list_mtx, MA_OWNED); 3870 critical_enter(); 3871 for (i = 0; i < VDBATCH_SIZE; i++) { 3872 vp = vd->tab[i]; 3873 vd->tab[i] = NULL; 3874 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3875 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3876 MPASS(vp->v_dbatchcpu != NOCPU); 3877 vp->v_dbatchcpu = NOCPU; 3878 } 3879 mtx_unlock(&vnode_list_mtx); 3880 vd->index = 0; 3881 critical_exit(); 3882 } 3883 3884 static void 3885 vdbatch_enqueue(struct vnode *vp) 3886 { 3887 struct vdbatch *vd; 3888 3889 ASSERT_VI_LOCKED(vp, __func__); 3890 VNPASS(!VN_IS_DOOMED(vp), vp); 3891 3892 if (vp->v_dbatchcpu != NOCPU) { 3893 VI_UNLOCK(vp); 3894 return; 3895 } 3896 3897 sched_pin(); 3898 vd = DPCPU_PTR(vd); 3899 mtx_lock(&vd->lock); 3900 MPASS(vd->index < VDBATCH_SIZE); 3901 MPASS(vd->tab[vd->index] == NULL); 3902 /* 3903 * A hack: we depend on being pinned so that we know what to put in 3904 * ->v_dbatchcpu. 3905 */ 3906 vp->v_dbatchcpu = curcpu; 3907 vd->tab[vd->index] = vp; 3908 vd->index++; 3909 VI_UNLOCK(vp); 3910 if (vd->index == VDBATCH_SIZE) 3911 vdbatch_process(vd); 3912 mtx_unlock(&vd->lock); 3913 sched_unpin(); 3914 } 3915 3916 /* 3917 * This routine must only be called for vnodes which are about to be 3918 * deallocated. Supporting dequeue for arbitrary vndoes would require 3919 * validating that the locked batch matches. 3920 */ 3921 static void 3922 vdbatch_dequeue(struct vnode *vp) 3923 { 3924 struct vdbatch *vd; 3925 int i; 3926 short cpu; 3927 3928 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3929 3930 cpu = vp->v_dbatchcpu; 3931 if (cpu == NOCPU) 3932 return; 3933 3934 vd = DPCPU_ID_PTR(cpu, vd); 3935 mtx_lock(&vd->lock); 3936 for (i = 0; i < vd->index; i++) { 3937 if (vd->tab[i] != vp) 3938 continue; 3939 vp->v_dbatchcpu = NOCPU; 3940 vd->index--; 3941 vd->tab[i] = vd->tab[vd->index]; 3942 vd->tab[vd->index] = NULL; 3943 break; 3944 } 3945 mtx_unlock(&vd->lock); 3946 /* 3947 * Either we dequeued the vnode above or the target CPU beat us to it. 3948 */ 3949 MPASS(vp->v_dbatchcpu == NOCPU); 3950 } 3951 3952 /* 3953 * Drop the hold count of the vnode. 3954 * 3955 * It will only get freed if this is the last hold *and* it has been vgone'd. 3956 * 3957 * Because the vnode vm object keeps a hold reference on the vnode if 3958 * there is at least one resident non-cached page, the vnode cannot 3959 * leave the active list without the page cleanup done. 3960 */ 3961 static void __noinline 3962 vdropl_final(struct vnode *vp) 3963 { 3964 3965 ASSERT_VI_LOCKED(vp, __func__); 3966 VNPASS(VN_IS_DOOMED(vp), vp); 3967 /* 3968 * Set the VHOLD_NO_SMR flag. 3969 * 3970 * We may be racing against vhold_smr. If they win we can just pretend 3971 * we never got this far, they will vdrop later. 3972 */ 3973 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3974 vfs_freevnodes_inc(); 3975 VI_UNLOCK(vp); 3976 /* 3977 * We lost the aforementioned race. Any subsequent access is 3978 * invalid as they might have managed to vdropl on their own. 3979 */ 3980 return; 3981 } 3982 /* 3983 * Don't bump freevnodes as this one is going away. 3984 */ 3985 freevnode(vp); 3986 } 3987 3988 void 3989 vdrop(struct vnode *vp) 3990 { 3991 3992 ASSERT_VI_UNLOCKED(vp, __func__); 3993 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3994 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3995 return; 3996 VI_LOCK(vp); 3997 vdropl(vp); 3998 } 3999 4000 static __always_inline void 4001 vdropl_impl(struct vnode *vp, bool enqueue) 4002 { 4003 4004 ASSERT_VI_LOCKED(vp, __func__); 4005 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4006 if (!refcount_release(&vp->v_holdcnt)) { 4007 VI_UNLOCK(vp); 4008 return; 4009 } 4010 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 4011 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4012 if (VN_IS_DOOMED(vp)) { 4013 vdropl_final(vp); 4014 return; 4015 } 4016 4017 vfs_freevnodes_inc(); 4018 if (vp->v_mflag & VMP_LAZYLIST) { 4019 vunlazy(vp); 4020 } 4021 4022 if (!enqueue) { 4023 VI_UNLOCK(vp); 4024 return; 4025 } 4026 4027 /* 4028 * Also unlocks the interlock. We can't assert on it as we 4029 * released our hold and by now the vnode might have been 4030 * freed. 4031 */ 4032 vdbatch_enqueue(vp); 4033 } 4034 4035 void 4036 vdropl(struct vnode *vp) 4037 { 4038 4039 vdropl_impl(vp, true); 4040 } 4041 4042 /* 4043 * vdrop a vnode when recycling 4044 * 4045 * This is a special case routine only to be used when recycling, differs from 4046 * regular vdrop by not requeieing the vnode on LRU. 4047 * 4048 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 4049 * e.g., frozen writes on the filesystem), filling the batch and causing it to 4050 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 4051 * loop which can last for as long as writes are frozen. 4052 */ 4053 static void 4054 vdropl_recycle(struct vnode *vp) 4055 { 4056 4057 vdropl_impl(vp, false); 4058 } 4059 4060 static void 4061 vdrop_recycle(struct vnode *vp) 4062 { 4063 4064 VI_LOCK(vp); 4065 vdropl_recycle(vp); 4066 } 4067 4068 /* 4069 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 4070 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 4071 */ 4072 static int 4073 vinactivef(struct vnode *vp) 4074 { 4075 int error; 4076 4077 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4078 ASSERT_VI_LOCKED(vp, "vinactive"); 4079 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 4080 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4081 vp->v_iflag |= VI_DOINGINACT; 4082 vp->v_iflag &= ~VI_OWEINACT; 4083 VI_UNLOCK(vp); 4084 4085 /* 4086 * Before moving off the active list, we must be sure that any 4087 * modified pages are converted into the vnode's dirty 4088 * buffers, since these will no longer be checked once the 4089 * vnode is on the inactive list. 4090 * 4091 * The write-out of the dirty pages is asynchronous. At the 4092 * point that VOP_INACTIVE() is called, there could still be 4093 * pending I/O and dirty pages in the object. 4094 */ 4095 if ((vp->v_vflag & VV_NOSYNC) == 0) 4096 vnode_pager_clean_async(vp); 4097 4098 error = VOP_INACTIVE(vp); 4099 VI_LOCK(vp); 4100 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 4101 vp->v_iflag &= ~VI_DOINGINACT; 4102 return (error); 4103 } 4104 4105 int 4106 vinactive(struct vnode *vp) 4107 { 4108 4109 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4110 ASSERT_VI_LOCKED(vp, "vinactive"); 4111 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4112 4113 if ((vp->v_iflag & VI_OWEINACT) == 0) 4114 return (0); 4115 if (vp->v_iflag & VI_DOINGINACT) 4116 return (0); 4117 if (vp->v_usecount > 0) { 4118 vp->v_iflag &= ~VI_OWEINACT; 4119 return (0); 4120 } 4121 return (vinactivef(vp)); 4122 } 4123 4124 /* 4125 * Remove any vnodes in the vnode table belonging to mount point mp. 4126 * 4127 * If FORCECLOSE is not specified, there should not be any active ones, 4128 * return error if any are found (nb: this is a user error, not a 4129 * system error). If FORCECLOSE is specified, detach any active vnodes 4130 * that are found. 4131 * 4132 * If WRITECLOSE is set, only flush out regular file vnodes open for 4133 * writing. 4134 * 4135 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4136 * 4137 * `rootrefs' specifies the base reference count for the root vnode 4138 * of this filesystem. The root vnode is considered busy if its 4139 * v_usecount exceeds this value. On a successful return, vflush(, td) 4140 * will call vrele() on the root vnode exactly rootrefs times. 4141 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4142 * be zero. 4143 */ 4144 #ifdef DIAGNOSTIC 4145 static int busyprt = 0; /* print out busy vnodes */ 4146 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4147 #endif 4148 4149 int 4150 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4151 { 4152 struct vnode *vp, *mvp, *rootvp = NULL; 4153 struct vattr vattr; 4154 int busy = 0, error; 4155 4156 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4157 rootrefs, flags); 4158 if (rootrefs > 0) { 4159 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4160 ("vflush: bad args")); 4161 /* 4162 * Get the filesystem root vnode. We can vput() it 4163 * immediately, since with rootrefs > 0, it won't go away. 4164 */ 4165 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 4166 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 4167 __func__, error); 4168 return (error); 4169 } 4170 vput(rootvp); 4171 } 4172 loop: 4173 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 4174 vholdl(vp); 4175 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 4176 if (error) { 4177 vdrop(vp); 4178 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4179 goto loop; 4180 } 4181 /* 4182 * Skip over a vnodes marked VV_SYSTEM. 4183 */ 4184 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 4185 VOP_UNLOCK(vp); 4186 vdrop(vp); 4187 continue; 4188 } 4189 /* 4190 * If WRITECLOSE is set, flush out unlinked but still open 4191 * files (even if open only for reading) and regular file 4192 * vnodes open for writing. 4193 */ 4194 if (flags & WRITECLOSE) { 4195 vnode_pager_clean_async(vp); 4196 do { 4197 error = VOP_FSYNC(vp, MNT_WAIT, td); 4198 } while (error == ERELOOKUP); 4199 if (error != 0) { 4200 VOP_UNLOCK(vp); 4201 vdrop(vp); 4202 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4203 return (error); 4204 } 4205 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 4206 VI_LOCK(vp); 4207 4208 if ((vp->v_type == VNON || 4209 (error == 0 && vattr.va_nlink > 0)) && 4210 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 4211 VOP_UNLOCK(vp); 4212 vdropl(vp); 4213 continue; 4214 } 4215 } else 4216 VI_LOCK(vp); 4217 /* 4218 * With v_usecount == 0, all we need to do is clear out the 4219 * vnode data structures and we are done. 4220 * 4221 * If FORCECLOSE is set, forcibly close the vnode. 4222 */ 4223 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 4224 vgonel(vp); 4225 } else { 4226 busy++; 4227 #ifdef DIAGNOSTIC 4228 if (busyprt) 4229 vn_printf(vp, "vflush: busy vnode "); 4230 #endif 4231 } 4232 VOP_UNLOCK(vp); 4233 vdropl(vp); 4234 } 4235 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 4236 /* 4237 * If just the root vnode is busy, and if its refcount 4238 * is equal to `rootrefs', then go ahead and kill it. 4239 */ 4240 VI_LOCK(rootvp); 4241 KASSERT(busy > 0, ("vflush: not busy")); 4242 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4243 ("vflush: usecount %d < rootrefs %d", 4244 rootvp->v_usecount, rootrefs)); 4245 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4246 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4247 vgone(rootvp); 4248 VOP_UNLOCK(rootvp); 4249 busy = 0; 4250 } else 4251 VI_UNLOCK(rootvp); 4252 } 4253 if (busy) { 4254 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4255 busy); 4256 return (EBUSY); 4257 } 4258 for (; rootrefs > 0; rootrefs--) 4259 vrele(rootvp); 4260 return (0); 4261 } 4262 4263 /* 4264 * Recycle an unused vnode. 4265 */ 4266 int 4267 vrecycle(struct vnode *vp) 4268 { 4269 int recycled; 4270 4271 VI_LOCK(vp); 4272 recycled = vrecyclel(vp); 4273 VI_UNLOCK(vp); 4274 return (recycled); 4275 } 4276 4277 /* 4278 * vrecycle, with the vp interlock held. 4279 */ 4280 int 4281 vrecyclel(struct vnode *vp) 4282 { 4283 int recycled; 4284 4285 ASSERT_VOP_ELOCKED(vp, __func__); 4286 ASSERT_VI_LOCKED(vp, __func__); 4287 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4288 recycled = 0; 4289 if (vp->v_usecount == 0) { 4290 recycled = 1; 4291 vgonel(vp); 4292 } 4293 return (recycled); 4294 } 4295 4296 /* 4297 * Eliminate all activity associated with a vnode 4298 * in preparation for reuse. 4299 */ 4300 void 4301 vgone(struct vnode *vp) 4302 { 4303 VI_LOCK(vp); 4304 vgonel(vp); 4305 VI_UNLOCK(vp); 4306 } 4307 4308 /* 4309 * Notify upper mounts about reclaimed or unlinked vnode. 4310 */ 4311 void 4312 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4313 { 4314 struct mount *mp; 4315 struct mount_upper_node *ump; 4316 4317 mp = atomic_load_ptr(&vp->v_mount); 4318 if (mp == NULL) 4319 return; 4320 if (TAILQ_EMPTY(&mp->mnt_notify)) 4321 return; 4322 4323 MNT_ILOCK(mp); 4324 mp->mnt_upper_pending++; 4325 KASSERT(mp->mnt_upper_pending > 0, 4326 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4327 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4328 MNT_IUNLOCK(mp); 4329 switch (event) { 4330 case VFS_NOTIFY_UPPER_RECLAIM: 4331 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4332 break; 4333 case VFS_NOTIFY_UPPER_UNLINK: 4334 VFS_UNLINK_LOWERVP(ump->mp, vp); 4335 break; 4336 } 4337 MNT_ILOCK(mp); 4338 } 4339 mp->mnt_upper_pending--; 4340 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4341 mp->mnt_upper_pending == 0) { 4342 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4343 wakeup(&mp->mnt_uppers); 4344 } 4345 MNT_IUNLOCK(mp); 4346 } 4347 4348 /* 4349 * vgone, with the vp interlock held. 4350 */ 4351 static void 4352 vgonel(struct vnode *vp) 4353 { 4354 struct thread *td; 4355 struct mount *mp; 4356 vm_object_t object; 4357 bool active, doinginact, oweinact; 4358 4359 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4360 ASSERT_VI_LOCKED(vp, "vgonel"); 4361 VNASSERT(vp->v_holdcnt, vp, 4362 ("vgonel: vp %p has no reference.", vp)); 4363 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4364 td = curthread; 4365 4366 /* 4367 * Don't vgonel if we're already doomed. 4368 */ 4369 if (VN_IS_DOOMED(vp)) { 4370 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4371 vn_get_state(vp) == VSTATE_DEAD, vp); 4372 return; 4373 } 4374 /* 4375 * Paired with freevnode. 4376 */ 4377 vn_seqc_write_begin_locked(vp); 4378 vunlazy_gone(vp); 4379 vn_irflag_set_locked(vp, VIRF_DOOMED); 4380 vn_set_state(vp, VSTATE_DESTROYING); 4381 4382 /* 4383 * Check to see if the vnode is in use. If so, we have to 4384 * call VOP_CLOSE() and VOP_INACTIVE(). 4385 * 4386 * It could be that VOP_INACTIVE() requested reclamation, in 4387 * which case we should avoid recursion, so check 4388 * VI_DOINGINACT. This is not precise but good enough. 4389 */ 4390 active = vp->v_usecount > 0; 4391 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4392 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4393 4394 /* 4395 * If we need to do inactive VI_OWEINACT will be set. 4396 */ 4397 if (vp->v_iflag & VI_DEFINACT) { 4398 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4399 vp->v_iflag &= ~VI_DEFINACT; 4400 vdropl(vp); 4401 } else { 4402 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4403 VI_UNLOCK(vp); 4404 } 4405 cache_purge_vgone(vp); 4406 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4407 4408 /* 4409 * If purging an active vnode, it must be closed and 4410 * deactivated before being reclaimed. 4411 */ 4412 if (active) 4413 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4414 if (!doinginact) { 4415 do { 4416 if (oweinact || active) { 4417 VI_LOCK(vp); 4418 vinactivef(vp); 4419 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4420 VI_UNLOCK(vp); 4421 } 4422 } while (oweinact); 4423 } 4424 if (vp->v_type == VSOCK) 4425 vfs_unp_reclaim(vp); 4426 4427 /* 4428 * Clean out any buffers associated with the vnode. 4429 * If the flush fails, just toss the buffers. 4430 */ 4431 mp = NULL; 4432 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4433 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4434 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4435 while (vinvalbuf(vp, 0, 0, 0) != 0) 4436 ; 4437 } 4438 4439 BO_LOCK(&vp->v_bufobj); 4440 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4441 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4442 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4443 vp->v_bufobj.bo_clean.bv_cnt == 0, 4444 ("vp %p bufobj not invalidated", vp)); 4445 4446 /* 4447 * For VMIO bufobj, BO_DEAD is set later, or in 4448 * vm_object_terminate() after the object's page queue is 4449 * flushed. 4450 */ 4451 object = vp->v_bufobj.bo_object; 4452 if (object == NULL) 4453 vp->v_bufobj.bo_flag |= BO_DEAD; 4454 BO_UNLOCK(&vp->v_bufobj); 4455 4456 /* 4457 * Handle the VM part. Tmpfs handles v_object on its own (the 4458 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4459 * should not touch the object borrowed from the lower vnode 4460 * (the handle check). 4461 */ 4462 if (object != NULL && object->type == OBJT_VNODE && 4463 object->handle == vp) 4464 vnode_destroy_vobject(vp); 4465 4466 /* 4467 * Reclaim the vnode. 4468 */ 4469 if (VOP_RECLAIM(vp)) 4470 panic("vgone: cannot reclaim"); 4471 if (mp != NULL) 4472 vn_finished_secondary_write(mp); 4473 VNASSERT(vp->v_object == NULL, vp, 4474 ("vop_reclaim left v_object vp=%p", vp)); 4475 /* 4476 * Clear the advisory locks and wake up waiting threads. 4477 */ 4478 if (vp->v_lockf != NULL) { 4479 (void)VOP_ADVLOCKPURGE(vp); 4480 vp->v_lockf = NULL; 4481 } 4482 /* 4483 * Delete from old mount point vnode list. 4484 */ 4485 if (vp->v_mount == NULL) { 4486 VI_LOCK(vp); 4487 } else { 4488 delmntque(vp); 4489 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4490 } 4491 /* 4492 * Done with purge, reset to the standard lock and invalidate 4493 * the vnode. 4494 */ 4495 vp->v_vnlock = &vp->v_lock; 4496 vp->v_op = &dead_vnodeops; 4497 vp->v_type = VBAD; 4498 vn_set_state(vp, VSTATE_DEAD); 4499 } 4500 4501 /* 4502 * Print out a description of a vnode. 4503 */ 4504 static const char *const vtypename[] = { 4505 [VNON] = "VNON", 4506 [VREG] = "VREG", 4507 [VDIR] = "VDIR", 4508 [VBLK] = "VBLK", 4509 [VCHR] = "VCHR", 4510 [VLNK] = "VLNK", 4511 [VSOCK] = "VSOCK", 4512 [VFIFO] = "VFIFO", 4513 [VBAD] = "VBAD", 4514 [VMARKER] = "VMARKER", 4515 }; 4516 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4517 "vnode type name not added to vtypename"); 4518 4519 static const char *const vstatename[] = { 4520 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4521 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4522 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4523 [VSTATE_DEAD] = "VSTATE_DEAD", 4524 }; 4525 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4526 "vnode state name not added to vstatename"); 4527 4528 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4529 "new hold count flag not added to vn_printf"); 4530 4531 void 4532 vn_printf(struct vnode *vp, const char *fmt, ...) 4533 { 4534 va_list ap; 4535 char buf[256], buf2[16]; 4536 u_long flags; 4537 u_int holdcnt; 4538 short irflag; 4539 4540 va_start(ap, fmt); 4541 vprintf(fmt, ap); 4542 va_end(ap); 4543 printf("%p: ", (void *)vp); 4544 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4545 vstatename[vp->v_state], vp->v_op); 4546 holdcnt = atomic_load_int(&vp->v_holdcnt); 4547 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4548 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4549 vp->v_seqc_users); 4550 switch (vp->v_type) { 4551 case VDIR: 4552 printf(" mountedhere %p\n", vp->v_mountedhere); 4553 break; 4554 case VCHR: 4555 printf(" rdev %p\n", vp->v_rdev); 4556 break; 4557 case VSOCK: 4558 printf(" socket %p\n", vp->v_unpcb); 4559 break; 4560 case VFIFO: 4561 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4562 break; 4563 default: 4564 printf("\n"); 4565 break; 4566 } 4567 buf[0] = '\0'; 4568 buf[1] = '\0'; 4569 if (holdcnt & VHOLD_NO_SMR) 4570 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4571 printf(" hold count flags (%s)\n", buf + 1); 4572 4573 buf[0] = '\0'; 4574 buf[1] = '\0'; 4575 irflag = vn_irflag_read(vp); 4576 if (irflag & VIRF_DOOMED) 4577 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4578 if (irflag & VIRF_PGREAD) 4579 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4580 if (irflag & VIRF_MOUNTPOINT) 4581 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4582 if (irflag & VIRF_TEXT_REF) 4583 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4584 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4585 if (flags != 0) { 4586 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4587 strlcat(buf, buf2, sizeof(buf)); 4588 } 4589 if (vp->v_vflag & VV_ROOT) 4590 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4591 if (vp->v_vflag & VV_ISTTY) 4592 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4593 if (vp->v_vflag & VV_NOSYNC) 4594 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4595 if (vp->v_vflag & VV_ETERNALDEV) 4596 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4597 if (vp->v_vflag & VV_CACHEDLABEL) 4598 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4599 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4600 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4601 if (vp->v_vflag & VV_COPYONWRITE) 4602 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4603 if (vp->v_vflag & VV_SYSTEM) 4604 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4605 if (vp->v_vflag & VV_PROCDEP) 4606 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4607 if (vp->v_vflag & VV_DELETED) 4608 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4609 if (vp->v_vflag & VV_MD) 4610 strlcat(buf, "|VV_MD", sizeof(buf)); 4611 if (vp->v_vflag & VV_FORCEINSMQ) 4612 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4613 if (vp->v_vflag & VV_READLINK) 4614 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4615 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4616 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4617 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4618 if (flags != 0) { 4619 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4620 strlcat(buf, buf2, sizeof(buf)); 4621 } 4622 if (vp->v_iflag & VI_MOUNT) 4623 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4624 if (vp->v_iflag & VI_DOINGINACT) 4625 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4626 if (vp->v_iflag & VI_OWEINACT) 4627 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4628 if (vp->v_iflag & VI_DEFINACT) 4629 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4630 if (vp->v_iflag & VI_FOPENING) 4631 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4632 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4633 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4634 if (flags != 0) { 4635 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4636 strlcat(buf, buf2, sizeof(buf)); 4637 } 4638 if (vp->v_mflag & VMP_LAZYLIST) 4639 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4640 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4641 if (flags != 0) { 4642 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4643 strlcat(buf, buf2, sizeof(buf)); 4644 } 4645 printf(" flags (%s)", buf + 1); 4646 if (mtx_owned(VI_MTX(vp))) 4647 printf(" VI_LOCKed"); 4648 printf("\n"); 4649 if (vp->v_object != NULL) 4650 printf(" v_object %p ref %d pages %d " 4651 "cleanbuf %d dirtybuf %d\n", 4652 vp->v_object, vp->v_object->ref_count, 4653 vp->v_object->resident_page_count, 4654 vp->v_bufobj.bo_clean.bv_cnt, 4655 vp->v_bufobj.bo_dirty.bv_cnt); 4656 printf(" "); 4657 lockmgr_printinfo(vp->v_vnlock); 4658 if (vp->v_data != NULL) 4659 VOP_PRINT(vp); 4660 } 4661 4662 #ifdef DDB 4663 /* 4664 * List all of the locked vnodes in the system. 4665 * Called when debugging the kernel. 4666 */ 4667 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4668 { 4669 struct mount *mp; 4670 struct vnode *vp; 4671 4672 /* 4673 * Note: because this is DDB, we can't obey the locking semantics 4674 * for these structures, which means we could catch an inconsistent 4675 * state and dereference a nasty pointer. Not much to be done 4676 * about that. 4677 */ 4678 db_printf("Locked vnodes\n"); 4679 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4680 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4681 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4682 vn_printf(vp, "vnode "); 4683 } 4684 } 4685 } 4686 4687 /* 4688 * Show details about the given vnode. 4689 */ 4690 DB_SHOW_COMMAND(vnode, db_show_vnode) 4691 { 4692 struct vnode *vp; 4693 4694 if (!have_addr) 4695 return; 4696 vp = (struct vnode *)addr; 4697 vn_printf(vp, "vnode "); 4698 } 4699 4700 /* 4701 * Show details about the given mount point. 4702 */ 4703 DB_SHOW_COMMAND(mount, db_show_mount) 4704 { 4705 struct mount *mp; 4706 struct vfsopt *opt; 4707 struct statfs *sp; 4708 struct vnode *vp; 4709 char buf[512]; 4710 uint64_t mflags; 4711 u_int flags; 4712 4713 if (!have_addr) { 4714 /* No address given, print short info about all mount points. */ 4715 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4716 db_printf("%p %s on %s (%s)\n", mp, 4717 mp->mnt_stat.f_mntfromname, 4718 mp->mnt_stat.f_mntonname, 4719 mp->mnt_stat.f_fstypename); 4720 if (db_pager_quit) 4721 break; 4722 } 4723 db_printf("\nMore info: show mount <addr>\n"); 4724 return; 4725 } 4726 4727 mp = (struct mount *)addr; 4728 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4729 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4730 4731 buf[0] = '\0'; 4732 mflags = mp->mnt_flag; 4733 #define MNT_FLAG(flag) do { \ 4734 if (mflags & (flag)) { \ 4735 if (buf[0] != '\0') \ 4736 strlcat(buf, ", ", sizeof(buf)); \ 4737 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4738 mflags &= ~(flag); \ 4739 } \ 4740 } while (0) 4741 MNT_FLAG(MNT_RDONLY); 4742 MNT_FLAG(MNT_SYNCHRONOUS); 4743 MNT_FLAG(MNT_NOEXEC); 4744 MNT_FLAG(MNT_NOSUID); 4745 MNT_FLAG(MNT_NFS4ACLS); 4746 MNT_FLAG(MNT_UNION); 4747 MNT_FLAG(MNT_ASYNC); 4748 MNT_FLAG(MNT_SUIDDIR); 4749 MNT_FLAG(MNT_SOFTDEP); 4750 MNT_FLAG(MNT_NOSYMFOLLOW); 4751 MNT_FLAG(MNT_GJOURNAL); 4752 MNT_FLAG(MNT_MULTILABEL); 4753 MNT_FLAG(MNT_ACLS); 4754 MNT_FLAG(MNT_NOATIME); 4755 MNT_FLAG(MNT_NOCLUSTERR); 4756 MNT_FLAG(MNT_NOCLUSTERW); 4757 MNT_FLAG(MNT_SUJ); 4758 MNT_FLAG(MNT_EXRDONLY); 4759 MNT_FLAG(MNT_EXPORTED); 4760 MNT_FLAG(MNT_DEFEXPORTED); 4761 MNT_FLAG(MNT_EXPORTANON); 4762 MNT_FLAG(MNT_EXKERB); 4763 MNT_FLAG(MNT_EXPUBLIC); 4764 MNT_FLAG(MNT_LOCAL); 4765 MNT_FLAG(MNT_QUOTA); 4766 MNT_FLAG(MNT_ROOTFS); 4767 MNT_FLAG(MNT_USER); 4768 MNT_FLAG(MNT_IGNORE); 4769 MNT_FLAG(MNT_UPDATE); 4770 MNT_FLAG(MNT_DELEXPORT); 4771 MNT_FLAG(MNT_RELOAD); 4772 MNT_FLAG(MNT_FORCE); 4773 MNT_FLAG(MNT_SNAPSHOT); 4774 MNT_FLAG(MNT_BYFSID); 4775 #undef MNT_FLAG 4776 if (mflags != 0) { 4777 if (buf[0] != '\0') 4778 strlcat(buf, ", ", sizeof(buf)); 4779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4780 "0x%016jx", mflags); 4781 } 4782 db_printf(" mnt_flag = %s\n", buf); 4783 4784 buf[0] = '\0'; 4785 flags = mp->mnt_kern_flag; 4786 #define MNT_KERN_FLAG(flag) do { \ 4787 if (flags & (flag)) { \ 4788 if (buf[0] != '\0') \ 4789 strlcat(buf, ", ", sizeof(buf)); \ 4790 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4791 flags &= ~(flag); \ 4792 } \ 4793 } while (0) 4794 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4795 MNT_KERN_FLAG(MNTK_ASYNC); 4796 MNT_KERN_FLAG(MNTK_SOFTDEP); 4797 MNT_KERN_FLAG(MNTK_NOMSYNC); 4798 MNT_KERN_FLAG(MNTK_DRAINING); 4799 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4800 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4801 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4802 MNT_KERN_FLAG(MNTK_NO_IOPF); 4803 MNT_KERN_FLAG(MNTK_RECURSE); 4804 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4805 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4806 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4807 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4808 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4809 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4810 MNT_KERN_FLAG(MNTK_NOASYNC); 4811 MNT_KERN_FLAG(MNTK_UNMOUNT); 4812 MNT_KERN_FLAG(MNTK_MWAIT); 4813 MNT_KERN_FLAG(MNTK_SUSPEND); 4814 MNT_KERN_FLAG(MNTK_SUSPEND2); 4815 MNT_KERN_FLAG(MNTK_SUSPENDED); 4816 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4817 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4818 #undef MNT_KERN_FLAG 4819 if (flags != 0) { 4820 if (buf[0] != '\0') 4821 strlcat(buf, ", ", sizeof(buf)); 4822 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4823 "0x%08x", flags); 4824 } 4825 db_printf(" mnt_kern_flag = %s\n", buf); 4826 4827 db_printf(" mnt_opt = "); 4828 opt = TAILQ_FIRST(mp->mnt_opt); 4829 if (opt != NULL) { 4830 db_printf("%s", opt->name); 4831 opt = TAILQ_NEXT(opt, link); 4832 while (opt != NULL) { 4833 db_printf(", %s", opt->name); 4834 opt = TAILQ_NEXT(opt, link); 4835 } 4836 } 4837 db_printf("\n"); 4838 4839 sp = &mp->mnt_stat; 4840 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4841 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4842 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4843 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4844 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4845 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4846 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4847 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4848 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4849 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4850 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4851 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4852 4853 db_printf(" mnt_cred = { uid=%u ruid=%u", 4854 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4855 if (jailed(mp->mnt_cred)) 4856 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4857 db_printf(" }\n"); 4858 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4859 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4860 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4861 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4862 db_printf(" mnt_lazyvnodelistsize = %d\n", 4863 mp->mnt_lazyvnodelistsize); 4864 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4865 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4866 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4867 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4868 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4869 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4870 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4871 db_printf(" mnt_secondary_accwrites = %d\n", 4872 mp->mnt_secondary_accwrites); 4873 db_printf(" mnt_gjprovider = %s\n", 4874 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4875 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4876 4877 db_printf("\n\nList of active vnodes\n"); 4878 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4879 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4880 vn_printf(vp, "vnode "); 4881 if (db_pager_quit) 4882 break; 4883 } 4884 } 4885 db_printf("\n\nList of inactive vnodes\n"); 4886 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4887 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4888 vn_printf(vp, "vnode "); 4889 if (db_pager_quit) 4890 break; 4891 } 4892 } 4893 } 4894 #endif /* DDB */ 4895 4896 /* 4897 * Fill in a struct xvfsconf based on a struct vfsconf. 4898 */ 4899 static int 4900 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4901 { 4902 struct xvfsconf xvfsp; 4903 4904 bzero(&xvfsp, sizeof(xvfsp)); 4905 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4906 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4907 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4908 xvfsp.vfc_flags = vfsp->vfc_flags; 4909 /* 4910 * These are unused in userland, we keep them 4911 * to not break binary compatibility. 4912 */ 4913 xvfsp.vfc_vfsops = NULL; 4914 xvfsp.vfc_next = NULL; 4915 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4916 } 4917 4918 #ifdef COMPAT_FREEBSD32 4919 struct xvfsconf32 { 4920 uint32_t vfc_vfsops; 4921 char vfc_name[MFSNAMELEN]; 4922 int32_t vfc_typenum; 4923 int32_t vfc_refcount; 4924 int32_t vfc_flags; 4925 uint32_t vfc_next; 4926 }; 4927 4928 static int 4929 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4930 { 4931 struct xvfsconf32 xvfsp; 4932 4933 bzero(&xvfsp, sizeof(xvfsp)); 4934 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4935 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4936 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4937 xvfsp.vfc_flags = vfsp->vfc_flags; 4938 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4939 } 4940 #endif 4941 4942 /* 4943 * Top level filesystem related information gathering. 4944 */ 4945 static int 4946 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4947 { 4948 struct vfsconf *vfsp; 4949 int error; 4950 4951 error = 0; 4952 vfsconf_slock(); 4953 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4954 #ifdef COMPAT_FREEBSD32 4955 if (req->flags & SCTL_MASK32) 4956 error = vfsconf2x32(req, vfsp); 4957 else 4958 #endif 4959 error = vfsconf2x(req, vfsp); 4960 if (error) 4961 break; 4962 } 4963 vfsconf_sunlock(); 4964 return (error); 4965 } 4966 4967 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4968 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4969 "S,xvfsconf", "List of all configured filesystems"); 4970 4971 #ifndef BURN_BRIDGES 4972 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4973 4974 static int 4975 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4976 { 4977 int *name = (int *)arg1 - 1; /* XXX */ 4978 u_int namelen = arg2 + 1; /* XXX */ 4979 struct vfsconf *vfsp; 4980 4981 log(LOG_WARNING, "userland calling deprecated sysctl, " 4982 "please rebuild world\n"); 4983 4984 #if 1 || defined(COMPAT_PRELITE2) 4985 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 4986 if (namelen == 1) 4987 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4988 #endif 4989 4990 switch (name[1]) { 4991 case VFS_MAXTYPENUM: 4992 if (namelen != 2) 4993 return (ENOTDIR); 4994 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4995 case VFS_CONF: 4996 if (namelen != 3) 4997 return (ENOTDIR); /* overloaded */ 4998 vfsconf_slock(); 4999 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 5000 if (vfsp->vfc_typenum == name[2]) 5001 break; 5002 } 5003 vfsconf_sunlock(); 5004 if (vfsp == NULL) 5005 return (EOPNOTSUPP); 5006 #ifdef COMPAT_FREEBSD32 5007 if (req->flags & SCTL_MASK32) 5008 return (vfsconf2x32(req, vfsp)); 5009 else 5010 #endif 5011 return (vfsconf2x(req, vfsp)); 5012 } 5013 return (EOPNOTSUPP); 5014 } 5015 5016 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 5017 CTLFLAG_MPSAFE, vfs_sysctl, 5018 "Generic filesystem"); 5019 5020 #if 1 || defined(COMPAT_PRELITE2) 5021 5022 static int 5023 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 5024 { 5025 int error; 5026 struct vfsconf *vfsp; 5027 struct ovfsconf ovfs; 5028 5029 vfsconf_slock(); 5030 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 5031 bzero(&ovfs, sizeof(ovfs)); 5032 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 5033 strcpy(ovfs.vfc_name, vfsp->vfc_name); 5034 ovfs.vfc_index = vfsp->vfc_typenum; 5035 ovfs.vfc_refcount = vfsp->vfc_refcount; 5036 ovfs.vfc_flags = vfsp->vfc_flags; 5037 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 5038 if (error != 0) { 5039 vfsconf_sunlock(); 5040 return (error); 5041 } 5042 } 5043 vfsconf_sunlock(); 5044 return (0); 5045 } 5046 5047 #endif /* 1 || COMPAT_PRELITE2 */ 5048 #endif /* !BURN_BRIDGES */ 5049 5050 static void 5051 unmount_or_warn(struct mount *mp) 5052 { 5053 int error; 5054 5055 error = dounmount(mp, MNT_FORCE, curthread); 5056 if (error != 0) { 5057 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 5058 if (error == EBUSY) 5059 printf("BUSY)\n"); 5060 else 5061 printf("%d)\n", error); 5062 } 5063 } 5064 5065 /* 5066 * Unmount all filesystems. The list is traversed in reverse order 5067 * of mounting to avoid dependencies. 5068 */ 5069 void 5070 vfs_unmountall(void) 5071 { 5072 struct mount *mp, *tmp; 5073 5074 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 5075 5076 /* 5077 * Since this only runs when rebooting, it is not interlocked. 5078 */ 5079 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 5080 vfs_ref(mp); 5081 5082 /* 5083 * Forcibly unmounting "/dev" before "/" would prevent clean 5084 * unmount of the latter. 5085 */ 5086 if (mp == rootdevmp) 5087 continue; 5088 5089 unmount_or_warn(mp); 5090 } 5091 5092 if (rootdevmp != NULL) 5093 unmount_or_warn(rootdevmp); 5094 } 5095 5096 static void 5097 vfs_deferred_inactive(struct vnode *vp, int lkflags) 5098 { 5099 5100 ASSERT_VI_LOCKED(vp, __func__); 5101 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 5102 if ((vp->v_iflag & VI_OWEINACT) == 0) { 5103 vdropl(vp); 5104 return; 5105 } 5106 if (vn_lock(vp, lkflags) == 0) { 5107 VI_LOCK(vp); 5108 vinactive(vp); 5109 VOP_UNLOCK(vp); 5110 vdropl(vp); 5111 return; 5112 } 5113 vdefer_inactive_unlocked(vp); 5114 } 5115 5116 static int 5117 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5118 { 5119 5120 return (vp->v_iflag & VI_DEFINACT); 5121 } 5122 5123 static void __noinline 5124 vfs_periodic_inactive(struct mount *mp, int flags) 5125 { 5126 struct vnode *vp, *mvp; 5127 int lkflags; 5128 5129 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5130 if (flags != MNT_WAIT) 5131 lkflags |= LK_NOWAIT; 5132 5133 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5134 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5135 VI_UNLOCK(vp); 5136 continue; 5137 } 5138 vp->v_iflag &= ~VI_DEFINACT; 5139 vfs_deferred_inactive(vp, lkflags); 5140 } 5141 } 5142 5143 static inline bool 5144 vfs_want_msync(struct vnode *vp) 5145 { 5146 struct vm_object *obj; 5147 5148 /* 5149 * This test may be performed without any locks held. 5150 * We rely on vm_object's type stability. 5151 */ 5152 if (vp->v_vflag & VV_NOSYNC) 5153 return (false); 5154 obj = vp->v_object; 5155 return (obj != NULL && vm_object_mightbedirty(obj)); 5156 } 5157 5158 static int 5159 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5160 { 5161 5162 if (vp->v_vflag & VV_NOSYNC) 5163 return (false); 5164 if (vp->v_iflag & VI_DEFINACT) 5165 return (true); 5166 return (vfs_want_msync(vp)); 5167 } 5168 5169 static void __noinline 5170 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5171 { 5172 struct vnode *vp, *mvp; 5173 int lkflags; 5174 bool seen_defer; 5175 5176 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5177 if (flags != MNT_WAIT) 5178 lkflags |= LK_NOWAIT; 5179 5180 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5181 seen_defer = false; 5182 if (vp->v_iflag & VI_DEFINACT) { 5183 vp->v_iflag &= ~VI_DEFINACT; 5184 seen_defer = true; 5185 } 5186 if (!vfs_want_msync(vp)) { 5187 if (seen_defer) 5188 vfs_deferred_inactive(vp, lkflags); 5189 else 5190 VI_UNLOCK(vp); 5191 continue; 5192 } 5193 if (vget(vp, lkflags) == 0) { 5194 if ((vp->v_vflag & VV_NOSYNC) == 0) { 5195 if (flags == MNT_WAIT) 5196 vnode_pager_clean_sync(vp); 5197 else 5198 vnode_pager_clean_async(vp); 5199 } 5200 vput(vp); 5201 if (seen_defer) 5202 vdrop(vp); 5203 } else { 5204 if (seen_defer) 5205 vdefer_inactive_unlocked(vp); 5206 } 5207 } 5208 } 5209 5210 void 5211 vfs_periodic(struct mount *mp, int flags) 5212 { 5213 5214 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5215 5216 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5217 vfs_periodic_inactive(mp, flags); 5218 else 5219 vfs_periodic_msync_inactive(mp, flags); 5220 } 5221 5222 static void 5223 destroy_vpollinfo_free(struct vpollinfo *vi) 5224 { 5225 5226 knlist_destroy(&vi->vpi_selinfo.si_note); 5227 mtx_destroy(&vi->vpi_lock); 5228 free(vi, M_VNODEPOLL); 5229 } 5230 5231 static void 5232 destroy_vpollinfo(struct vpollinfo *vi) 5233 { 5234 5235 knlist_clear(&vi->vpi_selinfo.si_note, 1); 5236 seldrain(&vi->vpi_selinfo); 5237 destroy_vpollinfo_free(vi); 5238 } 5239 5240 /* 5241 * Initialize per-vnode helper structure to hold poll-related state. 5242 */ 5243 void 5244 v_addpollinfo(struct vnode *vp) 5245 { 5246 struct vpollinfo *vi; 5247 5248 if (vp->v_pollinfo != NULL) 5249 return; 5250 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 5251 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 5252 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 5253 vfs_knlunlock, vfs_knl_assert_lock); 5254 VI_LOCK(vp); 5255 if (vp->v_pollinfo != NULL) { 5256 VI_UNLOCK(vp); 5257 destroy_vpollinfo_free(vi); 5258 return; 5259 } 5260 vp->v_pollinfo = vi; 5261 VI_UNLOCK(vp); 5262 } 5263 5264 /* 5265 * Record a process's interest in events which might happen to 5266 * a vnode. Because poll uses the historic select-style interface 5267 * internally, this routine serves as both the ``check for any 5268 * pending events'' and the ``record my interest in future events'' 5269 * functions. (These are done together, while the lock is held, 5270 * to avoid race conditions.) 5271 */ 5272 int 5273 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 5274 { 5275 5276 v_addpollinfo(vp); 5277 mtx_lock(&vp->v_pollinfo->vpi_lock); 5278 if (vp->v_pollinfo->vpi_revents & events) { 5279 /* 5280 * This leaves events we are not interested 5281 * in available for the other process which 5282 * which presumably had requested them 5283 * (otherwise they would never have been 5284 * recorded). 5285 */ 5286 events &= vp->v_pollinfo->vpi_revents; 5287 vp->v_pollinfo->vpi_revents &= ~events; 5288 5289 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5290 return (events); 5291 } 5292 vp->v_pollinfo->vpi_events |= events; 5293 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5294 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5295 return (0); 5296 } 5297 5298 /* 5299 * Routine to create and manage a filesystem syncer vnode. 5300 */ 5301 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5302 static int sync_fsync(struct vop_fsync_args *); 5303 static int sync_inactive(struct vop_inactive_args *); 5304 static int sync_reclaim(struct vop_reclaim_args *); 5305 5306 static struct vop_vector sync_vnodeops = { 5307 .vop_bypass = VOP_EOPNOTSUPP, 5308 .vop_close = sync_close, 5309 .vop_fsync = sync_fsync, 5310 .vop_getwritemount = vop_stdgetwritemount, 5311 .vop_inactive = sync_inactive, 5312 .vop_need_inactive = vop_stdneed_inactive, 5313 .vop_reclaim = sync_reclaim, 5314 .vop_lock1 = vop_stdlock, 5315 .vop_unlock = vop_stdunlock, 5316 .vop_islocked = vop_stdislocked, 5317 .vop_fplookup_vexec = VOP_EAGAIN, 5318 .vop_fplookup_symlink = VOP_EAGAIN, 5319 }; 5320 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5321 5322 /* 5323 * Create a new filesystem syncer vnode for the specified mount point. 5324 */ 5325 void 5326 vfs_allocate_syncvnode(struct mount *mp) 5327 { 5328 struct vnode *vp; 5329 struct bufobj *bo; 5330 static long start, incr, next; 5331 int error; 5332 5333 /* Allocate a new vnode */ 5334 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5335 if (error != 0) 5336 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5337 vp->v_type = VNON; 5338 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5339 vp->v_vflag |= VV_FORCEINSMQ; 5340 error = insmntque1(vp, mp); 5341 if (error != 0) 5342 panic("vfs_allocate_syncvnode: insmntque() failed"); 5343 vp->v_vflag &= ~VV_FORCEINSMQ; 5344 vn_set_state(vp, VSTATE_CONSTRUCTED); 5345 VOP_UNLOCK(vp); 5346 /* 5347 * Place the vnode onto the syncer worklist. We attempt to 5348 * scatter them about on the list so that they will go off 5349 * at evenly distributed times even if all the filesystems 5350 * are mounted at once. 5351 */ 5352 next += incr; 5353 if (next == 0 || next > syncer_maxdelay) { 5354 start /= 2; 5355 incr /= 2; 5356 if (start == 0) { 5357 start = syncer_maxdelay / 2; 5358 incr = syncer_maxdelay; 5359 } 5360 next = start; 5361 } 5362 bo = &vp->v_bufobj; 5363 BO_LOCK(bo); 5364 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5365 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5366 mtx_lock(&sync_mtx); 5367 sync_vnode_count++; 5368 if (mp->mnt_syncer == NULL) { 5369 mp->mnt_syncer = vp; 5370 vp = NULL; 5371 } 5372 mtx_unlock(&sync_mtx); 5373 BO_UNLOCK(bo); 5374 if (vp != NULL) { 5375 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5376 vgone(vp); 5377 vput(vp); 5378 } 5379 } 5380 5381 void 5382 vfs_deallocate_syncvnode(struct mount *mp) 5383 { 5384 struct vnode *vp; 5385 5386 mtx_lock(&sync_mtx); 5387 vp = mp->mnt_syncer; 5388 if (vp != NULL) 5389 mp->mnt_syncer = NULL; 5390 mtx_unlock(&sync_mtx); 5391 if (vp != NULL) 5392 vrele(vp); 5393 } 5394 5395 /* 5396 * Do a lazy sync of the filesystem. 5397 */ 5398 static int 5399 sync_fsync(struct vop_fsync_args *ap) 5400 { 5401 struct vnode *syncvp = ap->a_vp; 5402 struct mount *mp = syncvp->v_mount; 5403 int error, save; 5404 struct bufobj *bo; 5405 5406 /* 5407 * We only need to do something if this is a lazy evaluation. 5408 */ 5409 if (ap->a_waitfor != MNT_LAZY) 5410 return (0); 5411 5412 /* 5413 * Move ourselves to the back of the sync list. 5414 */ 5415 bo = &syncvp->v_bufobj; 5416 BO_LOCK(bo); 5417 vn_syncer_add_to_worklist(bo, syncdelay); 5418 BO_UNLOCK(bo); 5419 5420 /* 5421 * Walk the list of vnodes pushing all that are dirty and 5422 * not already on the sync list. 5423 */ 5424 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5425 return (0); 5426 VOP_UNLOCK(syncvp); 5427 save = curthread_pflags_set(TDP_SYNCIO); 5428 /* 5429 * The filesystem at hand may be idle with free vnodes stored in the 5430 * batch. Return them instead of letting them stay there indefinitely. 5431 */ 5432 vfs_periodic(mp, MNT_NOWAIT); 5433 error = VFS_SYNC(mp, MNT_LAZY); 5434 curthread_pflags_restore(save); 5435 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5436 vfs_unbusy(mp); 5437 return (error); 5438 } 5439 5440 /* 5441 * The syncer vnode is no referenced. 5442 */ 5443 static int 5444 sync_inactive(struct vop_inactive_args *ap) 5445 { 5446 5447 vgone(ap->a_vp); 5448 return (0); 5449 } 5450 5451 /* 5452 * The syncer vnode is no longer needed and is being decommissioned. 5453 * 5454 * Modifications to the worklist must be protected by sync_mtx. 5455 */ 5456 static int 5457 sync_reclaim(struct vop_reclaim_args *ap) 5458 { 5459 struct vnode *vp = ap->a_vp; 5460 struct bufobj *bo; 5461 5462 bo = &vp->v_bufobj; 5463 BO_LOCK(bo); 5464 mtx_lock(&sync_mtx); 5465 if (vp->v_mount->mnt_syncer == vp) 5466 vp->v_mount->mnt_syncer = NULL; 5467 if (bo->bo_flag & BO_ONWORKLST) { 5468 LIST_REMOVE(bo, bo_synclist); 5469 syncer_worklist_len--; 5470 sync_vnode_count--; 5471 bo->bo_flag &= ~BO_ONWORKLST; 5472 } 5473 mtx_unlock(&sync_mtx); 5474 BO_UNLOCK(bo); 5475 5476 return (0); 5477 } 5478 5479 int 5480 vn_need_pageq_flush(struct vnode *vp) 5481 { 5482 struct vm_object *obj; 5483 5484 obj = vp->v_object; 5485 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5486 vm_object_mightbedirty(obj)); 5487 } 5488 5489 /* 5490 * Check if vnode represents a disk device 5491 */ 5492 bool 5493 vn_isdisk_error(struct vnode *vp, int *errp) 5494 { 5495 int error; 5496 5497 if (vp->v_type != VCHR) { 5498 error = ENOTBLK; 5499 goto out; 5500 } 5501 error = 0; 5502 dev_lock(); 5503 if (vp->v_rdev == NULL) 5504 error = ENXIO; 5505 else if (vp->v_rdev->si_devsw == NULL) 5506 error = ENXIO; 5507 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5508 error = ENOTBLK; 5509 dev_unlock(); 5510 out: 5511 *errp = error; 5512 return (error == 0); 5513 } 5514 5515 bool 5516 vn_isdisk(struct vnode *vp) 5517 { 5518 int error; 5519 5520 return (vn_isdisk_error(vp, &error)); 5521 } 5522 5523 /* 5524 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5525 * the comment above cache_fplookup for details. 5526 */ 5527 int 5528 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5529 { 5530 int error; 5531 5532 VFS_SMR_ASSERT_ENTERED(); 5533 5534 /* Check the owner. */ 5535 if (cred->cr_uid == file_uid) { 5536 if (file_mode & S_IXUSR) 5537 return (0); 5538 goto out_error; 5539 } 5540 5541 /* Otherwise, check the groups (first match) */ 5542 if (groupmember(file_gid, cred)) { 5543 if (file_mode & S_IXGRP) 5544 return (0); 5545 goto out_error; 5546 } 5547 5548 /* Otherwise, check everyone else. */ 5549 if (file_mode & S_IXOTH) 5550 return (0); 5551 out_error: 5552 /* 5553 * Permission check failed, but it is possible denial will get overwritten 5554 * (e.g., when root is traversing through a 700 directory owned by someone 5555 * else). 5556 * 5557 * vaccess() calls priv_check_cred which in turn can descent into MAC 5558 * modules overriding this result. It's quite unclear what semantics 5559 * are allowed for them to operate, thus for safety we don't call them 5560 * from within the SMR section. This also means if any such modules 5561 * are present, we have to let the regular lookup decide. 5562 */ 5563 error = priv_check_cred_vfs_lookup_nomac(cred); 5564 switch (error) { 5565 case 0: 5566 return (0); 5567 case EAGAIN: 5568 /* 5569 * MAC modules present. 5570 */ 5571 return (EAGAIN); 5572 case EPERM: 5573 return (EACCES); 5574 default: 5575 return (error); 5576 } 5577 } 5578 5579 /* 5580 * Common filesystem object access control check routine. Accepts a 5581 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5582 * Returns 0 on success, or an errno on failure. 5583 */ 5584 int 5585 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5586 accmode_t accmode, struct ucred *cred) 5587 { 5588 accmode_t dac_granted; 5589 accmode_t priv_granted; 5590 5591 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5592 ("invalid bit in accmode")); 5593 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5594 ("VAPPEND without VWRITE")); 5595 5596 /* 5597 * Look for a normal, non-privileged way to access the file/directory 5598 * as requested. If it exists, go with that. 5599 */ 5600 5601 dac_granted = 0; 5602 5603 /* Check the owner. */ 5604 if (cred->cr_uid == file_uid) { 5605 dac_granted |= VADMIN; 5606 if (file_mode & S_IXUSR) 5607 dac_granted |= VEXEC; 5608 if (file_mode & S_IRUSR) 5609 dac_granted |= VREAD; 5610 if (file_mode & S_IWUSR) 5611 dac_granted |= (VWRITE | VAPPEND); 5612 5613 if ((accmode & dac_granted) == accmode) 5614 return (0); 5615 5616 goto privcheck; 5617 } 5618 5619 /* Otherwise, check the groups (first match) */ 5620 if (groupmember(file_gid, cred)) { 5621 if (file_mode & S_IXGRP) 5622 dac_granted |= VEXEC; 5623 if (file_mode & S_IRGRP) 5624 dac_granted |= VREAD; 5625 if (file_mode & S_IWGRP) 5626 dac_granted |= (VWRITE | VAPPEND); 5627 5628 if ((accmode & dac_granted) == accmode) 5629 return (0); 5630 5631 goto privcheck; 5632 } 5633 5634 /* Otherwise, check everyone else. */ 5635 if (file_mode & S_IXOTH) 5636 dac_granted |= VEXEC; 5637 if (file_mode & S_IROTH) 5638 dac_granted |= VREAD; 5639 if (file_mode & S_IWOTH) 5640 dac_granted |= (VWRITE | VAPPEND); 5641 if ((accmode & dac_granted) == accmode) 5642 return (0); 5643 5644 privcheck: 5645 /* 5646 * Build a privilege mask to determine if the set of privileges 5647 * satisfies the requirements when combined with the granted mask 5648 * from above. For each privilege, if the privilege is required, 5649 * bitwise or the request type onto the priv_granted mask. 5650 */ 5651 priv_granted = 0; 5652 5653 if (type == VDIR) { 5654 /* 5655 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5656 * requests, instead of PRIV_VFS_EXEC. 5657 */ 5658 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5659 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5660 priv_granted |= VEXEC; 5661 } else { 5662 /* 5663 * Ensure that at least one execute bit is on. Otherwise, 5664 * a privileged user will always succeed, and we don't want 5665 * this to happen unless the file really is executable. 5666 */ 5667 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5668 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5669 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5670 priv_granted |= VEXEC; 5671 } 5672 5673 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5674 !priv_check_cred(cred, PRIV_VFS_READ)) 5675 priv_granted |= VREAD; 5676 5677 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5678 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5679 priv_granted |= (VWRITE | VAPPEND); 5680 5681 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5682 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5683 priv_granted |= VADMIN; 5684 5685 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5686 return (0); 5687 } 5688 5689 return ((accmode & VADMIN) ? EPERM : EACCES); 5690 } 5691 5692 /* 5693 * Credential check based on process requesting service, and per-attribute 5694 * permissions. 5695 */ 5696 int 5697 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5698 struct thread *td, accmode_t accmode) 5699 { 5700 5701 /* 5702 * Kernel-invoked always succeeds. 5703 */ 5704 if (cred == NOCRED) 5705 return (0); 5706 5707 /* 5708 * Do not allow privileged processes in jail to directly manipulate 5709 * system attributes. 5710 */ 5711 switch (attrnamespace) { 5712 case EXTATTR_NAMESPACE_SYSTEM: 5713 /* Potentially should be: return (EPERM); */ 5714 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5715 case EXTATTR_NAMESPACE_USER: 5716 return (VOP_ACCESS(vp, accmode, cred, td)); 5717 default: 5718 return (EPERM); 5719 } 5720 } 5721 5722 #ifdef DEBUG_VFS_LOCKS 5723 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5724 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5725 "Drop into debugger on lock violation"); 5726 5727 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5728 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5729 0, "Check for interlock across VOPs"); 5730 5731 int vfs_badlock_print = 1; /* Print lock violations. */ 5732 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5733 0, "Print lock violations"); 5734 5735 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5736 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5737 0, "Print vnode details on lock violations"); 5738 5739 #ifdef KDB 5740 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5741 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5742 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5743 #endif 5744 5745 static void 5746 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5747 { 5748 5749 #ifdef KDB 5750 if (vfs_badlock_backtrace) 5751 kdb_backtrace(); 5752 #endif 5753 if (vfs_badlock_vnode) 5754 vn_printf(vp, "vnode "); 5755 if (vfs_badlock_print) 5756 printf("%s: %p %s\n", str, (void *)vp, msg); 5757 if (vfs_badlock_ddb) 5758 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5759 } 5760 5761 void 5762 assert_vi_locked(struct vnode *vp, const char *str) 5763 { 5764 5765 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5766 vfs_badlock("interlock is not locked but should be", str, vp); 5767 } 5768 5769 void 5770 assert_vi_unlocked(struct vnode *vp, const char *str) 5771 { 5772 5773 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5774 vfs_badlock("interlock is locked but should not be", str, vp); 5775 } 5776 5777 void 5778 assert_vop_locked(struct vnode *vp, const char *str) 5779 { 5780 if (KERNEL_PANICKED() || vp == NULL) 5781 return; 5782 5783 #ifdef WITNESS 5784 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5785 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5786 #else 5787 int locked = VOP_ISLOCKED(vp); 5788 if (locked == 0 || locked == LK_EXCLOTHER) 5789 #endif 5790 vfs_badlock("is not locked but should be", str, vp); 5791 } 5792 5793 void 5794 assert_vop_unlocked(struct vnode *vp, const char *str) 5795 { 5796 if (KERNEL_PANICKED() || vp == NULL) 5797 return; 5798 5799 #ifdef WITNESS 5800 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5801 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5802 #else 5803 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5804 #endif 5805 vfs_badlock("is locked but should not be", str, vp); 5806 } 5807 5808 void 5809 assert_vop_elocked(struct vnode *vp, const char *str) 5810 { 5811 if (KERNEL_PANICKED() || vp == NULL) 5812 return; 5813 5814 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5815 vfs_badlock("is not exclusive locked but should be", str, vp); 5816 } 5817 #endif /* DEBUG_VFS_LOCKS */ 5818 5819 void 5820 vop_rename_fail(struct vop_rename_args *ap) 5821 { 5822 5823 if (ap->a_tvp != NULL) 5824 vput(ap->a_tvp); 5825 if (ap->a_tdvp == ap->a_tvp) 5826 vrele(ap->a_tdvp); 5827 else 5828 vput(ap->a_tdvp); 5829 vrele(ap->a_fdvp); 5830 vrele(ap->a_fvp); 5831 } 5832 5833 void 5834 vop_rename_pre(void *ap) 5835 { 5836 struct vop_rename_args *a = ap; 5837 5838 #ifdef DEBUG_VFS_LOCKS 5839 if (a->a_tvp) 5840 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5841 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5842 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5843 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5844 5845 /* Check the source (from). */ 5846 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5847 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5848 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5849 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5850 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5851 5852 /* Check the target. */ 5853 if (a->a_tvp) 5854 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5855 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5856 #endif 5857 /* 5858 * It may be tempting to add vn_seqc_write_begin/end calls here and 5859 * in vop_rename_post but that's not going to work out since some 5860 * filesystems relookup vnodes mid-rename. This is probably a bug. 5861 * 5862 * For now filesystems are expected to do the relevant calls after they 5863 * decide what vnodes to operate on. 5864 */ 5865 if (a->a_tdvp != a->a_fdvp) 5866 vhold(a->a_fdvp); 5867 if (a->a_tvp != a->a_fvp) 5868 vhold(a->a_fvp); 5869 vhold(a->a_tdvp); 5870 if (a->a_tvp) 5871 vhold(a->a_tvp); 5872 } 5873 5874 #ifdef DEBUG_VFS_LOCKS 5875 void 5876 vop_fplookup_vexec_debugpre(void *ap __unused) 5877 { 5878 5879 VFS_SMR_ASSERT_ENTERED(); 5880 } 5881 5882 void 5883 vop_fplookup_vexec_debugpost(void *ap, int rc) 5884 { 5885 struct vop_fplookup_vexec_args *a; 5886 struct vnode *vp; 5887 5888 a = ap; 5889 vp = a->a_vp; 5890 5891 VFS_SMR_ASSERT_ENTERED(); 5892 if (rc == EOPNOTSUPP) 5893 VNPASS(VN_IS_DOOMED(vp), vp); 5894 } 5895 5896 void 5897 vop_fplookup_symlink_debugpre(void *ap __unused) 5898 { 5899 5900 VFS_SMR_ASSERT_ENTERED(); 5901 } 5902 5903 void 5904 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5905 { 5906 5907 VFS_SMR_ASSERT_ENTERED(); 5908 } 5909 5910 static void 5911 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5912 { 5913 if (vp->v_type == VCHR) 5914 ; 5915 /* 5916 * The shared vs. exclusive locking policy for fsync() 5917 * is actually determined by vp's write mount as indicated 5918 * by VOP_GETWRITEMOUNT(), which for stacked filesystems 5919 * may not be the same as vp->v_mount. However, if the 5920 * underlying filesystem which really handles the fsync() 5921 * supports shared locking, the stacked filesystem must also 5922 * be prepared for its VOP_FSYNC() operation to be called 5923 * with only a shared lock. On the other hand, if the 5924 * stacked filesystem claims support for shared write 5925 * locking but the underlying filesystem does not, and the 5926 * caller incorrectly uses a shared lock, this condition 5927 * should still be caught when the stacked filesystem 5928 * invokes VOP_FSYNC() on the underlying filesystem. 5929 */ 5930 else if (MNT_SHARED_WRITES(vp->v_mount)) 5931 ASSERT_VOP_LOCKED(vp, name); 5932 else 5933 ASSERT_VOP_ELOCKED(vp, name); 5934 } 5935 5936 void 5937 vop_fsync_debugpre(void *a) 5938 { 5939 struct vop_fsync_args *ap; 5940 5941 ap = a; 5942 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5943 } 5944 5945 void 5946 vop_fsync_debugpost(void *a, int rc __unused) 5947 { 5948 struct vop_fsync_args *ap; 5949 5950 ap = a; 5951 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5952 } 5953 5954 void 5955 vop_fdatasync_debugpre(void *a) 5956 { 5957 struct vop_fdatasync_args *ap; 5958 5959 ap = a; 5960 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5961 } 5962 5963 void 5964 vop_fdatasync_debugpost(void *a, int rc __unused) 5965 { 5966 struct vop_fdatasync_args *ap; 5967 5968 ap = a; 5969 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5970 } 5971 5972 void 5973 vop_strategy_debugpre(void *ap) 5974 { 5975 struct vop_strategy_args *a; 5976 struct buf *bp; 5977 5978 a = ap; 5979 bp = a->a_bp; 5980 5981 /* 5982 * Cluster ops lock their component buffers but not the IO container. 5983 */ 5984 if ((bp->b_flags & B_CLUSTER) != 0) 5985 return; 5986 5987 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5988 if (vfs_badlock_print) 5989 printf( 5990 "VOP_STRATEGY: bp is not locked but should be\n"); 5991 if (vfs_badlock_ddb) 5992 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5993 } 5994 } 5995 5996 void 5997 vop_lock_debugpre(void *ap) 5998 { 5999 struct vop_lock1_args *a = ap; 6000 6001 if ((a->a_flags & LK_INTERLOCK) == 0) 6002 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 6003 else 6004 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 6005 } 6006 6007 void 6008 vop_lock_debugpost(void *ap, int rc) 6009 { 6010 struct vop_lock1_args *a = ap; 6011 6012 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 6013 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 6014 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 6015 } 6016 6017 void 6018 vop_unlock_debugpre(void *ap) 6019 { 6020 struct vop_unlock_args *a = ap; 6021 struct vnode *vp = a->a_vp; 6022 6023 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 6024 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 6025 } 6026 6027 void 6028 vop_need_inactive_debugpre(void *ap) 6029 { 6030 struct vop_need_inactive_args *a = ap; 6031 6032 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 6033 } 6034 6035 void 6036 vop_need_inactive_debugpost(void *ap, int rc) 6037 { 6038 struct vop_need_inactive_args *a = ap; 6039 6040 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 6041 } 6042 #endif 6043 6044 void 6045 vop_create_pre(void *ap) 6046 { 6047 struct vop_create_args *a; 6048 struct vnode *dvp; 6049 6050 a = ap; 6051 dvp = a->a_dvp; 6052 vn_seqc_write_begin(dvp); 6053 } 6054 6055 void 6056 vop_create_post(void *ap, int rc) 6057 { 6058 struct vop_create_args *a; 6059 struct vnode *dvp; 6060 6061 a = ap; 6062 dvp = a->a_dvp; 6063 vn_seqc_write_end(dvp); 6064 if (!rc) 6065 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6066 } 6067 6068 void 6069 vop_whiteout_pre(void *ap) 6070 { 6071 struct vop_whiteout_args *a; 6072 struct vnode *dvp; 6073 6074 a = ap; 6075 dvp = a->a_dvp; 6076 vn_seqc_write_begin(dvp); 6077 } 6078 6079 void 6080 vop_whiteout_post(void *ap, int rc) 6081 { 6082 struct vop_whiteout_args *a; 6083 struct vnode *dvp; 6084 6085 a = ap; 6086 dvp = a->a_dvp; 6087 vn_seqc_write_end(dvp); 6088 } 6089 6090 void 6091 vop_deleteextattr_pre(void *ap) 6092 { 6093 struct vop_deleteextattr_args *a; 6094 struct vnode *vp; 6095 6096 a = ap; 6097 vp = a->a_vp; 6098 vn_seqc_write_begin(vp); 6099 } 6100 6101 void 6102 vop_deleteextattr_post(void *ap, int rc) 6103 { 6104 struct vop_deleteextattr_args *a; 6105 struct vnode *vp; 6106 6107 a = ap; 6108 vp = a->a_vp; 6109 vn_seqc_write_end(vp); 6110 if (!rc) 6111 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 6112 } 6113 6114 void 6115 vop_link_pre(void *ap) 6116 { 6117 struct vop_link_args *a; 6118 struct vnode *vp, *tdvp; 6119 6120 a = ap; 6121 vp = a->a_vp; 6122 tdvp = a->a_tdvp; 6123 vn_seqc_write_begin(vp); 6124 vn_seqc_write_begin(tdvp); 6125 } 6126 6127 void 6128 vop_link_post(void *ap, int rc) 6129 { 6130 struct vop_link_args *a; 6131 struct vnode *vp, *tdvp; 6132 6133 a = ap; 6134 vp = a->a_vp; 6135 tdvp = a->a_tdvp; 6136 vn_seqc_write_end(vp); 6137 vn_seqc_write_end(tdvp); 6138 if (!rc) { 6139 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6140 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6141 } 6142 } 6143 6144 void 6145 vop_mkdir_pre(void *ap) 6146 { 6147 struct vop_mkdir_args *a; 6148 struct vnode *dvp; 6149 6150 a = ap; 6151 dvp = a->a_dvp; 6152 vn_seqc_write_begin(dvp); 6153 } 6154 6155 void 6156 vop_mkdir_post(void *ap, int rc) 6157 { 6158 struct vop_mkdir_args *a; 6159 struct vnode *dvp; 6160 6161 a = ap; 6162 dvp = a->a_dvp; 6163 vn_seqc_write_end(dvp); 6164 if (!rc) 6165 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6166 } 6167 6168 #ifdef DEBUG_VFS_LOCKS 6169 void 6170 vop_mkdir_debugpost(void *ap, int rc) 6171 { 6172 struct vop_mkdir_args *a; 6173 6174 a = ap; 6175 if (!rc) 6176 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 6177 } 6178 #endif 6179 6180 void 6181 vop_mknod_pre(void *ap) 6182 { 6183 struct vop_mknod_args *a; 6184 struct vnode *dvp; 6185 6186 a = ap; 6187 dvp = a->a_dvp; 6188 vn_seqc_write_begin(dvp); 6189 } 6190 6191 void 6192 vop_mknod_post(void *ap, int rc) 6193 { 6194 struct vop_mknod_args *a; 6195 struct vnode *dvp; 6196 6197 a = ap; 6198 dvp = a->a_dvp; 6199 vn_seqc_write_end(dvp); 6200 if (!rc) 6201 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6202 } 6203 6204 void 6205 vop_reclaim_post(void *ap, int rc) 6206 { 6207 struct vop_reclaim_args *a; 6208 struct vnode *vp; 6209 6210 a = ap; 6211 vp = a->a_vp; 6212 ASSERT_VOP_IN_SEQC(vp); 6213 if (!rc) 6214 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 6215 } 6216 6217 void 6218 vop_remove_pre(void *ap) 6219 { 6220 struct vop_remove_args *a; 6221 struct vnode *dvp, *vp; 6222 6223 a = ap; 6224 dvp = a->a_dvp; 6225 vp = a->a_vp; 6226 vn_seqc_write_begin(dvp); 6227 vn_seqc_write_begin(vp); 6228 } 6229 6230 void 6231 vop_remove_post(void *ap, int rc) 6232 { 6233 struct vop_remove_args *a; 6234 struct vnode *dvp, *vp; 6235 6236 a = ap; 6237 dvp = a->a_dvp; 6238 vp = a->a_vp; 6239 vn_seqc_write_end(dvp); 6240 vn_seqc_write_end(vp); 6241 if (!rc) { 6242 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6243 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6244 } 6245 } 6246 6247 void 6248 vop_rename_post(void *ap, int rc) 6249 { 6250 struct vop_rename_args *a = ap; 6251 long hint; 6252 6253 if (!rc) { 6254 hint = NOTE_WRITE; 6255 if (a->a_fdvp == a->a_tdvp) { 6256 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6257 hint |= NOTE_LINK; 6258 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6259 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6260 } else { 6261 hint |= NOTE_EXTEND; 6262 if (a->a_fvp->v_type == VDIR) 6263 hint |= NOTE_LINK; 6264 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6265 6266 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6267 a->a_tvp->v_type == VDIR) 6268 hint &= ~NOTE_LINK; 6269 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6270 } 6271 6272 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6273 if (a->a_tvp) 6274 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6275 } 6276 if (a->a_tdvp != a->a_fdvp) 6277 vdrop(a->a_fdvp); 6278 if (a->a_tvp != a->a_fvp) 6279 vdrop(a->a_fvp); 6280 vdrop(a->a_tdvp); 6281 if (a->a_tvp) 6282 vdrop(a->a_tvp); 6283 } 6284 6285 void 6286 vop_rmdir_pre(void *ap) 6287 { 6288 struct vop_rmdir_args *a; 6289 struct vnode *dvp, *vp; 6290 6291 a = ap; 6292 dvp = a->a_dvp; 6293 vp = a->a_vp; 6294 vn_seqc_write_begin(dvp); 6295 vn_seqc_write_begin(vp); 6296 } 6297 6298 void 6299 vop_rmdir_post(void *ap, int rc) 6300 { 6301 struct vop_rmdir_args *a; 6302 struct vnode *dvp, *vp; 6303 6304 a = ap; 6305 dvp = a->a_dvp; 6306 vp = a->a_vp; 6307 vn_seqc_write_end(dvp); 6308 vn_seqc_write_end(vp); 6309 if (!rc) { 6310 vp->v_vflag |= VV_UNLINKED; 6311 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6312 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6313 } 6314 } 6315 6316 void 6317 vop_setattr_pre(void *ap) 6318 { 6319 struct vop_setattr_args *a; 6320 struct vnode *vp; 6321 6322 a = ap; 6323 vp = a->a_vp; 6324 vn_seqc_write_begin(vp); 6325 } 6326 6327 void 6328 vop_setattr_post(void *ap, int rc) 6329 { 6330 struct vop_setattr_args *a; 6331 struct vnode *vp; 6332 6333 a = ap; 6334 vp = a->a_vp; 6335 vn_seqc_write_end(vp); 6336 if (!rc) 6337 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6338 } 6339 6340 void 6341 vop_setacl_pre(void *ap) 6342 { 6343 struct vop_setacl_args *a; 6344 struct vnode *vp; 6345 6346 a = ap; 6347 vp = a->a_vp; 6348 vn_seqc_write_begin(vp); 6349 } 6350 6351 void 6352 vop_setacl_post(void *ap, int rc __unused) 6353 { 6354 struct vop_setacl_args *a; 6355 struct vnode *vp; 6356 6357 a = ap; 6358 vp = a->a_vp; 6359 vn_seqc_write_end(vp); 6360 } 6361 6362 void 6363 vop_setextattr_pre(void *ap) 6364 { 6365 struct vop_setextattr_args *a; 6366 struct vnode *vp; 6367 6368 a = ap; 6369 vp = a->a_vp; 6370 vn_seqc_write_begin(vp); 6371 } 6372 6373 void 6374 vop_setextattr_post(void *ap, int rc) 6375 { 6376 struct vop_setextattr_args *a; 6377 struct vnode *vp; 6378 6379 a = ap; 6380 vp = a->a_vp; 6381 vn_seqc_write_end(vp); 6382 if (!rc) 6383 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6384 } 6385 6386 void 6387 vop_symlink_pre(void *ap) 6388 { 6389 struct vop_symlink_args *a; 6390 struct vnode *dvp; 6391 6392 a = ap; 6393 dvp = a->a_dvp; 6394 vn_seqc_write_begin(dvp); 6395 } 6396 6397 void 6398 vop_symlink_post(void *ap, int rc) 6399 { 6400 struct vop_symlink_args *a; 6401 struct vnode *dvp; 6402 6403 a = ap; 6404 dvp = a->a_dvp; 6405 vn_seqc_write_end(dvp); 6406 if (!rc) 6407 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6408 } 6409 6410 void 6411 vop_open_post(void *ap, int rc) 6412 { 6413 struct vop_open_args *a = ap; 6414 6415 if (!rc) 6416 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6417 } 6418 6419 void 6420 vop_close_post(void *ap, int rc) 6421 { 6422 struct vop_close_args *a = ap; 6423 6424 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6425 !VN_IS_DOOMED(a->a_vp))) { 6426 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6427 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6428 } 6429 } 6430 6431 void 6432 vop_read_post(void *ap, int rc) 6433 { 6434 struct vop_read_args *a = ap; 6435 6436 if (!rc) 6437 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6438 } 6439 6440 void 6441 vop_read_pgcache_post(void *ap, int rc) 6442 { 6443 struct vop_read_pgcache_args *a = ap; 6444 6445 if (!rc) 6446 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6447 } 6448 6449 void 6450 vop_readdir_post(void *ap, int rc) 6451 { 6452 struct vop_readdir_args *a = ap; 6453 6454 if (!rc) 6455 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6456 } 6457 6458 static struct knlist fs_knlist; 6459 6460 static void 6461 vfs_event_init(void *arg) 6462 { 6463 knlist_init_mtx(&fs_knlist, NULL); 6464 } 6465 /* XXX - correct order? */ 6466 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6467 6468 void 6469 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6470 { 6471 6472 KNOTE_UNLOCKED(&fs_knlist, event); 6473 } 6474 6475 static int filt_fsattach(struct knote *kn); 6476 static void filt_fsdetach(struct knote *kn); 6477 static int filt_fsevent(struct knote *kn, long hint); 6478 6479 const struct filterops fs_filtops = { 6480 .f_isfd = 0, 6481 .f_attach = filt_fsattach, 6482 .f_detach = filt_fsdetach, 6483 .f_event = filt_fsevent 6484 }; 6485 6486 static int 6487 filt_fsattach(struct knote *kn) 6488 { 6489 6490 kn->kn_flags |= EV_CLEAR; 6491 knlist_add(&fs_knlist, kn, 0); 6492 return (0); 6493 } 6494 6495 static void 6496 filt_fsdetach(struct knote *kn) 6497 { 6498 6499 knlist_remove(&fs_knlist, kn, 0); 6500 } 6501 6502 static int 6503 filt_fsevent(struct knote *kn, long hint) 6504 { 6505 6506 kn->kn_fflags |= kn->kn_sfflags & hint; 6507 6508 return (kn->kn_fflags != 0); 6509 } 6510 6511 static int 6512 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6513 { 6514 struct vfsidctl vc; 6515 int error; 6516 struct mount *mp; 6517 6518 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6519 if (error) 6520 return (error); 6521 if (vc.vc_vers != VFS_CTL_VERS1) 6522 return (EINVAL); 6523 mp = vfs_getvfs(&vc.vc_fsid); 6524 if (mp == NULL) 6525 return (ENOENT); 6526 /* ensure that a specific sysctl goes to the right filesystem. */ 6527 if (strcmp(vc.vc_fstypename, "*") != 0 && 6528 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6529 vfs_rel(mp); 6530 return (EINVAL); 6531 } 6532 VCTLTOREQ(&vc, req); 6533 error = VFS_SYSCTL(mp, vc.vc_op, req); 6534 vfs_rel(mp); 6535 return (error); 6536 } 6537 6538 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6539 NULL, 0, sysctl_vfs_ctl, "", 6540 "Sysctl by fsid"); 6541 6542 /* 6543 * Function to initialize a va_filerev field sensibly. 6544 * XXX: Wouldn't a random number make a lot more sense ?? 6545 */ 6546 u_quad_t 6547 init_va_filerev(void) 6548 { 6549 struct bintime bt; 6550 6551 getbinuptime(&bt); 6552 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6553 } 6554 6555 static int filt_vfsread(struct knote *kn, long hint); 6556 static int filt_vfswrite(struct knote *kn, long hint); 6557 static int filt_vfsvnode(struct knote *kn, long hint); 6558 static void filt_vfsdetach(struct knote *kn); 6559 static const struct filterops vfsread_filtops = { 6560 .f_isfd = 1, 6561 .f_detach = filt_vfsdetach, 6562 .f_event = filt_vfsread 6563 }; 6564 static const struct filterops vfswrite_filtops = { 6565 .f_isfd = 1, 6566 .f_detach = filt_vfsdetach, 6567 .f_event = filt_vfswrite 6568 }; 6569 static const struct filterops vfsvnode_filtops = { 6570 .f_isfd = 1, 6571 .f_detach = filt_vfsdetach, 6572 .f_event = filt_vfsvnode 6573 }; 6574 6575 static void 6576 vfs_knllock(void *arg) 6577 { 6578 struct vnode *vp = arg; 6579 6580 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6581 } 6582 6583 static void 6584 vfs_knlunlock(void *arg) 6585 { 6586 struct vnode *vp = arg; 6587 6588 VOP_UNLOCK(vp); 6589 } 6590 6591 static void 6592 vfs_knl_assert_lock(void *arg, int what) 6593 { 6594 #ifdef DEBUG_VFS_LOCKS 6595 struct vnode *vp = arg; 6596 6597 if (what == LA_LOCKED) 6598 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6599 else 6600 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6601 #endif 6602 } 6603 6604 int 6605 vfs_kqfilter(struct vop_kqfilter_args *ap) 6606 { 6607 struct vnode *vp = ap->a_vp; 6608 struct knote *kn = ap->a_kn; 6609 struct knlist *knl; 6610 6611 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6612 kn->kn_filter != EVFILT_WRITE), 6613 ("READ/WRITE filter on a FIFO leaked through")); 6614 switch (kn->kn_filter) { 6615 case EVFILT_READ: 6616 kn->kn_fop = &vfsread_filtops; 6617 break; 6618 case EVFILT_WRITE: 6619 kn->kn_fop = &vfswrite_filtops; 6620 break; 6621 case EVFILT_VNODE: 6622 kn->kn_fop = &vfsvnode_filtops; 6623 break; 6624 default: 6625 return (EINVAL); 6626 } 6627 6628 kn->kn_hook = (caddr_t)vp; 6629 6630 v_addpollinfo(vp); 6631 if (vp->v_pollinfo == NULL) 6632 return (ENOMEM); 6633 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6634 vhold(vp); 6635 knlist_add(knl, kn, 0); 6636 6637 return (0); 6638 } 6639 6640 /* 6641 * Detach knote from vnode 6642 */ 6643 static void 6644 filt_vfsdetach(struct knote *kn) 6645 { 6646 struct vnode *vp = (struct vnode *)kn->kn_hook; 6647 6648 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6649 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6650 vdrop(vp); 6651 } 6652 6653 /*ARGSUSED*/ 6654 static int 6655 filt_vfsread(struct knote *kn, long hint) 6656 { 6657 struct vnode *vp = (struct vnode *)kn->kn_hook; 6658 off_t size; 6659 int res; 6660 6661 /* 6662 * filesystem is gone, so set the EOF flag and schedule 6663 * the knote for deletion. 6664 */ 6665 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6666 VI_LOCK(vp); 6667 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6668 VI_UNLOCK(vp); 6669 return (1); 6670 } 6671 6672 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6673 return (0); 6674 6675 VI_LOCK(vp); 6676 kn->kn_data = size - kn->kn_fp->f_offset; 6677 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6678 VI_UNLOCK(vp); 6679 return (res); 6680 } 6681 6682 /*ARGSUSED*/ 6683 static int 6684 filt_vfswrite(struct knote *kn, long hint) 6685 { 6686 struct vnode *vp = (struct vnode *)kn->kn_hook; 6687 6688 VI_LOCK(vp); 6689 6690 /* 6691 * filesystem is gone, so set the EOF flag and schedule 6692 * the knote for deletion. 6693 */ 6694 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6695 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6696 6697 kn->kn_data = 0; 6698 VI_UNLOCK(vp); 6699 return (1); 6700 } 6701 6702 static int 6703 filt_vfsvnode(struct knote *kn, long hint) 6704 { 6705 struct vnode *vp = (struct vnode *)kn->kn_hook; 6706 int res; 6707 6708 VI_LOCK(vp); 6709 if (kn->kn_sfflags & hint) 6710 kn->kn_fflags |= hint; 6711 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6712 kn->kn_flags |= EV_EOF; 6713 VI_UNLOCK(vp); 6714 return (1); 6715 } 6716 res = (kn->kn_fflags != 0); 6717 VI_UNLOCK(vp); 6718 return (res); 6719 } 6720 6721 int 6722 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6723 { 6724 int error; 6725 6726 if (dp->d_reclen > ap->a_uio->uio_resid) 6727 return (ENAMETOOLONG); 6728 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6729 if (error) { 6730 if (ap->a_ncookies != NULL) { 6731 if (ap->a_cookies != NULL) 6732 free(ap->a_cookies, M_TEMP); 6733 ap->a_cookies = NULL; 6734 *ap->a_ncookies = 0; 6735 } 6736 return (error); 6737 } 6738 if (ap->a_ncookies == NULL) 6739 return (0); 6740 6741 KASSERT(ap->a_cookies, 6742 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6743 6744 *ap->a_cookies = realloc(*ap->a_cookies, 6745 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6746 (*ap->a_cookies)[*ap->a_ncookies] = off; 6747 *ap->a_ncookies += 1; 6748 return (0); 6749 } 6750 6751 /* 6752 * The purpose of this routine is to remove granularity from accmode_t, 6753 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6754 * VADMIN and VAPPEND. 6755 * 6756 * If it returns 0, the caller is supposed to continue with the usual 6757 * access checks using 'accmode' as modified by this routine. If it 6758 * returns nonzero value, the caller is supposed to return that value 6759 * as errno. 6760 * 6761 * Note that after this routine runs, accmode may be zero. 6762 */ 6763 int 6764 vfs_unixify_accmode(accmode_t *accmode) 6765 { 6766 /* 6767 * There is no way to specify explicit "deny" rule using 6768 * file mode or POSIX.1e ACLs. 6769 */ 6770 if (*accmode & VEXPLICIT_DENY) { 6771 *accmode = 0; 6772 return (0); 6773 } 6774 6775 /* 6776 * None of these can be translated into usual access bits. 6777 * Also, the common case for NFSv4 ACLs is to not contain 6778 * either of these bits. Caller should check for VWRITE 6779 * on the containing directory instead. 6780 */ 6781 if (*accmode & (VDELETE_CHILD | VDELETE)) 6782 return (EPERM); 6783 6784 if (*accmode & VADMIN_PERMS) { 6785 *accmode &= ~VADMIN_PERMS; 6786 *accmode |= VADMIN; 6787 } 6788 6789 /* 6790 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6791 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6792 */ 6793 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6794 6795 return (0); 6796 } 6797 6798 /* 6799 * Clear out a doomed vnode (if any) and replace it with a new one as long 6800 * as the fs is not being unmounted. Return the root vnode to the caller. 6801 */ 6802 static int __noinline 6803 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6804 { 6805 struct vnode *vp; 6806 int error; 6807 6808 restart: 6809 if (mp->mnt_rootvnode != NULL) { 6810 MNT_ILOCK(mp); 6811 vp = mp->mnt_rootvnode; 6812 if (vp != NULL) { 6813 if (!VN_IS_DOOMED(vp)) { 6814 vrefact(vp); 6815 MNT_IUNLOCK(mp); 6816 error = vn_lock(vp, flags); 6817 if (error == 0) { 6818 *vpp = vp; 6819 return (0); 6820 } 6821 vrele(vp); 6822 goto restart; 6823 } 6824 /* 6825 * Clear the old one. 6826 */ 6827 mp->mnt_rootvnode = NULL; 6828 } 6829 MNT_IUNLOCK(mp); 6830 if (vp != NULL) { 6831 vfs_op_barrier_wait(mp); 6832 vrele(vp); 6833 } 6834 } 6835 error = VFS_CACHEDROOT(mp, flags, vpp); 6836 if (error != 0) 6837 return (error); 6838 if (mp->mnt_vfs_ops == 0) { 6839 MNT_ILOCK(mp); 6840 if (mp->mnt_vfs_ops != 0) { 6841 MNT_IUNLOCK(mp); 6842 return (0); 6843 } 6844 if (mp->mnt_rootvnode == NULL) { 6845 vrefact(*vpp); 6846 mp->mnt_rootvnode = *vpp; 6847 } else { 6848 if (mp->mnt_rootvnode != *vpp) { 6849 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6850 panic("%s: mismatch between vnode returned " 6851 " by VFS_CACHEDROOT and the one cached " 6852 " (%p != %p)", 6853 __func__, *vpp, mp->mnt_rootvnode); 6854 } 6855 } 6856 } 6857 MNT_IUNLOCK(mp); 6858 } 6859 return (0); 6860 } 6861 6862 int 6863 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6864 { 6865 struct mount_pcpu *mpcpu; 6866 struct vnode *vp; 6867 int error; 6868 6869 if (!vfs_op_thread_enter(mp, mpcpu)) 6870 return (vfs_cache_root_fallback(mp, flags, vpp)); 6871 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6872 if (vp == NULL || VN_IS_DOOMED(vp)) { 6873 vfs_op_thread_exit(mp, mpcpu); 6874 return (vfs_cache_root_fallback(mp, flags, vpp)); 6875 } 6876 vrefact(vp); 6877 vfs_op_thread_exit(mp, mpcpu); 6878 error = vn_lock(vp, flags); 6879 if (error != 0) { 6880 vrele(vp); 6881 return (vfs_cache_root_fallback(mp, flags, vpp)); 6882 } 6883 *vpp = vp; 6884 return (0); 6885 } 6886 6887 struct vnode * 6888 vfs_cache_root_clear(struct mount *mp) 6889 { 6890 struct vnode *vp; 6891 6892 /* 6893 * ops > 0 guarantees there is nobody who can see this vnode 6894 */ 6895 MPASS(mp->mnt_vfs_ops > 0); 6896 vp = mp->mnt_rootvnode; 6897 if (vp != NULL) 6898 vn_seqc_write_begin(vp); 6899 mp->mnt_rootvnode = NULL; 6900 return (vp); 6901 } 6902 6903 void 6904 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6905 { 6906 6907 MPASS(mp->mnt_vfs_ops > 0); 6908 vrefact(vp); 6909 mp->mnt_rootvnode = vp; 6910 } 6911 6912 /* 6913 * These are helper functions for filesystems to traverse all 6914 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6915 * 6916 * This interface replaces MNT_VNODE_FOREACH. 6917 */ 6918 6919 struct vnode * 6920 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6921 { 6922 struct vnode *vp; 6923 6924 maybe_yield(); 6925 MNT_ILOCK(mp); 6926 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6927 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6928 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6929 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6930 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6931 continue; 6932 VI_LOCK(vp); 6933 if (VN_IS_DOOMED(vp)) { 6934 VI_UNLOCK(vp); 6935 continue; 6936 } 6937 break; 6938 } 6939 if (vp == NULL) { 6940 __mnt_vnode_markerfree_all(mvp, mp); 6941 /* MNT_IUNLOCK(mp); -- done in above function */ 6942 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6943 return (NULL); 6944 } 6945 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6946 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6947 MNT_IUNLOCK(mp); 6948 return (vp); 6949 } 6950 6951 struct vnode * 6952 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6953 { 6954 struct vnode *vp; 6955 6956 *mvp = vn_alloc_marker(mp); 6957 MNT_ILOCK(mp); 6958 MNT_REF(mp); 6959 6960 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6961 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6962 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6963 continue; 6964 VI_LOCK(vp); 6965 if (VN_IS_DOOMED(vp)) { 6966 VI_UNLOCK(vp); 6967 continue; 6968 } 6969 break; 6970 } 6971 if (vp == NULL) { 6972 MNT_REL(mp); 6973 MNT_IUNLOCK(mp); 6974 vn_free_marker(*mvp); 6975 *mvp = NULL; 6976 return (NULL); 6977 } 6978 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6979 MNT_IUNLOCK(mp); 6980 return (vp); 6981 } 6982 6983 void 6984 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6985 { 6986 6987 if (*mvp == NULL) { 6988 MNT_IUNLOCK(mp); 6989 return; 6990 } 6991 6992 mtx_assert(MNT_MTX(mp), MA_OWNED); 6993 6994 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6995 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6996 MNT_REL(mp); 6997 MNT_IUNLOCK(mp); 6998 vn_free_marker(*mvp); 6999 *mvp = NULL; 7000 } 7001 7002 /* 7003 * These are helper functions for filesystems to traverse their 7004 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 7005 */ 7006 static void 7007 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7008 { 7009 7010 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7011 7012 MNT_ILOCK(mp); 7013 MNT_REL(mp); 7014 MNT_IUNLOCK(mp); 7015 vn_free_marker(*mvp); 7016 *mvp = NULL; 7017 } 7018 7019 /* 7020 * Relock the mp mount vnode list lock with the vp vnode interlock in the 7021 * conventional lock order during mnt_vnode_next_lazy iteration. 7022 * 7023 * On entry, the mount vnode list lock is held and the vnode interlock is not. 7024 * The list lock is dropped and reacquired. On success, both locks are held. 7025 * On failure, the mount vnode list lock is held but the vnode interlock is 7026 * not, and the procedure may have yielded. 7027 */ 7028 static bool 7029 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 7030 struct vnode *vp) 7031 { 7032 7033 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 7034 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 7035 ("%s: bad marker", __func__)); 7036 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 7037 ("%s: inappropriate vnode", __func__)); 7038 ASSERT_VI_UNLOCKED(vp, __func__); 7039 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7040 7041 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 7042 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 7043 7044 /* 7045 * Note we may be racing against vdrop which transitioned the hold 7046 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 7047 * if we are the only user after we get the interlock we will just 7048 * vdrop. 7049 */ 7050 vhold(vp); 7051 mtx_unlock(&mp->mnt_listmtx); 7052 VI_LOCK(vp); 7053 if (VN_IS_DOOMED(vp)) { 7054 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 7055 goto out_lost; 7056 } 7057 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 7058 /* 7059 * There is nothing to do if we are the last user. 7060 */ 7061 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 7062 goto out_lost; 7063 mtx_lock(&mp->mnt_listmtx); 7064 return (true); 7065 out_lost: 7066 vdropl(vp); 7067 maybe_yield(); 7068 mtx_lock(&mp->mnt_listmtx); 7069 return (false); 7070 } 7071 7072 static struct vnode * 7073 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7074 void *cbarg) 7075 { 7076 struct vnode *vp; 7077 7078 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7079 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7080 restart: 7081 vp = TAILQ_NEXT(*mvp, v_lazylist); 7082 while (vp != NULL) { 7083 if (vp->v_type == VMARKER) { 7084 vp = TAILQ_NEXT(vp, v_lazylist); 7085 continue; 7086 } 7087 /* 7088 * See if we want to process the vnode. Note we may encounter a 7089 * long string of vnodes we don't care about and hog the list 7090 * as a result. Check for it and requeue the marker. 7091 */ 7092 VNPASS(!VN_IS_DOOMED(vp), vp); 7093 if (!cb(vp, cbarg)) { 7094 if (!should_yield()) { 7095 vp = TAILQ_NEXT(vp, v_lazylist); 7096 continue; 7097 } 7098 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 7099 v_lazylist); 7100 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 7101 v_lazylist); 7102 mtx_unlock(&mp->mnt_listmtx); 7103 kern_yield(PRI_USER); 7104 mtx_lock(&mp->mnt_listmtx); 7105 goto restart; 7106 } 7107 /* 7108 * Try-lock because this is the wrong lock order. 7109 */ 7110 if (!VI_TRYLOCK(vp) && 7111 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 7112 goto restart; 7113 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 7114 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 7115 ("alien vnode on the lazy list %p %p", vp, mp)); 7116 VNPASS(vp->v_mount == mp, vp); 7117 VNPASS(!VN_IS_DOOMED(vp), vp); 7118 break; 7119 } 7120 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7121 7122 /* Check if we are done */ 7123 if (vp == NULL) { 7124 mtx_unlock(&mp->mnt_listmtx); 7125 mnt_vnode_markerfree_lazy(mvp, mp); 7126 return (NULL); 7127 } 7128 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7129 mtx_unlock(&mp->mnt_listmtx); 7130 ASSERT_VI_LOCKED(vp, "lazy iter"); 7131 return (vp); 7132 } 7133 7134 struct vnode * 7135 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7136 void *cbarg) 7137 { 7138 7139 maybe_yield(); 7140 mtx_lock(&mp->mnt_listmtx); 7141 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7142 } 7143 7144 struct vnode * 7145 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7146 void *cbarg) 7147 { 7148 struct vnode *vp; 7149 7150 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7151 return (NULL); 7152 7153 *mvp = vn_alloc_marker(mp); 7154 MNT_ILOCK(mp); 7155 MNT_REF(mp); 7156 MNT_IUNLOCK(mp); 7157 7158 mtx_lock(&mp->mnt_listmtx); 7159 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7160 if (vp == NULL) { 7161 mtx_unlock(&mp->mnt_listmtx); 7162 mnt_vnode_markerfree_lazy(mvp, mp); 7163 return (NULL); 7164 } 7165 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7166 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7167 } 7168 7169 void 7170 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7171 { 7172 7173 if (*mvp == NULL) 7174 return; 7175 7176 mtx_lock(&mp->mnt_listmtx); 7177 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7178 mtx_unlock(&mp->mnt_listmtx); 7179 mnt_vnode_markerfree_lazy(mvp, mp); 7180 } 7181 7182 int 7183 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 7184 { 7185 7186 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 7187 cnp->cn_flags &= ~NOEXECCHECK; 7188 return (0); 7189 } 7190 7191 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 7192 } 7193 7194 /* 7195 * Do not use this variant unless you have means other than the hold count 7196 * to prevent the vnode from getting freed. 7197 */ 7198 void 7199 vn_seqc_write_begin_locked(struct vnode *vp) 7200 { 7201 7202 ASSERT_VI_LOCKED(vp, __func__); 7203 VNPASS(vp->v_holdcnt > 0, vp); 7204 VNPASS(vp->v_seqc_users >= 0, vp); 7205 vp->v_seqc_users++; 7206 if (vp->v_seqc_users == 1) 7207 seqc_sleepable_write_begin(&vp->v_seqc); 7208 } 7209 7210 void 7211 vn_seqc_write_begin(struct vnode *vp) 7212 { 7213 7214 VI_LOCK(vp); 7215 vn_seqc_write_begin_locked(vp); 7216 VI_UNLOCK(vp); 7217 } 7218 7219 void 7220 vn_seqc_write_end_locked(struct vnode *vp) 7221 { 7222 7223 ASSERT_VI_LOCKED(vp, __func__); 7224 VNPASS(vp->v_seqc_users > 0, vp); 7225 vp->v_seqc_users--; 7226 if (vp->v_seqc_users == 0) 7227 seqc_sleepable_write_end(&vp->v_seqc); 7228 } 7229 7230 void 7231 vn_seqc_write_end(struct vnode *vp) 7232 { 7233 7234 VI_LOCK(vp); 7235 vn_seqc_write_end_locked(vp); 7236 VI_UNLOCK(vp); 7237 } 7238 7239 /* 7240 * Special case handling for allocating and freeing vnodes. 7241 * 7242 * The counter remains unchanged on free so that a doomed vnode will 7243 * keep testing as in modify as long as it is accessible with SMR. 7244 */ 7245 static void 7246 vn_seqc_init(struct vnode *vp) 7247 { 7248 7249 vp->v_seqc = 0; 7250 vp->v_seqc_users = 0; 7251 } 7252 7253 static void 7254 vn_seqc_write_end_free(struct vnode *vp) 7255 { 7256 7257 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7258 VNPASS(vp->v_seqc_users == 1, vp); 7259 } 7260 7261 void 7262 vn_irflag_set_locked(struct vnode *vp, short toset) 7263 { 7264 short flags; 7265 7266 ASSERT_VI_LOCKED(vp, __func__); 7267 flags = vn_irflag_read(vp); 7268 VNASSERT((flags & toset) == 0, vp, 7269 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7270 __func__, flags, toset)); 7271 atomic_store_short(&vp->v_irflag, flags | toset); 7272 } 7273 7274 void 7275 vn_irflag_set(struct vnode *vp, short toset) 7276 { 7277 7278 VI_LOCK(vp); 7279 vn_irflag_set_locked(vp, toset); 7280 VI_UNLOCK(vp); 7281 } 7282 7283 void 7284 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7285 { 7286 short flags; 7287 7288 ASSERT_VI_LOCKED(vp, __func__); 7289 flags = vn_irflag_read(vp); 7290 atomic_store_short(&vp->v_irflag, flags | toset); 7291 } 7292 7293 void 7294 vn_irflag_set_cond(struct vnode *vp, short toset) 7295 { 7296 7297 VI_LOCK(vp); 7298 vn_irflag_set_cond_locked(vp, toset); 7299 VI_UNLOCK(vp); 7300 } 7301 7302 void 7303 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7304 { 7305 short flags; 7306 7307 ASSERT_VI_LOCKED(vp, __func__); 7308 flags = vn_irflag_read(vp); 7309 VNASSERT((flags & tounset) == tounset, vp, 7310 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7311 __func__, flags, tounset)); 7312 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7313 } 7314 7315 void 7316 vn_irflag_unset(struct vnode *vp, short tounset) 7317 { 7318 7319 VI_LOCK(vp); 7320 vn_irflag_unset_locked(vp, tounset); 7321 VI_UNLOCK(vp); 7322 } 7323 7324 int 7325 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) 7326 { 7327 struct vattr vattr; 7328 int error; 7329 7330 ASSERT_VOP_LOCKED(vp, __func__); 7331 error = VOP_GETATTR(vp, &vattr, cred); 7332 if (__predict_true(error == 0)) { 7333 if (vattr.va_size <= OFF_MAX) 7334 *size = vattr.va_size; 7335 else 7336 error = EFBIG; 7337 } 7338 return (error); 7339 } 7340 7341 int 7342 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) 7343 { 7344 int error; 7345 7346 VOP_LOCK(vp, LK_SHARED); 7347 error = vn_getsize_locked(vp, size, cred); 7348 VOP_UNLOCK(vp); 7349 return (error); 7350 } 7351 7352 #ifdef INVARIANTS 7353 void 7354 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state) 7355 { 7356 7357 switch (vp->v_state) { 7358 case VSTATE_UNINITIALIZED: 7359 switch (state) { 7360 case VSTATE_CONSTRUCTED: 7361 case VSTATE_DESTROYING: 7362 return; 7363 default: 7364 break; 7365 } 7366 break; 7367 case VSTATE_CONSTRUCTED: 7368 ASSERT_VOP_ELOCKED(vp, __func__); 7369 switch (state) { 7370 case VSTATE_DESTROYING: 7371 return; 7372 default: 7373 break; 7374 } 7375 break; 7376 case VSTATE_DESTROYING: 7377 ASSERT_VOP_ELOCKED(vp, __func__); 7378 switch (state) { 7379 case VSTATE_DEAD: 7380 return; 7381 default: 7382 break; 7383 } 7384 break; 7385 case VSTATE_DEAD: 7386 switch (state) { 7387 case VSTATE_UNINITIALIZED: 7388 return; 7389 default: 7390 break; 7391 } 7392 break; 7393 } 7394 7395 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); 7396 panic("invalid state transition %d -> %d\n", vp->v_state, state); 7397 } 7398 #endif 7399