/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp, bool isvnlru);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
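 *
 * As a rough illustration only (not a tuning recommendation): with
 * desiredvnodes at 1,000,000 and the default wantfreevnodes of 25%
 * (250,000), vnlru_recalc() below computes gapvnodes = 750,000,
 * vhiwat = gapvnodes / 11 (about 68,000, i.e. ~9%) and
 * vlowat = vhiwat / 2 (about 34,000), which is where the watermarks
 * described above come from.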
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;

static u_long recycles_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets");

static u_long recycles_free_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t direct_recycles_free_count;
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
    &direct_recycles_free_count,
    "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static __read_mostly bool vnode_can_skip_requeue;
SYSCTL_BOOL(_vfs_vnode_param, OID_AUTO, can_skip_requeue, CTLFLAG_RW,
    &vnode_can_skip_requeue, 0, "Is LRU requeue skippable");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates on
 * block devices are delayed only about half the time that file data
 * is delayed.  Similarly, directory updates are more critical, so are
 * only delayed about a third the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at a
 * rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.
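 * (Queue indices wrap modulo the table size: syncer_mask below is the
 * power-of-two mask obtained from hashinit() for SYNCER_MAXDELAY slots,
 * as set up in vntblinit().)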
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;	/* gap between wanted and desired */
static u_long vhiwat;		/* enough extras after expansion */
static u_long vlowat;		/* minimal extras before expansion */
static bool vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru and
	 * getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.
		 * Return EAGAIN because a subsequent call will likely succeed
		 * (since namei will create a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Lookup the next element greater than or equal to lblkno, accounting for the
 * fact that, for pctries, negative values are greater than nonnegative ones.
 */
static struct buf *
buf_lookup_ge(struct bufv *bv, daddr_t lblkno)
{
	struct buf *bp;

	bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, lblkno);
	if (bp == NULL && lblkno < 0)
		bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, 0);
	if (bp != NULL && bp->b_lblkno < lblkno)
		bp = NULL;
	return (bp);
}

/*
 * Insert bp, and find the next element smaller than bp, accounting for the fact
 * that, for pctries, negative values are greater than nonnegative ones.
 */
static int
buf_insert_lookup_le(struct bufv *bv, struct buf *bp, struct buf **n)
{
	int error;

	error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, n);
	if (error != EEXIST) {
		if (*n == NULL && bp->b_lblkno >= 0)
			*n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, ~0L);
		if (*n != NULL && (*n)->b_lblkno >= bp->b_lblkno)
			*n = NULL;
	}
	return (error);
}

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
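 *
 * For example (arithmetic only): 512GB is 512 * 1024 * 1024 KB, so at the
 * limiting 64:1 ratio that much memory corresponds to the 8M vnode cap
 * encoded in MAXVNODES_MAX below.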
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields is permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
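	 * (uma_prealloc() below reserves nbuf trie nodes up front for this
	 * reason.)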
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	       |
 *	       +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.
	 * If the thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even different filesystem, so we have
 * to check what we got and take the slow path if so.
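 *
 * The fast path below therefore re-checks f_fsid after vfs_busy() on the
 * cached entry; a stale hit falls back to the mountlist_mtx-protected
 * scan, which also refreshes the cache slot.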
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8
 * calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
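 *
 * Concretely (as constructed in the loop below), val[1] holds the
 * filesystem type number and val[0] is a makedev(255, ...) value mixing
 * the low byte of that type with the 16-bit mntid_base counter.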
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it, making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * for more free vnodes, not reduce their count.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
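		 * (Hence the v_mount == NULL check under the interlock below.)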
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		recycles_count++;
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_free_per_call = 10000;
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine (legacy)");
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
    &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to recycle requested amount of free vnodes.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_free_per_call)
		count = max_free_per_call;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from a different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach an unmapped address even if
		 * the vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled while it claims otherwise to the
		 * caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In the presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recyclable, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out whether
		 * writes are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp, isvnlru);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
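 * (Both locked wrappers below assert MA_NOTOWNED on return for this
 * reason.)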
 */
static int
vnlru_free_locked_direct(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_locked_vnlru(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_vnlru(int count)
{

	mtx_lock(&vnode_list_mtx);
	return (vnlru_free_locked_vnlru(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru awakened due to vnode shortage");

#define VNLRU_COUNT_SLOP 100

/*
 * The main freevnodes counter is only updated when a counter local to the
 * CPU diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are
 * conditionally walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by a significant margin in practice.
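 *
 * A minimal sketch of the resulting error bound (assuming the per-CPU
 * int8_t counters defined below): each CPU folds its local delta into
 * the global counter once it reaches +/-VNLRU_FREEVNODES_SLOP, so at any
 * instant the global value can lag the true total by at most roughly
 * (VNLRU_FREEVNODES_SLOP - 1) * mp_ncpus.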
 */
#define VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes, rfreevnodes_old;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);
	rfreevnodes_old = atomic_load_long(&freevnodes_old);

	if (rfreevnodes > rfreevnodes_old)
		slop = rfreevnodes - rfreevnodes_old;
	else
		slop = rfreevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	CPU_FOREACH(cpu) {
		rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	atomic_store_long(&freevnodes_old, rfreevnodes);
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		vnlruproc_kicks++;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlru_read_freevnodes() > wantfreevnodes)
		return;

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

	if (vnlruproc_sig) {
		vnlruproc_sig = 0;
		wakeup(&vnlruproc_sig);
	}
	msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in the presence of tons
 * of free vnodes, but without having to rewrite the machinery at this time.
 * Said behavior boils down to continuously trying to reclaim all kinds of
 * vnodes (cycling through all levels of "force") when the count is
 * transiently above the limit.  This happens a lot when all vnodes are used
 * up and vn_alloc speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds [it got further down with later
 * changes].
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling), the above only tries to get it out of the
 * way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
static long
vnlru_proc_light_pick(void)
{
	u_long rnumvnodes, rfreevnodes;

	if (vstir || vnlruproc_sig == 1)
		return (-1);

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = vnlru_read_freevnodes();

	/*
	 * vnode limit might have changed and now we may be at a significant
	 * excess.  Bail if we can't sort it out with free vnodes.
	 *
	 * Due to atomic updates the count can legitimately go above
	 * the limit for a short period, don't bother doing anything in
	 * that case.
	 */
	if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
		    rfreevnodes <= wantfreevnodes) {
			return (-1);
		}

		return (rnumvnodes - desiredvnodes);
	}

	/*
	 * Don't try to reach wantfreevnodes target if there are too few vnodes
	 * to begin with.
	 */
	if (rnumvnodes < wantfreevnodes) {
		return (0);
	}

	if (rfreevnodes < wantfreevnodes) {
		return (-1);
	}

	return (0);
}

static bool
vnlru_proc_light(void)
{
	long freecount;

	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);

	if (freecount != 0) {
		vnlru_free_vnlru(freecount);
	}

	mtx_lock(&vnode_list_mtx);
	vnlru_proc_sleep();
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (true);
}

static u_long uma_reclaim_calls;
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS,
    &uma_reclaim_calls, 0, "Number of calls to uma_reclaim");

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);

		if (force == 0 && vnlru_proc_light())
			continue;

		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding free vnodes.
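		 * (The small "+ 10" slack below avoids reacting to the
		 * transient overshoot described above the uma_reclaim() call
		 * further down.)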
1804 */
1805 if (rnumvnodes > desiredvnodes + 10) {
1806 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes);
1807 mtx_lock(&vnode_list_mtx);
1808 rnumvnodes = atomic_load_long(&numvnodes);
1809 }
1810 /*
1811 * Sleep if the vnode cache is in a good state. This is
1812 * when it is not over-full and has space for about a 4%
1813 * or 9% expansion (by growing its size or modestly
1814 * reducing the free vnode count). Otherwise, try to reclaim
1815 * space for a 10% expansion.
1816 */
1817 if (vstir && force == 0) {
1818 force = 1;
1819 vstir = false;
1820 }
1821 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
1822 vnlru_proc_sleep();
1823 continue;
1824 }
1825 rfreevnodes = vnlru_read_freevnodes();
1826 
1827 onumvnodes = rnumvnodes;
1828 /*
1829 * Calculate parameters for recycling. These are the same
1830 * throughout the loop to give some semblance of fairness.
1831 * The trigger point is to avoid recycling vnodes with lots
1832 * of resident pages. We aren't trying to free memory; we
1833 * are trying to recycle or at least free vnodes.
1834 */
1835 if (rnumvnodes <= desiredvnodes)
1836 usevnodes = rnumvnodes - rfreevnodes;
1837 else
1838 usevnodes = rnumvnodes;
1839 if (usevnodes <= 0)
1840 usevnodes = 1;
1841 /*
1842 * The trigger value is chosen to be conservatively
1843 * large to ensure that it alone doesn't prevent
1844 * making progress. The value can easily be so large that
1845 * it is effectively infinite in some congested and
1846 * misconfigured cases, and this is necessary. Normally
1847 * it is about 8 to 100 (pages), which is quite large.
1848 */
1849 trigger = vm_cnt.v_page_count * 2 / usevnodes;
1850 if (force < 2)
1851 trigger = vsmalltrigger;
1852 reclaim_nc_src = force >= 3;
1853 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
1854 target = target / 10 + 1;
1855 done = vlrureclaim(reclaim_nc_src, trigger, target);
1856 mtx_unlock(&vnode_list_mtx);
1857 /*
1858 * Total number of vnodes can transiently go slightly above the
1859 * limit (see vn_alloc_hard), no need to call uma_reclaim if
1860 * this happens.
1861 */
1862 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes &&
1863 numvnodes <= desiredvnodes) {
1864 uma_reclaim_calls++;
1865 uma_reclaim(UMA_RECLAIM_DRAIN);
1866 }
1867 if (done == 0) {
1868 if (force == 0 || force == 1) {
1869 force = 2;
1870 continue;
1871 }
1872 if (force == 2) {
1873 force = 3;
1874 continue;
1875 }
1876 want_reread = true;
1877 force = 0;
1878 vnlru_nowhere++;
1879 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1880 } else {
1881 want_reread = true;
1882 kern_yield(PRI_USER);
1883 }
1884 }
1885 }
1886 
1887 static struct kproc_desc vnlru_kp = {
1888 "vnlru",
1889 vnlru_proc,
1890 &vnlruproc
1891 };
1892 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1893 &vnlru_kp);
1894 
1895 /*
1896 * Routines having to do with the management of the vnode table.
1897 */
1898 
1899 /*
1900 * Try to recycle a freed vnode.
1901 */
1902 static int
1903 vtryrecycle(struct vnode *vp, bool isvnlru)
1904 {
1905 struct mount *vnmp;
1906 
1907 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1908 VNPASS(vp->v_holdcnt > 0, vp);
1909 /*
1910 * This vnode may be found and locked via some other list, if so we
1911 * can't recycle it yet.
1912 */ 1913 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1914 CTR2(KTR_VFS, 1915 "%s: impossible to recycle, vp %p lock is already held", 1916 __func__, vp); 1917 vdrop_recycle(vp); 1918 return (EWOULDBLOCK); 1919 } 1920 /* 1921 * Don't recycle if its filesystem is being suspended. 1922 */ 1923 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1924 VOP_UNLOCK(vp); 1925 CTR2(KTR_VFS, 1926 "%s: impossible to recycle, cannot start the write for %p", 1927 __func__, vp); 1928 vdrop_recycle(vp); 1929 return (EBUSY); 1930 } 1931 /* 1932 * If we got this far, we need to acquire the interlock and see if 1933 * anyone picked up this vnode from another list. If not, we will 1934 * mark it with DOOMED via vgonel() so that anyone who does find it 1935 * will skip over it. 1936 */ 1937 VI_LOCK(vp); 1938 if (vp->v_usecount) { 1939 VOP_UNLOCK(vp); 1940 vdropl_recycle(vp); 1941 vn_finished_write(vnmp); 1942 CTR2(KTR_VFS, 1943 "%s: impossible to recycle, %p is already referenced", 1944 __func__, vp); 1945 return (EBUSY); 1946 } 1947 if (!VN_IS_DOOMED(vp)) { 1948 if (isvnlru) 1949 recycles_free_count++; 1950 else 1951 counter_u64_add(direct_recycles_free_count, 1); 1952 vgonel(vp); 1953 } 1954 VOP_UNLOCK(vp); 1955 vdropl_recycle(vp); 1956 vn_finished_write(vnmp); 1957 return (0); 1958 } 1959 1960 /* 1961 * Allocate a new vnode. 1962 * 1963 * The operation never returns an error. Returning an error was disabled 1964 * in r145385 (dated 2005) with the following comment: 1965 * 1966 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1967 * 1968 * Given the age of this commit (almost 15 years at the time of writing this 1969 * comment) restoring the ability to fail requires a significant audit of 1970 * all codepaths. 1971 * 1972 * The routine can try to free a vnode or stall for up to 1 second waiting for 1973 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1974 */ 1975 static u_long vn_alloc_cyclecount; 1976 static u_long vn_alloc_sleeps; 1977 1978 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1979 "Number of times vnode allocation blocked waiting on vnlru"); 1980 1981 static struct vnode * __noinline 1982 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped) 1983 { 1984 u_long rfreevnodes; 1985 1986 if (bumped) { 1987 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) { 1988 atomic_subtract_long(&numvnodes, 1); 1989 bumped = false; 1990 } 1991 } 1992 1993 mtx_lock(&vnode_list_mtx); 1994 1995 if (vn_alloc_cyclecount != 0) { 1996 rnumvnodes = atomic_load_long(&numvnodes); 1997 if (rnumvnodes + 1 < desiredvnodes) { 1998 vn_alloc_cyclecount = 0; 1999 mtx_unlock(&vnode_list_mtx); 2000 goto alloc; 2001 } 2002 2003 rfreevnodes = vnlru_read_freevnodes(); 2004 if (rfreevnodes < wantfreevnodes) { 2005 if (vn_alloc_cyclecount++ >= rfreevnodes) { 2006 vn_alloc_cyclecount = 0; 2007 vstir = true; 2008 } 2009 } else { 2010 vn_alloc_cyclecount = 0; 2011 } 2012 } 2013 2014 /* 2015 * Grow the vnode cache if it will not be above its target max after 2016 * growing. Otherwise, if there is at least one free vnode, try to 2017 * reclaim 1 item from it before growing the cache (possibly above its 2018 * target max if the reclamation failed or is delayed). 2019 */ 2020 if (vnlru_free_locked_direct(1) > 0) 2021 goto alloc; 2022 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 2023 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 2024 /* 2025 * Wait for space for a new vnode. 
2026 */
2027 if (bumped) {
2028 atomic_subtract_long(&numvnodes, 1);
2029 bumped = false;
2030 }
2031 mtx_lock(&vnode_list_mtx);
2032 vnlru_kick_locked();
2033 vn_alloc_sleeps++;
2034 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
2035 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
2036 vnlru_read_freevnodes() > 1)
2037 vnlru_free_locked_direct(1);
2038 else
2039 mtx_unlock(&vnode_list_mtx);
2040 }
2041 alloc:
2042 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
2043 if (!bumped)
2044 atomic_add_long(&numvnodes, 1);
2045 vnlru_kick_cond();
2046 return (uma_zalloc_smr(vnode_zone, M_WAITOK));
2047 }
2048 
2049 static struct vnode *
2050 vn_alloc(struct mount *mp)
2051 {
2052 u_long rnumvnodes;
2053 
2054 if (__predict_false(vn_alloc_cyclecount != 0))
2055 return (vn_alloc_hard(mp, 0, false));
2056 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
2057 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) {
2058 return (vn_alloc_hard(mp, rnumvnodes, true));
2059 }
2060 
2061 return (uma_zalloc_smr(vnode_zone, M_WAITOK));
2062 }
2063 
2064 static void
2065 vn_free(struct vnode *vp)
2066 {
2067 
2068 atomic_subtract_long(&numvnodes, 1);
2069 uma_zfree_smr(vnode_zone, vp);
2070 }
2071 
2072 /*
2073 * Allocate a new vnode.
2074 */
2075 int
2076 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
2077 struct vnode **vpp)
2078 {
2079 struct vnode *vp;
2080 struct thread *td;
2081 struct lock_object *lo;
2082 
2083 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
2084 
2085 KASSERT(vops->registered,
2086 ("%s: not registered vector op %p\n", __func__, vops));
2087 cache_validate_vop_vector(mp, vops);
2088 
2089 td = curthread;
2090 if (td->td_vp_reserved != NULL) {
2091 vp = td->td_vp_reserved;
2092 td->td_vp_reserved = NULL;
2093 } else {
2094 vp = vn_alloc(mp);
2095 }
2096 counter_u64_add(vnodes_created, 1);
2097 
2098 vn_set_state(vp, VSTATE_UNINITIALIZED);
2099 
2100 /*
2101 * Locks are given the generic name "vnode" when created.
2102 * Follow the historic practice of using the filesystem
2103 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
2104 *
2105 * Locks live in a witness group keyed on their name. Thus,
2106 * when a lock is renamed, it must also move from the witness
2107 * group of its old name to the witness group of its new name.
2108 *
2109 * The change only needs to be made when the vnode moves
2110 * from one filesystem type to another. We ensure that each
2111 * filesystem uses a single static name pointer for its tag so
2112 * that we can compare pointers rather than doing a strcmp().
2113 */
2114 lo = &vp->v_vnlock->lock_object;
2115 #ifdef WITNESS
2116 if (lo->lo_name != tag) {
2117 #endif
2118 lo->lo_name = tag;
2119 #ifdef WITNESS
2120 WITNESS_DESTROY(lo);
2121 WITNESS_INIT(lo, tag);
2122 }
2123 #endif
2124 /*
2125 * By default, don't allow shared locks unless filesystems opt-in.
2126 */
2127 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
2128 /*
2129 * Finalize various vnode identity bits.
2130 */ 2131 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2132 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2133 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2134 vp->v_type = VNON; 2135 vp->v_op = vops; 2136 vp->v_irflag = 0; 2137 v_init_counters(vp); 2138 vn_seqc_init(vp); 2139 vp->v_bufobj.bo_ops = &buf_ops_bio; 2140 #ifdef DIAGNOSTIC 2141 if (mp == NULL && vops != &dead_vnodeops) 2142 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2143 #endif 2144 #ifdef MAC 2145 mac_vnode_init(vp); 2146 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2147 mac_vnode_associate_singlelabel(mp, vp); 2148 #endif 2149 if (mp != NULL) { 2150 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2151 } 2152 2153 /* 2154 * For the filesystems which do not use vfs_hash_insert(), 2155 * still initialize v_hash to have vfs_hash_index() useful. 2156 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2157 * its own hashing. 2158 */ 2159 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2160 2161 *vpp = vp; 2162 return (0); 2163 } 2164 2165 void 2166 getnewvnode_reserve(void) 2167 { 2168 struct thread *td; 2169 2170 td = curthread; 2171 MPASS(td->td_vp_reserved == NULL); 2172 td->td_vp_reserved = vn_alloc(NULL); 2173 } 2174 2175 void 2176 getnewvnode_drop_reserve(void) 2177 { 2178 struct thread *td; 2179 2180 td = curthread; 2181 if (td->td_vp_reserved != NULL) { 2182 vn_free(td->td_vp_reserved); 2183 td->td_vp_reserved = NULL; 2184 } 2185 } 2186 2187 static void __noinline 2188 freevnode(struct vnode *vp) 2189 { 2190 struct bufobj *bo; 2191 2192 /* 2193 * The vnode has been marked for destruction, so free it. 2194 * 2195 * The vnode will be returned to the zone where it will 2196 * normally remain until it is needed for another vnode. We 2197 * need to cleanup (or verify that the cleanup has already 2198 * been done) any residual data left from its current use 2199 * so as not to contaminate the freshly allocated vnode. 2200 */ 2201 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2202 /* 2203 * Paired with vgone. 2204 */ 2205 vn_seqc_write_end_free(vp); 2206 2207 bo = &vp->v_bufobj; 2208 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2209 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2210 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2211 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2212 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2213 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2214 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2215 ("clean blk trie not empty")); 2216 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2217 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2218 ("dirty blk trie not empty")); 2219 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2220 ("Leaked inactivation")); 2221 VI_UNLOCK(vp); 2222 cache_assert_no_entries(vp); 2223 2224 #ifdef MAC 2225 mac_vnode_destroy(vp); 2226 #endif 2227 if (vp->v_pollinfo != NULL) { 2228 /* 2229 * Use LK_NOWAIT to shut up witness about the lock. We may get 2230 * here while having another vnode locked when trying to 2231 * satisfy a lookup and needing to recycle. 
2232 */ 2233 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2234 destroy_vpollinfo(vp->v_pollinfo); 2235 VOP_UNLOCK(vp); 2236 vp->v_pollinfo = NULL; 2237 } 2238 vp->v_mountedhere = NULL; 2239 vp->v_unpcb = NULL; 2240 vp->v_rdev = NULL; 2241 vp->v_fifoinfo = NULL; 2242 vp->v_iflag = 0; 2243 vp->v_vflag = 0; 2244 bo->bo_flag = 0; 2245 vn_free(vp); 2246 } 2247 2248 /* 2249 * Delete from old mount point vnode list, if on one. 2250 */ 2251 static void 2252 delmntque(struct vnode *vp) 2253 { 2254 struct mount *mp; 2255 2256 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2257 2258 mp = vp->v_mount; 2259 MNT_ILOCK(mp); 2260 VI_LOCK(vp); 2261 vp->v_mount = NULL; 2262 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2263 ("bad mount point vnode list size")); 2264 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2265 mp->mnt_nvnodelistsize--; 2266 MNT_REL(mp); 2267 MNT_IUNLOCK(mp); 2268 /* 2269 * The caller expects the interlock to be still held. 2270 */ 2271 ASSERT_VI_LOCKED(vp, __func__); 2272 } 2273 2274 static int 2275 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2276 { 2277 2278 KASSERT(vp->v_mount == NULL, 2279 ("insmntque: vnode already on per mount vnode list")); 2280 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2281 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2282 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2283 } else { 2284 KASSERT(!dtr, 2285 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2286 __func__)); 2287 } 2288 2289 /* 2290 * We acquire the vnode interlock early to ensure that the 2291 * vnode cannot be recycled by another process releasing a 2292 * holdcnt on it before we get it on both the vnode list 2293 * and the active vnode list. The mount mutex protects only 2294 * manipulation of the vnode list and the vnode freelist 2295 * mutex protects only manipulation of the active vnode list. 2296 * Hence the need to hold the vnode interlock throughout. 2297 */ 2298 MNT_ILOCK(mp); 2299 VI_LOCK(vp); 2300 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2301 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2302 mp->mnt_nvnodelistsize == 0)) && 2303 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2304 VI_UNLOCK(vp); 2305 MNT_IUNLOCK(mp); 2306 if (dtr) { 2307 vp->v_data = NULL; 2308 vp->v_op = &dead_vnodeops; 2309 vgone(vp); 2310 vput(vp); 2311 } 2312 return (EBUSY); 2313 } 2314 vp->v_mount = mp; 2315 MNT_REF(mp); 2316 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2317 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2318 ("neg mount point vnode list size")); 2319 mp->mnt_nvnodelistsize++; 2320 VI_UNLOCK(vp); 2321 MNT_IUNLOCK(mp); 2322 return (0); 2323 } 2324 2325 /* 2326 * Insert into list of vnodes for the new mount point, if available. 2327 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2328 * leaves handling of the vnode to the caller. 2329 */ 2330 int 2331 insmntque(struct vnode *vp, struct mount *mp) 2332 { 2333 return (insmntque1_int(vp, mp, true)); 2334 } 2335 2336 int 2337 insmntque1(struct vnode *vp, struct mount *mp) 2338 { 2339 return (insmntque1_int(vp, mp, false)); 2340 } 2341 2342 /* 2343 * Flush out and invalidate all buffers associated with a bufobj 2344 * Called with the underlying object locked. 
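 *
 * An illustrative sketch (assumed usage, not taken from this file):
 * filesystem reclaim code usually reaches this function through
 * vinvalbuf() with the vnode exclusively locked, e.g.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * where V_SAVE requests that dirty buffers be written out before they
 * are invalidated.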
2345 */ 2346 int 2347 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2348 { 2349 int error; 2350 2351 BO_LOCK(bo); 2352 if (flags & V_SAVE) { 2353 error = bufobj_wwait(bo, slpflag, slptimeo); 2354 if (error) { 2355 BO_UNLOCK(bo); 2356 return (error); 2357 } 2358 if (bo->bo_dirty.bv_cnt > 0) { 2359 BO_UNLOCK(bo); 2360 do { 2361 error = BO_SYNC(bo, MNT_WAIT); 2362 } while (error == ERELOOKUP); 2363 if (error != 0) 2364 return (error); 2365 BO_LOCK(bo); 2366 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2367 BO_UNLOCK(bo); 2368 return (EBUSY); 2369 } 2370 } 2371 } 2372 /* 2373 * If you alter this loop please notice that interlock is dropped and 2374 * reacquired in flushbuflist. Special care is needed to ensure that 2375 * no race conditions occur from this. 2376 */ 2377 do { 2378 error = flushbuflist(&bo->bo_clean, 2379 flags, bo, slpflag, slptimeo); 2380 if (error == 0 && !(flags & V_CLEANONLY)) 2381 error = flushbuflist(&bo->bo_dirty, 2382 flags, bo, slpflag, slptimeo); 2383 if (error != 0 && error != EAGAIN) { 2384 BO_UNLOCK(bo); 2385 return (error); 2386 } 2387 } while (error != 0); 2388 2389 /* 2390 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2391 * have write I/O in-progress but if there is a VM object then the 2392 * VM object can also have read-I/O in-progress. 2393 */ 2394 do { 2395 bufobj_wwait(bo, 0, 0); 2396 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2397 BO_UNLOCK(bo); 2398 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2399 BO_LOCK(bo); 2400 } 2401 } while (bo->bo_numoutput > 0); 2402 BO_UNLOCK(bo); 2403 2404 /* 2405 * Destroy the copy in the VM cache, too. 2406 */ 2407 if (bo->bo_object != NULL && 2408 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2409 VM_OBJECT_WLOCK(bo->bo_object); 2410 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2411 OBJPR_CLEANONLY : 0); 2412 VM_OBJECT_WUNLOCK(bo->bo_object); 2413 } 2414 2415 #ifdef INVARIANTS 2416 BO_LOCK(bo); 2417 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2418 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2419 bo->bo_clean.bv_cnt > 0)) 2420 panic("vinvalbuf: flush failed"); 2421 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2422 bo->bo_dirty.bv_cnt > 0) 2423 panic("vinvalbuf: flush dirty failed"); 2424 BO_UNLOCK(bo); 2425 #endif 2426 return (0); 2427 } 2428 2429 /* 2430 * Flush out and invalidate all buffers associated with a vnode. 2431 * Called with the underlying object locked. 2432 */ 2433 int 2434 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2435 { 2436 2437 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2438 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2439 if (vp->v_object != NULL && vp->v_object->handle != vp) 2440 return (0); 2441 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2442 } 2443 2444 /* 2445 * Flush out buffers on the specified list. 2446 * 2447 */ 2448 static int 2449 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2450 int slptimeo) 2451 { 2452 struct buf *bp, *nbp; 2453 int retval, error; 2454 daddr_t lblkno; 2455 b_xflags_t xflags; 2456 2457 ASSERT_BO_WLOCKED(bo); 2458 2459 retval = 0; 2460 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2461 /* 2462 * If we are flushing both V_NORMAL and V_ALT buffers then 2463 * do not skip any buffers. If we are flushing only V_NORMAL 2464 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2465 * flushing only V_ALT buffers then skip buffers not marked 2466 * as BX_ALTDATA. 2467 */ 2468 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2469 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2470 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2471 continue; 2472 } 2473 if (nbp != NULL) { 2474 lblkno = nbp->b_lblkno; 2475 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2476 } 2477 retval = EAGAIN; 2478 error = BUF_TIMELOCK(bp, 2479 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2480 "flushbuf", slpflag, slptimeo); 2481 if (error) { 2482 BO_LOCK(bo); 2483 return (error != ENOLCK ? error : EAGAIN); 2484 } 2485 KASSERT(bp->b_bufobj == bo, 2486 ("bp %p wrong b_bufobj %p should be %p", 2487 bp, bp->b_bufobj, bo)); 2488 /* 2489 * XXX Since there are no node locks for NFS, I 2490 * believe there is a slight chance that a delayed 2491 * write will occur while sleeping just above, so 2492 * check for it. 2493 */ 2494 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2495 (flags & V_SAVE)) { 2496 bremfree(bp); 2497 bp->b_flags |= B_ASYNC; 2498 bwrite(bp); 2499 BO_LOCK(bo); 2500 return (EAGAIN); /* XXX: why not loop ? */ 2501 } 2502 bremfree(bp); 2503 bp->b_flags |= (B_INVAL | B_RELBUF); 2504 bp->b_flags &= ~B_ASYNC; 2505 brelse(bp); 2506 BO_LOCK(bo); 2507 if (nbp == NULL) 2508 break; 2509 nbp = gbincore(bo, lblkno); 2510 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2511 != xflags) 2512 break; /* nbp invalid */ 2513 } 2514 return (retval); 2515 } 2516 2517 int 2518 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2519 { 2520 struct buf *bp; 2521 int error; 2522 daddr_t lblkno; 2523 2524 ASSERT_BO_LOCKED(bo); 2525 2526 for (lblkno = startn;;) { 2527 again: 2528 bp = buf_lookup_ge(bufv, lblkno); 2529 if (bp == NULL || bp->b_lblkno >= endn) 2530 break; 2531 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2532 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2533 if (error != 0) { 2534 BO_RLOCK(bo); 2535 if (error == ENOLCK) 2536 goto again; 2537 return (error); 2538 } 2539 KASSERT(bp->b_bufobj == bo, 2540 ("bp %p wrong b_bufobj %p should be %p", 2541 bp, bp->b_bufobj, bo)); 2542 lblkno = bp->b_lblkno + 1; 2543 if ((bp->b_flags & B_MANAGED) == 0) 2544 bremfree(bp); 2545 bp->b_flags |= B_RELBUF; 2546 /* 2547 * In the VMIO case, use the B_NOREUSE flag to hint that the 2548 * pages backing each buffer in the range are unlikely to be 2549 * reused. Dirty buffers will have the hint applied once 2550 * they've been written. 2551 */ 2552 if ((bp->b_flags & B_VMIO) != 0) 2553 bp->b_flags |= B_NOREUSE; 2554 brelse(bp); 2555 BO_RLOCK(bo); 2556 } 2557 return (0); 2558 } 2559 2560 /* 2561 * Truncate a file's buffer and pages to a specified length. This 2562 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2563 * sync activity. 2564 */ 2565 int 2566 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2567 { 2568 struct buf *bp, *nbp; 2569 struct bufobj *bo; 2570 daddr_t startlbn; 2571 2572 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2573 vp, blksize, (uintmax_t)length); 2574 2575 /* 2576 * Round up to the *next* lbn. 2577 */ 2578 startlbn = howmany(length, blksize); 2579 2580 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2581 2582 bo = &vp->v_bufobj; 2583 restart_unlocked: 2584 BO_LOCK(bo); 2585 2586 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2587 ; 2588 2589 if (length > 0) { 2590 /* 2591 * Write out vnode metadata, e.g. 
indirect blocks. 2592 */ 2593 restartsync: 2594 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2595 if (bp->b_lblkno >= 0) 2596 continue; 2597 /* 2598 * Since we hold the vnode lock this should only 2599 * fail if we're racing with the buf daemon. 2600 */ 2601 if (BUF_LOCK(bp, 2602 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2603 BO_LOCKPTR(bo)) == ENOLCK) 2604 goto restart_unlocked; 2605 2606 VNASSERT((bp->b_flags & B_DELWRI), vp, 2607 ("buf(%p) on dirty queue without DELWRI", bp)); 2608 2609 bremfree(bp); 2610 bawrite(bp); 2611 BO_LOCK(bo); 2612 goto restartsync; 2613 } 2614 } 2615 2616 bufobj_wwait(bo, 0, 0); 2617 BO_UNLOCK(bo); 2618 vnode_pager_setsize(vp, length); 2619 2620 return (0); 2621 } 2622 2623 /* 2624 * Invalidate the cached pages of a file's buffer within the range of block 2625 * numbers [startlbn, endlbn). 2626 */ 2627 void 2628 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2629 int blksize) 2630 { 2631 struct bufobj *bo; 2632 off_t start, end; 2633 2634 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2635 2636 start = blksize * startlbn; 2637 end = blksize * endlbn; 2638 2639 bo = &vp->v_bufobj; 2640 BO_LOCK(bo); 2641 MPASS(blksize == bo->bo_bsize); 2642 2643 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2644 ; 2645 2646 BO_UNLOCK(bo); 2647 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2648 } 2649 2650 static int 2651 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2652 daddr_t startlbn, daddr_t endlbn) 2653 { 2654 struct bufv *bv; 2655 struct buf *bp, *nbp; 2656 uint8_t anyfreed; 2657 bool clean; 2658 2659 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2660 ASSERT_BO_LOCKED(bo); 2661 2662 anyfreed = 1; 2663 clean = true; 2664 do { 2665 bv = clean ? &bo->bo_clean : &bo->bo_dirty; 2666 bp = buf_lookup_ge(bv, startlbn); 2667 if (bp == NULL) 2668 continue; 2669 TAILQ_FOREACH_FROM_SAFE(bp, &bv->bv_hd, b_bobufs, nbp) { 2670 if (bp->b_lblkno >= endlbn) 2671 break; 2672 if (BUF_LOCK(bp, 2673 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2674 BO_LOCKPTR(bo)) == ENOLCK) { 2675 BO_LOCK(bo); 2676 return (EAGAIN); 2677 } 2678 2679 bremfree(bp); 2680 bp->b_flags |= B_INVAL | B_RELBUF; 2681 bp->b_flags &= ~B_ASYNC; 2682 brelse(bp); 2683 anyfreed = 2; 2684 2685 BO_LOCK(bo); 2686 if (nbp != NULL && 2687 (((nbp->b_xflags & 2688 (clean ? BX_VNCLEAN : BX_VNDIRTY)) == 0) || 2689 nbp->b_vp != vp || 2690 (nbp->b_flags & B_DELWRI) == (clean? B_DELWRI: 0))) 2691 return (EAGAIN); 2692 } 2693 } while (clean = !clean, anyfreed-- > 0); 2694 return (0); 2695 } 2696 2697 static void 2698 buf_vlist_remove(struct buf *bp) 2699 { 2700 struct bufv *bv; 2701 b_xflags_t flags; 2702 2703 flags = bp->b_xflags; 2704 2705 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2706 ASSERT_BO_WLOCKED(bp->b_bufobj); 2707 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2708 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2709 ("%s: buffer %p has invalid queue state", __func__, bp)); 2710 2711 if ((flags & BX_VNDIRTY) != 0) 2712 bv = &bp->b_bufobj->bo_dirty; 2713 else 2714 bv = &bp->b_bufobj->bo_clean; 2715 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2716 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2717 bv->bv_cnt--; 2718 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2719 } 2720 2721 /* 2722 * Add the buffer to the sorted clean or dirty block list. Return zero on 2723 * success, EEXIST if a buffer with this identity already exists, or another 2724 * error on allocation failure. 
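 *
 * A sketch of how the callers below treat the result (illustrative only;
 * the real error handling lives in those callers): buf_vlist_add() panics
 * on any error, while bgetvp() tolerates EEXIST as a lost insertion race:
 *
 *	error = buf_vlist_find_or_add(bp, bo, BX_VNCLEAN);
 *	if (error != 0 && error != EEXIST)
 *		panic("unexpected buffer insertion failure");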
2725 */ 2726 static inline int 2727 buf_vlist_find_or_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2728 { 2729 struct bufv *bv; 2730 struct buf *n; 2731 int error; 2732 2733 ASSERT_BO_WLOCKED(bo); 2734 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2735 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2736 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2737 ("dead bo %p", bo)); 2738 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags, 2739 ("buf_vlist_add: b_xflags %#x not set on bp %p", xflags, bp)); 2740 2741 if (xflags & BX_VNDIRTY) 2742 bv = &bo->bo_dirty; 2743 else 2744 bv = &bo->bo_clean; 2745 2746 error = buf_insert_lookup_le(bv, bp, &n); 2747 if (n == NULL) { 2748 KASSERT(error != EEXIST, 2749 ("buf_vlist_add: EEXIST but no existing buf found: bp %p", 2750 bp)); 2751 } else { 2752 KASSERT(n->b_lblkno <= bp->b_lblkno, 2753 ("buf_vlist_add: out of order insert/lookup: bp %p n %p", 2754 bp, n)); 2755 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST), 2756 ("buf_vlist_add: inconsistent result for existing buf: " 2757 "error %d bp %p n %p", error, bp, n)); 2758 } 2759 if (error != 0) 2760 return (error); 2761 2762 /* Keep the list ordered. */ 2763 if (n == NULL) { 2764 KASSERT(TAILQ_EMPTY(&bv->bv_hd) || 2765 bp->b_lblkno < TAILQ_FIRST(&bv->bv_hd)->b_lblkno, 2766 ("buf_vlist_add: queue order: " 2767 "%p should be before first %p", 2768 bp, TAILQ_FIRST(&bv->bv_hd))); 2769 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2770 } else { 2771 KASSERT(TAILQ_NEXT(n, b_bobufs) == NULL || 2772 bp->b_lblkno < TAILQ_NEXT(n, b_bobufs)->b_lblkno, 2773 ("buf_vlist_add: queue order: " 2774 "%p should be before next %p", 2775 bp, TAILQ_NEXT(n, b_bobufs))); 2776 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2777 } 2778 2779 bv->bv_cnt++; 2780 return (0); 2781 } 2782 2783 /* 2784 * Add the buffer to the sorted clean or dirty block list. 2785 * 2786 * NOTE: xflags is passed as a constant, optimizing this inline function! 2787 */ 2788 static void 2789 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2790 { 2791 int error; 2792 2793 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0, 2794 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2795 bp->b_xflags |= xflags; 2796 error = buf_vlist_find_or_add(bp, bo, xflags); 2797 if (error) 2798 panic("buf_vlist_add: error=%d", error); 2799 } 2800 2801 /* 2802 * Look up a buffer using the buffer tries. 2803 */ 2804 struct buf * 2805 gbincore(struct bufobj *bo, daddr_t lblkno) 2806 { 2807 struct buf *bp; 2808 2809 ASSERT_BO_LOCKED(bo); 2810 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2811 if (bp != NULL) 2812 return (bp); 2813 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2814 } 2815 2816 /* 2817 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2818 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2819 * stability of the result. Like other lockless lookups, the found buf may 2820 * already be invalid by the time this function returns. 2821 */ 2822 struct buf * 2823 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2824 { 2825 struct buf *bp; 2826 2827 ASSERT_BO_UNLOCKED(bo); 2828 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2829 if (bp != NULL) 2830 return (bp); 2831 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2832 } 2833 2834 /* 2835 * Associate a buffer with a vnode. 
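 *
 * Illustrative usage (hypothetical caller, not taken from this file): a
 * getblk()-style allocator sets up the identity of a freshly created
 * buffer and then tries to attach it:
 *
 *	bp->b_lblkno = lblkno;
 *	error = bgetvp(vp, bp);
 *	if (error != 0)
 *		... EEXIST: lost a race, release bp and redo the lookup ...
 *
 * On success the vnode gains a hold reference which brelvp() drops later.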
2836 */ 2837 int 2838 bgetvp(struct vnode *vp, struct buf *bp) 2839 { 2840 struct bufobj *bo; 2841 int error; 2842 2843 bo = &vp->v_bufobj; 2844 ASSERT_BO_UNLOCKED(bo); 2845 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2846 2847 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2848 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2849 ("bgetvp: bp already attached! %p", bp)); 2850 2851 /* 2852 * Add the buf to the vnode's clean list unless we lost a race and find 2853 * an existing buf in either dirty or clean. 2854 */ 2855 bp->b_vp = vp; 2856 bp->b_bufobj = bo; 2857 bp->b_xflags |= BX_VNCLEAN; 2858 error = EEXIST; 2859 BO_LOCK(bo); 2860 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL) 2861 error = buf_vlist_find_or_add(bp, bo, BX_VNCLEAN); 2862 BO_UNLOCK(bo); 2863 if (__predict_true(error == 0)) { 2864 vhold(vp); 2865 return (0); 2866 } 2867 if (error != EEXIST) 2868 panic("bgetvp: buf_vlist_add error: %d", error); 2869 bp->b_vp = NULL; 2870 bp->b_bufobj = NULL; 2871 bp->b_xflags &= ~BX_VNCLEAN; 2872 return (error); 2873 } 2874 2875 /* 2876 * Disassociate a buffer from a vnode. 2877 */ 2878 void 2879 brelvp(struct buf *bp) 2880 { 2881 struct bufobj *bo; 2882 struct vnode *vp; 2883 2884 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2885 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2886 2887 /* 2888 * Delete from old vnode list, if on one. 2889 */ 2890 vp = bp->b_vp; /* XXX */ 2891 bo = bp->b_bufobj; 2892 BO_LOCK(bo); 2893 buf_vlist_remove(bp); 2894 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2895 bo->bo_flag &= ~BO_ONWORKLST; 2896 mtx_lock(&sync_mtx); 2897 LIST_REMOVE(bo, bo_synclist); 2898 syncer_worklist_len--; 2899 mtx_unlock(&sync_mtx); 2900 } 2901 bp->b_vp = NULL; 2902 bp->b_bufobj = NULL; 2903 BO_UNLOCK(bo); 2904 vdrop(vp); 2905 } 2906 2907 /* 2908 * Add an item to the syncer work queue. 
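 *
 * To illustrate the slot arithmetic below (example numbers only, assuming
 * the default 32-slot syncer wheel): with syncer_delayno == 10 and
 * delay == 30 the entry lands in slot (10 + 30) & 31 == 8, i.e. it will be
 * visited roughly 30 seconds from now, since the syncer advances one slot
 * per second.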
2909 */ 2910 static void 2911 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2912 { 2913 int slot; 2914 2915 ASSERT_BO_WLOCKED(bo); 2916 2917 mtx_lock(&sync_mtx); 2918 if (bo->bo_flag & BO_ONWORKLST) 2919 LIST_REMOVE(bo, bo_synclist); 2920 else { 2921 bo->bo_flag |= BO_ONWORKLST; 2922 syncer_worklist_len++; 2923 } 2924 2925 if (delay > syncer_maxdelay - 2) 2926 delay = syncer_maxdelay - 2; 2927 slot = (syncer_delayno + delay) & syncer_mask; 2928 2929 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2930 mtx_unlock(&sync_mtx); 2931 } 2932 2933 static int 2934 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2935 { 2936 int error, len; 2937 2938 mtx_lock(&sync_mtx); 2939 len = syncer_worklist_len - sync_vnode_count; 2940 mtx_unlock(&sync_mtx); 2941 error = SYSCTL_OUT(req, &len, sizeof(len)); 2942 return (error); 2943 } 2944 2945 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2946 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2947 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2948 2949 static struct proc *updateproc; 2950 static void sched_sync(void); 2951 static struct kproc_desc up_kp = { 2952 "syncer", 2953 sched_sync, 2954 &updateproc 2955 }; 2956 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2957 2958 static int 2959 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2960 { 2961 struct vnode *vp; 2962 struct mount *mp; 2963 2964 *bo = LIST_FIRST(slp); 2965 if (*bo == NULL) 2966 return (0); 2967 vp = bo2vnode(*bo); 2968 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2969 return (1); 2970 /* 2971 * We use vhold in case the vnode does not 2972 * successfully sync. vhold prevents the vnode from 2973 * going away when we unlock the sync_mtx so that 2974 * we can acquire the vnode interlock. 2975 */ 2976 vholdl(vp); 2977 mtx_unlock(&sync_mtx); 2978 VI_UNLOCK(vp); 2979 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2980 vdrop(vp); 2981 mtx_lock(&sync_mtx); 2982 return (*bo == LIST_FIRST(slp)); 2983 } 2984 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2985 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2986 ("suspended mp syncing vp %p", vp)); 2987 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2988 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2989 VOP_UNLOCK(vp); 2990 vn_finished_write(mp); 2991 BO_LOCK(*bo); 2992 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2993 /* 2994 * Put us back on the worklist. The worklist 2995 * routine will remove us from our current 2996 * position and then add us back in at a later 2997 * position. 2998 */ 2999 vn_syncer_add_to_worklist(*bo, syncdelay); 3000 } 3001 BO_UNLOCK(*bo); 3002 vdrop(vp); 3003 mtx_lock(&sync_mtx); 3004 return (0); 3005 } 3006 3007 static int first_printf = 1; 3008 3009 /* 3010 * System filesystem synchronizer daemon. 
3011 */ 3012 static void 3013 sched_sync(void) 3014 { 3015 struct synclist *next, *slp; 3016 struct bufobj *bo; 3017 long starttime; 3018 struct thread *td = curthread; 3019 int last_work_seen; 3020 int net_worklist_len; 3021 int syncer_final_iter; 3022 int error; 3023 3024 last_work_seen = 0; 3025 syncer_final_iter = 0; 3026 syncer_state = SYNCER_RUNNING; 3027 starttime = time_uptime; 3028 td->td_pflags |= TDP_NORUNNINGBUF; 3029 3030 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 3031 SHUTDOWN_PRI_LAST); 3032 3033 mtx_lock(&sync_mtx); 3034 for (;;) { 3035 if (syncer_state == SYNCER_FINAL_DELAY && 3036 syncer_final_iter == 0) { 3037 mtx_unlock(&sync_mtx); 3038 kproc_suspend_check(td->td_proc); 3039 mtx_lock(&sync_mtx); 3040 } 3041 net_worklist_len = syncer_worklist_len - sync_vnode_count; 3042 if (syncer_state != SYNCER_RUNNING && 3043 starttime != time_uptime) { 3044 if (first_printf) { 3045 printf("\nSyncing disks, vnodes remaining... "); 3046 first_printf = 0; 3047 } 3048 printf("%d ", net_worklist_len); 3049 } 3050 starttime = time_uptime; 3051 3052 /* 3053 * Push files whose dirty time has expired. Be careful 3054 * of interrupt race on slp queue. 3055 * 3056 * Skip over empty worklist slots when shutting down. 3057 */ 3058 do { 3059 slp = &syncer_workitem_pending[syncer_delayno]; 3060 syncer_delayno += 1; 3061 if (syncer_delayno == syncer_maxdelay) 3062 syncer_delayno = 0; 3063 next = &syncer_workitem_pending[syncer_delayno]; 3064 /* 3065 * If the worklist has wrapped since the 3066 * it was emptied of all but syncer vnodes, 3067 * switch to the FINAL_DELAY state and run 3068 * for one more second. 3069 */ 3070 if (syncer_state == SYNCER_SHUTTING_DOWN && 3071 net_worklist_len == 0 && 3072 last_work_seen == syncer_delayno) { 3073 syncer_state = SYNCER_FINAL_DELAY; 3074 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 3075 } 3076 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 3077 syncer_worklist_len > 0); 3078 3079 /* 3080 * Keep track of the last time there was anything 3081 * on the worklist other than syncer vnodes. 3082 * Return to the SHUTTING_DOWN state if any 3083 * new work appears. 3084 */ 3085 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 3086 last_work_seen = syncer_delayno; 3087 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 3088 syncer_state = SYNCER_SHUTTING_DOWN; 3089 while (!LIST_EMPTY(slp)) { 3090 error = sync_vnode(slp, &bo, td); 3091 if (error == 1) { 3092 LIST_REMOVE(bo, bo_synclist); 3093 LIST_INSERT_HEAD(next, bo, bo_synclist); 3094 continue; 3095 } 3096 3097 if (first_printf == 0) { 3098 /* 3099 * Drop the sync mutex, because some watchdog 3100 * drivers need to sleep while patting 3101 */ 3102 mtx_unlock(&sync_mtx); 3103 wdog_kern_pat(WD_LASTVAL); 3104 mtx_lock(&sync_mtx); 3105 } 3106 } 3107 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 3108 syncer_final_iter--; 3109 /* 3110 * The variable rushjob allows the kernel to speed up the 3111 * processing of the filesystem syncer process. A rushjob 3112 * value of N tells the filesystem syncer to process the next 3113 * N seconds worth of work on its queue ASAP. Currently rushjob 3114 * is used by the soft update code to speed up the filesystem 3115 * syncer process when the incore state is getting so far 3116 * ahead of the disk that the kernel memory pool is being 3117 * threatened with exhaustion. 
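 *
 * As an illustration (assuming the default syncdelay of 30 seconds):
 * speedup_syncer() below never pushes rushjob past syncdelay / 2, so at
 * most about 15 seconds worth of worklist slots are processed
 * back-to-back before the syncer falls back to its one-slot-per-second
 * pace.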
3118 */ 3119 if (rushjob > 0) { 3120 rushjob -= 1; 3121 continue; 3122 } 3123 /* 3124 * Just sleep for a short period of time between 3125 * iterations when shutting down to allow some I/O 3126 * to happen. 3127 * 3128 * If it has taken us less than a second to process the 3129 * current work, then wait. Otherwise start right over 3130 * again. We can still lose time if any single round 3131 * takes more than two seconds, but it does not really 3132 * matter as we are just trying to generally pace the 3133 * filesystem activity. 3134 */ 3135 if (syncer_state != SYNCER_RUNNING || 3136 time_uptime == starttime) { 3137 thread_lock(td); 3138 sched_prio(td, PPAUSE); 3139 thread_unlock(td); 3140 } 3141 if (syncer_state != SYNCER_RUNNING) 3142 cv_timedwait(&sync_wakeup, &sync_mtx, 3143 hz / SYNCER_SHUTDOWN_SPEEDUP); 3144 else if (time_uptime == starttime) 3145 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3146 } 3147 } 3148 3149 /* 3150 * Request the syncer daemon to speed up its work. 3151 * We never push it to speed up more than half of its 3152 * normal turn time, otherwise it could take over the cpu. 3153 */ 3154 int 3155 speedup_syncer(void) 3156 { 3157 int ret = 0; 3158 3159 mtx_lock(&sync_mtx); 3160 if (rushjob < syncdelay / 2) { 3161 rushjob += 1; 3162 stat_rush_requests += 1; 3163 ret = 1; 3164 } 3165 mtx_unlock(&sync_mtx); 3166 cv_broadcast(&sync_wakeup); 3167 return (ret); 3168 } 3169 3170 /* 3171 * Tell the syncer to speed up its work and run though its work 3172 * list several times, then tell it to shut down. 3173 */ 3174 static void 3175 syncer_shutdown(void *arg, int howto) 3176 { 3177 3178 if (howto & RB_NOSYNC) 3179 return; 3180 mtx_lock(&sync_mtx); 3181 syncer_state = SYNCER_SHUTTING_DOWN; 3182 rushjob = 0; 3183 mtx_unlock(&sync_mtx); 3184 cv_broadcast(&sync_wakeup); 3185 kproc_shutdown(arg, howto); 3186 } 3187 3188 void 3189 syncer_suspend(void) 3190 { 3191 3192 syncer_shutdown(updateproc, 0); 3193 } 3194 3195 void 3196 syncer_resume(void) 3197 { 3198 3199 mtx_lock(&sync_mtx); 3200 first_printf = 1; 3201 syncer_state = SYNCER_RUNNING; 3202 mtx_unlock(&sync_mtx); 3203 cv_broadcast(&sync_wakeup); 3204 kproc_resume(updateproc); 3205 } 3206 3207 /* 3208 * Move the buffer between the clean and dirty lists of its vnode. 3209 */ 3210 void 3211 reassignbuf(struct buf *bp) 3212 { 3213 struct vnode *vp; 3214 struct bufobj *bo; 3215 int delay; 3216 #ifdef INVARIANTS 3217 struct bufv *bv; 3218 #endif 3219 3220 vp = bp->b_vp; 3221 bo = bp->b_bufobj; 3222 3223 KASSERT((bp->b_flags & B_PAGING) == 0, 3224 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3225 3226 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3227 bp, bp->b_vp, bp->b_flags); 3228 3229 BO_LOCK(bo); 3230 if ((bo->bo_flag & BO_NONSTERILE) == 0) { 3231 /* 3232 * Coordinate with getblk's unlocked lookup. Make 3233 * BO_NONSTERILE visible before the first reassignbuf produces 3234 * any side effect. This could be outside the bo lock if we 3235 * used a separate atomic flag field. 3236 */ 3237 bo->bo_flag |= BO_NONSTERILE; 3238 atomic_thread_fence_rel(); 3239 } 3240 buf_vlist_remove(bp); 3241 3242 /* 3243 * If dirty, put on list of dirty buffers; otherwise insert onto list 3244 * of clean buffers. 
3245 */ 3246 if (bp->b_flags & B_DELWRI) { 3247 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3248 switch (vp->v_type) { 3249 case VDIR: 3250 delay = dirdelay; 3251 break; 3252 case VCHR: 3253 delay = metadelay; 3254 break; 3255 default: 3256 delay = filedelay; 3257 } 3258 vn_syncer_add_to_worklist(bo, delay); 3259 } 3260 buf_vlist_add(bp, bo, BX_VNDIRTY); 3261 } else { 3262 buf_vlist_add(bp, bo, BX_VNCLEAN); 3263 3264 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3265 mtx_lock(&sync_mtx); 3266 LIST_REMOVE(bo, bo_synclist); 3267 syncer_worklist_len--; 3268 mtx_unlock(&sync_mtx); 3269 bo->bo_flag &= ~BO_ONWORKLST; 3270 } 3271 } 3272 #ifdef INVARIANTS 3273 bv = &bo->bo_clean; 3274 bp = TAILQ_FIRST(&bv->bv_hd); 3275 KASSERT(bp == NULL || bp->b_bufobj == bo, 3276 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3277 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3278 KASSERT(bp == NULL || bp->b_bufobj == bo, 3279 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3280 bv = &bo->bo_dirty; 3281 bp = TAILQ_FIRST(&bv->bv_hd); 3282 KASSERT(bp == NULL || bp->b_bufobj == bo, 3283 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3284 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3285 KASSERT(bp == NULL || bp->b_bufobj == bo, 3286 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3287 #endif 3288 BO_UNLOCK(bo); 3289 } 3290 3291 static void 3292 v_init_counters(struct vnode *vp) 3293 { 3294 3295 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3296 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3297 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3298 3299 refcount_init(&vp->v_holdcnt, 1); 3300 refcount_init(&vp->v_usecount, 1); 3301 } 3302 3303 /* 3304 * Get a usecount on a vnode. 3305 * 3306 * vget and vget_finish may fail to lock the vnode if they lose a race against 3307 * it being doomed. LK_RETRY can be passed in flags to lock it anyway. 3308 * 3309 * Consumers which don't guarantee liveness of the vnode can use SMR to 3310 * try to get a reference. Note this operation can fail since the vnode 3311 * may be awaiting getting freed by the time they get to it. 
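 *
 * An illustrative sketch of the lockless pattern (hypothetical caller; the
 * real consumers live in the name cache and similar lockless lookup code):
 *
 *	vfs_smr_enter();
 *	vp = ... lockless lookup ...;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		... the vnode is being freed, restart the lookup ...
 *	error = vget_finish(vp, LK_SHARED, vs);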
3312 */ 3313 enum vgetstate 3314 vget_prep_smr(struct vnode *vp) 3315 { 3316 enum vgetstate vs; 3317 3318 VFS_SMR_ASSERT_ENTERED(); 3319 3320 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3321 vs = VGET_USECOUNT; 3322 } else { 3323 if (vhold_smr(vp)) 3324 vs = VGET_HOLDCNT; 3325 else 3326 vs = VGET_NONE; 3327 } 3328 return (vs); 3329 } 3330 3331 enum vgetstate 3332 vget_prep(struct vnode *vp) 3333 { 3334 enum vgetstate vs; 3335 3336 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3337 vs = VGET_USECOUNT; 3338 } else { 3339 vhold(vp); 3340 vs = VGET_HOLDCNT; 3341 } 3342 return (vs); 3343 } 3344 3345 void 3346 vget_abort(struct vnode *vp, enum vgetstate vs) 3347 { 3348 3349 switch (vs) { 3350 case VGET_USECOUNT: 3351 vrele(vp); 3352 break; 3353 case VGET_HOLDCNT: 3354 vdrop(vp); 3355 break; 3356 default: 3357 __assert_unreachable(); 3358 } 3359 } 3360 3361 int 3362 vget(struct vnode *vp, int flags) 3363 { 3364 enum vgetstate vs; 3365 3366 vs = vget_prep(vp); 3367 return (vget_finish(vp, flags, vs)); 3368 } 3369 3370 int 3371 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3372 { 3373 int error; 3374 3375 if ((flags & LK_INTERLOCK) != 0) 3376 ASSERT_VI_LOCKED(vp, __func__); 3377 else 3378 ASSERT_VI_UNLOCKED(vp, __func__); 3379 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3380 VNPASS(vp->v_holdcnt > 0, vp); 3381 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3382 3383 error = vn_lock(vp, flags); 3384 if (__predict_false(error != 0)) { 3385 vget_abort(vp, vs); 3386 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3387 vp); 3388 return (error); 3389 } 3390 3391 vget_finish_ref(vp, vs); 3392 return (0); 3393 } 3394 3395 void 3396 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3397 { 3398 int old; 3399 3400 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3401 VNPASS(vp->v_holdcnt > 0, vp); 3402 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3403 3404 if (vs == VGET_USECOUNT) 3405 return; 3406 3407 /* 3408 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3409 * the vnode around. Otherwise someone else lended their hold count and 3410 * we have to drop ours. 3411 */ 3412 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3413 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3414 if (old != 0) { 3415 #ifdef INVARIANTS 3416 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3417 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3418 #else 3419 refcount_release(&vp->v_holdcnt); 3420 #endif 3421 } 3422 } 3423 3424 void 3425 vref(struct vnode *vp) 3426 { 3427 enum vgetstate vs; 3428 3429 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3430 vs = vget_prep(vp); 3431 vget_finish_ref(vp, vs); 3432 } 3433 3434 void 3435 vrefact(struct vnode *vp) 3436 { 3437 int old __diagused; 3438 3439 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3440 old = refcount_acquire(&vp->v_usecount); 3441 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3442 } 3443 3444 void 3445 vlazy(struct vnode *vp) 3446 { 3447 struct mount *mp; 3448 3449 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3450 3451 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3452 return; 3453 /* 3454 * We may get here for inactive routines after the vnode got doomed. 
3455 */ 3456 if (VN_IS_DOOMED(vp)) 3457 return; 3458 mp = vp->v_mount; 3459 mtx_lock(&mp->mnt_listmtx); 3460 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3461 vp->v_mflag |= VMP_LAZYLIST; 3462 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3463 mp->mnt_lazyvnodelistsize++; 3464 } 3465 mtx_unlock(&mp->mnt_listmtx); 3466 } 3467 3468 static void 3469 vunlazy(struct vnode *vp) 3470 { 3471 struct mount *mp; 3472 3473 ASSERT_VI_LOCKED(vp, __func__); 3474 VNPASS(!VN_IS_DOOMED(vp), vp); 3475 3476 mp = vp->v_mount; 3477 mtx_lock(&mp->mnt_listmtx); 3478 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3479 /* 3480 * Don't remove the vnode from the lazy list if another thread 3481 * has increased the hold count. It may have re-enqueued the 3482 * vnode to the lazy list and is now responsible for its 3483 * removal. 3484 */ 3485 if (vp->v_holdcnt == 0) { 3486 vp->v_mflag &= ~VMP_LAZYLIST; 3487 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3488 mp->mnt_lazyvnodelistsize--; 3489 } 3490 mtx_unlock(&mp->mnt_listmtx); 3491 } 3492 3493 /* 3494 * This routine is only meant to be called from vgonel prior to dooming 3495 * the vnode. 3496 */ 3497 static void 3498 vunlazy_gone(struct vnode *vp) 3499 { 3500 struct mount *mp; 3501 3502 ASSERT_VOP_ELOCKED(vp, __func__); 3503 ASSERT_VI_LOCKED(vp, __func__); 3504 VNPASS(!VN_IS_DOOMED(vp), vp); 3505 3506 if (vp->v_mflag & VMP_LAZYLIST) { 3507 mp = vp->v_mount; 3508 mtx_lock(&mp->mnt_listmtx); 3509 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3510 vp->v_mflag &= ~VMP_LAZYLIST; 3511 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3512 mp->mnt_lazyvnodelistsize--; 3513 mtx_unlock(&mp->mnt_listmtx); 3514 } 3515 } 3516 3517 static void 3518 vdefer_inactive(struct vnode *vp) 3519 { 3520 3521 ASSERT_VI_LOCKED(vp, __func__); 3522 VNPASS(vp->v_holdcnt > 0, vp); 3523 if (VN_IS_DOOMED(vp)) { 3524 vdropl(vp); 3525 return; 3526 } 3527 if (vp->v_iflag & VI_DEFINACT) { 3528 VNPASS(vp->v_holdcnt > 1, vp); 3529 vdropl(vp); 3530 return; 3531 } 3532 if (vp->v_usecount > 0) { 3533 vp->v_iflag &= ~VI_OWEINACT; 3534 vdropl(vp); 3535 return; 3536 } 3537 vlazy(vp); 3538 vp->v_iflag |= VI_DEFINACT; 3539 VI_UNLOCK(vp); 3540 atomic_add_long(&deferred_inact, 1); 3541 } 3542 3543 static void 3544 vdefer_inactive_unlocked(struct vnode *vp) 3545 { 3546 3547 VI_LOCK(vp); 3548 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3549 vdropl(vp); 3550 return; 3551 } 3552 vdefer_inactive(vp); 3553 } 3554 3555 enum vput_op { VRELE, VPUT, VUNREF }; 3556 3557 /* 3558 * Handle ->v_usecount transitioning to 0. 3559 * 3560 * By releasing the last usecount we take ownership of the hold count which 3561 * provides liveness of the vnode, meaning we have to vdrop. 3562 * 3563 * For all vnodes we may need to perform inactive processing. It requires an 3564 * exclusive lock on the vnode, while it is legal to call here with only a 3565 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3566 * inactive processing gets deferred to the syncer. 3567 * 3568 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3569 * on the lock being held all the way until VOP_INACTIVE. This in particular 3570 * happens with UFS which adds half-constructed vnodes to the hash, where they 3571 * can be found by other code. 
3572 */ 3573 static void 3574 vput_final(struct vnode *vp, enum vput_op func) 3575 { 3576 int error; 3577 bool want_unlock; 3578 3579 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3580 VNPASS(vp->v_holdcnt > 0, vp); 3581 3582 VI_LOCK(vp); 3583 3584 /* 3585 * By the time we got here someone else might have transitioned 3586 * the count back to > 0. 3587 */ 3588 if (vp->v_usecount > 0) 3589 goto out; 3590 3591 /* 3592 * If the vnode is doomed vgone already performed inactive processing 3593 * (if needed). 3594 */ 3595 if (VN_IS_DOOMED(vp)) 3596 goto out; 3597 3598 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3599 goto out; 3600 3601 if (vp->v_iflag & VI_DOINGINACT) 3602 goto out; 3603 3604 /* 3605 * Locking operations here will drop the interlock and possibly the 3606 * vnode lock, opening a window where the vnode can get doomed all the 3607 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3608 * perform inactive. 3609 */ 3610 vp->v_iflag |= VI_OWEINACT; 3611 want_unlock = false; 3612 error = 0; 3613 switch (func) { 3614 case VRELE: 3615 switch (VOP_ISLOCKED(vp)) { 3616 case LK_EXCLUSIVE: 3617 break; 3618 case LK_EXCLOTHER: 3619 case 0: 3620 want_unlock = true; 3621 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3622 VI_LOCK(vp); 3623 break; 3624 default: 3625 /* 3626 * The lock has at least one sharer, but we have no way 3627 * to conclude whether this is us. Play it safe and 3628 * defer processing. 3629 */ 3630 error = EAGAIN; 3631 break; 3632 } 3633 break; 3634 case VPUT: 3635 want_unlock = true; 3636 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3637 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3638 LK_NOWAIT); 3639 VI_LOCK(vp); 3640 } 3641 break; 3642 case VUNREF: 3643 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3644 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3645 VI_LOCK(vp); 3646 } 3647 break; 3648 } 3649 if (error == 0) { 3650 if (func == VUNREF) { 3651 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3652 ("recursive vunref")); 3653 vp->v_vflag |= VV_UNREF; 3654 } 3655 for (;;) { 3656 error = vinactive(vp); 3657 if (want_unlock) 3658 VOP_UNLOCK(vp); 3659 if (error != ERELOOKUP || !want_unlock) 3660 break; 3661 VOP_LOCK(vp, LK_EXCLUSIVE); 3662 } 3663 if (func == VUNREF) 3664 vp->v_vflag &= ~VV_UNREF; 3665 vdropl(vp); 3666 } else { 3667 vdefer_inactive(vp); 3668 } 3669 return; 3670 out: 3671 if (func == VPUT) 3672 VOP_UNLOCK(vp); 3673 vdropl(vp); 3674 } 3675 3676 /* 3677 * Decrement ->v_usecount for a vnode. 3678 * 3679 * Releasing the last use count requires additional processing, see vput_final 3680 * above for details. 3681 * 3682 * Comment above each variant denotes lock state on entry and exit. 
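 *
 * A small illustrative sketch (not tied to any particular caller): code
 * which obtained the vnode with vget(vp, LK_EXCLUSIVE) typically finishes
 * with
 *
 *	vput(vp);
 *
 * dropping the lock and the use count together, whereas code which already
 * issued VOP_UNLOCK(vp) on its own finishes with vrele(vp).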
3683 */ 3684 3685 /* 3686 * in: any 3687 * out: same as passed in 3688 */ 3689 void 3690 vrele(struct vnode *vp) 3691 { 3692 3693 ASSERT_VI_UNLOCKED(vp, __func__); 3694 if (!refcount_release(&vp->v_usecount)) 3695 return; 3696 vput_final(vp, VRELE); 3697 } 3698 3699 /* 3700 * in: locked 3701 * out: unlocked 3702 */ 3703 void 3704 vput(struct vnode *vp) 3705 { 3706 3707 ASSERT_VOP_LOCKED(vp, __func__); 3708 ASSERT_VI_UNLOCKED(vp, __func__); 3709 if (!refcount_release(&vp->v_usecount)) { 3710 VOP_UNLOCK(vp); 3711 return; 3712 } 3713 vput_final(vp, VPUT); 3714 } 3715 3716 /* 3717 * in: locked 3718 * out: locked 3719 */ 3720 void 3721 vunref(struct vnode *vp) 3722 { 3723 3724 ASSERT_VOP_LOCKED(vp, __func__); 3725 ASSERT_VI_UNLOCKED(vp, __func__); 3726 if (!refcount_release(&vp->v_usecount)) 3727 return; 3728 vput_final(vp, VUNREF); 3729 } 3730 3731 void 3732 vhold(struct vnode *vp) 3733 { 3734 int old; 3735 3736 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3737 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3738 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3739 ("%s: wrong hold count %d", __func__, old)); 3740 if (old == 0) 3741 vfs_freevnodes_dec(); 3742 } 3743 3744 void 3745 vholdnz(struct vnode *vp) 3746 { 3747 3748 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3749 #ifdef INVARIANTS 3750 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3751 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3752 ("%s: wrong hold count %d", __func__, old)); 3753 #else 3754 atomic_add_int(&vp->v_holdcnt, 1); 3755 #endif 3756 } 3757 3758 /* 3759 * Grab a hold count unless the vnode is freed. 3760 * 3761 * Only use this routine if vfs smr is the only protection you have against 3762 * freeing the vnode. 3763 * 3764 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3765 * is not set. After the flag is set the vnode becomes immutable to anyone but 3766 * the thread which managed to set the flag. 3767 * 3768 * It may be tempting to replace the loop with: 3769 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3770 * if (count & VHOLD_NO_SMR) { 3771 * backpedal and error out; 3772 * } 3773 * 3774 * However, while this is more performant, it hinders debugging by eliminating 3775 * the previously mentioned invariant. 3776 */ 3777 bool 3778 vhold_smr(struct vnode *vp) 3779 { 3780 int count; 3781 3782 VFS_SMR_ASSERT_ENTERED(); 3783 3784 count = atomic_load_int(&vp->v_holdcnt); 3785 for (;;) { 3786 if (count & VHOLD_NO_SMR) { 3787 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3788 ("non-zero hold count with flags %d\n", count)); 3789 return (false); 3790 } 3791 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3792 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3793 if (count == 0) 3794 vfs_freevnodes_dec(); 3795 return (true); 3796 } 3797 } 3798 } 3799 3800 /* 3801 * Hold a free vnode for recycling. 3802 * 3803 * Note: vnode_init references this comment. 3804 * 3805 * Attempts to recycle only need the global vnode list lock and have no use for 3806 * SMR. 3807 * 3808 * However, vnodes get inserted into the global list before they get fully 3809 * initialized and stay there until UMA decides to free the memory. This in 3810 * particular means the target can be found before it becomes usable and after 3811 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3812 * VHOLD_NO_SMR. 3813 * 3814 * Note: the vnode may gain more references after we transition the count 0->1. 
3815 */
3816 static bool
3817 vhold_recycle_free(struct vnode *vp)
3818 {
3819 int count;
3820 
3821 mtx_assert(&vnode_list_mtx, MA_OWNED);
3822 
3823 count = atomic_load_int(&vp->v_holdcnt);
3824 for (;;) {
3825 if (count & VHOLD_NO_SMR) {
3826 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3827 ("non-zero hold count with flags %d\n", count));
3828 return (false);
3829 }
3830 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3831 if (count > 0) {
3832 return (false);
3833 }
3834 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3835 vfs_freevnodes_dec();
3836 return (true);
3837 }
3838 }
3839 }
3840 
3841 static void __noinline
3842 vdbatch_process(struct vdbatch *vd)
3843 {
3844 struct vnode *vp;
3845 int i;
3846 
3847 mtx_assert(&vd->lock, MA_OWNED);
3848 MPASS(curthread->td_pinned > 0);
3849 MPASS(vd->index == VDBATCH_SIZE);
3850 
3851 /*
3852 * Attempt to requeue the passed batch, but give up easily.
3853 *
3854 * Despite batching, the mechanism is prone to transient *significant*
3855 * lock contention, where vnode_list_mtx becomes the primary bottleneck
3856 * if multiple CPUs get here (one real-world example is highly parallel
3857 * do-nothing make, which will stat *tons* of vnodes). Since it is
3858 * quasi-LRU (read: not that great even if fully honoured), provide an
3859 * option to just dodge the problem. Parties which don't like it are
3860 * welcome to implement something better.
3861 */
3862 if (vnode_can_skip_requeue) {
3863 if (!mtx_trylock(&vnode_list_mtx)) {
3864 counter_u64_add(vnode_skipped_requeues, 1);
3865 critical_enter();
3866 for (i = 0; i < VDBATCH_SIZE; i++) {
3867 vp = vd->tab[i];
3868 vd->tab[i] = NULL;
3869 MPASS(vp->v_dbatchcpu != NOCPU);
3870 vp->v_dbatchcpu = NOCPU;
3871 }
3872 vd->index = 0;
3873 critical_exit();
3874 return;
3875 
3876 }
3877 /* fallthrough to locked processing */
3878 } else {
3879 mtx_lock(&vnode_list_mtx);
3880 }
3881 
3882 mtx_assert(&vnode_list_mtx, MA_OWNED);
3883 critical_enter();
3884 for (i = 0; i < VDBATCH_SIZE; i++) {
3885 vp = vd->tab[i];
3886 vd->tab[i] = NULL;
3887 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3888 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3889 MPASS(vp->v_dbatchcpu != NOCPU);
3890 vp->v_dbatchcpu = NOCPU;
3891 }
3892 mtx_unlock(&vnode_list_mtx);
3893 vd->index = 0;
3894 critical_exit();
3895 }
3896 
3897 static void
3898 vdbatch_enqueue(struct vnode *vp)
3899 {
3900 struct vdbatch *vd;
3901 
3902 ASSERT_VI_LOCKED(vp, __func__);
3903 VNPASS(!VN_IS_DOOMED(vp), vp);
3904 
3905 if (vp->v_dbatchcpu != NOCPU) {
3906 VI_UNLOCK(vp);
3907 return;
3908 }
3909 
3910 sched_pin();
3911 vd = DPCPU_PTR(vd);
3912 mtx_lock(&vd->lock);
3913 MPASS(vd->index < VDBATCH_SIZE);
3914 MPASS(vd->tab[vd->index] == NULL);
3915 /*
3916 * A hack: we depend on being pinned so that we know what to put in
3917 * ->v_dbatchcpu.
3918 */
3919 vp->v_dbatchcpu = curcpu;
3920 vd->tab[vd->index] = vp;
3921 vd->index++;
3922 VI_UNLOCK(vp);
3923 if (vd->index == VDBATCH_SIZE)
3924 vdbatch_process(vd);
3925 mtx_unlock(&vd->lock);
3926 sched_unpin();
3927 }
3928 
3929 /*
3930 * This routine must only be called for vnodes which are about to be
3931 * deallocated. Supporting dequeue for arbitrary vnodes would require
3932 * validating that the locked batch matches.
3933 */ 3934 static void 3935 vdbatch_dequeue(struct vnode *vp) 3936 { 3937 struct vdbatch *vd; 3938 int i; 3939 short cpu; 3940 3941 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3942 3943 cpu = vp->v_dbatchcpu; 3944 if (cpu == NOCPU) 3945 return; 3946 3947 vd = DPCPU_ID_PTR(cpu, vd); 3948 mtx_lock(&vd->lock); 3949 for (i = 0; i < vd->index; i++) { 3950 if (vd->tab[i] != vp) 3951 continue; 3952 vp->v_dbatchcpu = NOCPU; 3953 vd->index--; 3954 vd->tab[i] = vd->tab[vd->index]; 3955 vd->tab[vd->index] = NULL; 3956 break; 3957 } 3958 mtx_unlock(&vd->lock); 3959 /* 3960 * Either we dequeued the vnode above or the target CPU beat us to it. 3961 */ 3962 MPASS(vp->v_dbatchcpu == NOCPU); 3963 } 3964 3965 /* 3966 * Drop the hold count of the vnode. 3967 * 3968 * It will only get freed if this is the last hold *and* it has been vgone'd. 3969 * 3970 * Because the vnode vm object keeps a hold reference on the vnode if 3971 * there is at least one resident non-cached page, the vnode cannot 3972 * leave the active list without the page cleanup done. 3973 */ 3974 static void __noinline 3975 vdropl_final(struct vnode *vp) 3976 { 3977 3978 ASSERT_VI_LOCKED(vp, __func__); 3979 VNPASS(VN_IS_DOOMED(vp), vp); 3980 /* 3981 * Set the VHOLD_NO_SMR flag. 3982 * 3983 * We may be racing against vhold_smr. If they win we can just pretend 3984 * we never got this far, they will vdrop later. 3985 */ 3986 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3987 vfs_freevnodes_inc(); 3988 VI_UNLOCK(vp); 3989 /* 3990 * We lost the aforementioned race. Any subsequent access is 3991 * invalid as they might have managed to vdropl on their own. 3992 */ 3993 return; 3994 } 3995 /* 3996 * Don't bump freevnodes as this one is going away. 3997 */ 3998 freevnode(vp); 3999 } 4000 4001 void 4002 vdrop(struct vnode *vp) 4003 { 4004 4005 ASSERT_VI_UNLOCKED(vp, __func__); 4006 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4007 if (refcount_release_if_not_last(&vp->v_holdcnt)) 4008 return; 4009 VI_LOCK(vp); 4010 vdropl(vp); 4011 } 4012 4013 static __always_inline void 4014 vdropl_impl(struct vnode *vp, bool enqueue) 4015 { 4016 4017 ASSERT_VI_LOCKED(vp, __func__); 4018 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4019 if (!refcount_release(&vp->v_holdcnt)) { 4020 VI_UNLOCK(vp); 4021 return; 4022 } 4023 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 4024 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4025 if (VN_IS_DOOMED(vp)) { 4026 vdropl_final(vp); 4027 return; 4028 } 4029 4030 vfs_freevnodes_inc(); 4031 if (vp->v_mflag & VMP_LAZYLIST) { 4032 vunlazy(vp); 4033 } 4034 4035 if (!enqueue) { 4036 VI_UNLOCK(vp); 4037 return; 4038 } 4039 4040 /* 4041 * Also unlocks the interlock. We can't assert on it as we 4042 * released our hold and by now the vnode might have been 4043 * freed. 4044 */ 4045 vdbatch_enqueue(vp); 4046 } 4047 4048 void 4049 vdropl(struct vnode *vp) 4050 { 4051 4052 vdropl_impl(vp, true); 4053 } 4054 4055 /* 4056 * vdrop a vnode when recycling. 4057 * 4058 * This is a special case routine only to be used when recycling, differs from 4059 * regular vdrop by not requeueing the vnode on the LRU. 4060 * 4061 * Consider a case where vtryrecycle continuously fails with all vnodes (due to, 4062 * e.g., frozen writes on the filesystem), filling the batch and causing it to 4063 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 4064 * loop which can last for as long as writes are frozen.
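 *
 * By skipping the requeue such vnodes stay where they are on the list
 * instead of being rotated back to the tail, so a failed recycling pass
 * does not immediately hand vnlru the same set of vnodes again.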
4065 */ 4066 static void 4067 vdropl_recycle(struct vnode *vp) 4068 { 4069 4070 vdropl_impl(vp, false); 4071 } 4072 4073 static void 4074 vdrop_recycle(struct vnode *vp) 4075 { 4076 4077 VI_LOCK(vp); 4078 vdropl_recycle(vp); 4079 } 4080 4081 /* 4082 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 4083 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 4084 */ 4085 static int 4086 vinactivef(struct vnode *vp) 4087 { 4088 int error; 4089 4090 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4091 ASSERT_VI_LOCKED(vp, "vinactive"); 4092 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 4093 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4094 vp->v_iflag |= VI_DOINGINACT; 4095 vp->v_iflag &= ~VI_OWEINACT; 4096 VI_UNLOCK(vp); 4097 4098 /* 4099 * Before moving off the active list, we must be sure that any 4100 * modified pages are converted into the vnode's dirty 4101 * buffers, since these will no longer be checked once the 4102 * vnode is on the inactive list. 4103 * 4104 * The write-out of the dirty pages is asynchronous. At the 4105 * point that VOP_INACTIVE() is called, there could still be 4106 * pending I/O and dirty pages in the object. 4107 */ 4108 if ((vp->v_vflag & VV_NOSYNC) == 0) 4109 vnode_pager_clean_async(vp); 4110 4111 error = VOP_INACTIVE(vp); 4112 VI_LOCK(vp); 4113 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 4114 vp->v_iflag &= ~VI_DOINGINACT; 4115 return (error); 4116 } 4117 4118 int 4119 vinactive(struct vnode *vp) 4120 { 4121 4122 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4123 ASSERT_VI_LOCKED(vp, "vinactive"); 4124 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4125 4126 if ((vp->v_iflag & VI_OWEINACT) == 0) 4127 return (0); 4128 if (vp->v_iflag & VI_DOINGINACT) 4129 return (0); 4130 if (vp->v_usecount > 0) { 4131 vp->v_iflag &= ~VI_OWEINACT; 4132 return (0); 4133 } 4134 return (vinactivef(vp)); 4135 } 4136 4137 /* 4138 * Remove any vnodes in the vnode table belonging to mount point mp. 4139 * 4140 * If FORCECLOSE is not specified, there should not be any active ones, 4141 * return error if any are found (nb: this is a user error, not a 4142 * system error). If FORCECLOSE is specified, detach any active vnodes 4143 * that are found. 4144 * 4145 * If WRITECLOSE is set, only flush out regular file vnodes open for 4146 * writing. 4147 * 4148 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4149 * 4150 * `rootrefs' specifies the base reference count for the root vnode 4151 * of this filesystem. The root vnode is considered busy if its 4152 * v_usecount exceeds this value. On a successful return, vflush(, td) 4153 * will call vrele() on the root vnode exactly rootrefs times. 4154 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4155 * be zero. 4156 */ 4157 #ifdef DIAGNOSTIC 4158 static int busyprt = 0; /* print out busy vnodes */ 4159 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4160 #endif 4161 4162 int 4163 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4164 { 4165 struct vnode *vp, *mvp, *rootvp = NULL; 4166 struct vattr vattr; 4167 int busy = 0, error; 4168 4169 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4170 rootrefs, flags); 4171 if (rootrefs > 0) { 4172 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4173 ("vflush: bad args")); 4174 /* 4175 * Get the filesystem root vnode. We can vput() it 4176 * immediately, since with rootrefs > 0, it won't go away. 
4177 */ 4178 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 4179 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 4180 __func__, error); 4181 return (error); 4182 } 4183 vput(rootvp); 4184 } 4185 loop: 4186 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 4187 vholdl(vp); 4188 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 4189 if (error) { 4190 vdrop(vp); 4191 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4192 goto loop; 4193 } 4194 /* 4195 * Skip over vnodes marked VV_SYSTEM. 4196 */ 4197 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 4198 VOP_UNLOCK(vp); 4199 vdrop(vp); 4200 continue; 4201 } 4202 /* 4203 * If WRITECLOSE is set, flush out unlinked but still open 4204 * files (even if open only for reading) and regular file 4205 * vnodes open for writing. 4206 */ 4207 if (flags & WRITECLOSE) { 4208 vnode_pager_clean_async(vp); 4209 do { 4210 error = VOP_FSYNC(vp, MNT_WAIT, td); 4211 } while (error == ERELOOKUP); 4212 if (error != 0) { 4213 VOP_UNLOCK(vp); 4214 vdrop(vp); 4215 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4216 return (error); 4217 } 4218 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 4219 VI_LOCK(vp); 4220 4221 if ((vp->v_type == VNON || 4222 (error == 0 && vattr.va_nlink > 0)) && 4223 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 4224 VOP_UNLOCK(vp); 4225 vdropl(vp); 4226 continue; 4227 } 4228 } else 4229 VI_LOCK(vp); 4230 /* 4231 * With v_usecount == 0, all we need to do is clear out the 4232 * vnode data structures and we are done. 4233 * 4234 * If FORCECLOSE is set, forcibly close the vnode. 4235 */ 4236 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 4237 vgonel(vp); 4238 } else { 4239 busy++; 4240 #ifdef DIAGNOSTIC 4241 if (busyprt) 4242 vn_printf(vp, "vflush: busy vnode "); 4243 #endif 4244 } 4245 VOP_UNLOCK(vp); 4246 vdropl(vp); 4247 } 4248 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 4249 /* 4250 * If just the root vnode is busy, and if its refcount 4251 * is equal to `rootrefs', then go ahead and kill it. 4252 */ 4253 VI_LOCK(rootvp); 4254 KASSERT(busy > 0, ("vflush: not busy")); 4255 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4256 ("vflush: usecount %d < rootrefs %d", 4257 rootvp->v_usecount, rootrefs)); 4258 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4259 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4260 vgone(rootvp); 4261 VOP_UNLOCK(rootvp); 4262 busy = 0; 4263 } else 4264 VI_UNLOCK(rootvp); 4265 } 4266 if (busy) { 4267 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4268 busy); 4269 return (EBUSY); 4270 } 4271 for (; rootrefs > 0; rootrefs--) 4272 vrele(rootvp); 4273 return (0); 4274 } 4275 4276 /* 4277 * Recycle an unused vnode. 4278 */ 4279 int 4280 vrecycle(struct vnode *vp) 4281 { 4282 int recycled; 4283 4284 VI_LOCK(vp); 4285 recycled = vrecyclel(vp); 4286 VI_UNLOCK(vp); 4287 return (recycled); 4288 } 4289 4290 /* 4291 * vrecycle, with the vp interlock held. 4292 */ 4293 int 4294 vrecyclel(struct vnode *vp) 4295 { 4296 int recycled; 4297 4298 ASSERT_VOP_ELOCKED(vp, __func__); 4299 ASSERT_VI_LOCKED(vp, __func__); 4300 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4301 recycled = 0; 4302 if (vp->v_usecount == 0) { 4303 recycled = 1; 4304 vgonel(vp); 4305 } 4306 return (recycled); 4307 } 4308 4309 /* 4310 * Eliminate all activity associated with a vnode 4311 * in preparation for reuse. 4312 */ 4313 void 4314 vgone(struct vnode *vp) 4315 { 4316 VI_LOCK(vp); 4317 vgonel(vp); 4318 VI_UNLOCK(vp); 4319 } 4320 4321 /* 4322 * Notify upper mounts about reclaimed or unlinked vnode.
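 *
 * The typical consumers are stacked filesystems such as nullfs, which
 * register on mnt_notify so they can dispose of their own vnode when the
 * lower vnode they reference is reclaimed or unlinked.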
4323 */ 4324 void 4325 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4326 { 4327 struct mount *mp; 4328 struct mount_upper_node *ump; 4329 4330 mp = atomic_load_ptr(&vp->v_mount); 4331 if (mp == NULL) 4332 return; 4333 if (TAILQ_EMPTY(&mp->mnt_notify)) 4334 return; 4335 4336 MNT_ILOCK(mp); 4337 mp->mnt_upper_pending++; 4338 KASSERT(mp->mnt_upper_pending > 0, 4339 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4340 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4341 MNT_IUNLOCK(mp); 4342 switch (event) { 4343 case VFS_NOTIFY_UPPER_RECLAIM: 4344 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4345 break; 4346 case VFS_NOTIFY_UPPER_UNLINK: 4347 VFS_UNLINK_LOWERVP(ump->mp, vp); 4348 break; 4349 } 4350 MNT_ILOCK(mp); 4351 } 4352 mp->mnt_upper_pending--; 4353 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4354 mp->mnt_upper_pending == 0) { 4355 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4356 wakeup(&mp->mnt_uppers); 4357 } 4358 MNT_IUNLOCK(mp); 4359 } 4360 4361 /* 4362 * vgone, with the vp interlock held. 4363 */ 4364 static void 4365 vgonel(struct vnode *vp) 4366 { 4367 struct thread *td; 4368 struct mount *mp; 4369 vm_object_t object; 4370 bool active, doinginact, oweinact; 4371 4372 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4373 ASSERT_VI_LOCKED(vp, "vgonel"); 4374 VNASSERT(vp->v_holdcnt, vp, 4375 ("vgonel: vp %p has no reference.", vp)); 4376 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4377 td = curthread; 4378 4379 /* 4380 * Don't vgonel if we're already doomed. 4381 */ 4382 if (VN_IS_DOOMED(vp)) { 4383 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4384 vn_get_state(vp) == VSTATE_DEAD, vp); 4385 return; 4386 } 4387 /* 4388 * Paired with freevnode. 4389 */ 4390 vn_seqc_write_begin_locked(vp); 4391 vunlazy_gone(vp); 4392 vn_irflag_set_locked(vp, VIRF_DOOMED); 4393 vn_set_state(vp, VSTATE_DESTROYING); 4394 4395 /* 4396 * Check to see if the vnode is in use. If so, we have to 4397 * call VOP_CLOSE() and VOP_INACTIVE(). 4398 * 4399 * It could be that VOP_INACTIVE() requested reclamation, in 4400 * which case we should avoid recursion, so check 4401 * VI_DOINGINACT. This is not precise but good enough. 4402 */ 4403 active = vp->v_usecount > 0; 4404 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4405 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4406 4407 /* 4408 * If we need to do inactive VI_OWEINACT will be set. 4409 */ 4410 if (vp->v_iflag & VI_DEFINACT) { 4411 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4412 vp->v_iflag &= ~VI_DEFINACT; 4413 vdropl(vp); 4414 } else { 4415 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4416 VI_UNLOCK(vp); 4417 } 4418 cache_purge_vgone(vp); 4419 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4420 4421 /* 4422 * If purging an active vnode, it must be closed and 4423 * deactivated before being reclaimed. 4424 */ 4425 if (active) 4426 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4427 if (!doinginact) { 4428 do { 4429 if (oweinact || active) { 4430 VI_LOCK(vp); 4431 vinactivef(vp); 4432 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4433 VI_UNLOCK(vp); 4434 } 4435 } while (oweinact); 4436 } 4437 if (vp->v_type == VSOCK) 4438 vfs_unp_reclaim(vp); 4439 4440 /* 4441 * Clean out any buffers associated with the vnode. 4442 * If the flush fails, just toss the buffers. 
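 *
 * The V_SAVE pass below first tries to write the dirty buffers out;
 * only if that fails are the remaining buffers discarded outright by
 * the vinvalbuf(vp, 0, ...) loop.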
4443 */ 4444 mp = NULL; 4445 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4446 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4447 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4448 while (vinvalbuf(vp, 0, 0, 0) != 0) 4449 ; 4450 } 4451 4452 BO_LOCK(&vp->v_bufobj); 4453 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4454 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4455 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4456 vp->v_bufobj.bo_clean.bv_cnt == 0, 4457 ("vp %p bufobj not invalidated", vp)); 4458 4459 /* 4460 * For VMIO bufobj, BO_DEAD is set later, or in 4461 * vm_object_terminate() after the object's page queue is 4462 * flushed. 4463 */ 4464 object = vp->v_bufobj.bo_object; 4465 if (object == NULL) 4466 vp->v_bufobj.bo_flag |= BO_DEAD; 4467 BO_UNLOCK(&vp->v_bufobj); 4468 4469 /* 4470 * Handle the VM part. Tmpfs handles v_object on its own (the 4471 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4472 * should not touch the object borrowed from the lower vnode 4473 * (the handle check). 4474 */ 4475 if (object != NULL && object->type == OBJT_VNODE && 4476 object->handle == vp) 4477 vnode_destroy_vobject(vp); 4478 4479 /* 4480 * Reclaim the vnode. 4481 */ 4482 if (VOP_RECLAIM(vp)) 4483 panic("vgone: cannot reclaim"); 4484 if (mp != NULL) 4485 vn_finished_secondary_write(mp); 4486 VNASSERT(vp->v_object == NULL, vp, 4487 ("vop_reclaim left v_object vp=%p", vp)); 4488 /* 4489 * Clear the advisory locks and wake up waiting threads. 4490 */ 4491 if (vp->v_lockf != NULL) { 4492 (void)VOP_ADVLOCKPURGE(vp); 4493 vp->v_lockf = NULL; 4494 } 4495 /* 4496 * Delete from old mount point vnode list. 4497 */ 4498 if (vp->v_mount == NULL) { 4499 VI_LOCK(vp); 4500 } else { 4501 delmntque(vp); 4502 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4503 } 4504 /* 4505 * Done with purge, reset to the standard lock and invalidate 4506 * the vnode. 4507 */ 4508 vp->v_vnlock = &vp->v_lock; 4509 vp->v_op = &dead_vnodeops; 4510 vp->v_type = VBAD; 4511 vn_set_state(vp, VSTATE_DEAD); 4512 } 4513 4514 /* 4515 * Print out a description of a vnode. 4516 */ 4517 static const char *const vtypename[] = { 4518 [VNON] = "VNON", 4519 [VREG] = "VREG", 4520 [VDIR] = "VDIR", 4521 [VBLK] = "VBLK", 4522 [VCHR] = "VCHR", 4523 [VLNK] = "VLNK", 4524 [VSOCK] = "VSOCK", 4525 [VFIFO] = "VFIFO", 4526 [VBAD] = "VBAD", 4527 [VMARKER] = "VMARKER", 4528 }; 4529 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4530 "vnode type name not added to vtypename"); 4531 4532 static const char *const vstatename[] = { 4533 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4534 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4535 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4536 [VSTATE_DEAD] = "VSTATE_DEAD", 4537 }; 4538 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4539 "vnode state name not added to vstatename"); 4540 4541 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4542 "new hold count flag not added to vn_printf"); 4543 4544 void 4545 vn_printf(struct vnode *vp, const char *fmt, ...) 
4546 { 4547 va_list ap; 4548 char buf[256], buf2[16]; 4549 u_long flags; 4550 u_int holdcnt; 4551 short irflag; 4552 4553 va_start(ap, fmt); 4554 vprintf(fmt, ap); 4555 va_end(ap); 4556 printf("%p: ", (void *)vp); 4557 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4558 vstatename[vp->v_state], vp->v_op); 4559 holdcnt = atomic_load_int(&vp->v_holdcnt); 4560 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4561 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4562 vp->v_seqc_users); 4563 switch (vp->v_type) { 4564 case VDIR: 4565 printf(" mountedhere %p\n", vp->v_mountedhere); 4566 break; 4567 case VCHR: 4568 printf(" rdev %p\n", vp->v_rdev); 4569 break; 4570 case VSOCK: 4571 printf(" socket %p\n", vp->v_unpcb); 4572 break; 4573 case VFIFO: 4574 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4575 break; 4576 default: 4577 printf("\n"); 4578 break; 4579 } 4580 buf[0] = '\0'; 4581 buf[1] = '\0'; 4582 if (holdcnt & VHOLD_NO_SMR) 4583 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4584 printf(" hold count flags (%s)\n", buf + 1); 4585 4586 buf[0] = '\0'; 4587 buf[1] = '\0'; 4588 irflag = vn_irflag_read(vp); 4589 if (irflag & VIRF_DOOMED) 4590 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4591 if (irflag & VIRF_PGREAD) 4592 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4593 if (irflag & VIRF_MOUNTPOINT) 4594 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4595 if (irflag & VIRF_TEXT_REF) 4596 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4597 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4598 if (flags != 0) { 4599 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4600 strlcat(buf, buf2, sizeof(buf)); 4601 } 4602 if (vp->v_vflag & VV_ROOT) 4603 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4604 if (vp->v_vflag & VV_ISTTY) 4605 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4606 if (vp->v_vflag & VV_NOSYNC) 4607 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4608 if (vp->v_vflag & VV_ETERNALDEV) 4609 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4610 if (vp->v_vflag & VV_CACHEDLABEL) 4611 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4612 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4613 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4614 if (vp->v_vflag & VV_COPYONWRITE) 4615 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4616 if (vp->v_vflag & VV_SYSTEM) 4617 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4618 if (vp->v_vflag & VV_PROCDEP) 4619 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4620 if (vp->v_vflag & VV_DELETED) 4621 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4622 if (vp->v_vflag & VV_MD) 4623 strlcat(buf, "|VV_MD", sizeof(buf)); 4624 if (vp->v_vflag & VV_FORCEINSMQ) 4625 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4626 if (vp->v_vflag & VV_READLINK) 4627 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4628 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4629 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4630 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4631 if (flags != 0) { 4632 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4633 strlcat(buf, buf2, sizeof(buf)); 4634 } 4635 if (vp->v_iflag & VI_MOUNT) 4636 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4637 if (vp->v_iflag & VI_DOINGINACT) 4638 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4639 if (vp->v_iflag & VI_OWEINACT) 4640 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4641 if (vp->v_iflag & VI_DEFINACT) 4642 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4643 if (vp->v_iflag & VI_FOPENING) 4644 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4645 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4646 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4647 if (flags != 0) { 4648 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4649 strlcat(buf, buf2, sizeof(buf)); 4650 } 4651 if (vp->v_mflag & VMP_LAZYLIST) 4652 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4653 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4654 if (flags != 0) { 4655 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4656 strlcat(buf, buf2, sizeof(buf)); 4657 } 4658 printf(" flags (%s)", buf + 1); 4659 if (mtx_owned(VI_MTX(vp))) 4660 printf(" VI_LOCKed"); 4661 printf("\n"); 4662 if (vp->v_object != NULL) 4663 printf(" v_object %p ref %d pages %d " 4664 "cleanbuf %d dirtybuf %d\n", 4665 vp->v_object, vp->v_object->ref_count, 4666 vp->v_object->resident_page_count, 4667 vp->v_bufobj.bo_clean.bv_cnt, 4668 vp->v_bufobj.bo_dirty.bv_cnt); 4669 printf(" "); 4670 lockmgr_printinfo(vp->v_vnlock); 4671 if (vp->v_data != NULL) 4672 VOP_PRINT(vp); 4673 } 4674 4675 #ifdef DDB 4676 /* 4677 * List all of the locked vnodes in the system. 4678 * Called when debugging the kernel. 4679 */ 4680 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4681 { 4682 struct mount *mp; 4683 struct vnode *vp; 4684 4685 /* 4686 * Note: because this is DDB, we can't obey the locking semantics 4687 * for these structures, which means we could catch an inconsistent 4688 * state and dereference a nasty pointer. Not much to be done 4689 * about that. 4690 */ 4691 db_printf("Locked vnodes\n"); 4692 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4693 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4694 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4695 vn_printf(vp, "vnode "); 4696 } 4697 } 4698 } 4699 4700 /* 4701 * Show details about the given vnode. 4702 */ 4703 DB_SHOW_COMMAND(vnode, db_show_vnode) 4704 { 4705 struct vnode *vp; 4706 4707 if (!have_addr) 4708 return; 4709 vp = (struct vnode *)addr; 4710 vn_printf(vp, "vnode "); 4711 } 4712 4713 /* 4714 * Show details about the given mount point. 4715 */ 4716 DB_SHOW_COMMAND(mount, db_show_mount) 4717 { 4718 struct mount *mp; 4719 struct vfsopt *opt; 4720 struct statfs *sp; 4721 struct vnode *vp; 4722 char buf[512]; 4723 uint64_t mflags; 4724 u_int flags; 4725 4726 if (!have_addr) { 4727 /* No address given, print short info about all mount points. 
*/ 4728 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4729 db_printf("%p %s on %s (%s)\n", mp, 4730 mp->mnt_stat.f_mntfromname, 4731 mp->mnt_stat.f_mntonname, 4732 mp->mnt_stat.f_fstypename); 4733 if (db_pager_quit) 4734 break; 4735 } 4736 db_printf("\nMore info: show mount <addr>\n"); 4737 return; 4738 } 4739 4740 mp = (struct mount *)addr; 4741 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4742 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4743 4744 buf[0] = '\0'; 4745 mflags = mp->mnt_flag; 4746 #define MNT_FLAG(flag) do { \ 4747 if (mflags & (flag)) { \ 4748 if (buf[0] != '\0') \ 4749 strlcat(buf, ", ", sizeof(buf)); \ 4750 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4751 mflags &= ~(flag); \ 4752 } \ 4753 } while (0) 4754 MNT_FLAG(MNT_RDONLY); 4755 MNT_FLAG(MNT_SYNCHRONOUS); 4756 MNT_FLAG(MNT_NOEXEC); 4757 MNT_FLAG(MNT_NOSUID); 4758 MNT_FLAG(MNT_NFS4ACLS); 4759 MNT_FLAG(MNT_UNION); 4760 MNT_FLAG(MNT_ASYNC); 4761 MNT_FLAG(MNT_SUIDDIR); 4762 MNT_FLAG(MNT_SOFTDEP); 4763 MNT_FLAG(MNT_NOSYMFOLLOW); 4764 MNT_FLAG(MNT_GJOURNAL); 4765 MNT_FLAG(MNT_MULTILABEL); 4766 MNT_FLAG(MNT_ACLS); 4767 MNT_FLAG(MNT_NOATIME); 4768 MNT_FLAG(MNT_NOCLUSTERR); 4769 MNT_FLAG(MNT_NOCLUSTERW); 4770 MNT_FLAG(MNT_SUJ); 4771 MNT_FLAG(MNT_EXRDONLY); 4772 MNT_FLAG(MNT_EXPORTED); 4773 MNT_FLAG(MNT_DEFEXPORTED); 4774 MNT_FLAG(MNT_EXPORTANON); 4775 MNT_FLAG(MNT_EXKERB); 4776 MNT_FLAG(MNT_EXPUBLIC); 4777 MNT_FLAG(MNT_LOCAL); 4778 MNT_FLAG(MNT_QUOTA); 4779 MNT_FLAG(MNT_ROOTFS); 4780 MNT_FLAG(MNT_USER); 4781 MNT_FLAG(MNT_IGNORE); 4782 MNT_FLAG(MNT_UPDATE); 4783 MNT_FLAG(MNT_DELEXPORT); 4784 MNT_FLAG(MNT_RELOAD); 4785 MNT_FLAG(MNT_FORCE); 4786 MNT_FLAG(MNT_SNAPSHOT); 4787 MNT_FLAG(MNT_BYFSID); 4788 #undef MNT_FLAG 4789 if (mflags != 0) { 4790 if (buf[0] != '\0') 4791 strlcat(buf, ", ", sizeof(buf)); 4792 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4793 "0x%016jx", mflags); 4794 } 4795 db_printf(" mnt_flag = %s\n", buf); 4796 4797 buf[0] = '\0'; 4798 flags = mp->mnt_kern_flag; 4799 #define MNT_KERN_FLAG(flag) do { \ 4800 if (flags & (flag)) { \ 4801 if (buf[0] != '\0') \ 4802 strlcat(buf, ", ", sizeof(buf)); \ 4803 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4804 flags &= ~(flag); \ 4805 } \ 4806 } while (0) 4807 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4808 MNT_KERN_FLAG(MNTK_ASYNC); 4809 MNT_KERN_FLAG(MNTK_SOFTDEP); 4810 MNT_KERN_FLAG(MNTK_NOMSYNC); 4811 MNT_KERN_FLAG(MNTK_DRAINING); 4812 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4813 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4814 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4815 MNT_KERN_FLAG(MNTK_NO_IOPF); 4816 MNT_KERN_FLAG(MNTK_RECURSE); 4817 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4818 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4819 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4820 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4821 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4822 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4823 MNT_KERN_FLAG(MNTK_NOASYNC); 4824 MNT_KERN_FLAG(MNTK_UNMOUNT); 4825 MNT_KERN_FLAG(MNTK_MWAIT); 4826 MNT_KERN_FLAG(MNTK_SUSPEND); 4827 MNT_KERN_FLAG(MNTK_SUSPEND2); 4828 MNT_KERN_FLAG(MNTK_SUSPENDED); 4829 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4830 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4831 #undef MNT_KERN_FLAG 4832 if (flags != 0) { 4833 if (buf[0] != '\0') 4834 strlcat(buf, ", ", sizeof(buf)); 4835 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4836 "0x%08x", flags); 4837 } 4838 db_printf(" mnt_kern_flag = %s\n", buf); 4839 4840 db_printf(" mnt_opt = "); 4841 opt = TAILQ_FIRST(mp->mnt_opt); 4842 if (opt != NULL) { 4843 db_printf("%s", opt->name); 4844 opt = TAILQ_NEXT(opt, link); 4845 while (opt != 
NULL) { 4846 db_printf(", %s", opt->name); 4847 opt = TAILQ_NEXT(opt, link); 4848 } 4849 } 4850 db_printf("\n"); 4851 4852 sp = &mp->mnt_stat; 4853 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4854 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4855 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4856 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4857 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4858 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4859 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4860 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4861 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4862 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4863 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4864 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4865 4866 db_printf(" mnt_cred = { uid=%u ruid=%u", 4867 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4868 if (jailed(mp->mnt_cred)) 4869 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4870 db_printf(" }\n"); 4871 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4872 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4873 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4874 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4875 db_printf(" mnt_lazyvnodelistsize = %d\n", 4876 mp->mnt_lazyvnodelistsize); 4877 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4878 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4879 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4880 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4881 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4882 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4883 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4884 db_printf(" mnt_secondary_accwrites = %d\n", 4885 mp->mnt_secondary_accwrites); 4886 db_printf(" mnt_gjprovider = %s\n", 4887 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4888 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4889 4890 db_printf("\n\nList of active vnodes\n"); 4891 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4892 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4893 vn_printf(vp, "vnode "); 4894 if (db_pager_quit) 4895 break; 4896 } 4897 } 4898 db_printf("\n\nList of inactive vnodes\n"); 4899 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4900 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4901 vn_printf(vp, "vnode "); 4902 if (db_pager_quit) 4903 break; 4904 } 4905 } 4906 } 4907 #endif /* DDB */ 4908 4909 /* 4910 * Fill in a struct xvfsconf based on a struct vfsconf. 4911 */ 4912 static int 4913 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4914 { 4915 struct xvfsconf xvfsp; 4916 4917 bzero(&xvfsp, sizeof(xvfsp)); 4918 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4919 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4920 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4921 xvfsp.vfc_flags = vfsp->vfc_flags; 4922 /* 4923 * These are unused in userland, we keep them 4924 * to not break binary compatibility. 
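 *
 * The filled-in structure reaches userland through the vfs.conflist
 * sysctl defined below; getvfsbyname(3) is believed to be the usual
 * consumer.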
4925 */ 4926 xvfsp.vfc_vfsops = NULL; 4927 xvfsp.vfc_next = NULL; 4928 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4929 } 4930 4931 #ifdef COMPAT_FREEBSD32 4932 struct xvfsconf32 { 4933 uint32_t vfc_vfsops; 4934 char vfc_name[MFSNAMELEN]; 4935 int32_t vfc_typenum; 4936 int32_t vfc_refcount; 4937 int32_t vfc_flags; 4938 uint32_t vfc_next; 4939 }; 4940 4941 static int 4942 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4943 { 4944 struct xvfsconf32 xvfsp; 4945 4946 bzero(&xvfsp, sizeof(xvfsp)); 4947 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4948 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4949 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4950 xvfsp.vfc_flags = vfsp->vfc_flags; 4951 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4952 } 4953 #endif 4954 4955 /* 4956 * Top level filesystem related information gathering. 4957 */ 4958 static int 4959 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4960 { 4961 struct vfsconf *vfsp; 4962 int error; 4963 4964 error = 0; 4965 vfsconf_slock(); 4966 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4967 #ifdef COMPAT_FREEBSD32 4968 if (req->flags & SCTL_MASK32) 4969 error = vfsconf2x32(req, vfsp); 4970 else 4971 #endif 4972 error = vfsconf2x(req, vfsp); 4973 if (error) 4974 break; 4975 } 4976 vfsconf_sunlock(); 4977 return (error); 4978 } 4979 4980 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4981 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4982 "S,xvfsconf", "List of all configured filesystems"); 4983 4984 #ifndef BURN_BRIDGES 4985 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4986 4987 static int 4988 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4989 { 4990 int *name = (int *)arg1 - 1; /* XXX */ 4991 u_int namelen = arg2 + 1; /* XXX */ 4992 struct vfsconf *vfsp; 4993 4994 log(LOG_WARNING, "userland calling deprecated sysctl, " 4995 "please rebuild world\n"); 4996 4997 #if 1 || defined(COMPAT_PRELITE2) 4998 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4999 if (namelen == 1) 5000 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 5001 #endif 5002 5003 switch (name[1]) { 5004 case VFS_MAXTYPENUM: 5005 if (namelen != 2) 5006 return (ENOTDIR); 5007 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 5008 case VFS_CONF: 5009 if (namelen != 3) 5010 return (ENOTDIR); /* overloaded */ 5011 vfsconf_slock(); 5012 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 5013 if (vfsp->vfc_typenum == name[2]) 5014 break; 5015 } 5016 vfsconf_sunlock(); 5017 if (vfsp == NULL) 5018 return (EOPNOTSUPP); 5019 #ifdef COMPAT_FREEBSD32 5020 if (req->flags & SCTL_MASK32) 5021 return (vfsconf2x32(req, vfsp)); 5022 else 5023 #endif 5024 return (vfsconf2x(req, vfsp)); 5025 } 5026 return (EOPNOTSUPP); 5027 } 5028 5029 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 5030 CTLFLAG_MPSAFE, vfs_sysctl, 5031 "Generic filesystem"); 5032 5033 #if 1 || defined(COMPAT_PRELITE2) 5034 5035 static int 5036 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 5037 { 5038 int error; 5039 struct vfsconf *vfsp; 5040 struct ovfsconf ovfs; 5041 5042 vfsconf_slock(); 5043 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 5044 bzero(&ovfs, sizeof(ovfs)); 5045 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 5046 strcpy(ovfs.vfc_name, vfsp->vfc_name); 5047 ovfs.vfc_index = vfsp->vfc_typenum; 5048 ovfs.vfc_refcount = vfsp->vfc_refcount; 5049 ovfs.vfc_flags = vfsp->vfc_flags; 5050 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 5051 if (error != 0) { 5052 vfsconf_sunlock(); 5053 return (error); 5054 } 5055 } 5056 vfsconf_sunlock(); 5057 return (0); 5058 } 5059 5060 #endif /* 1 || COMPAT_PRELITE2 */ 5061 #endif /* !BURN_BRIDGES */ 5062 5063 static void 5064 unmount_or_warn(struct mount *mp) 5065 { 5066 int error; 5067 5068 error = dounmount(mp, MNT_FORCE, curthread); 5069 if (error != 0) { 5070 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 5071 if (error == EBUSY) 5072 printf("BUSY)\n"); 5073 else 5074 printf("%d)\n", error); 5075 } 5076 } 5077 5078 /* 5079 * Unmount all filesystems. The list is traversed in reverse order 5080 * of mounting to avoid dependencies. 5081 */ 5082 void 5083 vfs_unmountall(void) 5084 { 5085 struct mount *mp, *tmp; 5086 5087 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 5088 5089 /* 5090 * Since this only runs when rebooting, it is not interlocked. 5091 */ 5092 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 5093 vfs_ref(mp); 5094 5095 /* 5096 * Forcibly unmounting "/dev" before "/" would prevent clean 5097 * unmount of the latter. 
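 * The loop below therefore skips rootdevmp, and it is unmounted last,
 * once everything else has been handled.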
5098 */ 5099 if (mp == rootdevmp) 5100 continue; 5101 5102 unmount_or_warn(mp); 5103 } 5104 5105 if (rootdevmp != NULL) 5106 unmount_or_warn(rootdevmp); 5107 } 5108 5109 static void 5110 vfs_deferred_inactive(struct vnode *vp, int lkflags) 5111 { 5112 5113 ASSERT_VI_LOCKED(vp, __func__); 5114 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 5115 if ((vp->v_iflag & VI_OWEINACT) == 0) { 5116 vdropl(vp); 5117 return; 5118 } 5119 if (vn_lock(vp, lkflags) == 0) { 5120 VI_LOCK(vp); 5121 vinactive(vp); 5122 VOP_UNLOCK(vp); 5123 vdropl(vp); 5124 return; 5125 } 5126 vdefer_inactive_unlocked(vp); 5127 } 5128 5129 static int 5130 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5131 { 5132 5133 return (vp->v_iflag & VI_DEFINACT); 5134 } 5135 5136 static void __noinline 5137 vfs_periodic_inactive(struct mount *mp, int flags) 5138 { 5139 struct vnode *vp, *mvp; 5140 int lkflags; 5141 5142 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5143 if (flags != MNT_WAIT) 5144 lkflags |= LK_NOWAIT; 5145 5146 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5147 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5148 VI_UNLOCK(vp); 5149 continue; 5150 } 5151 vp->v_iflag &= ~VI_DEFINACT; 5152 vfs_deferred_inactive(vp, lkflags); 5153 } 5154 } 5155 5156 static inline bool 5157 vfs_want_msync(struct vnode *vp) 5158 { 5159 struct vm_object *obj; 5160 5161 /* 5162 * This test may be performed without any locks held. 5163 * We rely on vm_object's type stability. 5164 */ 5165 if (vp->v_vflag & VV_NOSYNC) 5166 return (false); 5167 obj = vp->v_object; 5168 return (obj != NULL && vm_object_mightbedirty(obj)); 5169 } 5170 5171 static int 5172 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5173 { 5174 5175 if (vp->v_vflag & VV_NOSYNC) 5176 return (false); 5177 if (vp->v_iflag & VI_DEFINACT) 5178 return (true); 5179 return (vfs_want_msync(vp)); 5180 } 5181 5182 static void __noinline 5183 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5184 { 5185 struct vnode *vp, *mvp; 5186 int lkflags; 5187 bool seen_defer; 5188 5189 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5190 if (flags != MNT_WAIT) 5191 lkflags |= LK_NOWAIT; 5192 5193 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5194 seen_defer = false; 5195 if (vp->v_iflag & VI_DEFINACT) { 5196 vp->v_iflag &= ~VI_DEFINACT; 5197 seen_defer = true; 5198 } 5199 if (!vfs_want_msync(vp)) { 5200 if (seen_defer) 5201 vfs_deferred_inactive(vp, lkflags); 5202 else 5203 VI_UNLOCK(vp); 5204 continue; 5205 } 5206 if (vget(vp, lkflags) == 0) { 5207 if ((vp->v_vflag & VV_NOSYNC) == 0) { 5208 if (flags == MNT_WAIT) 5209 vnode_pager_clean_sync(vp); 5210 else 5211 vnode_pager_clean_async(vp); 5212 } 5213 vput(vp); 5214 if (seen_defer) 5215 vdrop(vp); 5216 } else { 5217 if (seen_defer) 5218 vdefer_inactive_unlocked(vp); 5219 } 5220 } 5221 } 5222 5223 void 5224 vfs_periodic(struct mount *mp, int flags) 5225 { 5226 5227 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5228 5229 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5230 vfs_periodic_inactive(mp, flags); 5231 else 5232 vfs_periodic_msync_inactive(mp, flags); 5233 } 5234 5235 static void 5236 destroy_vpollinfo_free(struct vpollinfo *vi) 5237 { 5238 5239 knlist_destroy(&vi->vpi_selinfo.si_note); 5240 mtx_destroy(&vi->vpi_lock); 5241 free(vi, M_VNODEPOLL); 5242 } 5243 5244 static void 5245 destroy_vpollinfo(struct vpollinfo *vi) 5246 { 5247 5248 knlist_clear(&vi->vpi_selinfo.si_note, 1); 5249 seldrain(&vi->vpi_selinfo); 5250 destroy_vpollinfo_free(vi); 5251 } 5252 5253 /* 5254 * 
Initialize per-vnode helper structure to hold poll-related state. 5255 */ 5256 void 5257 v_addpollinfo(struct vnode *vp) 5258 { 5259 struct vpollinfo *vi; 5260 5261 if (vp->v_pollinfo != NULL) 5262 return; 5263 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 5264 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 5265 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 5266 vfs_knlunlock, vfs_knl_assert_lock); 5267 VI_LOCK(vp); 5268 if (vp->v_pollinfo != NULL) { 5269 VI_UNLOCK(vp); 5270 destroy_vpollinfo_free(vi); 5271 return; 5272 } 5273 vp->v_pollinfo = vi; 5274 VI_UNLOCK(vp); 5275 } 5276 5277 /* 5278 * Record a process's interest in events which might happen to 5279 * a vnode. Because poll uses the historic select-style interface 5280 * internally, this routine serves as both the ``check for any 5281 * pending events'' and the ``record my interest in future events'' 5282 * functions. (These are done together, while the lock is held, 5283 * to avoid race conditions.) 5284 */ 5285 int 5286 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 5287 { 5288 5289 v_addpollinfo(vp); 5290 mtx_lock(&vp->v_pollinfo->vpi_lock); 5291 if (vp->v_pollinfo->vpi_revents & events) { 5292 /* 5293 * This leaves events we are not interested 5294 * in available for the other process which 5295 * presumably had requested them 5296 * (otherwise they would never have been 5297 * recorded). 5298 */ 5299 events &= vp->v_pollinfo->vpi_revents; 5300 vp->v_pollinfo->vpi_revents &= ~events; 5301 5302 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5303 return (events); 5304 } 5305 vp->v_pollinfo->vpi_events |= events; 5306 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5307 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5308 return (0); 5309 } 5310 5311 /* 5312 * Routine to create and manage a filesystem syncer vnode. 5313 */ 5314 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5315 static int sync_fsync(struct vop_fsync_args *); 5316 static int sync_inactive(struct vop_inactive_args *); 5317 static int sync_reclaim(struct vop_reclaim_args *); 5318 5319 static struct vop_vector sync_vnodeops = { 5320 .vop_bypass = VOP_EOPNOTSUPP, 5321 .vop_close = sync_close, 5322 .vop_fsync = sync_fsync, 5323 .vop_getwritemount = vop_stdgetwritemount, 5324 .vop_inactive = sync_inactive, 5325 .vop_need_inactive = vop_stdneed_inactive, 5326 .vop_reclaim = sync_reclaim, 5327 .vop_lock1 = vop_stdlock, 5328 .vop_unlock = vop_stdunlock, 5329 .vop_islocked = vop_stdislocked, 5330 .vop_fplookup_vexec = VOP_EAGAIN, 5331 .vop_fplookup_symlink = VOP_EAGAIN, 5332 }; 5333 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5334 5335 /* 5336 * Create a new filesystem syncer vnode for the specified mount point. 5337 */ 5338 void 5339 vfs_allocate_syncvnode(struct mount *mp) 5340 { 5341 struct vnode *vp; 5342 struct bufobj *bo; 5343 static long start, incr, next; 5344 int error; 5345 5346 /* Allocate a new vnode */ 5347 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5348 if (error != 0) 5349 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5350 vp->v_type = VNON; 5351 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5352 vp->v_vflag |= VV_FORCEINSMQ; 5353 error = insmntque1(vp, mp); 5354 if (error != 0) 5355 panic("vfs_allocate_syncvnode: insmntque() failed"); 5356 vp->v_vflag &= ~VV_FORCEINSMQ; 5357 vn_set_state(vp, VSTATE_CONSTRUCTED); 5358 VOP_UNLOCK(vp); 5359 /* 5360 * Place the vnode onto the syncer worklist.
We attempt to 5361 * scatter them about on the list so that they will go off 5362 * at evenly distributed times even if all the filesystems 5363 * are mounted at once. 5364 */ 5365 next += incr; 5366 if (next == 0 || next > syncer_maxdelay) { 5367 start /= 2; 5368 incr /= 2; 5369 if (start == 0) { 5370 start = syncer_maxdelay / 2; 5371 incr = syncer_maxdelay; 5372 } 5373 next = start; 5374 } 5375 bo = &vp->v_bufobj; 5376 BO_LOCK(bo); 5377 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5378 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5379 mtx_lock(&sync_mtx); 5380 sync_vnode_count++; 5381 if (mp->mnt_syncer == NULL) { 5382 mp->mnt_syncer = vp; 5383 vp = NULL; 5384 } 5385 mtx_unlock(&sync_mtx); 5386 BO_UNLOCK(bo); 5387 if (vp != NULL) { 5388 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5389 vgone(vp); 5390 vput(vp); 5391 } 5392 } 5393 5394 void 5395 vfs_deallocate_syncvnode(struct mount *mp) 5396 { 5397 struct vnode *vp; 5398 5399 mtx_lock(&sync_mtx); 5400 vp = mp->mnt_syncer; 5401 if (vp != NULL) 5402 mp->mnt_syncer = NULL; 5403 mtx_unlock(&sync_mtx); 5404 if (vp != NULL) 5405 vrele(vp); 5406 } 5407 5408 /* 5409 * Do a lazy sync of the filesystem. 5410 */ 5411 static int 5412 sync_fsync(struct vop_fsync_args *ap) 5413 { 5414 struct vnode *syncvp = ap->a_vp; 5415 struct mount *mp = syncvp->v_mount; 5416 int error, save; 5417 struct bufobj *bo; 5418 5419 /* 5420 * We only need to do something if this is a lazy evaluation. 5421 */ 5422 if (ap->a_waitfor != MNT_LAZY) 5423 return (0); 5424 5425 /* 5426 * Move ourselves to the back of the sync list. 5427 */ 5428 bo = &syncvp->v_bufobj; 5429 BO_LOCK(bo); 5430 vn_syncer_add_to_worklist(bo, syncdelay); 5431 BO_UNLOCK(bo); 5432 5433 /* 5434 * Walk the list of vnodes pushing all that are dirty and 5435 * not already on the sync list. 5436 */ 5437 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5438 return (0); 5439 VOP_UNLOCK(syncvp); 5440 save = curthread_pflags_set(TDP_SYNCIO); 5441 /* 5442 * The filesystem at hand may be idle with free vnodes stored in the 5443 * batch. Return them instead of letting them stay there indefinitely. 5444 */ 5445 vfs_periodic(mp, MNT_NOWAIT); 5446 error = VFS_SYNC(mp, MNT_LAZY); 5447 curthread_pflags_restore(save); 5448 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5449 vfs_unbusy(mp); 5450 return (error); 5451 } 5452 5453 /* 5454 * The syncer vnode is no longer referenced. 5455 */ 5456 static int 5457 sync_inactive(struct vop_inactive_args *ap) 5458 { 5459 5460 vgone(ap->a_vp); 5461 return (0); 5462 } 5463 5464 /* 5465 * The syncer vnode is no longer needed and is being decommissioned. 5466 * 5467 * Modifications to the worklist must be protected by sync_mtx.
5468 */ 5469 static int 5470 sync_reclaim(struct vop_reclaim_args *ap) 5471 { 5472 struct vnode *vp = ap->a_vp; 5473 struct bufobj *bo; 5474 5475 bo = &vp->v_bufobj; 5476 BO_LOCK(bo); 5477 mtx_lock(&sync_mtx); 5478 if (vp->v_mount->mnt_syncer == vp) 5479 vp->v_mount->mnt_syncer = NULL; 5480 if (bo->bo_flag & BO_ONWORKLST) { 5481 LIST_REMOVE(bo, bo_synclist); 5482 syncer_worklist_len--; 5483 sync_vnode_count--; 5484 bo->bo_flag &= ~BO_ONWORKLST; 5485 } 5486 mtx_unlock(&sync_mtx); 5487 BO_UNLOCK(bo); 5488 5489 return (0); 5490 } 5491 5492 int 5493 vn_need_pageq_flush(struct vnode *vp) 5494 { 5495 struct vm_object *obj; 5496 5497 obj = vp->v_object; 5498 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5499 vm_object_mightbedirty(obj)); 5500 } 5501 5502 /* 5503 * Check if vnode represents a disk device. 5504 */ 5505 bool 5506 vn_isdisk_error(struct vnode *vp, int *errp) 5507 { 5508 int error; 5509 5510 if (vp->v_type != VCHR) { 5511 error = ENOTBLK; 5512 goto out; 5513 } 5514 error = 0; 5515 dev_lock(); 5516 if (vp->v_rdev == NULL) 5517 error = ENXIO; 5518 else if (vp->v_rdev->si_devsw == NULL) 5519 error = ENXIO; 5520 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5521 error = ENOTBLK; 5522 dev_unlock(); 5523 out: 5524 *errp = error; 5525 return (error == 0); 5526 } 5527 5528 bool 5529 vn_isdisk(struct vnode *vp) 5530 { 5531 int error; 5532 5533 return (vn_isdisk_error(vp, &error)); 5534 } 5535 5536 /* 5537 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5538 * the comment above cache_fplookup for details. 5539 */ 5540 int 5541 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5542 { 5543 int error; 5544 5545 VFS_SMR_ASSERT_ENTERED(); 5546 5547 /* Check the owner. */ 5548 if (cred->cr_uid == file_uid) { 5549 if (file_mode & S_IXUSR) 5550 return (0); 5551 goto out_error; 5552 } 5553 5554 /* Otherwise, check the groups (first match) */ 5555 if (groupmember(file_gid, cred)) { 5556 if (file_mode & S_IXGRP) 5557 return (0); 5558 goto out_error; 5559 } 5560 5561 /* Otherwise, check everyone else. */ 5562 if (file_mode & S_IXOTH) 5563 return (0); 5564 out_error: 5565 /* 5566 * Permission check failed, but it is possible denial will get overridden 5567 * (e.g., when root is traversing through a 700 directory owned by someone 5568 * else). 5569 * 5570 * vaccess() calls priv_check_cred which in turn can descend into MAC 5571 * modules overriding this result. It's quite unclear what semantics 5572 * they are allowed to operate with, thus for safety we don't call them 5573 * from within the SMR section. This also means if any such modules 5574 * are present, we have to let the regular lookup decide. 5575 */ 5576 error = priv_check_cred_vfs_lookup_nomac(cred); 5577 switch (error) { 5578 case 0: 5579 return (0); 5580 case EAGAIN: 5581 /* 5582 * MAC modules present. 5583 */ 5584 return (EAGAIN); 5585 case EPERM: 5586 return (EACCES); 5587 default: 5588 return (error); 5589 } 5590 } 5591 5592 /* 5593 * Common filesystem object access control check routine. Accepts a 5594 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5595 * Returns 0 on success, or an errno on failure.
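 *
 * An illustrative call from a filesystem's VOP_ACCESS implementation
 * (the private inode field names here are made up):
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_accmode, ap->a_cred));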
5596 */ 5597 int 5598 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5599 accmode_t accmode, struct ucred *cred) 5600 { 5601 accmode_t dac_granted; 5602 accmode_t priv_granted; 5603 5604 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5605 ("invalid bit in accmode")); 5606 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5607 ("VAPPEND without VWRITE")); 5608 5609 /* 5610 * Look for a normal, non-privileged way to access the file/directory 5611 * as requested. If it exists, go with that. 5612 */ 5613 5614 dac_granted = 0; 5615 5616 /* Check the owner. */ 5617 if (cred->cr_uid == file_uid) { 5618 dac_granted |= VADMIN; 5619 if (file_mode & S_IXUSR) 5620 dac_granted |= VEXEC; 5621 if (file_mode & S_IRUSR) 5622 dac_granted |= VREAD; 5623 if (file_mode & S_IWUSR) 5624 dac_granted |= (VWRITE | VAPPEND); 5625 5626 if ((accmode & dac_granted) == accmode) 5627 return (0); 5628 5629 goto privcheck; 5630 } 5631 5632 /* Otherwise, check the groups (first match) */ 5633 if (groupmember(file_gid, cred)) { 5634 if (file_mode & S_IXGRP) 5635 dac_granted |= VEXEC; 5636 if (file_mode & S_IRGRP) 5637 dac_granted |= VREAD; 5638 if (file_mode & S_IWGRP) 5639 dac_granted |= (VWRITE | VAPPEND); 5640 5641 if ((accmode & dac_granted) == accmode) 5642 return (0); 5643 5644 goto privcheck; 5645 } 5646 5647 /* Otherwise, check everyone else. */ 5648 if (file_mode & S_IXOTH) 5649 dac_granted |= VEXEC; 5650 if (file_mode & S_IROTH) 5651 dac_granted |= VREAD; 5652 if (file_mode & S_IWOTH) 5653 dac_granted |= (VWRITE | VAPPEND); 5654 if ((accmode & dac_granted) == accmode) 5655 return (0); 5656 5657 privcheck: 5658 /* 5659 * Build a privilege mask to determine if the set of privileges 5660 * satisfies the requirements when combined with the granted mask 5661 * from above. For each privilege, if the privilege is required, 5662 * bitwise or the request type onto the priv_granted mask. 5663 */ 5664 priv_granted = 0; 5665 5666 if (type == VDIR) { 5667 /* 5668 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5669 * requests, instead of PRIV_VFS_EXEC. 5670 */ 5671 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5672 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5673 priv_granted |= VEXEC; 5674 } else { 5675 /* 5676 * Ensure that at least one execute bit is on. Otherwise, 5677 * a privileged user will always succeed, and we don't want 5678 * this to happen unless the file really is executable. 5679 */ 5680 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5681 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5682 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5683 priv_granted |= VEXEC; 5684 } 5685 5686 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5687 !priv_check_cred(cred, PRIV_VFS_READ)) 5688 priv_granted |= VREAD; 5689 5690 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5691 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5692 priv_granted |= (VWRITE | VAPPEND); 5693 5694 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5695 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5696 priv_granted |= VADMIN; 5697 5698 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5699 return (0); 5700 } 5701 5702 return ((accmode & VADMIN) ? EPERM : EACCES); 5703 } 5704 5705 /* 5706 * Credential check based on process requesting service, and per-attribute 5707 * permissions. 
5708 */ 5709 int 5710 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5711 struct thread *td, accmode_t accmode) 5712 { 5713 5714 /* 5715 * Kernel-invoked always succeeds. 5716 */ 5717 if (cred == NOCRED) 5718 return (0); 5719 5720 /* 5721 * Do not allow privileged processes in jail to directly manipulate 5722 * system attributes. 5723 */ 5724 switch (attrnamespace) { 5725 case EXTATTR_NAMESPACE_SYSTEM: 5726 /* Potentially should be: return (EPERM); */ 5727 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5728 case EXTATTR_NAMESPACE_USER: 5729 return (VOP_ACCESS(vp, accmode, cred, td)); 5730 default: 5731 return (EPERM); 5732 } 5733 } 5734 5735 #ifdef DEBUG_VFS_LOCKS 5736 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5737 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5738 "Drop into debugger on lock violation"); 5739 5740 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5741 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5742 0, "Check for interlock across VOPs"); 5743 5744 int vfs_badlock_print = 1; /* Print lock violations. */ 5745 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5746 0, "Print lock violations"); 5747 5748 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5749 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5750 0, "Print vnode details on lock violations"); 5751 5752 #ifdef KDB 5753 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5754 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5755 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5756 #endif 5757 5758 static void 5759 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5760 { 5761 5762 #ifdef KDB 5763 if (vfs_badlock_backtrace) 5764 kdb_backtrace(); 5765 #endif 5766 if (vfs_badlock_vnode) 5767 vn_printf(vp, "vnode "); 5768 if (vfs_badlock_print) 5769 printf("%s: %p %s\n", str, (void *)vp, msg); 5770 if (vfs_badlock_ddb) 5771 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5772 } 5773 5774 void 5775 assert_vi_locked(struct vnode *vp, const char *str) 5776 { 5777 5778 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5779 vfs_badlock("interlock is not locked but should be", str, vp); 5780 } 5781 5782 void 5783 assert_vi_unlocked(struct vnode *vp, const char *str) 5784 { 5785 5786 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5787 vfs_badlock("interlock is locked but should not be", str, vp); 5788 } 5789 5790 void 5791 assert_vop_locked(struct vnode *vp, const char *str) 5792 { 5793 if (KERNEL_PANICKED() || vp == NULL) 5794 return; 5795 5796 #ifdef WITNESS 5797 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5798 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5799 #else 5800 int locked = VOP_ISLOCKED(vp); 5801 if (locked == 0 || locked == LK_EXCLOTHER) 5802 #endif 5803 vfs_badlock("is not locked but should be", str, vp); 5804 } 5805 5806 void 5807 assert_vop_unlocked(struct vnode *vp, const char *str) 5808 { 5809 if (KERNEL_PANICKED() || vp == NULL) 5810 return; 5811 5812 #ifdef WITNESS 5813 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5814 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5815 #else 5816 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5817 #endif 5818 vfs_badlock("is locked but should not be", str, vp); 5819 } 5820 5821 void 5822 assert_vop_elocked(struct vnode *vp, const char *str) 5823 { 5824 if (KERNEL_PANICKED() || vp == 
NULL) 5825 return; 5826 5827 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5828 vfs_badlock("is not exclusive locked but should be", str, vp); 5829 } 5830 #endif /* DEBUG_VFS_LOCKS */ 5831 5832 void 5833 vop_rename_fail(struct vop_rename_args *ap) 5834 { 5835 5836 if (ap->a_tvp != NULL) 5837 vput(ap->a_tvp); 5838 if (ap->a_tdvp == ap->a_tvp) 5839 vrele(ap->a_tdvp); 5840 else 5841 vput(ap->a_tdvp); 5842 vrele(ap->a_fdvp); 5843 vrele(ap->a_fvp); 5844 } 5845 5846 void 5847 vop_rename_pre(void *ap) 5848 { 5849 struct vop_rename_args *a = ap; 5850 5851 #ifdef DEBUG_VFS_LOCKS 5852 if (a->a_tvp) 5853 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5854 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5855 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5856 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5857 5858 /* Check the source (from). */ 5859 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5860 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5861 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5862 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5863 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5864 5865 /* Check the target. */ 5866 if (a->a_tvp) 5867 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5868 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5869 #endif 5870 /* 5871 * It may be tempting to add vn_seqc_write_begin/end calls here and 5872 * in vop_rename_post but that's not going to work out since some 5873 * filesystems relookup vnodes mid-rename. This is probably a bug. 5874 * 5875 * For now filesystems are expected to do the relevant calls after they 5876 * decide what vnodes to operate on. 5877 */ 5878 if (a->a_tdvp != a->a_fdvp) 5879 vhold(a->a_fdvp); 5880 if (a->a_tvp != a->a_fvp) 5881 vhold(a->a_fvp); 5882 vhold(a->a_tdvp); 5883 if (a->a_tvp) 5884 vhold(a->a_tvp); 5885 } 5886 5887 #ifdef DEBUG_VFS_LOCKS 5888 void 5889 vop_fplookup_vexec_debugpre(void *ap __unused) 5890 { 5891 5892 VFS_SMR_ASSERT_ENTERED(); 5893 } 5894 5895 void 5896 vop_fplookup_vexec_debugpost(void *ap, int rc) 5897 { 5898 struct vop_fplookup_vexec_args *a; 5899 struct vnode *vp; 5900 5901 a = ap; 5902 vp = a->a_vp; 5903 5904 VFS_SMR_ASSERT_ENTERED(); 5905 if (rc == EOPNOTSUPP) 5906 VNPASS(VN_IS_DOOMED(vp), vp); 5907 } 5908 5909 void 5910 vop_fplookup_symlink_debugpre(void *ap __unused) 5911 { 5912 5913 VFS_SMR_ASSERT_ENTERED(); 5914 } 5915 5916 void 5917 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5918 { 5919 5920 VFS_SMR_ASSERT_ENTERED(); 5921 } 5922 5923 static void 5924 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5925 { 5926 if (vp->v_type == VCHR) 5927 ; 5928 /* 5929 * The shared vs. exclusive locking policy for fsync() 5930 * is actually determined by vp's write mount as indicated 5931 * by VOP_GETWRITEMOUNT(), which for stacked filesystems 5932 * may not be the same as vp->v_mount. However, if the 5933 * underlying filesystem which really handles the fsync() 5934 * supports shared locking, the stacked filesystem must also 5935 * be prepared for its VOP_FSYNC() operation to be called 5936 * with only a shared lock. On the other hand, if the 5937 * stacked filesystem claims support for shared write 5938 * locking but the underlying filesystem does not, and the 5939 * caller incorrectly uses a shared lock, this condition 5940 * should still be caught when the stacked filesystem 5941 * invokes VOP_FSYNC() on the underlying filesystem. 
5942 */ 5943 else if (MNT_SHARED_WRITES(vp->v_mount)) 5944 ASSERT_VOP_LOCKED(vp, name); 5945 else 5946 ASSERT_VOP_ELOCKED(vp, name); 5947 } 5948 5949 void 5950 vop_fsync_debugpre(void *a) 5951 { 5952 struct vop_fsync_args *ap; 5953 5954 ap = a; 5955 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5956 } 5957 5958 void 5959 vop_fsync_debugpost(void *a, int rc __unused) 5960 { 5961 struct vop_fsync_args *ap; 5962 5963 ap = a; 5964 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5965 } 5966 5967 void 5968 vop_fdatasync_debugpre(void *a) 5969 { 5970 struct vop_fdatasync_args *ap; 5971 5972 ap = a; 5973 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5974 } 5975 5976 void 5977 vop_fdatasync_debugpost(void *a, int rc __unused) 5978 { 5979 struct vop_fdatasync_args *ap; 5980 5981 ap = a; 5982 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5983 } 5984 5985 void 5986 vop_strategy_debugpre(void *ap) 5987 { 5988 struct vop_strategy_args *a; 5989 struct buf *bp; 5990 5991 a = ap; 5992 bp = a->a_bp; 5993 5994 /* 5995 * Cluster ops lock their component buffers but not the IO container. 5996 */ 5997 if ((bp->b_flags & B_CLUSTER) != 0) 5998 return; 5999 6000 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 6001 if (vfs_badlock_print) 6002 printf( 6003 "VOP_STRATEGY: bp is not locked but should be\n"); 6004 if (vfs_badlock_ddb) 6005 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 6006 } 6007 } 6008 6009 void 6010 vop_lock_debugpre(void *ap) 6011 { 6012 struct vop_lock1_args *a = ap; 6013 6014 if ((a->a_flags & LK_INTERLOCK) == 0) 6015 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 6016 else 6017 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 6018 } 6019 6020 void 6021 vop_lock_debugpost(void *ap, int rc) 6022 { 6023 struct vop_lock1_args *a = ap; 6024 6025 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 6026 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 6027 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 6028 } 6029 6030 void 6031 vop_unlock_debugpre(void *ap) 6032 { 6033 struct vop_unlock_args *a = ap; 6034 struct vnode *vp = a->a_vp; 6035 6036 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 6037 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 6038 } 6039 6040 void 6041 vop_need_inactive_debugpre(void *ap) 6042 { 6043 struct vop_need_inactive_args *a = ap; 6044 6045 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 6046 } 6047 6048 void 6049 vop_need_inactive_debugpost(void *ap, int rc) 6050 { 6051 struct vop_need_inactive_args *a = ap; 6052 6053 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 6054 } 6055 #endif 6056 6057 void 6058 vop_create_pre(void *ap) 6059 { 6060 struct vop_create_args *a; 6061 struct vnode *dvp; 6062 6063 a = ap; 6064 dvp = a->a_dvp; 6065 vn_seqc_write_begin(dvp); 6066 } 6067 6068 void 6069 vop_create_post(void *ap, int rc) 6070 { 6071 struct vop_create_args *a; 6072 struct vnode *dvp; 6073 6074 a = ap; 6075 dvp = a->a_dvp; 6076 vn_seqc_write_end(dvp); 6077 if (!rc) 6078 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6079 } 6080 6081 void 6082 vop_whiteout_pre(void *ap) 6083 { 6084 struct vop_whiteout_args *a; 6085 struct vnode *dvp; 6086 6087 a = ap; 6088 dvp = a->a_dvp; 6089 vn_seqc_write_begin(dvp); 6090 } 6091 6092 void 6093 vop_whiteout_post(void *ap, int rc) 6094 { 6095 struct vop_whiteout_args *a; 6096 struct vnode *dvp; 6097 6098 a = ap; 6099 dvp = a->a_dvp; 6100 vn_seqc_write_end(dvp); 6101 } 6102 6103 void 6104 vop_deleteextattr_pre(void *ap) 6105 { 6106 struct vop_deleteextattr_args *a; 6107 struct vnode *vp; 6108 6109 a = ap; 6110 vp = a->a_vp; 6111 vn_seqc_write_begin(vp); 6112 } 6113 6114 void 6115 vop_deleteextattr_post(void *ap, int 
rc) 6116 { 6117 struct vop_deleteextattr_args *a; 6118 struct vnode *vp; 6119 6120 a = ap; 6121 vp = a->a_vp; 6122 vn_seqc_write_end(vp); 6123 if (!rc) 6124 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 6125 } 6126 6127 void 6128 vop_link_pre(void *ap) 6129 { 6130 struct vop_link_args *a; 6131 struct vnode *vp, *tdvp; 6132 6133 a = ap; 6134 vp = a->a_vp; 6135 tdvp = a->a_tdvp; 6136 vn_seqc_write_begin(vp); 6137 vn_seqc_write_begin(tdvp); 6138 } 6139 6140 void 6141 vop_link_post(void *ap, int rc) 6142 { 6143 struct vop_link_args *a; 6144 struct vnode *vp, *tdvp; 6145 6146 a = ap; 6147 vp = a->a_vp; 6148 tdvp = a->a_tdvp; 6149 vn_seqc_write_end(vp); 6150 vn_seqc_write_end(tdvp); 6151 if (!rc) { 6152 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6153 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6154 } 6155 } 6156 6157 void 6158 vop_mkdir_pre(void *ap) 6159 { 6160 struct vop_mkdir_args *a; 6161 struct vnode *dvp; 6162 6163 a = ap; 6164 dvp = a->a_dvp; 6165 vn_seqc_write_begin(dvp); 6166 } 6167 6168 void 6169 vop_mkdir_post(void *ap, int rc) 6170 { 6171 struct vop_mkdir_args *a; 6172 struct vnode *dvp; 6173 6174 a = ap; 6175 dvp = a->a_dvp; 6176 vn_seqc_write_end(dvp); 6177 if (!rc) 6178 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6179 } 6180 6181 #ifdef DEBUG_VFS_LOCKS 6182 void 6183 vop_mkdir_debugpost(void *ap, int rc) 6184 { 6185 struct vop_mkdir_args *a; 6186 6187 a = ap; 6188 if (!rc) 6189 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 6190 } 6191 #endif 6192 6193 void 6194 vop_mknod_pre(void *ap) 6195 { 6196 struct vop_mknod_args *a; 6197 struct vnode *dvp; 6198 6199 a = ap; 6200 dvp = a->a_dvp; 6201 vn_seqc_write_begin(dvp); 6202 } 6203 6204 void 6205 vop_mknod_post(void *ap, int rc) 6206 { 6207 struct vop_mknod_args *a; 6208 struct vnode *dvp; 6209 6210 a = ap; 6211 dvp = a->a_dvp; 6212 vn_seqc_write_end(dvp); 6213 if (!rc) 6214 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6215 } 6216 6217 void 6218 vop_reclaim_post(void *ap, int rc) 6219 { 6220 struct vop_reclaim_args *a; 6221 struct vnode *vp; 6222 6223 a = ap; 6224 vp = a->a_vp; 6225 ASSERT_VOP_IN_SEQC(vp); 6226 if (!rc) 6227 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 6228 } 6229 6230 void 6231 vop_remove_pre(void *ap) 6232 { 6233 struct vop_remove_args *a; 6234 struct vnode *dvp, *vp; 6235 6236 a = ap; 6237 dvp = a->a_dvp; 6238 vp = a->a_vp; 6239 vn_seqc_write_begin(dvp); 6240 vn_seqc_write_begin(vp); 6241 } 6242 6243 void 6244 vop_remove_post(void *ap, int rc) 6245 { 6246 struct vop_remove_args *a; 6247 struct vnode *dvp, *vp; 6248 6249 a = ap; 6250 dvp = a->a_dvp; 6251 vp = a->a_vp; 6252 vn_seqc_write_end(dvp); 6253 vn_seqc_write_end(vp); 6254 if (!rc) { 6255 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6256 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6257 } 6258 } 6259 6260 void 6261 vop_rename_post(void *ap, int rc) 6262 { 6263 struct vop_rename_args *a = ap; 6264 long hint; 6265 6266 if (!rc) { 6267 hint = NOTE_WRITE; 6268 if (a->a_fdvp == a->a_tdvp) { 6269 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6270 hint |= NOTE_LINK; 6271 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6272 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6273 } else { 6274 hint |= NOTE_EXTEND; 6275 if (a->a_fvp->v_type == VDIR) 6276 hint |= NOTE_LINK; 6277 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6278 6279 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6280 a->a_tvp->v_type == VDIR) 6281 hint &= ~NOTE_LINK; 6282 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6283 } 6284 6285 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6286 if (a->a_tvp) 6287 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6288 } 6289 if (a->a_tdvp != a->a_fdvp) 6290 
vdrop(a->a_fdvp); 6291 if (a->a_tvp != a->a_fvp) 6292 vdrop(a->a_fvp); 6293 vdrop(a->a_tdvp); 6294 if (a->a_tvp) 6295 vdrop(a->a_tvp); 6296 } 6297 6298 void 6299 vop_rmdir_pre(void *ap) 6300 { 6301 struct vop_rmdir_args *a; 6302 struct vnode *dvp, *vp; 6303 6304 a = ap; 6305 dvp = a->a_dvp; 6306 vp = a->a_vp; 6307 vn_seqc_write_begin(dvp); 6308 vn_seqc_write_begin(vp); 6309 } 6310 6311 void 6312 vop_rmdir_post(void *ap, int rc) 6313 { 6314 struct vop_rmdir_args *a; 6315 struct vnode *dvp, *vp; 6316 6317 a = ap; 6318 dvp = a->a_dvp; 6319 vp = a->a_vp; 6320 vn_seqc_write_end(dvp); 6321 vn_seqc_write_end(vp); 6322 if (!rc) { 6323 vp->v_vflag |= VV_UNLINKED; 6324 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6325 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6326 } 6327 } 6328 6329 void 6330 vop_setattr_pre(void *ap) 6331 { 6332 struct vop_setattr_args *a; 6333 struct vnode *vp; 6334 6335 a = ap; 6336 vp = a->a_vp; 6337 vn_seqc_write_begin(vp); 6338 } 6339 6340 void 6341 vop_setattr_post(void *ap, int rc) 6342 { 6343 struct vop_setattr_args *a; 6344 struct vnode *vp; 6345 6346 a = ap; 6347 vp = a->a_vp; 6348 vn_seqc_write_end(vp); 6349 if (!rc) 6350 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6351 } 6352 6353 void 6354 vop_setacl_pre(void *ap) 6355 { 6356 struct vop_setacl_args *a; 6357 struct vnode *vp; 6358 6359 a = ap; 6360 vp = a->a_vp; 6361 vn_seqc_write_begin(vp); 6362 } 6363 6364 void 6365 vop_setacl_post(void *ap, int rc __unused) 6366 { 6367 struct vop_setacl_args *a; 6368 struct vnode *vp; 6369 6370 a = ap; 6371 vp = a->a_vp; 6372 vn_seqc_write_end(vp); 6373 } 6374 6375 void 6376 vop_setextattr_pre(void *ap) 6377 { 6378 struct vop_setextattr_args *a; 6379 struct vnode *vp; 6380 6381 a = ap; 6382 vp = a->a_vp; 6383 vn_seqc_write_begin(vp); 6384 } 6385 6386 void 6387 vop_setextattr_post(void *ap, int rc) 6388 { 6389 struct vop_setextattr_args *a; 6390 struct vnode *vp; 6391 6392 a = ap; 6393 vp = a->a_vp; 6394 vn_seqc_write_end(vp); 6395 if (!rc) 6396 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6397 } 6398 6399 void 6400 vop_symlink_pre(void *ap) 6401 { 6402 struct vop_symlink_args *a; 6403 struct vnode *dvp; 6404 6405 a = ap; 6406 dvp = a->a_dvp; 6407 vn_seqc_write_begin(dvp); 6408 } 6409 6410 void 6411 vop_symlink_post(void *ap, int rc) 6412 { 6413 struct vop_symlink_args *a; 6414 struct vnode *dvp; 6415 6416 a = ap; 6417 dvp = a->a_dvp; 6418 vn_seqc_write_end(dvp); 6419 if (!rc) 6420 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6421 } 6422 6423 void 6424 vop_open_post(void *ap, int rc) 6425 { 6426 struct vop_open_args *a = ap; 6427 6428 if (!rc) 6429 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6430 } 6431 6432 void 6433 vop_close_post(void *ap, int rc) 6434 { 6435 struct vop_close_args *a = ap; 6436 6437 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6438 !VN_IS_DOOMED(a->a_vp))) { 6439 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 
6440 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6441 } 6442 } 6443 6444 void 6445 vop_read_post(void *ap, int rc) 6446 { 6447 struct vop_read_args *a = ap; 6448 6449 if (!rc) 6450 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6451 } 6452 6453 void 6454 vop_read_pgcache_post(void *ap, int rc) 6455 { 6456 struct vop_read_pgcache_args *a = ap; 6457 6458 if (!rc) 6459 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6460 } 6461 6462 void 6463 vop_readdir_post(void *ap, int rc) 6464 { 6465 struct vop_readdir_args *a = ap; 6466 6467 if (!rc) 6468 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6469 } 6470 6471 static struct knlist fs_knlist; 6472 6473 static void 6474 vfs_event_init(void *arg) 6475 { 6476 knlist_init_mtx(&fs_knlist, NULL); 6477 } 6478 /* XXX - correct order? */ 6479 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6480 6481 void 6482 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6483 { 6484 6485 KNOTE_UNLOCKED(&fs_knlist, event); 6486 } 6487 6488 static int filt_fsattach(struct knote *kn); 6489 static void filt_fsdetach(struct knote *kn); 6490 static int filt_fsevent(struct knote *kn, long hint); 6491 6492 struct filterops fs_filtops = { 6493 .f_isfd = 0, 6494 .f_attach = filt_fsattach, 6495 .f_detach = filt_fsdetach, 6496 .f_event = filt_fsevent 6497 }; 6498 6499 static int 6500 filt_fsattach(struct knote *kn) 6501 { 6502 6503 kn->kn_flags |= EV_CLEAR; 6504 knlist_add(&fs_knlist, kn, 0); 6505 return (0); 6506 } 6507 6508 static void 6509 filt_fsdetach(struct knote *kn) 6510 { 6511 6512 knlist_remove(&fs_knlist, kn, 0); 6513 } 6514 6515 static int 6516 filt_fsevent(struct knote *kn, long hint) 6517 { 6518 6519 kn->kn_fflags |= kn->kn_sfflags & hint; 6520 6521 return (kn->kn_fflags != 0); 6522 } 6523 6524 static int 6525 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6526 { 6527 struct vfsidctl vc; 6528 int error; 6529 struct mount *mp; 6530 6531 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6532 if (error) 6533 return (error); 6534 if (vc.vc_vers != VFS_CTL_VERS1) 6535 return (EINVAL); 6536 mp = vfs_getvfs(&vc.vc_fsid); 6537 if (mp == NULL) 6538 return (ENOENT); 6539 /* ensure that a specific sysctl goes to the right filesystem. */ 6540 if (strcmp(vc.vc_fstypename, "*") != 0 && 6541 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6542 vfs_rel(mp); 6543 return (EINVAL); 6544 } 6545 VCTLTOREQ(&vc, req); 6546 error = VFS_SYSCTL(mp, vc.vc_op, req); 6547 vfs_rel(mp); 6548 return (error); 6549 } 6550 6551 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6552 NULL, 0, sysctl_vfs_ctl, "", 6553 "Sysctl by fsid"); 6554 6555 /* 6556 * Function to initialize a va_filerev field sensibly. 6557 * XXX: Wouldn't a random number make a lot more sense ?? 
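 *
 * The returned value packs the seconds of the boot-relative uptime into
 * the upper 32 bits and the top half of the fractional part into the
 * lower 32 bits, so values are monotonically increasing within a single
 * boot but restart near zero after a reboot.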
6558 */ 6559 u_quad_t 6560 init_va_filerev(void) 6561 { 6562 struct bintime bt; 6563 6564 getbinuptime(&bt); 6565 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6566 } 6567 6568 static int filt_vfsread(struct knote *kn, long hint); 6569 static int filt_vfswrite(struct knote *kn, long hint); 6570 static int filt_vfsvnode(struct knote *kn, long hint); 6571 static void filt_vfsdetach(struct knote *kn); 6572 static struct filterops vfsread_filtops = { 6573 .f_isfd = 1, 6574 .f_detach = filt_vfsdetach, 6575 .f_event = filt_vfsread 6576 }; 6577 static struct filterops vfswrite_filtops = { 6578 .f_isfd = 1, 6579 .f_detach = filt_vfsdetach, 6580 .f_event = filt_vfswrite 6581 }; 6582 static struct filterops vfsvnode_filtops = { 6583 .f_isfd = 1, 6584 .f_detach = filt_vfsdetach, 6585 .f_event = filt_vfsvnode 6586 }; 6587 6588 static void 6589 vfs_knllock(void *arg) 6590 { 6591 struct vnode *vp = arg; 6592 6593 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6594 } 6595 6596 static void 6597 vfs_knlunlock(void *arg) 6598 { 6599 struct vnode *vp = arg; 6600 6601 VOP_UNLOCK(vp); 6602 } 6603 6604 static void 6605 vfs_knl_assert_lock(void *arg, int what) 6606 { 6607 #ifdef DEBUG_VFS_LOCKS 6608 struct vnode *vp = arg; 6609 6610 if (what == LA_LOCKED) 6611 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6612 else 6613 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6614 #endif 6615 } 6616 6617 int 6618 vfs_kqfilter(struct vop_kqfilter_args *ap) 6619 { 6620 struct vnode *vp = ap->a_vp; 6621 struct knote *kn = ap->a_kn; 6622 struct knlist *knl; 6623 6624 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6625 kn->kn_filter != EVFILT_WRITE), 6626 ("READ/WRITE filter on a FIFO leaked through")); 6627 switch (kn->kn_filter) { 6628 case EVFILT_READ: 6629 kn->kn_fop = &vfsread_filtops; 6630 break; 6631 case EVFILT_WRITE: 6632 kn->kn_fop = &vfswrite_filtops; 6633 break; 6634 case EVFILT_VNODE: 6635 kn->kn_fop = &vfsvnode_filtops; 6636 break; 6637 default: 6638 return (EINVAL); 6639 } 6640 6641 kn->kn_hook = (caddr_t)vp; 6642 6643 v_addpollinfo(vp); 6644 if (vp->v_pollinfo == NULL) 6645 return (ENOMEM); 6646 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6647 vhold(vp); 6648 knlist_add(knl, kn, 0); 6649 6650 return (0); 6651 } 6652 6653 /* 6654 * Detach knote from vnode 6655 */ 6656 static void 6657 filt_vfsdetach(struct knote *kn) 6658 { 6659 struct vnode *vp = (struct vnode *)kn->kn_hook; 6660 6661 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6662 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6663 vdrop(vp); 6664 } 6665 6666 /*ARGSUSED*/ 6667 static int 6668 filt_vfsread(struct knote *kn, long hint) 6669 { 6670 struct vnode *vp = (struct vnode *)kn->kn_hook; 6671 off_t size; 6672 int res; 6673 6674 /* 6675 * filesystem is gone, so set the EOF flag and schedule 6676 * the knote for deletion. 
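 *
 * NOTE_REVOKE is posted, for instance, from vop_reclaim_post() earlier
 * in this file; the hint == 0 case covers a kqueue scan that runs after
 * the vnode has already been reclaimed and its type set to VBAD.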
6677 */ 6678 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6679 VI_LOCK(vp); 6680 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6681 VI_UNLOCK(vp); 6682 return (1); 6683 } 6684 6685 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6686 return (0); 6687 6688 VI_LOCK(vp); 6689 kn->kn_data = size - kn->kn_fp->f_offset; 6690 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6691 VI_UNLOCK(vp); 6692 return (res); 6693 } 6694 6695 /*ARGSUSED*/ 6696 static int 6697 filt_vfswrite(struct knote *kn, long hint) 6698 { 6699 struct vnode *vp = (struct vnode *)kn->kn_hook; 6700 6701 VI_LOCK(vp); 6702 6703 /* 6704 * filesystem is gone, so set the EOF flag and schedule 6705 * the knote for deletion. 6706 */ 6707 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6708 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6709 6710 kn->kn_data = 0; 6711 VI_UNLOCK(vp); 6712 return (1); 6713 } 6714 6715 static int 6716 filt_vfsvnode(struct knote *kn, long hint) 6717 { 6718 struct vnode *vp = (struct vnode *)kn->kn_hook; 6719 int res; 6720 6721 VI_LOCK(vp); 6722 if (kn->kn_sfflags & hint) 6723 kn->kn_fflags |= hint; 6724 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6725 kn->kn_flags |= EV_EOF; 6726 VI_UNLOCK(vp); 6727 return (1); 6728 } 6729 res = (kn->kn_fflags != 0); 6730 VI_UNLOCK(vp); 6731 return (res); 6732 } 6733 6734 int 6735 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6736 { 6737 int error; 6738 6739 if (dp->d_reclen > ap->a_uio->uio_resid) 6740 return (ENAMETOOLONG); 6741 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6742 if (error) { 6743 if (ap->a_ncookies != NULL) { 6744 if (ap->a_cookies != NULL) 6745 free(ap->a_cookies, M_TEMP); 6746 ap->a_cookies = NULL; 6747 *ap->a_ncookies = 0; 6748 } 6749 return (error); 6750 } 6751 if (ap->a_ncookies == NULL) 6752 return (0); 6753 6754 KASSERT(ap->a_cookies, 6755 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6756 6757 *ap->a_cookies = realloc(*ap->a_cookies, 6758 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6759 (*ap->a_cookies)[*ap->a_ncookies] = off; 6760 *ap->a_ncookies += 1; 6761 return (0); 6762 } 6763 6764 /* 6765 * The purpose of this routine is to remove granularity from accmode_t, 6766 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6767 * VADMIN and VAPPEND. 6768 * 6769 * If it returns 0, the caller is supposed to continue with the usual 6770 * access checks using 'accmode' as modified by this routine. If it 6771 * returns nonzero value, the caller is supposed to return that value 6772 * as errno. 6773 * 6774 * Note that after this routine runs, accmode may be zero. 6775 */ 6776 int 6777 vfs_unixify_accmode(accmode_t *accmode) 6778 { 6779 /* 6780 * There is no way to specify explicit "deny" rule using 6781 * file mode or POSIX.1e ACLs. 6782 */ 6783 if (*accmode & VEXPLICIT_DENY) { 6784 *accmode = 0; 6785 return (0); 6786 } 6787 6788 /* 6789 * None of these can be translated into usual access bits. 6790 * Also, the common case for NFSv4 ACLs is to not contain 6791 * either of these bits. Caller should check for VWRITE 6792 * on the containing directory instead. 6793 */ 6794 if (*accmode & (VDELETE_CHILD | VDELETE)) 6795 return (EPERM); 6796 6797 if (*accmode & VADMIN_PERMS) { 6798 *accmode &= ~VADMIN_PERMS; 6799 *accmode |= VADMIN; 6800 } 6801 6802 /* 6803 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6804 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
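 *
 * As a worked example (illustrative values only): an NFSv4-style
 * request of VREAD | VREAD_ATTRIBUTES | VWRITE_ACL is reduced to
 * VREAD | VADMIN; VWRITE_ACL is folded into VADMIN above and
 * VREAD_ATTRIBUTES is discarded below.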
6805 */ 6806 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6807 6808 return (0); 6809 } 6810 6811 /* 6812 * Clear out a doomed vnode (if any) and replace it with a new one as long 6813 * as the fs is not being unmounted. Return the root vnode to the caller. 6814 */ 6815 static int __noinline 6816 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6817 { 6818 struct vnode *vp; 6819 int error; 6820 6821 restart: 6822 if (mp->mnt_rootvnode != NULL) { 6823 MNT_ILOCK(mp); 6824 vp = mp->mnt_rootvnode; 6825 if (vp != NULL) { 6826 if (!VN_IS_DOOMED(vp)) { 6827 vrefact(vp); 6828 MNT_IUNLOCK(mp); 6829 error = vn_lock(vp, flags); 6830 if (error == 0) { 6831 *vpp = vp; 6832 return (0); 6833 } 6834 vrele(vp); 6835 goto restart; 6836 } 6837 /* 6838 * Clear the old one. 6839 */ 6840 mp->mnt_rootvnode = NULL; 6841 } 6842 MNT_IUNLOCK(mp); 6843 if (vp != NULL) { 6844 vfs_op_barrier_wait(mp); 6845 vrele(vp); 6846 } 6847 } 6848 error = VFS_CACHEDROOT(mp, flags, vpp); 6849 if (error != 0) 6850 return (error); 6851 if (mp->mnt_vfs_ops == 0) { 6852 MNT_ILOCK(mp); 6853 if (mp->mnt_vfs_ops != 0) { 6854 MNT_IUNLOCK(mp); 6855 return (0); 6856 } 6857 if (mp->mnt_rootvnode == NULL) { 6858 vrefact(*vpp); 6859 mp->mnt_rootvnode = *vpp; 6860 } else { 6861 if (mp->mnt_rootvnode != *vpp) { 6862 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6863 panic("%s: mismatch between vnode returned " 6864 " by VFS_CACHEDROOT and the one cached " 6865 " (%p != %p)", 6866 __func__, *vpp, mp->mnt_rootvnode); 6867 } 6868 } 6869 } 6870 MNT_IUNLOCK(mp); 6871 } 6872 return (0); 6873 } 6874 6875 int 6876 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6877 { 6878 struct mount_pcpu *mpcpu; 6879 struct vnode *vp; 6880 int error; 6881 6882 if (!vfs_op_thread_enter(mp, mpcpu)) 6883 return (vfs_cache_root_fallback(mp, flags, vpp)); 6884 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6885 if (vp == NULL || VN_IS_DOOMED(vp)) { 6886 vfs_op_thread_exit(mp, mpcpu); 6887 return (vfs_cache_root_fallback(mp, flags, vpp)); 6888 } 6889 vrefact(vp); 6890 vfs_op_thread_exit(mp, mpcpu); 6891 error = vn_lock(vp, flags); 6892 if (error != 0) { 6893 vrele(vp); 6894 return (vfs_cache_root_fallback(mp, flags, vpp)); 6895 } 6896 *vpp = vp; 6897 return (0); 6898 } 6899 6900 struct vnode * 6901 vfs_cache_root_clear(struct mount *mp) 6902 { 6903 struct vnode *vp; 6904 6905 /* 6906 * ops > 0 guarantees there is nobody who can see this vnode 6907 */ 6908 MPASS(mp->mnt_vfs_ops > 0); 6909 vp = mp->mnt_rootvnode; 6910 if (vp != NULL) 6911 vn_seqc_write_begin(vp); 6912 mp->mnt_rootvnode = NULL; 6913 return (vp); 6914 } 6915 6916 void 6917 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6918 { 6919 6920 MPASS(mp->mnt_vfs_ops > 0); 6921 vrefact(vp); 6922 mp->mnt_rootvnode = vp; 6923 } 6924 6925 /* 6926 * These are helper functions for filesystems to traverse all 6927 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6928 * 6929 * This interface replaces MNT_VNODE_FOREACH. 6930 */ 6931 6932 struct vnode * 6933 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6934 { 6935 struct vnode *vp; 6936 6937 maybe_yield(); 6938 MNT_ILOCK(mp); 6939 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6940 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6941 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6942 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
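 * The unlocked check only filters out vnodes that are already doomed;
 * any vnode we intend to return is re-checked under its interlock below
 * before the marker is moved past it.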
*/ 6943 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6944 continue; 6945 VI_LOCK(vp); 6946 if (VN_IS_DOOMED(vp)) { 6947 VI_UNLOCK(vp); 6948 continue; 6949 } 6950 break; 6951 } 6952 if (vp == NULL) { 6953 __mnt_vnode_markerfree_all(mvp, mp); 6954 /* MNT_IUNLOCK(mp); -- done in above function */ 6955 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6956 return (NULL); 6957 } 6958 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6959 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6960 MNT_IUNLOCK(mp); 6961 return (vp); 6962 } 6963 6964 struct vnode * 6965 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6966 { 6967 struct vnode *vp; 6968 6969 *mvp = vn_alloc_marker(mp); 6970 MNT_ILOCK(mp); 6971 MNT_REF(mp); 6972 6973 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6974 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6975 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6976 continue; 6977 VI_LOCK(vp); 6978 if (VN_IS_DOOMED(vp)) { 6979 VI_UNLOCK(vp); 6980 continue; 6981 } 6982 break; 6983 } 6984 if (vp == NULL) { 6985 MNT_REL(mp); 6986 MNT_IUNLOCK(mp); 6987 vn_free_marker(*mvp); 6988 *mvp = NULL; 6989 return (NULL); 6990 } 6991 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6992 MNT_IUNLOCK(mp); 6993 return (vp); 6994 } 6995 6996 void 6997 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6998 { 6999 7000 if (*mvp == NULL) { 7001 MNT_IUNLOCK(mp); 7002 return; 7003 } 7004 7005 mtx_assert(MNT_MTX(mp), MA_OWNED); 7006 7007 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7008 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 7009 MNT_REL(mp); 7010 MNT_IUNLOCK(mp); 7011 vn_free_marker(*mvp); 7012 *mvp = NULL; 7013 } 7014 7015 /* 7016 * These are helper functions for filesystems to traverse their 7017 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 7018 */ 7019 static void 7020 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7021 { 7022 7023 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7024 7025 MNT_ILOCK(mp); 7026 MNT_REL(mp); 7027 MNT_IUNLOCK(mp); 7028 vn_free_marker(*mvp); 7029 *mvp = NULL; 7030 } 7031 7032 /* 7033 * Relock the mp mount vnode list lock with the vp vnode interlock in the 7034 * conventional lock order during mnt_vnode_next_lazy iteration. 7035 * 7036 * On entry, the mount vnode list lock is held and the vnode interlock is not. 7037 * The list lock is dropped and reacquired. On success, both locks are held. 7038 * On failure, the mount vnode list lock is held but the vnode interlock is 7039 * not, and the procedure may have yielded. 7040 */ 7041 static bool 7042 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 7043 struct vnode *vp) 7044 { 7045 7046 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 7047 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 7048 ("%s: bad marker", __func__)); 7049 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 7050 ("%s: inappropriate vnode", __func__)); 7051 ASSERT_VI_UNLOCKED(vp, __func__); 7052 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7053 7054 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 7055 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 7056 7057 /* 7058 * Note we may be racing against vdrop which transitioned the hold 7059 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 7060 * if we are the only user after we get the interlock we will just 7061 * vdrop. 
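 *
 * The temporary hold taken right below keeps the vnode from being freed
 * while mnt_listmtx is dropped; it is released again with
 * refcount_release_if_not_last(), and if that would have been the last
 * reference we go through vdropl() in out_lost instead so the vnode is
 * disposed of properly.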
7062 */ 7063 vhold(vp); 7064 mtx_unlock(&mp->mnt_listmtx); 7065 VI_LOCK(vp); 7066 if (VN_IS_DOOMED(vp)) { 7067 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 7068 goto out_lost; 7069 } 7070 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 7071 /* 7072 * There is nothing to do if we are the last user. 7073 */ 7074 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 7075 goto out_lost; 7076 mtx_lock(&mp->mnt_listmtx); 7077 return (true); 7078 out_lost: 7079 vdropl(vp); 7080 maybe_yield(); 7081 mtx_lock(&mp->mnt_listmtx); 7082 return (false); 7083 } 7084 7085 static struct vnode * 7086 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7087 void *cbarg) 7088 { 7089 struct vnode *vp; 7090 7091 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7092 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7093 restart: 7094 vp = TAILQ_NEXT(*mvp, v_lazylist); 7095 while (vp != NULL) { 7096 if (vp->v_type == VMARKER) { 7097 vp = TAILQ_NEXT(vp, v_lazylist); 7098 continue; 7099 } 7100 /* 7101 * See if we want to process the vnode. Note we may encounter a 7102 * long string of vnodes we don't care about and hog the list 7103 * as a result. Check for it and requeue the marker. 7104 */ 7105 VNPASS(!VN_IS_DOOMED(vp), vp); 7106 if (!cb(vp, cbarg)) { 7107 if (!should_yield()) { 7108 vp = TAILQ_NEXT(vp, v_lazylist); 7109 continue; 7110 } 7111 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 7112 v_lazylist); 7113 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 7114 v_lazylist); 7115 mtx_unlock(&mp->mnt_listmtx); 7116 kern_yield(PRI_USER); 7117 mtx_lock(&mp->mnt_listmtx); 7118 goto restart; 7119 } 7120 /* 7121 * Try-lock because this is the wrong lock order. 7122 */ 7123 if (!VI_TRYLOCK(vp) && 7124 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 7125 goto restart; 7126 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 7127 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 7128 ("alien vnode on the lazy list %p %p", vp, mp)); 7129 VNPASS(vp->v_mount == mp, vp); 7130 VNPASS(!VN_IS_DOOMED(vp), vp); 7131 break; 7132 } 7133 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7134 7135 /* Check if we are done */ 7136 if (vp == NULL) { 7137 mtx_unlock(&mp->mnt_listmtx); 7138 mnt_vnode_markerfree_lazy(mvp, mp); 7139 return (NULL); 7140 } 7141 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7142 mtx_unlock(&mp->mnt_listmtx); 7143 ASSERT_VI_LOCKED(vp, "lazy iter"); 7144 return (vp); 7145 } 7146 7147 struct vnode * 7148 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7149 void *cbarg) 7150 { 7151 7152 maybe_yield(); 7153 mtx_lock(&mp->mnt_listmtx); 7154 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7155 } 7156 7157 struct vnode * 7158 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7159 void *cbarg) 7160 { 7161 struct vnode *vp; 7162 7163 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7164 return (NULL); 7165 7166 *mvp = vn_alloc_marker(mp); 7167 MNT_ILOCK(mp); 7168 MNT_REF(mp); 7169 MNT_IUNLOCK(mp); 7170 7171 mtx_lock(&mp->mnt_listmtx); 7172 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7173 if (vp == NULL) { 7174 mtx_unlock(&mp->mnt_listmtx); 7175 mnt_vnode_markerfree_lazy(mvp, mp); 7176 return (NULL); 7177 } 7178 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7179 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7180 } 7181 7182 void 7183 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7184 { 7185 7186 if (*mvp == NULL) 7187 return; 7188 7189 mtx_lock(&mp->mnt_listmtx); 7190 
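	/* Unlink the marker from the lazy list under the list lock before freeing it. */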
TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7191 mtx_unlock(&mp->mnt_listmtx); 7192 mnt_vnode_markerfree_lazy(mvp, mp); 7193 } 7194 7195 int 7196 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 7197 { 7198 7199 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 7200 cnp->cn_flags &= ~NOEXECCHECK; 7201 return (0); 7202 } 7203 7204 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 7205 } 7206 7207 /* 7208 * Do not use this variant unless you have means other than the hold count 7209 * to prevent the vnode from getting freed. 7210 */ 7211 void 7212 vn_seqc_write_begin_locked(struct vnode *vp) 7213 { 7214 7215 ASSERT_VI_LOCKED(vp, __func__); 7216 VNPASS(vp->v_holdcnt > 0, vp); 7217 VNPASS(vp->v_seqc_users >= 0, vp); 7218 vp->v_seqc_users++; 7219 if (vp->v_seqc_users == 1) 7220 seqc_sleepable_write_begin(&vp->v_seqc); 7221 } 7222 7223 void 7224 vn_seqc_write_begin(struct vnode *vp) 7225 { 7226 7227 VI_LOCK(vp); 7228 vn_seqc_write_begin_locked(vp); 7229 VI_UNLOCK(vp); 7230 } 7231 7232 void 7233 vn_seqc_write_end_locked(struct vnode *vp) 7234 { 7235 7236 ASSERT_VI_LOCKED(vp, __func__); 7237 VNPASS(vp->v_seqc_users > 0, vp); 7238 vp->v_seqc_users--; 7239 if (vp->v_seqc_users == 0) 7240 seqc_sleepable_write_end(&vp->v_seqc); 7241 } 7242 7243 void 7244 vn_seqc_write_end(struct vnode *vp) 7245 { 7246 7247 VI_LOCK(vp); 7248 vn_seqc_write_end_locked(vp); 7249 VI_UNLOCK(vp); 7250 } 7251 7252 /* 7253 * Special case handling for allocating and freeing vnodes. 7254 * 7255 * The counter remains unchanged on free so that a doomed vnode will 7256 * keep testing as in modify as long as it is accessible with SMR. 7257 */ 7258 static void 7259 vn_seqc_init(struct vnode *vp) 7260 { 7261 7262 vp->v_seqc = 0; 7263 vp->v_seqc_users = 0; 7264 } 7265 7266 static void 7267 vn_seqc_write_end_free(struct vnode *vp) 7268 { 7269 7270 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7271 VNPASS(vp->v_seqc_users == 1, vp); 7272 } 7273 7274 void 7275 vn_irflag_set_locked(struct vnode *vp, short toset) 7276 { 7277 short flags; 7278 7279 ASSERT_VI_LOCKED(vp, __func__); 7280 flags = vn_irflag_read(vp); 7281 VNASSERT((flags & toset) == 0, vp, 7282 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7283 __func__, flags, toset)); 7284 atomic_store_short(&vp->v_irflag, flags | toset); 7285 } 7286 7287 void 7288 vn_irflag_set(struct vnode *vp, short toset) 7289 { 7290 7291 VI_LOCK(vp); 7292 vn_irflag_set_locked(vp, toset); 7293 VI_UNLOCK(vp); 7294 } 7295 7296 void 7297 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7298 { 7299 short flags; 7300 7301 ASSERT_VI_LOCKED(vp, __func__); 7302 flags = vn_irflag_read(vp); 7303 atomic_store_short(&vp->v_irflag, flags | toset); 7304 } 7305 7306 void 7307 vn_irflag_set_cond(struct vnode *vp, short toset) 7308 { 7309 7310 VI_LOCK(vp); 7311 vn_irflag_set_cond_locked(vp, toset); 7312 VI_UNLOCK(vp); 7313 } 7314 7315 void 7316 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7317 { 7318 short flags; 7319 7320 ASSERT_VI_LOCKED(vp, __func__); 7321 flags = vn_irflag_read(vp); 7322 VNASSERT((flags & tounset) == tounset, vp, 7323 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7324 __func__, flags, tounset)); 7325 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7326 } 7327 7328 void 7329 vn_irflag_unset(struct vnode *vp, short tounset) 7330 { 7331 7332 VI_LOCK(vp); 7333 vn_irflag_unset_locked(vp, tounset); 7334 VI_UNLOCK(vp); 7335 } 7336 7337 int 7338 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) 7339 { 7340 
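	/*
	 * The caller must hold the vnode lock (shared is sufficient) so the
	 * attributes are stable; sizes that do not fit into off_t are
	 * reported as EFBIG rather than silently truncated.
	 */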
struct vattr vattr; 7341 int error; 7342 7343 ASSERT_VOP_LOCKED(vp, __func__); 7344 error = VOP_GETATTR(vp, &vattr, cred); 7345 if (__predict_true(error == 0)) { 7346 if (vattr.va_size <= OFF_MAX) 7347 *size = vattr.va_size; 7348 else 7349 error = EFBIG; 7350 } 7351 return (error); 7352 } 7353 7354 int 7355 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) 7356 { 7357 int error; 7358 7359 VOP_LOCK(vp, LK_SHARED); 7360 error = vn_getsize_locked(vp, size, cred); 7361 VOP_UNLOCK(vp); 7362 return (error); 7363 } 7364 7365 #ifdef INVARIANTS 7366 void 7367 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state) 7368 { 7369 7370 switch (vp->v_state) { 7371 case VSTATE_UNINITIALIZED: 7372 switch (state) { 7373 case VSTATE_CONSTRUCTED: 7374 case VSTATE_DESTROYING: 7375 return; 7376 default: 7377 break; 7378 } 7379 break; 7380 case VSTATE_CONSTRUCTED: 7381 ASSERT_VOP_ELOCKED(vp, __func__); 7382 switch (state) { 7383 case VSTATE_DESTROYING: 7384 return; 7385 default: 7386 break; 7387 } 7388 break; 7389 case VSTATE_DESTROYING: 7390 ASSERT_VOP_ELOCKED(vp, __func__); 7391 switch (state) { 7392 case VSTATE_DEAD: 7393 return; 7394 default: 7395 break; 7396 } 7397 break; 7398 case VSTATE_DEAD: 7399 switch (state) { 7400 case VSTATE_UNINITIALIZED: 7401 return; 7402 default: 7403 break; 7404 } 7405 break; 7406 } 7407 7408 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); 7409 panic("invalid state transition %d -> %d\n", vp->v_state, state); 7410 } 7411 #endif 7412
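/*
 * Illustrative sketch (not compiled): how a filesystem would typically
 * consume the MNT_VNODE_FOREACH_ALL() helpers defined above, e.g. from a
 * VFS_SYNC implementation.  The function name example_sync and the exact
 * vget() flags are assumptions made for this example only; real consumers
 * live in the individual filesystems (see e.g. ffs_sync()).
 */
#if 0
static int
example_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	int error, allerror;

	allerror = 0;
loop:
	/* The iterator hands back each vnode with its interlock held. */
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/* vget() consumes the interlock when LK_INTERLOCK is given. */
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK);
		if (error != 0) {
			if (error == ENOENT) {
				/* The vnode was reclaimed; restart the walk. */
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, curthread);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}
#endif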