/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp, bool isvnlru);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
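 *
 * The current count is exported read-only below as the vfs.vnode.stats.count
 * sysctl, with vfs.numvnodes kept as a legacy alias.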
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
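 * (That estimate follows from MAXVNODES_MAX defined below: 75% of 8388608
 * is about 6.3M vnodes, and 9% of that is roughly 566000.)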
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;

static u_long recycles_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets");

static u_long recycles_free_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t direct_recycles_free_count;
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
    &direct_recycles_free_count,
    "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static __read_mostly bool vnode_can_skip_requeue;
SYSCTL_BOOL(_vfs_vnode_param, OID_AUTO, can_skip_requeue, CTLFLAG_RW,
    &vnode_can_skip_requeue, 0, "Is LRU requeue skippable");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so are only
 * delayed about a third the time that file data is delayed.  Thus, there
 * are SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static bool vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
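	 *
	 * Note that this handler is reachable both as the legacy
	 * kern.maxvnodes knob and as vfs.vnode.param.limit; e.g.
	 * "sysctl vfs.vnode.param.limit=1048576" ends up here (see the
	 * SYSCTL_PROC declarations below; the value is only an example).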
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.
		 * Return EAGAIN because a subsequent call will likely succeed
		 * (since namei will create a new vnode if necessary).
		 */
		error = EAGAIN;
		goto putvnode;
	}

	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define	vnsz2log	8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
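 * (512GB is 536870912 KB; at 64:1 that works out to the 8M vnodes encoded
 * in MAXVNODES_MAX below.)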
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
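 *
 * This is the UMA fini callback paired with vnode_init() above; it runs
 * when the item is released from the zone, not on every individual free.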
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ	148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ	92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
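	 *
	 * Note that buf_trie_alloc() above allocates with M_NOWAIT, so the
	 * preallocated, never-freed (UMA_ZONE_NOFREE) pool created below is
	 * what keeps those inserts from failing.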
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Eventually, mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs                      var fs
 * / vnode lock         A       / vnode lock (/var)             D
 * /var vnode lock      B       /log vnode lock (/var/log)      E
 * vfs_busy lock        C       vfs_busy lock                   F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *        |
 *        +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.
	 * If the thread doing the unmounting fails, it will clear the
	 * MNTK_UNMOUNT flag before waking us up, indicating that this mount
	 * point has survived the unmount attempt and vfs_busy should retry.
	 * Otherwise the unmounter thread will set the MNTK_REFEXPIRE flag in
	 * addition to MNTK_UNMOUNT, indicating that the mount point is about
	 * to be really destroyed.  vfs_busy needs to release its reference on
	 * the mount point in this case and return with ENOENT, telling the
	 * caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In worst case we may
 * get pointer to unmounted or even different filesystem, so we have to
 * check what we got, and go the slow way if so.
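 *
 * For reference, the cache slot is picked by folding the two fsid words,
 * as done at the top of the function below:
 *   hash = val[0] ^ val[1]; slot = ((hash >> 16) ^ hash) & (FSID_CACHE_SIZE - 1);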
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
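 *
 * In the loop below, val[1] is simply the filesystem type number, while
 * val[0] packs the low byte of that type together with mntid_base into a
 * device number via makedev(255, ...).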
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
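 *
 * The scan starts at vnode_list_reclaim_marker and the marker is requeued
 * after each examined candidate, so progress is preserved across drops of
 * vnode_list_mtx.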
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *                       entries if this argument is true
 * @param trigger        Only reclaim vnodes with fewer than this many resident
 *                       pages.
 * @param target         How many vnodes to reclaim.
 * @return               The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * for more free vnodes, not reduce their count.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		recycles_count++;
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_free_per_call = 10000;
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine (legacy)");
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
    &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to recycle requested amount of free vnodes.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_free_per_call)
		count = max_free_per_call;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
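			 *
			 * This is done at most once per call, tracked by
			 * the 'retried' flag.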
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled but it claims otherwise to the caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out if writes
		 * are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp, isvnlru);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
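 *
 * The wrappers below either take or expect the lock on entry and rely on
 * vnlru_free_impl() having dropped it before returning.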
 */
static int
vnlru_free_locked_direct(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_locked_vnlru(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_vnlru(int count)
{

	mtx_lock(&vnode_list_mtx);
	return (vnlru_free_locked_vnlru(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11;	/* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru awakened due to vnode shortage");

#define VNLRU_COUNT_SLOP 100

/*
 * The main freevnodes counter is only updated when a counter local to CPU
 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are conditionally
 * walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by a significant margin in practice.
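 * (Each per-CPU counter is rolled up once it reaches +/-VNLRU_FREEVNODES_SLOP,
 * so the aggregate drift is roughly bounded by ncpus times that slop.)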
 */
#define VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes, rfreevnodes_old;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);
	rfreevnodes_old = atomic_load_long(&freevnodes_old);

	if (rfreevnodes > rfreevnodes_old)
		slop = rfreevnodes - rfreevnodes_old;
	else
		slop = rfreevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	CPU_FOREACH(cpu) {
		rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	atomic_store_long(&freevnodes_old, rfreevnodes);
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		vnlruproc_kicks++;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlru_read_freevnodes() > wantfreevnodes)
		return;

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

	if (vnlruproc_sig) {
		vnlruproc_sig = 0;
		wakeup(&vnlruproc_sig);
	}
	msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in presence of tons of free
 * vnodes, but without having to rewrite the machinery at this time.  Said
 * behavior boils down to continuously trying to reclaim all kinds of vnodes
 * (cycling through all levels of "force") when the count is transiently above
 * limit.  This happens a lot when all vnodes are used up and vn_alloc
 * speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per each tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds [it got further down with later
 * changes].
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling), the above only tries to get it out of
 * the way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
static long
vnlru_proc_light_pick(void)
{
	u_long rnumvnodes, rfreevnodes;

	if (vstir || vnlruproc_sig == 1)
		return (-1);

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = vnlru_read_freevnodes();

	/*
	 * vnode limit might have changed and now we may be at a significant
	 * excess.  Bail if we can't sort it out with free vnodes.
	 *
	 * Due to atomic updates the count can legitimately go above
	 * the limit for a short period, don't bother doing anything in
	 * that case.
	 */
	if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
		    rfreevnodes <= wantfreevnodes) {
			return (-1);
		}

		return (rnumvnodes - desiredvnodes);
	}

	/*
	 * Don't try to reach wantfreevnodes target if there are too few vnodes
	 * to begin with.
	 */
	if (rnumvnodes < wantfreevnodes) {
		return (0);
	}

	if (rfreevnodes < wantfreevnodes) {
		return (-1);
	}

	return (0);
}

static bool
vnlru_proc_light(void)
{
	long freecount;

	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);

	if (freecount != 0) {
		vnlru_free_vnlru(freecount);
	}

	mtx_lock(&vnode_list_mtx);
	vnlru_proc_sleep();
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (true);
}

static u_long uma_reclaim_calls;
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS,
    &uma_reclaim_calls, 0, "Number of calls to uma_reclaim");

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);

		if (force == 0 && vnlru_proc_light())
			continue;

		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding free vnodes.
1768 */ 1769 if (rnumvnodes > desiredvnodes + 10) { 1770 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes); 1771 mtx_lock(&vnode_list_mtx); 1772 rnumvnodes = atomic_load_long(&numvnodes); 1773 } 1774 /* 1775 * Sleep if the vnode cache is in a good state. This is 1776 * when it is not over-full and has space for about a 4% 1777 * or 9% expansion (by growing its size or moderately 1778 * reducing the free vnode count). Otherwise, try to reclaim 1779 * space for a 10% expansion. 1780 */ 1781 if (vstir && force == 0) { 1782 force = 1; 1783 vstir = false; 1784 } 1785 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1786 vnlru_proc_sleep(); 1787 continue; 1788 } 1789 rfreevnodes = vnlru_read_freevnodes(); 1790 1791 onumvnodes = rnumvnodes; 1792 /* 1793 * Calculate parameters for recycling. These are the same 1794 * throughout the loop to give some semblance of fairness. 1795 * The trigger point is to avoid recycling vnodes with lots 1796 * of resident pages. We aren't trying to free memory; we 1797 * are trying to recycle or at least free vnodes. 1798 */ 1799 if (rnumvnodes <= desiredvnodes) 1800 usevnodes = rnumvnodes - rfreevnodes; 1801 else 1802 usevnodes = rnumvnodes; 1803 if (usevnodes <= 0) 1804 usevnodes = 1; 1805 /* 1806 * The trigger value is chosen to give a conservatively 1807 * large value to ensure that it alone doesn't prevent 1808 * making progress. The value can easily be so large that 1809 * it is effectively infinite in some congested and 1810 * misconfigured cases, and this is necessary. Normally 1811 * it is about 8 to 100 (pages), which is quite large. 1812 */ 1813 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1814 if (force < 2) 1815 trigger = vsmalltrigger; 1816 reclaim_nc_src = force >= 3; 1817 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1818 target = target / 10 + 1; 1819 done = vlrureclaim(reclaim_nc_src, trigger, target); 1820 mtx_unlock(&vnode_list_mtx); 1821 /* 1822 * Total number of vnodes can transiently go slightly above the 1823 * limit (see vn_alloc_hard), no need to call uma_reclaim if 1824 * this happens. 1825 */ 1826 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes && 1827 numvnodes <= desiredvnodes) { 1828 uma_reclaim_calls++; 1829 uma_reclaim(UMA_RECLAIM_DRAIN); 1830 } 1831 if (done == 0) { 1832 if (force == 0 || force == 1) { 1833 force = 2; 1834 continue; 1835 } 1836 if (force == 2) { 1837 force = 3; 1838 continue; 1839 } 1840 want_reread = true; 1841 force = 0; 1842 vnlru_nowhere++; 1843 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1844 } else { 1845 want_reread = true; 1846 kern_yield(PRI_USER); 1847 } 1848 } 1849 } 1850 1851 static struct kproc_desc vnlru_kp = { 1852 "vnlru", 1853 vnlru_proc, 1854 &vnlruproc 1855 }; 1856 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1857 &vnlru_kp); 1858 1859 /* 1860 * Routines having to do with the management of the vnode table. 1861 */ 1862 1863 /* 1864 * Try to recycle a freed vnode. 1865 */ 1866 static int 1867 vtryrecycle(struct vnode *vp, bool isvnlru) 1868 { 1869 struct mount *vnmp; 1870 1871 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1872 VNPASS(vp->v_holdcnt > 0, vp); 1873 /* 1874 * This vnode may be found and locked via some other list; if so, we 1875 * can't recycle it yet.
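 * The lock is taken with LK_NOWAIT below: a contended vnode is simply skipped rather than stalling the recycling pass.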
1876 */ 1877 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1878 CTR2(KTR_VFS, 1879 "%s: impossible to recycle, vp %p lock is already held", 1880 __func__, vp); 1881 vdrop_recycle(vp); 1882 return (EWOULDBLOCK); 1883 } 1884 /* 1885 * Don't recycle if its filesystem is being suspended. 1886 */ 1887 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1888 VOP_UNLOCK(vp); 1889 CTR2(KTR_VFS, 1890 "%s: impossible to recycle, cannot start the write for %p", 1891 __func__, vp); 1892 vdrop_recycle(vp); 1893 return (EBUSY); 1894 } 1895 /* 1896 * If we got this far, we need to acquire the interlock and see if 1897 * anyone picked up this vnode from another list. If not, we will 1898 * mark it with DOOMED via vgonel() so that anyone who does find it 1899 * will skip over it. 1900 */ 1901 VI_LOCK(vp); 1902 if (vp->v_usecount) { 1903 VOP_UNLOCK(vp); 1904 vdropl_recycle(vp); 1905 vn_finished_write(vnmp); 1906 CTR2(KTR_VFS, 1907 "%s: impossible to recycle, %p is already referenced", 1908 __func__, vp); 1909 return (EBUSY); 1910 } 1911 if (!VN_IS_DOOMED(vp)) { 1912 if (isvnlru) 1913 recycles_free_count++; 1914 else 1915 counter_u64_add(direct_recycles_free_count, 1); 1916 vgonel(vp); 1917 } 1918 VOP_UNLOCK(vp); 1919 vdropl_recycle(vp); 1920 vn_finished_write(vnmp); 1921 return (0); 1922 } 1923 1924 /* 1925 * Allocate a new vnode. 1926 * 1927 * The operation never returns an error. Returning an error was disabled 1928 * in r145385 (dated 2005) with the following comment: 1929 * 1930 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1931 * 1932 * Given the age of this commit (almost 15 years at the time of writing this 1933 * comment) restoring the ability to fail requires a significant audit of 1934 * all codepaths. 1935 * 1936 * The routine can try to free a vnode or stall for up to 1 second waiting for 1937 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1938 */ 1939 static u_long vn_alloc_cyclecount; 1940 static u_long vn_alloc_sleeps; 1941 1942 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1943 "Number of times vnode allocation blocked waiting on vnlru"); 1944 1945 static struct vnode * __noinline 1946 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped) 1947 { 1948 u_long rfreevnodes; 1949 1950 if (bumped) { 1951 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) { 1952 atomic_subtract_long(&numvnodes, 1); 1953 bumped = false; 1954 } 1955 } 1956 1957 mtx_lock(&vnode_list_mtx); 1958 1959 if (vn_alloc_cyclecount != 0) { 1960 rnumvnodes = atomic_load_long(&numvnodes); 1961 if (rnumvnodes + 1 < desiredvnodes) { 1962 vn_alloc_cyclecount = 0; 1963 mtx_unlock(&vnode_list_mtx); 1964 goto alloc; 1965 } 1966 1967 rfreevnodes = vnlru_read_freevnodes(); 1968 if (rfreevnodes < wantfreevnodes) { 1969 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1970 vn_alloc_cyclecount = 0; 1971 vstir = true; 1972 } 1973 } else { 1974 vn_alloc_cyclecount = 0; 1975 } 1976 } 1977 1978 /* 1979 * Grow the vnode cache if it will not be above its target max after 1980 * growing. Otherwise, if there is at least one free vnode, try to 1981 * reclaim 1 item from it before growing the cache (possibly above its 1982 * target max if the reclamation failed or is delayed). 1983 */ 1984 if (vnlru_free_locked_direct(1) > 0) 1985 goto alloc; 1986 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1987 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1988 /* 1989 * Wait for space for a new vnode. 
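 * Undo our speculative bump of numvnodes first, so that the count observed by vnlru while we sleep only covers vnodes which actually exist.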
1990 */ 1991 if (bumped) { 1992 atomic_subtract_long(&numvnodes, 1); 1993 bumped = false; 1994 } 1995 mtx_lock(&vnode_list_mtx); 1996 vnlru_kick_locked(); 1997 vn_alloc_sleeps++; 1998 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1999 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 2000 vnlru_read_freevnodes() > 1) 2001 vnlru_free_locked_direct(1); 2002 else 2003 mtx_unlock(&vnode_list_mtx); 2004 } 2005 alloc: 2006 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 2007 if (!bumped) 2008 atomic_add_long(&numvnodes, 1); 2009 vnlru_kick_cond(); 2010 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2011 } 2012 2013 static struct vnode * 2014 vn_alloc(struct mount *mp) 2015 { 2016 u_long rnumvnodes; 2017 2018 if (__predict_false(vn_alloc_cyclecount != 0)) 2019 return (vn_alloc_hard(mp, 0, false)); 2020 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 2021 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 2022 return (vn_alloc_hard(mp, rnumvnodes, true)); 2023 } 2024 2025 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2026 } 2027 2028 static void 2029 vn_free(struct vnode *vp) 2030 { 2031 2032 atomic_subtract_long(&numvnodes, 1); 2033 uma_zfree_smr(vnode_zone, vp); 2034 } 2035 2036 /* 2037 * Allocate a new vnode. 2038 */ 2039 int 2040 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 2041 struct vnode **vpp) 2042 { 2043 struct vnode *vp; 2044 struct thread *td; 2045 struct lock_object *lo; 2046 2047 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 2048 2049 KASSERT(vops->registered, 2050 ("%s: not registered vector op %p\n", __func__, vops)); 2051 cache_validate_vop_vector(mp, vops); 2052 2053 td = curthread; 2054 if (td->td_vp_reserved != NULL) { 2055 vp = td->td_vp_reserved; 2056 td->td_vp_reserved = NULL; 2057 } else { 2058 vp = vn_alloc(mp); 2059 } 2060 counter_u64_add(vnodes_created, 1); 2061 2062 vn_set_state(vp, VSTATE_UNINITIALIZED); 2063 2064 /* 2065 * Locks are given the generic name "vnode" when created. 2066 * Follow the historic practice of using the filesystem 2067 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc. 2068 * 2069 * Locks live in a witness group keyed on their name. Thus, 2070 * when a lock is renamed, it must also move from the witness 2071 * group of its old name to the witness group of its new name. 2072 * 2073 * The change only needs to be made when the vnode moves 2074 * from one filesystem type to another. We ensure that each 2075 * filesystem uses a single static name pointer for its tag so 2076 * that we can compare pointers rather than doing a strcmp(). 2077 */ 2078 lo = &vp->v_vnlock->lock_object; 2079 #ifdef WITNESS 2080 if (lo->lo_name != tag) { 2081 #endif 2082 lo->lo_name = tag; 2083 #ifdef WITNESS 2084 WITNESS_DESTROY(lo); 2085 WITNESS_INIT(lo, tag); 2086 } 2087 #endif 2088 /* 2089 * By default, don't allow shared locks unless filesystems opt-in. 2090 */ 2091 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 2092 /* 2093 * Finalize various vnode identity bits.
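 * (vnode type, vop vector, reference counters, seqc and bufobj setup done below).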
2094 */ 2095 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2096 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2097 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2098 vp->v_type = VNON; 2099 vp->v_op = vops; 2100 vp->v_irflag = 0; 2101 v_init_counters(vp); 2102 vn_seqc_init(vp); 2103 vp->v_bufobj.bo_ops = &buf_ops_bio; 2104 #ifdef DIAGNOSTIC 2105 if (mp == NULL && vops != &dead_vnodeops) 2106 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2107 #endif 2108 #ifdef MAC 2109 mac_vnode_init(vp); 2110 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2111 mac_vnode_associate_singlelabel(mp, vp); 2112 #endif 2113 if (mp != NULL) { 2114 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2115 } 2116 2117 /* 2118 * For the filesystems which do not use vfs_hash_insert(), 2119 * still initialize v_hash to have vfs_hash_index() useful. 2120 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2121 * its own hashing. 2122 */ 2123 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2124 2125 *vpp = vp; 2126 return (0); 2127 } 2128 2129 void 2130 getnewvnode_reserve(void) 2131 { 2132 struct thread *td; 2133 2134 td = curthread; 2135 MPASS(td->td_vp_reserved == NULL); 2136 td->td_vp_reserved = vn_alloc(NULL); 2137 } 2138 2139 void 2140 getnewvnode_drop_reserve(void) 2141 { 2142 struct thread *td; 2143 2144 td = curthread; 2145 if (td->td_vp_reserved != NULL) { 2146 vn_free(td->td_vp_reserved); 2147 td->td_vp_reserved = NULL; 2148 } 2149 } 2150 2151 static void __noinline 2152 freevnode(struct vnode *vp) 2153 { 2154 struct bufobj *bo; 2155 2156 /* 2157 * The vnode has been marked for destruction, so free it. 2158 * 2159 * The vnode will be returned to the zone where it will 2160 * normally remain until it is needed for another vnode. We 2161 * need to cleanup (or verify that the cleanup has already 2162 * been done) any residual data left from its current use 2163 * so as not to contaminate the freshly allocated vnode. 2164 */ 2165 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2166 /* 2167 * Paired with vgone. 2168 */ 2169 vn_seqc_write_end_free(vp); 2170 2171 bo = &vp->v_bufobj; 2172 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2173 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2174 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2175 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2176 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2177 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2178 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2179 ("clean blk trie not empty")); 2180 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2181 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2182 ("dirty blk trie not empty")); 2183 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2184 ("Dangling rangelock waiters")); 2185 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2186 ("Leaked inactivation")); 2187 VI_UNLOCK(vp); 2188 cache_assert_no_entries(vp); 2189 2190 #ifdef MAC 2191 mac_vnode_destroy(vp); 2192 #endif 2193 if (vp->v_pollinfo != NULL) { 2194 /* 2195 * Use LK_NOWAIT to shut up witness about the lock. We may get 2196 * here while having another vnode locked when trying to 2197 * satisfy a lookup and needing to recycle. 
2198 */ 2199 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2200 destroy_vpollinfo(vp->v_pollinfo); 2201 VOP_UNLOCK(vp); 2202 vp->v_pollinfo = NULL; 2203 } 2204 vp->v_mountedhere = NULL; 2205 vp->v_unpcb = NULL; 2206 vp->v_rdev = NULL; 2207 vp->v_fifoinfo = NULL; 2208 vp->v_iflag = 0; 2209 vp->v_vflag = 0; 2210 bo->bo_flag = 0; 2211 vn_free(vp); 2212 } 2213 2214 /* 2215 * Delete from old mount point vnode list, if on one. 2216 */ 2217 static void 2218 delmntque(struct vnode *vp) 2219 { 2220 struct mount *mp; 2221 2222 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2223 2224 mp = vp->v_mount; 2225 MNT_ILOCK(mp); 2226 VI_LOCK(vp); 2227 vp->v_mount = NULL; 2228 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2229 ("bad mount point vnode list size")); 2230 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2231 mp->mnt_nvnodelistsize--; 2232 MNT_REL(mp); 2233 MNT_IUNLOCK(mp); 2234 /* 2235 * The caller expects the interlock to be still held. 2236 */ 2237 ASSERT_VI_LOCKED(vp, __func__); 2238 } 2239 2240 static int 2241 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2242 { 2243 2244 KASSERT(vp->v_mount == NULL, 2245 ("insmntque: vnode already on per mount vnode list")); 2246 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2247 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2248 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2249 } else { 2250 KASSERT(!dtr, 2251 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2252 __func__)); 2253 } 2254 2255 /* 2256 * We acquire the vnode interlock early to ensure that the 2257 * vnode cannot be recycled by another process releasing a 2258 * holdcnt on it before we get it on both the vnode list 2259 * and the active vnode list. The mount mutex protects only 2260 * manipulation of the vnode list and the vnode freelist 2261 * mutex protects only manipulation of the active vnode list. 2262 * Hence the need to hold the vnode interlock throughout. 2263 */ 2264 MNT_ILOCK(mp); 2265 VI_LOCK(vp); 2266 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2267 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2268 mp->mnt_nvnodelistsize == 0)) && 2269 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2270 VI_UNLOCK(vp); 2271 MNT_IUNLOCK(mp); 2272 if (dtr) { 2273 vp->v_data = NULL; 2274 vp->v_op = &dead_vnodeops; 2275 vgone(vp); 2276 vput(vp); 2277 } 2278 return (EBUSY); 2279 } 2280 vp->v_mount = mp; 2281 MNT_REF(mp); 2282 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2283 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2284 ("neg mount point vnode list size")); 2285 mp->mnt_nvnodelistsize++; 2286 VI_UNLOCK(vp); 2287 MNT_IUNLOCK(mp); 2288 return (0); 2289 } 2290 2291 /* 2292 * Insert into list of vnodes for the new mount point, if available. 2293 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2294 * leaves handling of the vnode to the caller. 2295 */ 2296 int 2297 insmntque(struct vnode *vp, struct mount *mp) 2298 { 2299 return (insmntque1_int(vp, mp, true)); 2300 } 2301 2302 int 2303 insmntque1(struct vnode *vp, struct mount *mp) 2304 { 2305 return (insmntque1_int(vp, mp, false)); 2306 } 2307 2308 /* 2309 * Flush out and invalidate all buffers associated with a bufobj 2310 * Called with the underlying object locked. 
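 * When V_SAVE is passed, dirty buffers are written out before being released; without it they are simply discarded.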
2311 */ 2312 int 2313 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2314 { 2315 int error; 2316 2317 BO_LOCK(bo); 2318 if (flags & V_SAVE) { 2319 error = bufobj_wwait(bo, slpflag, slptimeo); 2320 if (error) { 2321 BO_UNLOCK(bo); 2322 return (error); 2323 } 2324 if (bo->bo_dirty.bv_cnt > 0) { 2325 BO_UNLOCK(bo); 2326 do { 2327 error = BO_SYNC(bo, MNT_WAIT); 2328 } while (error == ERELOOKUP); 2329 if (error != 0) 2330 return (error); 2331 BO_LOCK(bo); 2332 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2333 BO_UNLOCK(bo); 2334 return (EBUSY); 2335 } 2336 } 2337 } 2338 /* 2339 * If you alter this loop please notice that interlock is dropped and 2340 * reacquired in flushbuflist. Special care is needed to ensure that 2341 * no race conditions occur from this. 2342 */ 2343 do { 2344 error = flushbuflist(&bo->bo_clean, 2345 flags, bo, slpflag, slptimeo); 2346 if (error == 0 && !(flags & V_CLEANONLY)) 2347 error = flushbuflist(&bo->bo_dirty, 2348 flags, bo, slpflag, slptimeo); 2349 if (error != 0 && error != EAGAIN) { 2350 BO_UNLOCK(bo); 2351 return (error); 2352 } 2353 } while (error != 0); 2354 2355 /* 2356 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2357 * have write I/O in-progress but if there is a VM object then the 2358 * VM object can also have read-I/O in-progress. 2359 */ 2360 do { 2361 bufobj_wwait(bo, 0, 0); 2362 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2363 BO_UNLOCK(bo); 2364 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2365 BO_LOCK(bo); 2366 } 2367 } while (bo->bo_numoutput > 0); 2368 BO_UNLOCK(bo); 2369 2370 /* 2371 * Destroy the copy in the VM cache, too. 2372 */ 2373 if (bo->bo_object != NULL && 2374 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2375 VM_OBJECT_WLOCK(bo->bo_object); 2376 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2377 OBJPR_CLEANONLY : 0); 2378 VM_OBJECT_WUNLOCK(bo->bo_object); 2379 } 2380 2381 #ifdef INVARIANTS 2382 BO_LOCK(bo); 2383 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2384 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2385 bo->bo_clean.bv_cnt > 0)) 2386 panic("vinvalbuf: flush failed"); 2387 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2388 bo->bo_dirty.bv_cnt > 0) 2389 panic("vinvalbuf: flush dirty failed"); 2390 BO_UNLOCK(bo); 2391 #endif 2392 return (0); 2393 } 2394 2395 /* 2396 * Flush out and invalidate all buffers associated with a vnode. 2397 * Called with the underlying object locked. 2398 */ 2399 int 2400 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2401 { 2402 2403 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2404 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2405 if (vp->v_object != NULL && vp->v_object->handle != vp) 2406 return (0); 2407 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2408 } 2409 2410 /* 2411 * Flush out buffers on the specified list. 2412 * 2413 */ 2414 static int 2415 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2416 int slptimeo) 2417 { 2418 struct buf *bp, *nbp; 2419 int retval, error; 2420 daddr_t lblkno; 2421 b_xflags_t xflags; 2422 2423 ASSERT_BO_WLOCKED(bo); 2424 2425 retval = 0; 2426 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2427 /* 2428 * If we are flushing both V_NORMAL and V_ALT buffers then 2429 * do not skip any buffers. If we are flushing only V_NORMAL 2430 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2431 * flushing only V_ALT buffers then skip buffers not marked 2432 * as BX_ALTDATA. 2433 */ 2434 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2435 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2436 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2437 continue; 2438 } 2439 if (nbp != NULL) { 2440 lblkno = nbp->b_lblkno; 2441 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2442 } 2443 retval = EAGAIN; 2444 error = BUF_TIMELOCK(bp, 2445 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2446 "flushbuf", slpflag, slptimeo); 2447 if (error) { 2448 BO_LOCK(bo); 2449 return (error != ENOLCK ? error : EAGAIN); 2450 } 2451 KASSERT(bp->b_bufobj == bo, 2452 ("bp %p wrong b_bufobj %p should be %p", 2453 bp, bp->b_bufobj, bo)); 2454 /* 2455 * XXX Since there are no node locks for NFS, I 2456 * believe there is a slight chance that a delayed 2457 * write will occur while sleeping just above, so 2458 * check for it. 2459 */ 2460 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2461 (flags & V_SAVE)) { 2462 bremfree(bp); 2463 bp->b_flags |= B_ASYNC; 2464 bwrite(bp); 2465 BO_LOCK(bo); 2466 return (EAGAIN); /* XXX: why not loop ? */ 2467 } 2468 bremfree(bp); 2469 bp->b_flags |= (B_INVAL | B_RELBUF); 2470 bp->b_flags &= ~B_ASYNC; 2471 brelse(bp); 2472 BO_LOCK(bo); 2473 if (nbp == NULL) 2474 break; 2475 nbp = gbincore(bo, lblkno); 2476 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2477 != xflags) 2478 break; /* nbp invalid */ 2479 } 2480 return (retval); 2481 } 2482 2483 int 2484 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2485 { 2486 struct buf *bp; 2487 int error; 2488 daddr_t lblkno; 2489 2490 ASSERT_BO_LOCKED(bo); 2491 2492 for (lblkno = startn;;) { 2493 again: 2494 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2495 if (bp == NULL || bp->b_lblkno >= endn || 2496 bp->b_lblkno < startn) 2497 break; 2498 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2499 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2500 if (error != 0) { 2501 BO_RLOCK(bo); 2502 if (error == ENOLCK) 2503 goto again; 2504 return (error); 2505 } 2506 KASSERT(bp->b_bufobj == bo, 2507 ("bp %p wrong b_bufobj %p should be %p", 2508 bp, bp->b_bufobj, bo)); 2509 lblkno = bp->b_lblkno + 1; 2510 if ((bp->b_flags & B_MANAGED) == 0) 2511 bremfree(bp); 2512 bp->b_flags |= B_RELBUF; 2513 /* 2514 * In the VMIO case, use the B_NOREUSE flag to hint that the 2515 * pages backing each buffer in the range are unlikely to be 2516 * reused. Dirty buffers will have the hint applied once 2517 * they've been written. 2518 */ 2519 if ((bp->b_flags & B_VMIO) != 0) 2520 bp->b_flags |= B_NOREUSE; 2521 brelse(bp); 2522 BO_RLOCK(bo); 2523 } 2524 return (0); 2525 } 2526 2527 /* 2528 * Truncate a file's buffer and pages to a specified length. This 2529 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2530 * sync activity. 2531 */ 2532 int 2533 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2534 { 2535 struct buf *bp, *nbp; 2536 struct bufobj *bo; 2537 daddr_t startlbn; 2538 2539 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2540 vp, blksize, (uintmax_t)length); 2541 2542 /* 2543 * Round up to the *next* lbn. 
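 * For example, with a blksize of 16384 a length of 1 gives startlbn 1, keeping the block which holds the last valid byte, while a length of 0 gives startlbn 0 and drops every buffer.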
2544 */ 2545 startlbn = howmany(length, blksize); 2546 2547 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2548 2549 bo = &vp->v_bufobj; 2550 restart_unlocked: 2551 BO_LOCK(bo); 2552 2553 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2554 ; 2555 2556 if (length > 0) { 2557 /* 2558 * Write out vnode metadata, e.g. indirect blocks. 2559 */ 2560 restartsync: 2561 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2562 if (bp->b_lblkno >= 0) 2563 continue; 2564 /* 2565 * Since we hold the vnode lock this should only 2566 * fail if we're racing with the buf daemon. 2567 */ 2568 if (BUF_LOCK(bp, 2569 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2570 BO_LOCKPTR(bo)) == ENOLCK) 2571 goto restart_unlocked; 2572 2573 VNASSERT((bp->b_flags & B_DELWRI), vp, 2574 ("buf(%p) on dirty queue without DELWRI", bp)); 2575 2576 bremfree(bp); 2577 bawrite(bp); 2578 BO_LOCK(bo); 2579 goto restartsync; 2580 } 2581 } 2582 2583 bufobj_wwait(bo, 0, 0); 2584 BO_UNLOCK(bo); 2585 vnode_pager_setsize(vp, length); 2586 2587 return (0); 2588 } 2589 2590 /* 2591 * Invalidate the cached pages of a file's buffer within the range of block 2592 * numbers [startlbn, endlbn). 2593 */ 2594 void 2595 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2596 int blksize) 2597 { 2598 struct bufobj *bo; 2599 off_t start, end; 2600 2601 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2602 2603 start = blksize * startlbn; 2604 end = blksize * endlbn; 2605 2606 bo = &vp->v_bufobj; 2607 BO_LOCK(bo); 2608 MPASS(blksize == bo->bo_bsize); 2609 2610 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2611 ; 2612 2613 BO_UNLOCK(bo); 2614 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2615 } 2616 2617 static int 2618 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2619 daddr_t startlbn, daddr_t endlbn) 2620 { 2621 struct buf *bp, *nbp; 2622 bool anyfreed; 2623 2624 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2625 ASSERT_BO_LOCKED(bo); 2626 2627 do { 2628 anyfreed = false; 2629 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2630 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2631 continue; 2632 if (BUF_LOCK(bp, 2633 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2634 BO_LOCKPTR(bo)) == ENOLCK) { 2635 BO_LOCK(bo); 2636 return (EAGAIN); 2637 } 2638 2639 bremfree(bp); 2640 bp->b_flags |= B_INVAL | B_RELBUF; 2641 bp->b_flags &= ~B_ASYNC; 2642 brelse(bp); 2643 anyfreed = true; 2644 2645 BO_LOCK(bo); 2646 if (nbp != NULL && 2647 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2648 nbp->b_vp != vp || 2649 (nbp->b_flags & B_DELWRI) != 0)) 2650 return (EAGAIN); 2651 } 2652 2653 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2654 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2655 continue; 2656 if (BUF_LOCK(bp, 2657 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2658 BO_LOCKPTR(bo)) == ENOLCK) { 2659 BO_LOCK(bo); 2660 return (EAGAIN); 2661 } 2662 bremfree(bp); 2663 bp->b_flags |= B_INVAL | B_RELBUF; 2664 bp->b_flags &= ~B_ASYNC; 2665 brelse(bp); 2666 anyfreed = true; 2667 2668 BO_LOCK(bo); 2669 if (nbp != NULL && 2670 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2671 (nbp->b_vp != vp) || 2672 (nbp->b_flags & B_DELWRI) == 0)) 2673 return (EAGAIN); 2674 } 2675 } while (anyfreed); 2676 return (0); 2677 } 2678 2679 static void 2680 buf_vlist_remove(struct buf *bp) 2681 { 2682 struct bufv *bv; 2683 b_xflags_t flags; 2684 2685 flags = bp->b_xflags; 2686 2687 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2688 
ASSERT_BO_WLOCKED(bp->b_bufobj); 2689 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2690 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2691 ("%s: buffer %p has invalid queue state", __func__, bp)); 2692 2693 if ((flags & BX_VNDIRTY) != 0) 2694 bv = &bp->b_bufobj->bo_dirty; 2695 else 2696 bv = &bp->b_bufobj->bo_clean; 2697 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2698 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2699 bv->bv_cnt--; 2700 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2701 } 2702 2703 /* 2704 * Add the buffer to the sorted clean or dirty block list. Return zero on 2705 * success, EEXIST if a buffer with this identity already exists, or another 2706 * error on allocation failure. 2707 */ 2708 static inline int 2709 buf_vlist_find_or_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2710 { 2711 struct bufv *bv; 2712 struct buf *n; 2713 int error; 2714 2715 ASSERT_BO_WLOCKED(bo); 2716 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2717 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2718 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2719 ("dead bo %p", bo)); 2720 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags, 2721 ("buf_vlist_add: b_xflags %#x not set on bp %p", xflags, bp)); 2722 2723 if (xflags & BX_VNDIRTY) 2724 bv = &bo->bo_dirty; 2725 else 2726 bv = &bo->bo_clean; 2727 2728 error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, &n); 2729 if (n == NULL) { 2730 KASSERT(error != EEXIST, 2731 ("buf_vlist_add: EEXIST but no existing buf found: bp %p", 2732 bp)); 2733 } else { 2734 KASSERT((uint64_t)n->b_lblkno <= (uint64_t)bp->b_lblkno, 2735 ("buf_vlist_add: out of order insert/lookup: bp %p n %p", 2736 bp, n)); 2737 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST), 2738 ("buf_vlist_add: inconsistent result for existing buf: " 2739 "error %d bp %p n %p", error, bp, n)); 2740 } 2741 if (error != 0) 2742 return (error); 2743 2744 /* Keep the list ordered. */ 2745 if (n == NULL) { 2746 KASSERT(TAILQ_EMPTY(&bv->bv_hd) || 2747 (uint64_t)bp->b_lblkno < 2748 (uint64_t)TAILQ_FIRST(&bv->bv_hd)->b_lblkno, 2749 ("buf_vlist_add: queue order: " 2750 "%p should be before first %p", 2751 bp, TAILQ_FIRST(&bv->bv_hd))); 2752 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2753 } else { 2754 KASSERT(TAILQ_NEXT(n, b_bobufs) == NULL || 2755 (uint64_t)bp->b_lblkno < 2756 (uint64_t)TAILQ_NEXT(n, b_bobufs)->b_lblkno, 2757 ("buf_vlist_add: queue order: " 2758 "%p should be before next %p", 2759 bp, TAILQ_NEXT(n, b_bobufs))); 2760 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2761 } 2762 2763 bv->bv_cnt++; 2764 return (0); 2765 } 2766 2767 /* 2768 * Add the buffer to the sorted clean or dirty block list. 2769 * 2770 * NOTE: xflags is passed as a constant, optimizing this inline function! 2771 */ 2772 static void 2773 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2774 { 2775 int error; 2776 2777 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0, 2778 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2779 bp->b_xflags |= xflags; 2780 error = buf_vlist_find_or_add(bp, bo, xflags); 2781 if (error) 2782 panic("buf_vlist_add: error=%d", error); 2783 } 2784 2785 /* 2786 * Look up a buffer using the buffer tries. 
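 * Both the clean and the dirty trie are consulted; a buffer hangs off at most one of them at any time.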
2787 */ 2788 struct buf * 2789 gbincore(struct bufobj *bo, daddr_t lblkno) 2790 { 2791 struct buf *bp; 2792 2793 ASSERT_BO_LOCKED(bo); 2794 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2795 if (bp != NULL) 2796 return (bp); 2797 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2798 } 2799 2800 /* 2801 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2802 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2803 * stability of the result. Like other lockless lookups, the found buf may 2804 * already be invalid by the time this function returns. 2805 */ 2806 struct buf * 2807 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2808 { 2809 struct buf *bp; 2810 2811 ASSERT_BO_UNLOCKED(bo); 2812 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2813 if (bp != NULL) 2814 return (bp); 2815 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2816 } 2817 2818 /* 2819 * Associate a buffer with a vnode. 2820 */ 2821 int 2822 bgetvp(struct vnode *vp, struct buf *bp) 2823 { 2824 struct bufobj *bo; 2825 int error; 2826 2827 bo = &vp->v_bufobj; 2828 ASSERT_BO_UNLOCKED(bo); 2829 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2830 2831 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2832 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2833 ("bgetvp: bp already attached! %p", bp)); 2834 2835 /* 2836 * Add the buf to the vnode's clean list unless we lost a race and find 2837 * an existing buf in either dirty or clean. 2838 */ 2839 bp->b_vp = vp; 2840 bp->b_bufobj = bo; 2841 bp->b_xflags |= BX_VNCLEAN; 2842 error = EEXIST; 2843 BO_LOCK(bo); 2844 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL) 2845 error = buf_vlist_find_or_add(bp, bo, BX_VNCLEAN); 2846 BO_UNLOCK(bo); 2847 if (__predict_true(error == 0)) { 2848 vhold(vp); 2849 return (0); 2850 } 2851 if (error != EEXIST) 2852 panic("bgetvp: buf_vlist_add error: %d", error); 2853 bp->b_vp = NULL; 2854 bp->b_bufobj = NULL; 2855 bp->b_xflags &= ~BX_VNCLEAN; 2856 return (error); 2857 } 2858 2859 /* 2860 * Disassociate a buffer from a vnode. 2861 */ 2862 void 2863 brelvp(struct buf *bp) 2864 { 2865 struct bufobj *bo; 2866 struct vnode *vp; 2867 2868 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2869 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2870 2871 /* 2872 * Delete from old vnode list, if on one. 2873 */ 2874 vp = bp->b_vp; /* XXX */ 2875 bo = bp->b_bufobj; 2876 BO_LOCK(bo); 2877 buf_vlist_remove(bp); 2878 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2879 bo->bo_flag &= ~BO_ONWORKLST; 2880 mtx_lock(&sync_mtx); 2881 LIST_REMOVE(bo, bo_synclist); 2882 syncer_worklist_len--; 2883 mtx_unlock(&sync_mtx); 2884 } 2885 bp->b_vp = NULL; 2886 bp->b_bufobj = NULL; 2887 BO_UNLOCK(bo); 2888 vdrop(vp); 2889 } 2890 2891 /* 2892 * Add an item to the syncer work queue. 
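 * The delay is in seconds worth of wheel slots: it is clamped below the wheel size and the bufobj lands on the slot which comes due that many seconds from now (the syncer sweeps one slot per second).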
2893 */ 2894 static void 2895 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2896 { 2897 int slot; 2898 2899 ASSERT_BO_WLOCKED(bo); 2900 2901 mtx_lock(&sync_mtx); 2902 if (bo->bo_flag & BO_ONWORKLST) 2903 LIST_REMOVE(bo, bo_synclist); 2904 else { 2905 bo->bo_flag |= BO_ONWORKLST; 2906 syncer_worklist_len++; 2907 } 2908 2909 if (delay > syncer_maxdelay - 2) 2910 delay = syncer_maxdelay - 2; 2911 slot = (syncer_delayno + delay) & syncer_mask; 2912 2913 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2914 mtx_unlock(&sync_mtx); 2915 } 2916 2917 static int 2918 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2919 { 2920 int error, len; 2921 2922 mtx_lock(&sync_mtx); 2923 len = syncer_worklist_len - sync_vnode_count; 2924 mtx_unlock(&sync_mtx); 2925 error = SYSCTL_OUT(req, &len, sizeof(len)); 2926 return (error); 2927 } 2928 2929 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2930 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2931 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2932 2933 static struct proc *updateproc; 2934 static void sched_sync(void); 2935 static struct kproc_desc up_kp = { 2936 "syncer", 2937 sched_sync, 2938 &updateproc 2939 }; 2940 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2941 2942 static int 2943 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2944 { 2945 struct vnode *vp; 2946 struct mount *mp; 2947 2948 *bo = LIST_FIRST(slp); 2949 if (*bo == NULL) 2950 return (0); 2951 vp = bo2vnode(*bo); 2952 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2953 return (1); 2954 /* 2955 * We use vhold in case the vnode does not 2956 * successfully sync. vhold prevents the vnode from 2957 * going away when we unlock the sync_mtx so that 2958 * we can acquire the vnode interlock. 2959 */ 2960 vholdl(vp); 2961 mtx_unlock(&sync_mtx); 2962 VI_UNLOCK(vp); 2963 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2964 vdrop(vp); 2965 mtx_lock(&sync_mtx); 2966 return (*bo == LIST_FIRST(slp)); 2967 } 2968 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2969 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2970 ("suspended mp syncing vp %p", vp)); 2971 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2972 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2973 VOP_UNLOCK(vp); 2974 vn_finished_write(mp); 2975 BO_LOCK(*bo); 2976 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2977 /* 2978 * Put us back on the worklist. The worklist 2979 * routine will remove us from our current 2980 * position and then add us back in at a later 2981 * position. 2982 */ 2983 vn_syncer_add_to_worklist(*bo, syncdelay); 2984 } 2985 BO_UNLOCK(*bo); 2986 vdrop(vp); 2987 mtx_lock(&sync_mtx); 2988 return (0); 2989 } 2990 2991 static int first_printf = 1; 2992 2993 /* 2994 * System filesystem synchronizer daemon. 
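 * Sweeps one slot of the work queue per second, syncing bufobjs whose delay has expired; during shutdown it keeps draining the remaining work and then runs a short final-delay pass.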
2995 */ 2996 static void 2997 sched_sync(void) 2998 { 2999 struct synclist *next, *slp; 3000 struct bufobj *bo; 3001 long starttime; 3002 struct thread *td = curthread; 3003 int last_work_seen; 3004 int net_worklist_len; 3005 int syncer_final_iter; 3006 int error; 3007 3008 last_work_seen = 0; 3009 syncer_final_iter = 0; 3010 syncer_state = SYNCER_RUNNING; 3011 starttime = time_uptime; 3012 td->td_pflags |= TDP_NORUNNINGBUF; 3013 3014 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 3015 SHUTDOWN_PRI_LAST); 3016 3017 mtx_lock(&sync_mtx); 3018 for (;;) { 3019 if (syncer_state == SYNCER_FINAL_DELAY && 3020 syncer_final_iter == 0) { 3021 mtx_unlock(&sync_mtx); 3022 kproc_suspend_check(td->td_proc); 3023 mtx_lock(&sync_mtx); 3024 } 3025 net_worklist_len = syncer_worklist_len - sync_vnode_count; 3026 if (syncer_state != SYNCER_RUNNING && 3027 starttime != time_uptime) { 3028 if (first_printf) { 3029 printf("\nSyncing disks, vnodes remaining... "); 3030 first_printf = 0; 3031 } 3032 printf("%d ", net_worklist_len); 3033 } 3034 starttime = time_uptime; 3035 3036 /* 3037 * Push files whose dirty time has expired. Be careful 3038 * of interrupt race on slp queue. 3039 * 3040 * Skip over empty worklist slots when shutting down. 3041 */ 3042 do { 3043 slp = &syncer_workitem_pending[syncer_delayno]; 3044 syncer_delayno += 1; 3045 if (syncer_delayno == syncer_maxdelay) 3046 syncer_delayno = 0; 3047 next = &syncer_workitem_pending[syncer_delayno]; 3048 /* 3049 * If the worklist has wrapped since the 3050 * it was emptied of all but syncer vnodes, 3051 * switch to the FINAL_DELAY state and run 3052 * for one more second. 3053 */ 3054 if (syncer_state == SYNCER_SHUTTING_DOWN && 3055 net_worklist_len == 0 && 3056 last_work_seen == syncer_delayno) { 3057 syncer_state = SYNCER_FINAL_DELAY; 3058 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 3059 } 3060 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 3061 syncer_worklist_len > 0); 3062 3063 /* 3064 * Keep track of the last time there was anything 3065 * on the worklist other than syncer vnodes. 3066 * Return to the SHUTTING_DOWN state if any 3067 * new work appears. 3068 */ 3069 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 3070 last_work_seen = syncer_delayno; 3071 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 3072 syncer_state = SYNCER_SHUTTING_DOWN; 3073 while (!LIST_EMPTY(slp)) { 3074 error = sync_vnode(slp, &bo, td); 3075 if (error == 1) { 3076 LIST_REMOVE(bo, bo_synclist); 3077 LIST_INSERT_HEAD(next, bo, bo_synclist); 3078 continue; 3079 } 3080 3081 if (first_printf == 0) { 3082 /* 3083 * Drop the sync mutex, because some watchdog 3084 * drivers need to sleep while patting 3085 */ 3086 mtx_unlock(&sync_mtx); 3087 wdog_kern_pat(WD_LASTVAL); 3088 mtx_lock(&sync_mtx); 3089 } 3090 } 3091 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 3092 syncer_final_iter--; 3093 /* 3094 * The variable rushjob allows the kernel to speed up the 3095 * processing of the filesystem syncer process. A rushjob 3096 * value of N tells the filesystem syncer to process the next 3097 * N seconds worth of work on its queue ASAP. Currently rushjob 3098 * is used by the soft update code to speed up the filesystem 3099 * syncer process when the incore state is getting so far 3100 * ahead of the disk that the kernel memory pool is being 3101 * threatened with exhaustion. 
3102 */ 3103 if (rushjob > 0) { 3104 rushjob -= 1; 3105 continue; 3106 } 3107 /* 3108 * Just sleep for a short period of time between 3109 * iterations when shutting down to allow some I/O 3110 * to happen. 3111 * 3112 * If it has taken us less than a second to process the 3113 * current work, then wait. Otherwise start right over 3114 * again. We can still lose time if any single round 3115 * takes more than two seconds, but it does not really 3116 * matter as we are just trying to generally pace the 3117 * filesystem activity. 3118 */ 3119 if (syncer_state != SYNCER_RUNNING || 3120 time_uptime == starttime) { 3121 thread_lock(td); 3122 sched_prio(td, PPAUSE); 3123 thread_unlock(td); 3124 } 3125 if (syncer_state != SYNCER_RUNNING) 3126 cv_timedwait(&sync_wakeup, &sync_mtx, 3127 hz / SYNCER_SHUTDOWN_SPEEDUP); 3128 else if (time_uptime == starttime) 3129 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3130 } 3131 } 3132 3133 /* 3134 * Request the syncer daemon to speed up its work. 3135 * We never push it to speed up more than half of its 3136 * normal turn time, otherwise it could take over the cpu. 3137 */ 3138 int 3139 speedup_syncer(void) 3140 { 3141 int ret = 0; 3142 3143 mtx_lock(&sync_mtx); 3144 if (rushjob < syncdelay / 2) { 3145 rushjob += 1; 3146 stat_rush_requests += 1; 3147 ret = 1; 3148 } 3149 mtx_unlock(&sync_mtx); 3150 cv_broadcast(&sync_wakeup); 3151 return (ret); 3152 } 3153 3154 /* 3155 * Tell the syncer to speed up its work and run though its work 3156 * list several times, then tell it to shut down. 3157 */ 3158 static void 3159 syncer_shutdown(void *arg, int howto) 3160 { 3161 3162 if (howto & RB_NOSYNC) 3163 return; 3164 mtx_lock(&sync_mtx); 3165 syncer_state = SYNCER_SHUTTING_DOWN; 3166 rushjob = 0; 3167 mtx_unlock(&sync_mtx); 3168 cv_broadcast(&sync_wakeup); 3169 kproc_shutdown(arg, howto); 3170 } 3171 3172 void 3173 syncer_suspend(void) 3174 { 3175 3176 syncer_shutdown(updateproc, 0); 3177 } 3178 3179 void 3180 syncer_resume(void) 3181 { 3182 3183 mtx_lock(&sync_mtx); 3184 first_printf = 1; 3185 syncer_state = SYNCER_RUNNING; 3186 mtx_unlock(&sync_mtx); 3187 cv_broadcast(&sync_wakeup); 3188 kproc_resume(updateproc); 3189 } 3190 3191 /* 3192 * Move the buffer between the clean and dirty lists of its vnode. 3193 */ 3194 void 3195 reassignbuf(struct buf *bp) 3196 { 3197 struct vnode *vp; 3198 struct bufobj *bo; 3199 int delay; 3200 #ifdef INVARIANTS 3201 struct bufv *bv; 3202 #endif 3203 3204 vp = bp->b_vp; 3205 bo = bp->b_bufobj; 3206 3207 KASSERT((bp->b_flags & B_PAGING) == 0, 3208 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3209 3210 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3211 bp, bp->b_vp, bp->b_flags); 3212 3213 BO_LOCK(bo); 3214 if ((bo->bo_flag & BO_NONSTERILE) == 0) { 3215 /* 3216 * Coordinate with getblk's unlocked lookup. Make 3217 * BO_NONSTERILE visible before the first reassignbuf produces 3218 * any side effect. This could be outside the bo lock if we 3219 * used a separate atomic flag field. 3220 */ 3221 bo->bo_flag |= BO_NONSTERILE; 3222 atomic_thread_fence_rel(); 3223 } 3224 buf_vlist_remove(bp); 3225 3226 /* 3227 * If dirty, put on list of dirty buffers; otherwise insert onto list 3228 * of clean buffers. 
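 * A buffer going dirty also puts the bufobj on the syncer worklist, with a delay chosen by vnode type in the switch below.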
3229 */ 3230 if (bp->b_flags & B_DELWRI) { 3231 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3232 switch (vp->v_type) { 3233 case VDIR: 3234 delay = dirdelay; 3235 break; 3236 case VCHR: 3237 delay = metadelay; 3238 break; 3239 default: 3240 delay = filedelay; 3241 } 3242 vn_syncer_add_to_worklist(bo, delay); 3243 } 3244 buf_vlist_add(bp, bo, BX_VNDIRTY); 3245 } else { 3246 buf_vlist_add(bp, bo, BX_VNCLEAN); 3247 3248 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3249 mtx_lock(&sync_mtx); 3250 LIST_REMOVE(bo, bo_synclist); 3251 syncer_worklist_len--; 3252 mtx_unlock(&sync_mtx); 3253 bo->bo_flag &= ~BO_ONWORKLST; 3254 } 3255 } 3256 #ifdef INVARIANTS 3257 bv = &bo->bo_clean; 3258 bp = TAILQ_FIRST(&bv->bv_hd); 3259 KASSERT(bp == NULL || bp->b_bufobj == bo, 3260 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3261 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3262 KASSERT(bp == NULL || bp->b_bufobj == bo, 3263 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3264 bv = &bo->bo_dirty; 3265 bp = TAILQ_FIRST(&bv->bv_hd); 3266 KASSERT(bp == NULL || bp->b_bufobj == bo, 3267 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3268 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3269 KASSERT(bp == NULL || bp->b_bufobj == bo, 3270 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3271 #endif 3272 BO_UNLOCK(bo); 3273 } 3274 3275 static void 3276 v_init_counters(struct vnode *vp) 3277 { 3278 3279 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3280 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3281 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3282 3283 refcount_init(&vp->v_holdcnt, 1); 3284 refcount_init(&vp->v_usecount, 1); 3285 } 3286 3287 /* 3288 * Get a usecount on a vnode. 3289 * 3290 * vget and vget_finish may fail to lock the vnode if they lose a race against 3291 * it being doomed. LK_RETRY can be passed in flags to lock it anyway. 3292 * 3293 * Consumers which don't guarantee liveness of the vnode can use SMR to 3294 * try to get a reference. Note this operation can fail since the vnode 3295 * may be awaiting getting freed by the time they get to it. 
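 *
 * A lockless consumer looks roughly like this (sketch only; vfs_smr_enter and
 * vfs_smr_exit bracket the SMR section):
 *	vfs_smr_enter();
 *	vp = <candidate vnode found under SMR>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<restart the lookup>;
 *	error = vget_finish(vp, LK_SHARED, vs);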
3296 */ 3297 enum vgetstate 3298 vget_prep_smr(struct vnode *vp) 3299 { 3300 enum vgetstate vs; 3301 3302 VFS_SMR_ASSERT_ENTERED(); 3303 3304 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3305 vs = VGET_USECOUNT; 3306 } else { 3307 if (vhold_smr(vp)) 3308 vs = VGET_HOLDCNT; 3309 else 3310 vs = VGET_NONE; 3311 } 3312 return (vs); 3313 } 3314 3315 enum vgetstate 3316 vget_prep(struct vnode *vp) 3317 { 3318 enum vgetstate vs; 3319 3320 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3321 vs = VGET_USECOUNT; 3322 } else { 3323 vhold(vp); 3324 vs = VGET_HOLDCNT; 3325 } 3326 return (vs); 3327 } 3328 3329 void 3330 vget_abort(struct vnode *vp, enum vgetstate vs) 3331 { 3332 3333 switch (vs) { 3334 case VGET_USECOUNT: 3335 vrele(vp); 3336 break; 3337 case VGET_HOLDCNT: 3338 vdrop(vp); 3339 break; 3340 default: 3341 __assert_unreachable(); 3342 } 3343 } 3344 3345 int 3346 vget(struct vnode *vp, int flags) 3347 { 3348 enum vgetstate vs; 3349 3350 vs = vget_prep(vp); 3351 return (vget_finish(vp, flags, vs)); 3352 } 3353 3354 int 3355 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3356 { 3357 int error; 3358 3359 if ((flags & LK_INTERLOCK) != 0) 3360 ASSERT_VI_LOCKED(vp, __func__); 3361 else 3362 ASSERT_VI_UNLOCKED(vp, __func__); 3363 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3364 VNPASS(vp->v_holdcnt > 0, vp); 3365 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3366 3367 error = vn_lock(vp, flags); 3368 if (__predict_false(error != 0)) { 3369 vget_abort(vp, vs); 3370 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3371 vp); 3372 return (error); 3373 } 3374 3375 vget_finish_ref(vp, vs); 3376 return (0); 3377 } 3378 3379 void 3380 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3381 { 3382 int old; 3383 3384 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3385 VNPASS(vp->v_holdcnt > 0, vp); 3386 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3387 3388 if (vs == VGET_USECOUNT) 3389 return; 3390 3391 /* 3392 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3393 * the vnode around. Otherwise someone else lended their hold count and 3394 * we have to drop ours. 3395 */ 3396 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3397 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3398 if (old != 0) { 3399 #ifdef INVARIANTS 3400 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3401 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3402 #else 3403 refcount_release(&vp->v_holdcnt); 3404 #endif 3405 } 3406 } 3407 3408 void 3409 vref(struct vnode *vp) 3410 { 3411 enum vgetstate vs; 3412 3413 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3414 vs = vget_prep(vp); 3415 vget_finish_ref(vp, vs); 3416 } 3417 3418 void 3419 vrefact(struct vnode *vp) 3420 { 3421 int old __diagused; 3422 3423 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3424 old = refcount_acquire(&vp->v_usecount); 3425 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3426 } 3427 3428 void 3429 vlazy(struct vnode *vp) 3430 { 3431 struct mount *mp; 3432 3433 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3434 3435 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3436 return; 3437 /* 3438 * We may get here for inactive routines after the vnode got doomed. 
3439 */ 3440 if (VN_IS_DOOMED(vp)) 3441 return; 3442 mp = vp->v_mount; 3443 mtx_lock(&mp->mnt_listmtx); 3444 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3445 vp->v_mflag |= VMP_LAZYLIST; 3446 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3447 mp->mnt_lazyvnodelistsize++; 3448 } 3449 mtx_unlock(&mp->mnt_listmtx); 3450 } 3451 3452 static void 3453 vunlazy(struct vnode *vp) 3454 { 3455 struct mount *mp; 3456 3457 ASSERT_VI_LOCKED(vp, __func__); 3458 VNPASS(!VN_IS_DOOMED(vp), vp); 3459 3460 mp = vp->v_mount; 3461 mtx_lock(&mp->mnt_listmtx); 3462 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3463 /* 3464 * Don't remove the vnode from the lazy list if another thread 3465 * has increased the hold count. It may have re-enqueued the 3466 * vnode to the lazy list and is now responsible for its 3467 * removal. 3468 */ 3469 if (vp->v_holdcnt == 0) { 3470 vp->v_mflag &= ~VMP_LAZYLIST; 3471 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3472 mp->mnt_lazyvnodelistsize--; 3473 } 3474 mtx_unlock(&mp->mnt_listmtx); 3475 } 3476 3477 /* 3478 * This routine is only meant to be called from vgonel prior to dooming 3479 * the vnode. 3480 */ 3481 static void 3482 vunlazy_gone(struct vnode *vp) 3483 { 3484 struct mount *mp; 3485 3486 ASSERT_VOP_ELOCKED(vp, __func__); 3487 ASSERT_VI_LOCKED(vp, __func__); 3488 VNPASS(!VN_IS_DOOMED(vp), vp); 3489 3490 if (vp->v_mflag & VMP_LAZYLIST) { 3491 mp = vp->v_mount; 3492 mtx_lock(&mp->mnt_listmtx); 3493 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3494 vp->v_mflag &= ~VMP_LAZYLIST; 3495 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3496 mp->mnt_lazyvnodelistsize--; 3497 mtx_unlock(&mp->mnt_listmtx); 3498 } 3499 } 3500 3501 static void 3502 vdefer_inactive(struct vnode *vp) 3503 { 3504 3505 ASSERT_VI_LOCKED(vp, __func__); 3506 VNPASS(vp->v_holdcnt > 0, vp); 3507 if (VN_IS_DOOMED(vp)) { 3508 vdropl(vp); 3509 return; 3510 } 3511 if (vp->v_iflag & VI_DEFINACT) { 3512 VNPASS(vp->v_holdcnt > 1, vp); 3513 vdropl(vp); 3514 return; 3515 } 3516 if (vp->v_usecount > 0) { 3517 vp->v_iflag &= ~VI_OWEINACT; 3518 vdropl(vp); 3519 return; 3520 } 3521 vlazy(vp); 3522 vp->v_iflag |= VI_DEFINACT; 3523 VI_UNLOCK(vp); 3524 atomic_add_long(&deferred_inact, 1); 3525 } 3526 3527 static void 3528 vdefer_inactive_unlocked(struct vnode *vp) 3529 { 3530 3531 VI_LOCK(vp); 3532 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3533 vdropl(vp); 3534 return; 3535 } 3536 vdefer_inactive(vp); 3537 } 3538 3539 enum vput_op { VRELE, VPUT, VUNREF }; 3540 3541 /* 3542 * Handle ->v_usecount transitioning to 0. 3543 * 3544 * By releasing the last usecount we take ownership of the hold count which 3545 * provides liveness of the vnode, meaning we have to vdrop. 3546 * 3547 * For all vnodes we may need to perform inactive processing. It requires an 3548 * exclusive lock on the vnode, while it is legal to call here with only a 3549 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3550 * inactive processing gets deferred to the syncer. 3551 * 3552 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3553 * on the lock being held all the way until VOP_INACTIVE. This in particular 3554 * happens with UFS which adds half-constructed vnodes to the hash, where they 3555 * can be found by other code. 
3556 */ 3557 static void 3558 vput_final(struct vnode *vp, enum vput_op func) 3559 { 3560 int error; 3561 bool want_unlock; 3562 3563 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3564 VNPASS(vp->v_holdcnt > 0, vp); 3565 3566 VI_LOCK(vp); 3567 3568 /* 3569 * By the time we got here someone else might have transitioned 3570 * the count back to > 0. 3571 */ 3572 if (vp->v_usecount > 0) 3573 goto out; 3574 3575 /* 3576 * If the vnode is doomed vgone already performed inactive processing 3577 * (if needed). 3578 */ 3579 if (VN_IS_DOOMED(vp)) 3580 goto out; 3581 3582 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3583 goto out; 3584 3585 if (vp->v_iflag & VI_DOINGINACT) 3586 goto out; 3587 3588 /* 3589 * Locking operations here will drop the interlock and possibly the 3590 * vnode lock, opening a window where the vnode can get doomed all the 3591 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3592 * perform inactive. 3593 */ 3594 vp->v_iflag |= VI_OWEINACT; 3595 want_unlock = false; 3596 error = 0; 3597 switch (func) { 3598 case VRELE: 3599 switch (VOP_ISLOCKED(vp)) { 3600 case LK_EXCLUSIVE: 3601 break; 3602 case LK_EXCLOTHER: 3603 case 0: 3604 want_unlock = true; 3605 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3606 VI_LOCK(vp); 3607 break; 3608 default: 3609 /* 3610 * The lock has at least one sharer, but we have no way 3611 * to conclude whether this is us. Play it safe and 3612 * defer processing. 3613 */ 3614 error = EAGAIN; 3615 break; 3616 } 3617 break; 3618 case VPUT: 3619 want_unlock = true; 3620 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3621 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3622 LK_NOWAIT); 3623 VI_LOCK(vp); 3624 } 3625 break; 3626 case VUNREF: 3627 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3628 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3629 VI_LOCK(vp); 3630 } 3631 break; 3632 } 3633 if (error == 0) { 3634 if (func == VUNREF) { 3635 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3636 ("recursive vunref")); 3637 vp->v_vflag |= VV_UNREF; 3638 } 3639 for (;;) { 3640 error = vinactive(vp); 3641 if (want_unlock) 3642 VOP_UNLOCK(vp); 3643 if (error != ERELOOKUP || !want_unlock) 3644 break; 3645 VOP_LOCK(vp, LK_EXCLUSIVE); 3646 } 3647 if (func == VUNREF) 3648 vp->v_vflag &= ~VV_UNREF; 3649 vdropl(vp); 3650 } else { 3651 vdefer_inactive(vp); 3652 } 3653 return; 3654 out: 3655 if (func == VPUT) 3656 VOP_UNLOCK(vp); 3657 vdropl(vp); 3658 } 3659 3660 /* 3661 * Decrement ->v_usecount for a vnode. 3662 * 3663 * Releasing the last use count requires additional processing, see vput_final 3664 * above for details. 3665 * 3666 * Comment above each variant denotes lock state on entry and exit. 
3667 */ 3668 3669 /* 3670 * in: any 3671 * out: same as passed in 3672 */ 3673 void 3674 vrele(struct vnode *vp) 3675 { 3676 3677 ASSERT_VI_UNLOCKED(vp, __func__); 3678 if (!refcount_release(&vp->v_usecount)) 3679 return; 3680 vput_final(vp, VRELE); 3681 } 3682 3683 /* 3684 * in: locked 3685 * out: unlocked 3686 */ 3687 void 3688 vput(struct vnode *vp) 3689 { 3690 3691 ASSERT_VOP_LOCKED(vp, __func__); 3692 ASSERT_VI_UNLOCKED(vp, __func__); 3693 if (!refcount_release(&vp->v_usecount)) { 3694 VOP_UNLOCK(vp); 3695 return; 3696 } 3697 vput_final(vp, VPUT); 3698 } 3699 3700 /* 3701 * in: locked 3702 * out: locked 3703 */ 3704 void 3705 vunref(struct vnode *vp) 3706 { 3707 3708 ASSERT_VOP_LOCKED(vp, __func__); 3709 ASSERT_VI_UNLOCKED(vp, __func__); 3710 if (!refcount_release(&vp->v_usecount)) 3711 return; 3712 vput_final(vp, VUNREF); 3713 } 3714 3715 void 3716 vhold(struct vnode *vp) 3717 { 3718 int old; 3719 3720 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3721 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3722 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3723 ("%s: wrong hold count %d", __func__, old)); 3724 if (old == 0) 3725 vfs_freevnodes_dec(); 3726 } 3727 3728 void 3729 vholdnz(struct vnode *vp) 3730 { 3731 3732 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3733 #ifdef INVARIANTS 3734 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3735 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3736 ("%s: wrong hold count %d", __func__, old)); 3737 #else 3738 atomic_add_int(&vp->v_holdcnt, 1); 3739 #endif 3740 } 3741 3742 /* 3743 * Grab a hold count unless the vnode is freed. 3744 * 3745 * Only use this routine if vfs smr is the only protection you have against 3746 * freeing the vnode. 3747 * 3748 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3749 * is not set. After the flag is set the vnode becomes immutable to anyone but 3750 * the thread which managed to set the flag. 3751 * 3752 * It may be tempting to replace the loop with: 3753 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3754 * if (count & VHOLD_NO_SMR) { 3755 * backpedal and error out; 3756 * } 3757 * 3758 * However, while this is more performant, it hinders debugging by eliminating 3759 * the previously mentioned invariant. 3760 */ 3761 bool 3762 vhold_smr(struct vnode *vp) 3763 { 3764 int count; 3765 3766 VFS_SMR_ASSERT_ENTERED(); 3767 3768 count = atomic_load_int(&vp->v_holdcnt); 3769 for (;;) { 3770 if (count & VHOLD_NO_SMR) { 3771 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3772 ("non-zero hold count with flags %d\n", count)); 3773 return (false); 3774 } 3775 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3776 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3777 if (count == 0) 3778 vfs_freevnodes_dec(); 3779 return (true); 3780 } 3781 } 3782 } 3783 3784 /* 3785 * Hold a free vnode for recycling. 3786 * 3787 * Note: vnode_init references this comment. 3788 * 3789 * Attempts to recycle only need the global vnode list lock and have no use for 3790 * SMR. 3791 * 3792 * However, vnodes get inserted into the global list before they get fully 3793 * initialized and stay there until UMA decides to free the memory. This in 3794 * particular means the target can be found before it becomes usable and after 3795 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3796 * VHOLD_NO_SMR. 3797 * 3798 * Note: the vnode may gain more references after we transition the count 0->1. 
3799 */
3800 static bool
3801 vhold_recycle_free(struct vnode *vp)
3802 {
3803 int count;
3804
3805 mtx_assert(&vnode_list_mtx, MA_OWNED);
3806
3807 count = atomic_load_int(&vp->v_holdcnt);
3808 for (;;) {
3809 if (count & VHOLD_NO_SMR) {
3810 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3811 ("non-zero hold count with flags %d\n", count));
3812 return (false);
3813 }
3814 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3815 if (count > 0) {
3816 return (false);
3817 }
3818 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3819 vfs_freevnodes_dec();
3820 return (true);
3821 }
3822 }
3823 }
3824
3825 static void __noinline
3826 vdbatch_process(struct vdbatch *vd)
3827 {
3828 struct vnode *vp;
3829 int i;
3830
3831 mtx_assert(&vd->lock, MA_OWNED);
3832 MPASS(curthread->td_pinned > 0);
3833 MPASS(vd->index == VDBATCH_SIZE);
3834
3835 /*
3836 * Attempt to requeue the passed batch, but give up easily.
3837 *
3838 * Despite batching, the mechanism is prone to transient *significant*
3839 * lock contention, where vnode_list_mtx becomes the primary bottleneck
3840 * if multiple CPUs get here (one real-world example is highly parallel
3841 * do-nothing make, which will stat *tons* of vnodes). Since it is
3842 * quasi-LRU (read: not that great even if fully honoured), provide an
3843 * option to just dodge the problem. Parties which don't like it are
3844 * welcome to implement something better.
3845 */
3846 if (vnode_can_skip_requeue) {
3847 if (!mtx_trylock(&vnode_list_mtx)) {
3848 counter_u64_add(vnode_skipped_requeues, 1);
3849 critical_enter();
3850 for (i = 0; i < VDBATCH_SIZE; i++) {
3851 vp = vd->tab[i];
3852 vd->tab[i] = NULL;
3853 MPASS(vp->v_dbatchcpu != NOCPU);
3854 vp->v_dbatchcpu = NOCPU;
3855 }
3856 vd->index = 0;
3857 critical_exit();
3858 return;
3859
3860 }
3861 /* fallthrough to locked processing */
3862 } else {
3863 mtx_lock(&vnode_list_mtx);
3864 }
3865
3866 mtx_assert(&vnode_list_mtx, MA_OWNED);
3867 critical_enter();
3868 for (i = 0; i < VDBATCH_SIZE; i++) {
3869 vp = vd->tab[i];
3870 vd->tab[i] = NULL;
3871 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3872 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3873 MPASS(vp->v_dbatchcpu != NOCPU);
3874 vp->v_dbatchcpu = NOCPU;
3875 }
3876 mtx_unlock(&vnode_list_mtx);
3877 vd->index = 0;
3878 critical_exit();
3879 }
3880
3881 static void
3882 vdbatch_enqueue(struct vnode *vp)
3883 {
3884 struct vdbatch *vd;
3885
3886 ASSERT_VI_LOCKED(vp, __func__);
3887 VNPASS(!VN_IS_DOOMED(vp), vp);
3888
3889 if (vp->v_dbatchcpu != NOCPU) {
3890 VI_UNLOCK(vp);
3891 return;
3892 }
3893
3894 sched_pin();
3895 vd = DPCPU_PTR(vd);
3896 mtx_lock(&vd->lock);
3897 MPASS(vd->index < VDBATCH_SIZE);
3898 MPASS(vd->tab[vd->index] == NULL);
3899 /*
3900 * A hack: we depend on being pinned so that we know what to put in
3901 * ->v_dbatchcpu.
3902 */
3903 vp->v_dbatchcpu = curcpu;
3904 vd->tab[vd->index] = vp;
3905 vd->index++;
3906 VI_UNLOCK(vp);
3907 if (vd->index == VDBATCH_SIZE)
3908 vdbatch_process(vd);
3909 mtx_unlock(&vd->lock);
3910 sched_unpin();
3911 }
3912
3913 /*
3914 * This routine must only be called for vnodes which are about to be
3915 * deallocated. Supporting dequeue for arbitrary vnodes would require
3916 * validating that the locked batch matches.
3917 */
3918 static void
3919 vdbatch_dequeue(struct vnode *vp)
3920 {
3921 struct vdbatch *vd;
3922 int i;
3923 short cpu;
3924
3925 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp);
3926
3927 cpu = vp->v_dbatchcpu;
3928 if (cpu == NOCPU)
3929 return;
3930
3931 vd = DPCPU_ID_PTR(cpu, vd);
3932 mtx_lock(&vd->lock);
3933 for (i = 0; i < vd->index; i++) {
3934 if (vd->tab[i] != vp)
3935 continue;
3936 vp->v_dbatchcpu = NOCPU;
3937 vd->index--;
3938 vd->tab[i] = vd->tab[vd->index];
3939 vd->tab[vd->index] = NULL;
3940 break;
3941 }
3942 mtx_unlock(&vd->lock);
3943 /*
3944 * Either we dequeued the vnode above or the target CPU beat us to it.
3945 */
3946 MPASS(vp->v_dbatchcpu == NOCPU);
3947 }
3948
3949 /*
3950 * Drop the hold count of the vnode.
3951 *
3952 * It will only get freed if this is the last hold *and* it has been vgone'd.
3953 *
3954 * Because the vnode vm object keeps a hold reference on the vnode if
3955 * there is at least one resident non-cached page, the vnode cannot
3956 * leave the active list without the page cleanup done.
3957 */
3958 static void __noinline
3959 vdropl_final(struct vnode *vp)
3960 {
3961
3962 ASSERT_VI_LOCKED(vp, __func__);
3963 VNPASS(VN_IS_DOOMED(vp), vp);
3964 /*
3965 * Set the VHOLD_NO_SMR flag.
3966 *
3967 * We may be racing against vhold_smr. If they win we can just pretend
3968 * we never got this far; they will vdrop later.
3969 */
3970 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3971 vfs_freevnodes_inc();
3972 VI_UNLOCK(vp);
3973 /*
3974 * We lost the aforementioned race. Any subsequent access is
3975 * invalid as they might have managed to vdropl on their own.
3976 */
3977 return;
3978 }
3979 /*
3980 * Don't bump freevnodes as this one is going away.
3981 */
3982 freevnode(vp);
3983 }
3984
3985 void
3986 vdrop(struct vnode *vp)
3987 {
3988
3989 ASSERT_VI_UNLOCKED(vp, __func__);
3990 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3991 if (refcount_release_if_not_last(&vp->v_holdcnt))
3992 return;
3993 VI_LOCK(vp);
3994 vdropl(vp);
3995 }
3996
3997 static __always_inline void
3998 vdropl_impl(struct vnode *vp, bool enqueue)
3999 {
4000
4001 ASSERT_VI_LOCKED(vp, __func__);
4002 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4003 if (!refcount_release(&vp->v_holdcnt)) {
4004 VI_UNLOCK(vp);
4005 return;
4006 }
4007 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
4008 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
4009 if (VN_IS_DOOMED(vp)) {
4010 vdropl_final(vp);
4011 return;
4012 }
4013
4014 vfs_freevnodes_inc();
4015 if (vp->v_mflag & VMP_LAZYLIST) {
4016 vunlazy(vp);
4017 }
4018
4019 if (!enqueue) {
4020 VI_UNLOCK(vp);
4021 return;
4022 }
4023
4024 /*
4025 * Also unlocks the interlock. We can't assert on it as we
4026 * released our hold and by now the vnode might have been
4027 * freed.
4028 */
4029 vdbatch_enqueue(vp);
4030 }
4031
4032 void
4033 vdropl(struct vnode *vp)
4034 {
4035
4036 vdropl_impl(vp, true);
4037 }
4038
4039 /*
4040 * vdrop a vnode when recycling.
4041 *
4042 * This is a special case routine only to be used when recycling. It differs
4043 * from regular vdrop by not requeueing the vnode on LRU.
4044 *
4045 * Consider a case where vtryrecycle continuously fails with all vnodes (due to
4046 * e.g., frozen writes on the filesystem), filling the batch and causing it to
4047 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a
4048 * loop which can last for as long as writes are frozen.
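 *
 * Purely as an illustration of the intended split (the callers named here are
 * assumptions drawn from the surrounding code, not an exhaustive list):
 *
 *	vdrop_recycle(vp);	recycling path, e.g. vtryrecycle() giving up on
 *				a vnode; the hold is dropped without requeueing
 *	vdrop(vp);		everyone else; may requeue via the vdbatch
 *				machinery above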
4049 */ 4050 static void 4051 vdropl_recycle(struct vnode *vp) 4052 { 4053 4054 vdropl_impl(vp, false); 4055 } 4056 4057 static void 4058 vdrop_recycle(struct vnode *vp) 4059 { 4060 4061 VI_LOCK(vp); 4062 vdropl_recycle(vp); 4063 } 4064 4065 /* 4066 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 4067 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 4068 */ 4069 static int 4070 vinactivef(struct vnode *vp) 4071 { 4072 int error; 4073 4074 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4075 ASSERT_VI_LOCKED(vp, "vinactive"); 4076 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 4077 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4078 vp->v_iflag |= VI_DOINGINACT; 4079 vp->v_iflag &= ~VI_OWEINACT; 4080 VI_UNLOCK(vp); 4081 4082 /* 4083 * Before moving off the active list, we must be sure that any 4084 * modified pages are converted into the vnode's dirty 4085 * buffers, since these will no longer be checked once the 4086 * vnode is on the inactive list. 4087 * 4088 * The write-out of the dirty pages is asynchronous. At the 4089 * point that VOP_INACTIVE() is called, there could still be 4090 * pending I/O and dirty pages in the object. 4091 */ 4092 if ((vp->v_vflag & VV_NOSYNC) == 0) 4093 vnode_pager_clean_async(vp); 4094 4095 error = VOP_INACTIVE(vp); 4096 VI_LOCK(vp); 4097 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 4098 vp->v_iflag &= ~VI_DOINGINACT; 4099 return (error); 4100 } 4101 4102 int 4103 vinactive(struct vnode *vp) 4104 { 4105 4106 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4107 ASSERT_VI_LOCKED(vp, "vinactive"); 4108 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4109 4110 if ((vp->v_iflag & VI_OWEINACT) == 0) 4111 return (0); 4112 if (vp->v_iflag & VI_DOINGINACT) 4113 return (0); 4114 if (vp->v_usecount > 0) { 4115 vp->v_iflag &= ~VI_OWEINACT; 4116 return (0); 4117 } 4118 return (vinactivef(vp)); 4119 } 4120 4121 /* 4122 * Remove any vnodes in the vnode table belonging to mount point mp. 4123 * 4124 * If FORCECLOSE is not specified, there should not be any active ones, 4125 * return error if any are found (nb: this is a user error, not a 4126 * system error). If FORCECLOSE is specified, detach any active vnodes 4127 * that are found. 4128 * 4129 * If WRITECLOSE is set, only flush out regular file vnodes open for 4130 * writing. 4131 * 4132 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4133 * 4134 * `rootrefs' specifies the base reference count for the root vnode 4135 * of this filesystem. The root vnode is considered busy if its 4136 * v_usecount exceeds this value. On a successful return, vflush(, td) 4137 * will call vrele() on the root vnode exactly rootrefs times. 4138 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4139 * be zero. 4140 */ 4141 #ifdef DIAGNOSTIC 4142 static int busyprt = 0; /* print out busy vnodes */ 4143 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4144 #endif 4145 4146 int 4147 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4148 { 4149 struct vnode *vp, *mvp, *rootvp = NULL; 4150 struct vattr vattr; 4151 int busy = 0, error; 4152 4153 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4154 rootrefs, flags); 4155 if (rootrefs > 0) { 4156 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4157 ("vflush: bad args")); 4158 /* 4159 * Get the filesystem root vnode. We can vput() it 4160 * immediately, since with rootrefs > 0, it won't go away. 
4161 */ 4162 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 4163 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 4164 __func__, error); 4165 return (error); 4166 } 4167 vput(rootvp); 4168 } 4169 loop: 4170 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 4171 vholdl(vp); 4172 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 4173 if (error) { 4174 vdrop(vp); 4175 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4176 goto loop; 4177 } 4178 /* 4179 * Skip over a vnodes marked VV_SYSTEM. 4180 */ 4181 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 4182 VOP_UNLOCK(vp); 4183 vdrop(vp); 4184 continue; 4185 } 4186 /* 4187 * If WRITECLOSE is set, flush out unlinked but still open 4188 * files (even if open only for reading) and regular file 4189 * vnodes open for writing. 4190 */ 4191 if (flags & WRITECLOSE) { 4192 vnode_pager_clean_async(vp); 4193 do { 4194 error = VOP_FSYNC(vp, MNT_WAIT, td); 4195 } while (error == ERELOOKUP); 4196 if (error != 0) { 4197 VOP_UNLOCK(vp); 4198 vdrop(vp); 4199 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4200 return (error); 4201 } 4202 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 4203 VI_LOCK(vp); 4204 4205 if ((vp->v_type == VNON || 4206 (error == 0 && vattr.va_nlink > 0)) && 4207 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 4208 VOP_UNLOCK(vp); 4209 vdropl(vp); 4210 continue; 4211 } 4212 } else 4213 VI_LOCK(vp); 4214 /* 4215 * With v_usecount == 0, all we need to do is clear out the 4216 * vnode data structures and we are done. 4217 * 4218 * If FORCECLOSE is set, forcibly close the vnode. 4219 */ 4220 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 4221 vgonel(vp); 4222 } else { 4223 busy++; 4224 #ifdef DIAGNOSTIC 4225 if (busyprt) 4226 vn_printf(vp, "vflush: busy vnode "); 4227 #endif 4228 } 4229 VOP_UNLOCK(vp); 4230 vdropl(vp); 4231 } 4232 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 4233 /* 4234 * If just the root vnode is busy, and if its refcount 4235 * is equal to `rootrefs', then go ahead and kill it. 4236 */ 4237 VI_LOCK(rootvp); 4238 KASSERT(busy > 0, ("vflush: not busy")); 4239 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4240 ("vflush: usecount %d < rootrefs %d", 4241 rootvp->v_usecount, rootrefs)); 4242 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4243 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4244 vgone(rootvp); 4245 VOP_UNLOCK(rootvp); 4246 busy = 0; 4247 } else 4248 VI_UNLOCK(rootvp); 4249 } 4250 if (busy) { 4251 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4252 busy); 4253 return (EBUSY); 4254 } 4255 for (; rootrefs > 0; rootrefs--) 4256 vrele(rootvp); 4257 return (0); 4258 } 4259 4260 /* 4261 * Recycle an unused vnode. 4262 */ 4263 int 4264 vrecycle(struct vnode *vp) 4265 { 4266 int recycled; 4267 4268 VI_LOCK(vp); 4269 recycled = vrecyclel(vp); 4270 VI_UNLOCK(vp); 4271 return (recycled); 4272 } 4273 4274 /* 4275 * vrecycle, with the vp interlock held. 4276 */ 4277 int 4278 vrecyclel(struct vnode *vp) 4279 { 4280 int recycled; 4281 4282 ASSERT_VOP_ELOCKED(vp, __func__); 4283 ASSERT_VI_LOCKED(vp, __func__); 4284 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4285 recycled = 0; 4286 if (vp->v_usecount == 0) { 4287 recycled = 1; 4288 vgonel(vp); 4289 } 4290 return (recycled); 4291 } 4292 4293 /* 4294 * Eliminate all activity associated with a vnode 4295 * in preparation for reuse. 4296 */ 4297 void 4298 vgone(struct vnode *vp) 4299 { 4300 VI_LOCK(vp); 4301 vgonel(vp); 4302 VI_UNLOCK(vp); 4303 } 4304 4305 /* 4306 * Notify upper mounts about reclaimed or unlinked vnode. 
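 *
 * For illustration only: a stacked filesystem (nullfs is the in-tree example)
 * receives these notifications through the VFS_RECLAIM_LOWERVP() /
 * VFS_UNLINK_LOWERVP() callbacks after registering on the lower mount's
 * mnt_notify list. A hypothetical handler would be shaped roughly like:
 *
 *	static void
 *	examplefs_reclaim_lowervp(struct mount *mp, struct vnode *lowervp)
 *	{
 *		... find the upper vnode stacked on lowervp and vgone() it ...
 *	}
 *
 * The exact registration interface and vfsops signatures should be checked
 * against sys/mount.h; this only conveys the shape of the callback.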
4307 */ 4308 void 4309 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4310 { 4311 struct mount *mp; 4312 struct mount_upper_node *ump; 4313 4314 mp = atomic_load_ptr(&vp->v_mount); 4315 if (mp == NULL) 4316 return; 4317 if (TAILQ_EMPTY(&mp->mnt_notify)) 4318 return; 4319 4320 MNT_ILOCK(mp); 4321 mp->mnt_upper_pending++; 4322 KASSERT(mp->mnt_upper_pending > 0, 4323 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4324 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4325 MNT_IUNLOCK(mp); 4326 switch (event) { 4327 case VFS_NOTIFY_UPPER_RECLAIM: 4328 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4329 break; 4330 case VFS_NOTIFY_UPPER_UNLINK: 4331 VFS_UNLINK_LOWERVP(ump->mp, vp); 4332 break; 4333 } 4334 MNT_ILOCK(mp); 4335 } 4336 mp->mnt_upper_pending--; 4337 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4338 mp->mnt_upper_pending == 0) { 4339 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4340 wakeup(&mp->mnt_uppers); 4341 } 4342 MNT_IUNLOCK(mp); 4343 } 4344 4345 /* 4346 * vgone, with the vp interlock held. 4347 */ 4348 static void 4349 vgonel(struct vnode *vp) 4350 { 4351 struct thread *td; 4352 struct mount *mp; 4353 vm_object_t object; 4354 bool active, doinginact, oweinact; 4355 4356 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4357 ASSERT_VI_LOCKED(vp, "vgonel"); 4358 VNASSERT(vp->v_holdcnt, vp, 4359 ("vgonel: vp %p has no reference.", vp)); 4360 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4361 td = curthread; 4362 4363 /* 4364 * Don't vgonel if we're already doomed. 4365 */ 4366 if (VN_IS_DOOMED(vp)) { 4367 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4368 vn_get_state(vp) == VSTATE_DEAD, vp); 4369 return; 4370 } 4371 /* 4372 * Paired with freevnode. 4373 */ 4374 vn_seqc_write_begin_locked(vp); 4375 vunlazy_gone(vp); 4376 vn_irflag_set_locked(vp, VIRF_DOOMED); 4377 vn_set_state(vp, VSTATE_DESTROYING); 4378 4379 /* 4380 * Check to see if the vnode is in use. If so, we have to 4381 * call VOP_CLOSE() and VOP_INACTIVE(). 4382 * 4383 * It could be that VOP_INACTIVE() requested reclamation, in 4384 * which case we should avoid recursion, so check 4385 * VI_DOINGINACT. This is not precise but good enough. 4386 */ 4387 active = vp->v_usecount > 0; 4388 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4389 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4390 4391 /* 4392 * If we need to do inactive VI_OWEINACT will be set. 4393 */ 4394 if (vp->v_iflag & VI_DEFINACT) { 4395 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4396 vp->v_iflag &= ~VI_DEFINACT; 4397 vdropl(vp); 4398 } else { 4399 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4400 VI_UNLOCK(vp); 4401 } 4402 cache_purge_vgone(vp); 4403 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4404 4405 /* 4406 * If purging an active vnode, it must be closed and 4407 * deactivated before being reclaimed. 4408 */ 4409 if (active) 4410 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4411 if (!doinginact) { 4412 do { 4413 if (oweinact || active) { 4414 VI_LOCK(vp); 4415 vinactivef(vp); 4416 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4417 VI_UNLOCK(vp); 4418 } 4419 } while (oweinact); 4420 } 4421 if (vp->v_type == VSOCK) 4422 vfs_unp_reclaim(vp); 4423 4424 /* 4425 * Clean out any buffers associated with the vnode. 4426 * If the flush fails, just toss the buffers. 
4427 */ 4428 mp = NULL; 4429 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4430 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4431 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4432 while (vinvalbuf(vp, 0, 0, 0) != 0) 4433 ; 4434 } 4435 4436 BO_LOCK(&vp->v_bufobj); 4437 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4438 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4439 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4440 vp->v_bufobj.bo_clean.bv_cnt == 0, 4441 ("vp %p bufobj not invalidated", vp)); 4442 4443 /* 4444 * For VMIO bufobj, BO_DEAD is set later, or in 4445 * vm_object_terminate() after the object's page queue is 4446 * flushed. 4447 */ 4448 object = vp->v_bufobj.bo_object; 4449 if (object == NULL) 4450 vp->v_bufobj.bo_flag |= BO_DEAD; 4451 BO_UNLOCK(&vp->v_bufobj); 4452 4453 /* 4454 * Handle the VM part. Tmpfs handles v_object on its own (the 4455 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4456 * should not touch the object borrowed from the lower vnode 4457 * (the handle check). 4458 */ 4459 if (object != NULL && object->type == OBJT_VNODE && 4460 object->handle == vp) 4461 vnode_destroy_vobject(vp); 4462 4463 /* 4464 * Reclaim the vnode. 4465 */ 4466 if (VOP_RECLAIM(vp)) 4467 panic("vgone: cannot reclaim"); 4468 if (mp != NULL) 4469 vn_finished_secondary_write(mp); 4470 VNASSERT(vp->v_object == NULL, vp, 4471 ("vop_reclaim left v_object vp=%p", vp)); 4472 /* 4473 * Clear the advisory locks and wake up waiting threads. 4474 */ 4475 if (vp->v_lockf != NULL) { 4476 (void)VOP_ADVLOCKPURGE(vp); 4477 vp->v_lockf = NULL; 4478 } 4479 /* 4480 * Delete from old mount point vnode list. 4481 */ 4482 if (vp->v_mount == NULL) { 4483 VI_LOCK(vp); 4484 } else { 4485 delmntque(vp); 4486 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4487 } 4488 /* 4489 * Done with purge, reset to the standard lock and invalidate 4490 * the vnode. 4491 */ 4492 vp->v_vnlock = &vp->v_lock; 4493 vp->v_op = &dead_vnodeops; 4494 vp->v_type = VBAD; 4495 vn_set_state(vp, VSTATE_DEAD); 4496 } 4497 4498 /* 4499 * Print out a description of a vnode. 4500 */ 4501 static const char *const vtypename[] = { 4502 [VNON] = "VNON", 4503 [VREG] = "VREG", 4504 [VDIR] = "VDIR", 4505 [VBLK] = "VBLK", 4506 [VCHR] = "VCHR", 4507 [VLNK] = "VLNK", 4508 [VSOCK] = "VSOCK", 4509 [VFIFO] = "VFIFO", 4510 [VBAD] = "VBAD", 4511 [VMARKER] = "VMARKER", 4512 }; 4513 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4514 "vnode type name not added to vtypename"); 4515 4516 static const char *const vstatename[] = { 4517 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4518 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4519 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4520 [VSTATE_DEAD] = "VSTATE_DEAD", 4521 }; 4522 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4523 "vnode state name not added to vstatename"); 4524 4525 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4526 "new hold count flag not added to vn_printf"); 4527 4528 void 4529 vn_printf(struct vnode *vp, const char *fmt, ...) 
4530 { 4531 va_list ap; 4532 char buf[256], buf2[16]; 4533 u_long flags; 4534 u_int holdcnt; 4535 short irflag; 4536 4537 va_start(ap, fmt); 4538 vprintf(fmt, ap); 4539 va_end(ap); 4540 printf("%p: ", (void *)vp); 4541 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4542 vstatename[vp->v_state], vp->v_op); 4543 holdcnt = atomic_load_int(&vp->v_holdcnt); 4544 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4545 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4546 vp->v_seqc_users); 4547 switch (vp->v_type) { 4548 case VDIR: 4549 printf(" mountedhere %p\n", vp->v_mountedhere); 4550 break; 4551 case VCHR: 4552 printf(" rdev %p\n", vp->v_rdev); 4553 break; 4554 case VSOCK: 4555 printf(" socket %p\n", vp->v_unpcb); 4556 break; 4557 case VFIFO: 4558 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4559 break; 4560 default: 4561 printf("\n"); 4562 break; 4563 } 4564 buf[0] = '\0'; 4565 buf[1] = '\0'; 4566 if (holdcnt & VHOLD_NO_SMR) 4567 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4568 printf(" hold count flags (%s)\n", buf + 1); 4569 4570 buf[0] = '\0'; 4571 buf[1] = '\0'; 4572 irflag = vn_irflag_read(vp); 4573 if (irflag & VIRF_DOOMED) 4574 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4575 if (irflag & VIRF_PGREAD) 4576 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4577 if (irflag & VIRF_MOUNTPOINT) 4578 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4579 if (irflag & VIRF_TEXT_REF) 4580 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4581 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4582 if (flags != 0) { 4583 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4584 strlcat(buf, buf2, sizeof(buf)); 4585 } 4586 if (vp->v_vflag & VV_ROOT) 4587 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4588 if (vp->v_vflag & VV_ISTTY) 4589 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4590 if (vp->v_vflag & VV_NOSYNC) 4591 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4592 if (vp->v_vflag & VV_ETERNALDEV) 4593 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4594 if (vp->v_vflag & VV_CACHEDLABEL) 4595 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4596 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4597 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4598 if (vp->v_vflag & VV_COPYONWRITE) 4599 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4600 if (vp->v_vflag & VV_SYSTEM) 4601 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4602 if (vp->v_vflag & VV_PROCDEP) 4603 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4604 if (vp->v_vflag & VV_DELETED) 4605 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4606 if (vp->v_vflag & VV_MD) 4607 strlcat(buf, "|VV_MD", sizeof(buf)); 4608 if (vp->v_vflag & VV_FORCEINSMQ) 4609 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4610 if (vp->v_vflag & VV_READLINK) 4611 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4612 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4613 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4614 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4615 if (flags != 0) { 4616 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4617 strlcat(buf, buf2, sizeof(buf)); 4618 } 4619 if (vp->v_iflag & VI_MOUNT) 4620 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4621 if (vp->v_iflag & VI_DOINGINACT) 4622 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4623 if (vp->v_iflag & VI_OWEINACT) 4624 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4625 if (vp->v_iflag & VI_DEFINACT) 4626 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4627 if (vp->v_iflag & VI_FOPENING) 4628 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4629 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4630 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4631 if (flags != 0) { 4632 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4633 strlcat(buf, buf2, sizeof(buf)); 4634 } 4635 if (vp->v_mflag & VMP_LAZYLIST) 4636 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4637 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4638 if (flags != 0) { 4639 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4640 strlcat(buf, buf2, sizeof(buf)); 4641 } 4642 printf(" flags (%s)", buf + 1); 4643 if (mtx_owned(VI_MTX(vp))) 4644 printf(" VI_LOCKed"); 4645 printf("\n"); 4646 if (vp->v_object != NULL) 4647 printf(" v_object %p ref %d pages %d " 4648 "cleanbuf %d dirtybuf %d\n", 4649 vp->v_object, vp->v_object->ref_count, 4650 vp->v_object->resident_page_count, 4651 vp->v_bufobj.bo_clean.bv_cnt, 4652 vp->v_bufobj.bo_dirty.bv_cnt); 4653 printf(" "); 4654 lockmgr_printinfo(vp->v_vnlock); 4655 if (vp->v_data != NULL) 4656 VOP_PRINT(vp); 4657 } 4658 4659 #ifdef DDB 4660 /* 4661 * List all of the locked vnodes in the system. 4662 * Called when debugging the kernel. 4663 */ 4664 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4665 { 4666 struct mount *mp; 4667 struct vnode *vp; 4668 4669 /* 4670 * Note: because this is DDB, we can't obey the locking semantics 4671 * for these structures, which means we could catch an inconsistent 4672 * state and dereference a nasty pointer. Not much to be done 4673 * about that. 4674 */ 4675 db_printf("Locked vnodes\n"); 4676 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4677 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4678 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4679 vn_printf(vp, "vnode "); 4680 } 4681 } 4682 } 4683 4684 /* 4685 * Show details about the given vnode. 4686 */ 4687 DB_SHOW_COMMAND(vnode, db_show_vnode) 4688 { 4689 struct vnode *vp; 4690 4691 if (!have_addr) 4692 return; 4693 vp = (struct vnode *)addr; 4694 vn_printf(vp, "vnode "); 4695 } 4696 4697 /* 4698 * Show details about the given mount point. 4699 */ 4700 DB_SHOW_COMMAND(mount, db_show_mount) 4701 { 4702 struct mount *mp; 4703 struct vfsopt *opt; 4704 struct statfs *sp; 4705 struct vnode *vp; 4706 char buf[512]; 4707 uint64_t mflags; 4708 u_int flags; 4709 4710 if (!have_addr) { 4711 /* No address given, print short info about all mount points. 
*/ 4712 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4713 db_printf("%p %s on %s (%s)\n", mp, 4714 mp->mnt_stat.f_mntfromname, 4715 mp->mnt_stat.f_mntonname, 4716 mp->mnt_stat.f_fstypename); 4717 if (db_pager_quit) 4718 break; 4719 } 4720 db_printf("\nMore info: show mount <addr>\n"); 4721 return; 4722 } 4723 4724 mp = (struct mount *)addr; 4725 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4726 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4727 4728 buf[0] = '\0'; 4729 mflags = mp->mnt_flag; 4730 #define MNT_FLAG(flag) do { \ 4731 if (mflags & (flag)) { \ 4732 if (buf[0] != '\0') \ 4733 strlcat(buf, ", ", sizeof(buf)); \ 4734 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4735 mflags &= ~(flag); \ 4736 } \ 4737 } while (0) 4738 MNT_FLAG(MNT_RDONLY); 4739 MNT_FLAG(MNT_SYNCHRONOUS); 4740 MNT_FLAG(MNT_NOEXEC); 4741 MNT_FLAG(MNT_NOSUID); 4742 MNT_FLAG(MNT_NFS4ACLS); 4743 MNT_FLAG(MNT_UNION); 4744 MNT_FLAG(MNT_ASYNC); 4745 MNT_FLAG(MNT_SUIDDIR); 4746 MNT_FLAG(MNT_SOFTDEP); 4747 MNT_FLAG(MNT_NOSYMFOLLOW); 4748 MNT_FLAG(MNT_GJOURNAL); 4749 MNT_FLAG(MNT_MULTILABEL); 4750 MNT_FLAG(MNT_ACLS); 4751 MNT_FLAG(MNT_NOATIME); 4752 MNT_FLAG(MNT_NOCLUSTERR); 4753 MNT_FLAG(MNT_NOCLUSTERW); 4754 MNT_FLAG(MNT_SUJ); 4755 MNT_FLAG(MNT_EXRDONLY); 4756 MNT_FLAG(MNT_EXPORTED); 4757 MNT_FLAG(MNT_DEFEXPORTED); 4758 MNT_FLAG(MNT_EXPORTANON); 4759 MNT_FLAG(MNT_EXKERB); 4760 MNT_FLAG(MNT_EXPUBLIC); 4761 MNT_FLAG(MNT_LOCAL); 4762 MNT_FLAG(MNT_QUOTA); 4763 MNT_FLAG(MNT_ROOTFS); 4764 MNT_FLAG(MNT_USER); 4765 MNT_FLAG(MNT_IGNORE); 4766 MNT_FLAG(MNT_UPDATE); 4767 MNT_FLAG(MNT_DELEXPORT); 4768 MNT_FLAG(MNT_RELOAD); 4769 MNT_FLAG(MNT_FORCE); 4770 MNT_FLAG(MNT_SNAPSHOT); 4771 MNT_FLAG(MNT_BYFSID); 4772 #undef MNT_FLAG 4773 if (mflags != 0) { 4774 if (buf[0] != '\0') 4775 strlcat(buf, ", ", sizeof(buf)); 4776 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4777 "0x%016jx", mflags); 4778 } 4779 db_printf(" mnt_flag = %s\n", buf); 4780 4781 buf[0] = '\0'; 4782 flags = mp->mnt_kern_flag; 4783 #define MNT_KERN_FLAG(flag) do { \ 4784 if (flags & (flag)) { \ 4785 if (buf[0] != '\0') \ 4786 strlcat(buf, ", ", sizeof(buf)); \ 4787 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4788 flags &= ~(flag); \ 4789 } \ 4790 } while (0) 4791 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4792 MNT_KERN_FLAG(MNTK_ASYNC); 4793 MNT_KERN_FLAG(MNTK_SOFTDEP); 4794 MNT_KERN_FLAG(MNTK_NOMSYNC); 4795 MNT_KERN_FLAG(MNTK_DRAINING); 4796 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4797 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4798 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4799 MNT_KERN_FLAG(MNTK_NO_IOPF); 4800 MNT_KERN_FLAG(MNTK_RECURSE); 4801 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4802 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4803 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4804 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4805 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4806 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4807 MNT_KERN_FLAG(MNTK_NOASYNC); 4808 MNT_KERN_FLAG(MNTK_UNMOUNT); 4809 MNT_KERN_FLAG(MNTK_MWAIT); 4810 MNT_KERN_FLAG(MNTK_SUSPEND); 4811 MNT_KERN_FLAG(MNTK_SUSPEND2); 4812 MNT_KERN_FLAG(MNTK_SUSPENDED); 4813 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4814 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4815 #undef MNT_KERN_FLAG 4816 if (flags != 0) { 4817 if (buf[0] != '\0') 4818 strlcat(buf, ", ", sizeof(buf)); 4819 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4820 "0x%08x", flags); 4821 } 4822 db_printf(" mnt_kern_flag = %s\n", buf); 4823 4824 db_printf(" mnt_opt = "); 4825 opt = TAILQ_FIRST(mp->mnt_opt); 4826 if (opt != NULL) { 4827 db_printf("%s", opt->name); 4828 opt = TAILQ_NEXT(opt, link); 4829 while (opt != 
NULL) { 4830 db_printf(", %s", opt->name); 4831 opt = TAILQ_NEXT(opt, link); 4832 } 4833 } 4834 db_printf("\n"); 4835 4836 sp = &mp->mnt_stat; 4837 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4838 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4839 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4840 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4841 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4842 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4843 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4844 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4845 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4846 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4847 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4848 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4849 4850 db_printf(" mnt_cred = { uid=%u ruid=%u", 4851 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4852 if (jailed(mp->mnt_cred)) 4853 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4854 db_printf(" }\n"); 4855 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4856 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4857 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4858 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4859 db_printf(" mnt_lazyvnodelistsize = %d\n", 4860 mp->mnt_lazyvnodelistsize); 4861 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4862 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4863 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4864 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4865 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4866 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4867 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4868 db_printf(" mnt_secondary_accwrites = %d\n", 4869 mp->mnt_secondary_accwrites); 4870 db_printf(" mnt_gjprovider = %s\n", 4871 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4872 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4873 4874 db_printf("\n\nList of active vnodes\n"); 4875 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4876 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4877 vn_printf(vp, "vnode "); 4878 if (db_pager_quit) 4879 break; 4880 } 4881 } 4882 db_printf("\n\nList of inactive vnodes\n"); 4883 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4884 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4885 vn_printf(vp, "vnode "); 4886 if (db_pager_quit) 4887 break; 4888 } 4889 } 4890 } 4891 #endif /* DDB */ 4892 4893 /* 4894 * Fill in a struct xvfsconf based on a struct vfsconf. 4895 */ 4896 static int 4897 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4898 { 4899 struct xvfsconf xvfsp; 4900 4901 bzero(&xvfsp, sizeof(xvfsp)); 4902 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4903 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4904 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4905 xvfsp.vfc_flags = vfsp->vfc_flags; 4906 /* 4907 * These are unused in userland, we keep them 4908 * to not break binary compatibility. 
4909 */ 4910 xvfsp.vfc_vfsops = NULL; 4911 xvfsp.vfc_next = NULL; 4912 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4913 } 4914 4915 #ifdef COMPAT_FREEBSD32 4916 struct xvfsconf32 { 4917 uint32_t vfc_vfsops; 4918 char vfc_name[MFSNAMELEN]; 4919 int32_t vfc_typenum; 4920 int32_t vfc_refcount; 4921 int32_t vfc_flags; 4922 uint32_t vfc_next; 4923 }; 4924 4925 static int 4926 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4927 { 4928 struct xvfsconf32 xvfsp; 4929 4930 bzero(&xvfsp, sizeof(xvfsp)); 4931 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4932 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4933 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4934 xvfsp.vfc_flags = vfsp->vfc_flags; 4935 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4936 } 4937 #endif 4938 4939 /* 4940 * Top level filesystem related information gathering. 4941 */ 4942 static int 4943 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4944 { 4945 struct vfsconf *vfsp; 4946 int error; 4947 4948 error = 0; 4949 vfsconf_slock(); 4950 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4951 #ifdef COMPAT_FREEBSD32 4952 if (req->flags & SCTL_MASK32) 4953 error = vfsconf2x32(req, vfsp); 4954 else 4955 #endif 4956 error = vfsconf2x(req, vfsp); 4957 if (error) 4958 break; 4959 } 4960 vfsconf_sunlock(); 4961 return (error); 4962 } 4963 4964 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4965 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4966 "S,xvfsconf", "List of all configured filesystems"); 4967 4968 #ifndef BURN_BRIDGES 4969 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4970 4971 static int 4972 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4973 { 4974 int *name = (int *)arg1 - 1; /* XXX */ 4975 u_int namelen = arg2 + 1; /* XXX */ 4976 struct vfsconf *vfsp; 4977 4978 log(LOG_WARNING, "userland calling deprecated sysctl, " 4979 "please rebuild world\n"); 4980 4981 #if 1 || defined(COMPAT_PRELITE2) 4982 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4983 if (namelen == 1) 4984 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4985 #endif 4986 4987 switch (name[1]) { 4988 case VFS_MAXTYPENUM: 4989 if (namelen != 2) 4990 return (ENOTDIR); 4991 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4992 case VFS_CONF: 4993 if (namelen != 3) 4994 return (ENOTDIR); /* overloaded */ 4995 vfsconf_slock(); 4996 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4997 if (vfsp->vfc_typenum == name[2]) 4998 break; 4999 } 5000 vfsconf_sunlock(); 5001 if (vfsp == NULL) 5002 return (EOPNOTSUPP); 5003 #ifdef COMPAT_FREEBSD32 5004 if (req->flags & SCTL_MASK32) 5005 return (vfsconf2x32(req, vfsp)); 5006 else 5007 #endif 5008 return (vfsconf2x(req, vfsp)); 5009 } 5010 return (EOPNOTSUPP); 5011 } 5012 5013 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 5014 CTLFLAG_MPSAFE, vfs_sysctl, 5015 "Generic filesystem"); 5016 5017 #if 1 || defined(COMPAT_PRELITE2) 5018 5019 static int 5020 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 5021 { 5022 int error; 5023 struct vfsconf *vfsp; 5024 struct ovfsconf ovfs; 5025 5026 vfsconf_slock(); 5027 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 5028 bzero(&ovfs, sizeof(ovfs)); 5029 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 5030 strcpy(ovfs.vfc_name, vfsp->vfc_name); 5031 ovfs.vfc_index = vfsp->vfc_typenum; 5032 ovfs.vfc_refcount = vfsp->vfc_refcount; 5033 ovfs.vfc_flags = vfsp->vfc_flags; 5034 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 5035 if (error != 0) { 5036 vfsconf_sunlock(); 5037 return (error); 5038 } 5039 } 5040 vfsconf_sunlock(); 5041 return (0); 5042 } 5043 5044 #endif /* 1 || COMPAT_PRELITE2 */ 5045 #endif /* !BURN_BRIDGES */ 5046 5047 static void 5048 unmount_or_warn(struct mount *mp) 5049 { 5050 int error; 5051 5052 error = dounmount(mp, MNT_FORCE, curthread); 5053 if (error != 0) { 5054 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 5055 if (error == EBUSY) 5056 printf("BUSY)\n"); 5057 else 5058 printf("%d)\n", error); 5059 } 5060 } 5061 5062 /* 5063 * Unmount all filesystems. The list is traversed in reverse order 5064 * of mounting to avoid dependencies. 5065 */ 5066 void 5067 vfs_unmountall(void) 5068 { 5069 struct mount *mp, *tmp; 5070 5071 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 5072 5073 /* 5074 * Since this only runs when rebooting, it is not interlocked. 5075 */ 5076 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 5077 vfs_ref(mp); 5078 5079 /* 5080 * Forcibly unmounting "/dev" before "/" would prevent clean 5081 * unmount of the latter. 
5082 */ 5083 if (mp == rootdevmp) 5084 continue; 5085 5086 unmount_or_warn(mp); 5087 } 5088 5089 if (rootdevmp != NULL) 5090 unmount_or_warn(rootdevmp); 5091 } 5092 5093 static void 5094 vfs_deferred_inactive(struct vnode *vp, int lkflags) 5095 { 5096 5097 ASSERT_VI_LOCKED(vp, __func__); 5098 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 5099 if ((vp->v_iflag & VI_OWEINACT) == 0) { 5100 vdropl(vp); 5101 return; 5102 } 5103 if (vn_lock(vp, lkflags) == 0) { 5104 VI_LOCK(vp); 5105 vinactive(vp); 5106 VOP_UNLOCK(vp); 5107 vdropl(vp); 5108 return; 5109 } 5110 vdefer_inactive_unlocked(vp); 5111 } 5112 5113 static int 5114 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5115 { 5116 5117 return (vp->v_iflag & VI_DEFINACT); 5118 } 5119 5120 static void __noinline 5121 vfs_periodic_inactive(struct mount *mp, int flags) 5122 { 5123 struct vnode *vp, *mvp; 5124 int lkflags; 5125 5126 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5127 if (flags != MNT_WAIT) 5128 lkflags |= LK_NOWAIT; 5129 5130 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5131 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5132 VI_UNLOCK(vp); 5133 continue; 5134 } 5135 vp->v_iflag &= ~VI_DEFINACT; 5136 vfs_deferred_inactive(vp, lkflags); 5137 } 5138 } 5139 5140 static inline bool 5141 vfs_want_msync(struct vnode *vp) 5142 { 5143 struct vm_object *obj; 5144 5145 /* 5146 * This test may be performed without any locks held. 5147 * We rely on vm_object's type stability. 5148 */ 5149 if (vp->v_vflag & VV_NOSYNC) 5150 return (false); 5151 obj = vp->v_object; 5152 return (obj != NULL && vm_object_mightbedirty(obj)); 5153 } 5154 5155 static int 5156 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5157 { 5158 5159 if (vp->v_vflag & VV_NOSYNC) 5160 return (false); 5161 if (vp->v_iflag & VI_DEFINACT) 5162 return (true); 5163 return (vfs_want_msync(vp)); 5164 } 5165 5166 static void __noinline 5167 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5168 { 5169 struct vnode *vp, *mvp; 5170 int lkflags; 5171 bool seen_defer; 5172 5173 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5174 if (flags != MNT_WAIT) 5175 lkflags |= LK_NOWAIT; 5176 5177 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5178 seen_defer = false; 5179 if (vp->v_iflag & VI_DEFINACT) { 5180 vp->v_iflag &= ~VI_DEFINACT; 5181 seen_defer = true; 5182 } 5183 if (!vfs_want_msync(vp)) { 5184 if (seen_defer) 5185 vfs_deferred_inactive(vp, lkflags); 5186 else 5187 VI_UNLOCK(vp); 5188 continue; 5189 } 5190 if (vget(vp, lkflags) == 0) { 5191 if ((vp->v_vflag & VV_NOSYNC) == 0) { 5192 if (flags == MNT_WAIT) 5193 vnode_pager_clean_sync(vp); 5194 else 5195 vnode_pager_clean_async(vp); 5196 } 5197 vput(vp); 5198 if (seen_defer) 5199 vdrop(vp); 5200 } else { 5201 if (seen_defer) 5202 vdefer_inactive_unlocked(vp); 5203 } 5204 } 5205 } 5206 5207 void 5208 vfs_periodic(struct mount *mp, int flags) 5209 { 5210 5211 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5212 5213 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5214 vfs_periodic_inactive(mp, flags); 5215 else 5216 vfs_periodic_msync_inactive(mp, flags); 5217 } 5218 5219 static void 5220 destroy_vpollinfo_free(struct vpollinfo *vi) 5221 { 5222 5223 knlist_destroy(&vi->vpi_selinfo.si_note); 5224 mtx_destroy(&vi->vpi_lock); 5225 free(vi, M_VNODEPOLL); 5226 } 5227 5228 static void 5229 destroy_vpollinfo(struct vpollinfo *vi) 5230 { 5231 5232 knlist_clear(&vi->vpi_selinfo.si_note, 1); 5233 seldrain(&vi->vpi_selinfo); 5234 destroy_vpollinfo_free(vi); 5235 } 5236 5237 /* 5238 * 
Initialize per-vnode helper structure to hold poll-related state.
5239 */
5240 void
5241 v_addpollinfo(struct vnode *vp)
5242 {
5243 struct vpollinfo *vi;
5244
5245 if (vp->v_pollinfo != NULL)
5246 return;
5247 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
5248 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5249 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5250 vfs_knlunlock, vfs_knl_assert_lock);
5251 VI_LOCK(vp);
5252 if (vp->v_pollinfo != NULL) {
5253 VI_UNLOCK(vp);
5254 destroy_vpollinfo_free(vi);
5255 return;
5256 }
5257 vp->v_pollinfo = vi;
5258 VI_UNLOCK(vp);
5259 }
5260
5261 /*
5262 * Record a process's interest in events which might happen to
5263 * a vnode. Because poll uses the historic select-style interface
5264 * internally, this routine serves as both the ``check for any
5265 * pending events'' and the ``record my interest in future events''
5266 * functions. (These are done together, while the lock is held,
5267 * to avoid race conditions.)
5268 */
5269 int
5270 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5271 {
5272
5273 v_addpollinfo(vp);
5274 mtx_lock(&vp->v_pollinfo->vpi_lock);
5275 if (vp->v_pollinfo->vpi_revents & events) {
5276 /*
5277 * This leaves events we are not interested
5278 * in available for the other process which
5279 * presumably had requested them
5280 * (otherwise they would never have been
5281 * recorded).
5282 */
5283 events &= vp->v_pollinfo->vpi_revents;
5284 vp->v_pollinfo->vpi_revents &= ~events;
5285
5286 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5287 return (events);
5288 }
5289 vp->v_pollinfo->vpi_events |= events;
5290 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5291 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5292 return (0);
5293 }
5294
5295 /*
5296 * Routine to create and manage a filesystem syncer vnode.
5297 */
5298 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5299 static int sync_fsync(struct vop_fsync_args *);
5300 static int sync_inactive(struct vop_inactive_args *);
5301 static int sync_reclaim(struct vop_reclaim_args *);
5302
5303 static struct vop_vector sync_vnodeops = {
5304 .vop_bypass = VOP_EOPNOTSUPP,
5305 .vop_close = sync_close,
5306 .vop_fsync = sync_fsync,
5307 .vop_getwritemount = vop_stdgetwritemount,
5308 .vop_inactive = sync_inactive,
5309 .vop_need_inactive = vop_stdneed_inactive,
5310 .vop_reclaim = sync_reclaim,
5311 .vop_lock1 = vop_stdlock,
5312 .vop_unlock = vop_stdunlock,
5313 .vop_islocked = vop_stdislocked,
5314 .vop_fplookup_vexec = VOP_EAGAIN,
5315 .vop_fplookup_symlink = VOP_EAGAIN,
5316 };
5317 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5318
5319 /*
5320 * Create a new filesystem syncer vnode for the specified mount point.
5321 */
5322 void
5323 vfs_allocate_syncvnode(struct mount *mp)
5324 {
5325 struct vnode *vp;
5326 struct bufobj *bo;
5327 static long start, incr, next;
5328 int error;
5329
5330 /* Allocate a new vnode */
5331 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5332 if (error != 0)
5333 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5334 vp->v_type = VNON;
5335 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5336 vp->v_vflag |= VV_FORCEINSMQ;
5337 error = insmntque1(vp, mp);
5338 if (error != 0)
5339 panic("vfs_allocate_syncvnode: insmntque() failed");
5340 vp->v_vflag &= ~VV_FORCEINSMQ;
5341 vn_set_state(vp, VSTATE_CONSTRUCTED);
5342 VOP_UNLOCK(vp);
5343 /*
5344 * Place the vnode onto the syncer worklist. We attempt to
5345 * scatter them about on the list so that they will go off
5346 * at evenly distributed times even if all the filesystems
5347 * are mounted at once.
5348 */
5349 next += incr;
5350 if (next == 0 || next > syncer_maxdelay) {
5351 start /= 2;
5352 incr /= 2;
5353 if (start == 0) {
5354 start = syncer_maxdelay / 2;
5355 incr = syncer_maxdelay;
5356 }
5357 next = start;
5358 }
5359 bo = &vp->v_bufobj;
5360 BO_LOCK(bo);
5361 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5362 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5363 mtx_lock(&sync_mtx);
5364 sync_vnode_count++;
5365 if (mp->mnt_syncer == NULL) {
5366 mp->mnt_syncer = vp;
5367 vp = NULL;
5368 }
5369 mtx_unlock(&sync_mtx);
5370 BO_UNLOCK(bo);
5371 if (vp != NULL) {
5372 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5373 vgone(vp);
5374 vput(vp);
5375 }
5376 }
5377
5378 void
5379 vfs_deallocate_syncvnode(struct mount *mp)
5380 {
5381 struct vnode *vp;
5382
5383 mtx_lock(&sync_mtx);
5384 vp = mp->mnt_syncer;
5385 if (vp != NULL)
5386 mp->mnt_syncer = NULL;
5387 mtx_unlock(&sync_mtx);
5388 if (vp != NULL)
5389 vrele(vp);
5390 }
5391
5392 /*
5393 * Do a lazy sync of the filesystem.
5394 */
5395 static int
5396 sync_fsync(struct vop_fsync_args *ap)
5397 {
5398 struct vnode *syncvp = ap->a_vp;
5399 struct mount *mp = syncvp->v_mount;
5400 int error, save;
5401 struct bufobj *bo;
5402
5403 /*
5404 * We only need to do something if this is a lazy evaluation.
5405 */
5406 if (ap->a_waitfor != MNT_LAZY)
5407 return (0);
5408
5409 /*
5410 * Move ourselves to the back of the sync list.
5411 */
5412 bo = &syncvp->v_bufobj;
5413 BO_LOCK(bo);
5414 vn_syncer_add_to_worklist(bo, syncdelay);
5415 BO_UNLOCK(bo);
5416
5417 /*
5418 * Walk the list of vnodes pushing all that are dirty and
5419 * not already on the sync list.
5420 */
5421 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5422 return (0);
5423 VOP_UNLOCK(syncvp);
5424 save = curthread_pflags_set(TDP_SYNCIO);
5425 /*
5426 * The filesystem at hand may be idle with free vnodes stored in the
5427 * batch. Return them instead of letting them stay there indefinitely.
5428 */
5429 vfs_periodic(mp, MNT_NOWAIT);
5430 error = VFS_SYNC(mp, MNT_LAZY);
5431 curthread_pflags_restore(save);
5432 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5433 vfs_unbusy(mp);
5434 return (error);
5435 }
5436
5437 /*
5438 * The syncer vnode is no longer referenced.
5439 */
5440 static int
5441 sync_inactive(struct vop_inactive_args *ap)
5442 {
5443
5444 vgone(ap->a_vp);
5445 return (0);
5446 }
5447
5448 /*
5449 * The syncer vnode is no longer needed and is being decommissioned.
5450 *
5451 * Modifications to the worklist must be protected by sync_mtx.
5452 */
5453 static int
5454 sync_reclaim(struct vop_reclaim_args *ap)
5455 {
5456 struct vnode *vp = ap->a_vp;
5457 struct bufobj *bo;
5458
5459 bo = &vp->v_bufobj;
5460 BO_LOCK(bo);
5461 mtx_lock(&sync_mtx);
5462 if (vp->v_mount->mnt_syncer == vp)
5463 vp->v_mount->mnt_syncer = NULL;
5464 if (bo->bo_flag & BO_ONWORKLST) {
5465 LIST_REMOVE(bo, bo_synclist);
5466 syncer_worklist_len--;
5467 sync_vnode_count--;
5468 bo->bo_flag &= ~BO_ONWORKLST;
5469 }
5470 mtx_unlock(&sync_mtx);
5471 BO_UNLOCK(bo);
5472
5473 return (0);
5474 }
5475
5476 int
5477 vn_need_pageq_flush(struct vnode *vp)
5478 {
5479 struct vm_object *obj;
5480
5481 obj = vp->v_object;
5482 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5483 vm_object_mightbedirty(obj));
5484 }
5485
5486 /*
5487 * Check if the vnode represents a disk device.
5488 */
5489 bool
5490 vn_isdisk_error(struct vnode *vp, int *errp)
5491 {
5492 int error;
5493
5494 if (vp->v_type != VCHR) {
5495 error = ENOTBLK;
5496 goto out;
5497 }
5498 error = 0;
5499 dev_lock();
5500 if (vp->v_rdev == NULL)
5501 error = ENXIO;
5502 else if (vp->v_rdev->si_devsw == NULL)
5503 error = ENXIO;
5504 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5505 error = ENOTBLK;
5506 dev_unlock();
5507 out:
5508 *errp = error;
5509 return (error == 0);
5510 }
5511
5512 bool
5513 vn_isdisk(struct vnode *vp)
5514 {
5515 int error;
5516
5517 return (vn_isdisk_error(vp, &error));
5518 }
5519
5520 /*
5521 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5522 * the comment above cache_fplookup for details.
5523 */
5524 int
5525 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5526 {
5527 int error;
5528
5529 VFS_SMR_ASSERT_ENTERED();
5530
5531 /* Check the owner. */
5532 if (cred->cr_uid == file_uid) {
5533 if (file_mode & S_IXUSR)
5534 return (0);
5535 goto out_error;
5536 }
5537
5538 /* Otherwise, check the groups (first match) */
5539 if (groupmember(file_gid, cred)) {
5540 if (file_mode & S_IXGRP)
5541 return (0);
5542 goto out_error;
5543 }
5544
5545 /* Otherwise, check everyone else. */
5546 if (file_mode & S_IXOTH)
5547 return (0);
5548 out_error:
5549 /*
5550 * Permission check failed, but it is possible the denial will get overridden
5551 * (e.g., when root is traversing through a 700 directory owned by someone
5552 * else).
5553 *
5554 * vaccess() calls priv_check_cred which in turn can descend into MAC
5555 * modules overriding this result. It's quite unclear what semantics
5556 * they are allowed to operate with, thus for safety we don't call them
5557 * from within the SMR section. This also means that if any such modules
5558 * are present, we have to let the regular lookup decide.
5559 */
5560 error = priv_check_cred_vfs_lookup_nomac(cred);
5561 switch (error) {
5562 case 0:
5563 return (0);
5564 case EAGAIN:
5565 /*
5566 * MAC modules present.
5567 */
5568 return (EAGAIN);
5569 case EPERM:
5570 return (EACCES);
5571 default:
5572 return (error);
5573 }
5574 }
5575
5576 /*
5577 * Common filesystem object access control check routine. Accepts a
5578 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5579 * Returns 0 on success, or an errno on failure.
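 *
 * A hedged worked example (all numbers made up): for a regular file with
 * file_mode 0640, file_uid 1001 and file_gid 0, a VREAD|VWRITE request from a
 * credential with cr_uid == 1001 collects dac_granted =
 * VADMIN|VREAD|VWRITE|VAPPEND from the owner bits (rw-) and succeeds without
 * consulting privileges, while the same request from a non-owner member of
 * group 0 only collects VREAD (r--) and falls through to the priv_check_cred()
 * calls below.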
5580 */ 5581 int 5582 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5583 accmode_t accmode, struct ucred *cred) 5584 { 5585 accmode_t dac_granted; 5586 accmode_t priv_granted; 5587 5588 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5589 ("invalid bit in accmode")); 5590 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5591 ("VAPPEND without VWRITE")); 5592 5593 /* 5594 * Look for a normal, non-privileged way to access the file/directory 5595 * as requested. If it exists, go with that. 5596 */ 5597 5598 dac_granted = 0; 5599 5600 /* Check the owner. */ 5601 if (cred->cr_uid == file_uid) { 5602 dac_granted |= VADMIN; 5603 if (file_mode & S_IXUSR) 5604 dac_granted |= VEXEC; 5605 if (file_mode & S_IRUSR) 5606 dac_granted |= VREAD; 5607 if (file_mode & S_IWUSR) 5608 dac_granted |= (VWRITE | VAPPEND); 5609 5610 if ((accmode & dac_granted) == accmode) 5611 return (0); 5612 5613 goto privcheck; 5614 } 5615 5616 /* Otherwise, check the groups (first match) */ 5617 if (groupmember(file_gid, cred)) { 5618 if (file_mode & S_IXGRP) 5619 dac_granted |= VEXEC; 5620 if (file_mode & S_IRGRP) 5621 dac_granted |= VREAD; 5622 if (file_mode & S_IWGRP) 5623 dac_granted |= (VWRITE | VAPPEND); 5624 5625 if ((accmode & dac_granted) == accmode) 5626 return (0); 5627 5628 goto privcheck; 5629 } 5630 5631 /* Otherwise, check everyone else. */ 5632 if (file_mode & S_IXOTH) 5633 dac_granted |= VEXEC; 5634 if (file_mode & S_IROTH) 5635 dac_granted |= VREAD; 5636 if (file_mode & S_IWOTH) 5637 dac_granted |= (VWRITE | VAPPEND); 5638 if ((accmode & dac_granted) == accmode) 5639 return (0); 5640 5641 privcheck: 5642 /* 5643 * Build a privilege mask to determine if the set of privileges 5644 * satisfies the requirements when combined with the granted mask 5645 * from above. For each privilege, if the privilege is required, 5646 * bitwise or the request type onto the priv_granted mask. 5647 */ 5648 priv_granted = 0; 5649 5650 if (type == VDIR) { 5651 /* 5652 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5653 * requests, instead of PRIV_VFS_EXEC. 5654 */ 5655 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5656 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5657 priv_granted |= VEXEC; 5658 } else { 5659 /* 5660 * Ensure that at least one execute bit is on. Otherwise, 5661 * a privileged user will always succeed, and we don't want 5662 * this to happen unless the file really is executable. 5663 */ 5664 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5665 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5666 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5667 priv_granted |= VEXEC; 5668 } 5669 5670 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5671 !priv_check_cred(cred, PRIV_VFS_READ)) 5672 priv_granted |= VREAD; 5673 5674 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5675 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5676 priv_granted |= (VWRITE | VAPPEND); 5677 5678 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5679 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5680 priv_granted |= VADMIN; 5681 5682 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5683 return (0); 5684 } 5685 5686 return ((accmode & VADMIN) ? EPERM : EACCES); 5687 } 5688 5689 /* 5690 * Credential check based on process requesting service, and per-attribute 5691 * permissions. 
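 *
 * An illustrative call site (hypothetical, but matching the signature of the
 * function below):
 *
 *	error = extattr_check_cred(vp, attrnamespace, cred, td, VREAD);
 *	if (error != 0)
 *		return (error);
 *
 * with VWRITE passed instead for set and delete operations.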
5692 */ 5693 int 5694 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5695 struct thread *td, accmode_t accmode) 5696 { 5697 5698 /* 5699 * Kernel-invoked always succeeds. 5700 */ 5701 if (cred == NOCRED) 5702 return (0); 5703 5704 /* 5705 * Do not allow privileged processes in jail to directly manipulate 5706 * system attributes. 5707 */ 5708 switch (attrnamespace) { 5709 case EXTATTR_NAMESPACE_SYSTEM: 5710 /* Potentially should be: return (EPERM); */ 5711 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5712 case EXTATTR_NAMESPACE_USER: 5713 return (VOP_ACCESS(vp, accmode, cred, td)); 5714 default: 5715 return (EPERM); 5716 } 5717 } 5718 5719 #ifdef DEBUG_VFS_LOCKS 5720 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5721 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5722 "Drop into debugger on lock violation"); 5723 5724 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5725 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5726 0, "Check for interlock across VOPs"); 5727 5728 int vfs_badlock_print = 1; /* Print lock violations. */ 5729 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5730 0, "Print lock violations"); 5731 5732 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5733 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5734 0, "Print vnode details on lock violations"); 5735 5736 #ifdef KDB 5737 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5738 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5739 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5740 #endif 5741 5742 static void 5743 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5744 { 5745 5746 #ifdef KDB 5747 if (vfs_badlock_backtrace) 5748 kdb_backtrace(); 5749 #endif 5750 if (vfs_badlock_vnode) 5751 vn_printf(vp, "vnode "); 5752 if (vfs_badlock_print) 5753 printf("%s: %p %s\n", str, (void *)vp, msg); 5754 if (vfs_badlock_ddb) 5755 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5756 } 5757 5758 void 5759 assert_vi_locked(struct vnode *vp, const char *str) 5760 { 5761 5762 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5763 vfs_badlock("interlock is not locked but should be", str, vp); 5764 } 5765 5766 void 5767 assert_vi_unlocked(struct vnode *vp, const char *str) 5768 { 5769 5770 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5771 vfs_badlock("interlock is locked but should not be", str, vp); 5772 } 5773 5774 void 5775 assert_vop_locked(struct vnode *vp, const char *str) 5776 { 5777 if (KERNEL_PANICKED() || vp == NULL) 5778 return; 5779 5780 #ifdef WITNESS 5781 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5782 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5783 #else 5784 int locked = VOP_ISLOCKED(vp); 5785 if (locked == 0 || locked == LK_EXCLOTHER) 5786 #endif 5787 vfs_badlock("is not locked but should be", str, vp); 5788 } 5789 5790 void 5791 assert_vop_unlocked(struct vnode *vp, const char *str) 5792 { 5793 if (KERNEL_PANICKED() || vp == NULL) 5794 return; 5795 5796 #ifdef WITNESS 5797 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5798 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5799 #else 5800 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5801 #endif 5802 vfs_badlock("is locked but should not be", str, vp); 5803 } 5804 5805 void 5806 assert_vop_elocked(struct vnode *vp, const char *str) 5807 { 5808 if (KERNEL_PANICKED() || vp == 
NULL) 5809 return; 5810 5811 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5812 vfs_badlock("is not exclusive locked but should be", str, vp); 5813 } 5814 #endif /* DEBUG_VFS_LOCKS */ 5815 5816 void 5817 vop_rename_fail(struct vop_rename_args *ap) 5818 { 5819 5820 if (ap->a_tvp != NULL) 5821 vput(ap->a_tvp); 5822 if (ap->a_tdvp == ap->a_tvp) 5823 vrele(ap->a_tdvp); 5824 else 5825 vput(ap->a_tdvp); 5826 vrele(ap->a_fdvp); 5827 vrele(ap->a_fvp); 5828 } 5829 5830 void 5831 vop_rename_pre(void *ap) 5832 { 5833 struct vop_rename_args *a = ap; 5834 5835 #ifdef DEBUG_VFS_LOCKS 5836 if (a->a_tvp) 5837 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5838 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5839 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5840 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5841 5842 /* Check the source (from). */ 5843 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5844 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5845 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5846 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5847 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5848 5849 /* Check the target. */ 5850 if (a->a_tvp) 5851 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5852 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5853 #endif 5854 /* 5855 * It may be tempting to add vn_seqc_write_begin/end calls here and 5856 * in vop_rename_post but that's not going to work out since some 5857 * filesystems relookup vnodes mid-rename. This is probably a bug. 5858 * 5859 * For now filesystems are expected to do the relevant calls after they 5860 * decide what vnodes to operate on. 5861 */ 5862 if (a->a_tdvp != a->a_fdvp) 5863 vhold(a->a_fdvp); 5864 if (a->a_tvp != a->a_fvp) 5865 vhold(a->a_fvp); 5866 vhold(a->a_tdvp); 5867 if (a->a_tvp) 5868 vhold(a->a_tvp); 5869 } 5870 5871 #ifdef DEBUG_VFS_LOCKS 5872 void 5873 vop_fplookup_vexec_debugpre(void *ap __unused) 5874 { 5875 5876 VFS_SMR_ASSERT_ENTERED(); 5877 } 5878 5879 void 5880 vop_fplookup_vexec_debugpost(void *ap, int rc) 5881 { 5882 struct vop_fplookup_vexec_args *a; 5883 struct vnode *vp; 5884 5885 a = ap; 5886 vp = a->a_vp; 5887 5888 VFS_SMR_ASSERT_ENTERED(); 5889 if (rc == EOPNOTSUPP) 5890 VNPASS(VN_IS_DOOMED(vp), vp); 5891 } 5892 5893 void 5894 vop_fplookup_symlink_debugpre(void *ap __unused) 5895 { 5896 5897 VFS_SMR_ASSERT_ENTERED(); 5898 } 5899 5900 void 5901 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5902 { 5903 5904 VFS_SMR_ASSERT_ENTERED(); 5905 } 5906 5907 static void 5908 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5909 { 5910 if (vp->v_type == VCHR) 5911 ; 5912 /* 5913 * The shared vs. exclusive locking policy for fsync() 5914 * is actually determined by vp's write mount as indicated 5915 * by VOP_GETWRITEMOUNT(), which for stacked filesystems 5916 * may not be the same as vp->v_mount. However, if the 5917 * underlying filesystem which really handles the fsync() 5918 * supports shared locking, the stacked filesystem must also 5919 * be prepared for its VOP_FSYNC() operation to be called 5920 * with only a shared lock. On the other hand, if the 5921 * stacked filesystem claims support for shared write 5922 * locking but the underlying filesystem does not, and the 5923 * caller incorrectly uses a shared lock, this condition 5924 * should still be caught when the stacked filesystem 5925 * invokes VOP_FSYNC() on the underlying filesystem. 
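 *
 * Put differently (illustrative, no new policy): a caller that entered
 * VOP_FSYNC() holding only a shared lock is legitimate exactly when vp's
 * own mount advertises shared-write support, which is what the
 * MNT_SHARED_WRITES() test below verifies; otherwise the assertion
 * demands an exclusive lock.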
5926 */ 5927 else if (MNT_SHARED_WRITES(vp->v_mount)) 5928 ASSERT_VOP_LOCKED(vp, name); 5929 else 5930 ASSERT_VOP_ELOCKED(vp, name); 5931 } 5932 5933 void 5934 vop_fsync_debugpre(void *a) 5935 { 5936 struct vop_fsync_args *ap; 5937 5938 ap = a; 5939 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5940 } 5941 5942 void 5943 vop_fsync_debugpost(void *a, int rc __unused) 5944 { 5945 struct vop_fsync_args *ap; 5946 5947 ap = a; 5948 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5949 } 5950 5951 void 5952 vop_fdatasync_debugpre(void *a) 5953 { 5954 struct vop_fdatasync_args *ap; 5955 5956 ap = a; 5957 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5958 } 5959 5960 void 5961 vop_fdatasync_debugpost(void *a, int rc __unused) 5962 { 5963 struct vop_fdatasync_args *ap; 5964 5965 ap = a; 5966 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5967 } 5968 5969 void 5970 vop_strategy_debugpre(void *ap) 5971 { 5972 struct vop_strategy_args *a; 5973 struct buf *bp; 5974 5975 a = ap; 5976 bp = a->a_bp; 5977 5978 /* 5979 * Cluster ops lock their component buffers but not the IO container. 5980 */ 5981 if ((bp->b_flags & B_CLUSTER) != 0) 5982 return; 5983 5984 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5985 if (vfs_badlock_print) 5986 printf( 5987 "VOP_STRATEGY: bp is not locked but should be\n"); 5988 if (vfs_badlock_ddb) 5989 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5990 } 5991 } 5992 5993 void 5994 vop_lock_debugpre(void *ap) 5995 { 5996 struct vop_lock1_args *a = ap; 5997 5998 if ((a->a_flags & LK_INTERLOCK) == 0) 5999 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 6000 else 6001 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 6002 } 6003 6004 void 6005 vop_lock_debugpost(void *ap, int rc) 6006 { 6007 struct vop_lock1_args *a = ap; 6008 6009 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 6010 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 6011 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 6012 } 6013 6014 void 6015 vop_unlock_debugpre(void *ap) 6016 { 6017 struct vop_unlock_args *a = ap; 6018 struct vnode *vp = a->a_vp; 6019 6020 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 6021 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 6022 } 6023 6024 void 6025 vop_need_inactive_debugpre(void *ap) 6026 { 6027 struct vop_need_inactive_args *a = ap; 6028 6029 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 6030 } 6031 6032 void 6033 vop_need_inactive_debugpost(void *ap, int rc) 6034 { 6035 struct vop_need_inactive_args *a = ap; 6036 6037 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 6038 } 6039 #endif 6040 6041 void 6042 vop_create_pre(void *ap) 6043 { 6044 struct vop_create_args *a; 6045 struct vnode *dvp; 6046 6047 a = ap; 6048 dvp = a->a_dvp; 6049 vn_seqc_write_begin(dvp); 6050 } 6051 6052 void 6053 vop_create_post(void *ap, int rc) 6054 { 6055 struct vop_create_args *a; 6056 struct vnode *dvp; 6057 6058 a = ap; 6059 dvp = a->a_dvp; 6060 vn_seqc_write_end(dvp); 6061 if (!rc) 6062 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6063 } 6064 6065 void 6066 vop_whiteout_pre(void *ap) 6067 { 6068 struct vop_whiteout_args *a; 6069 struct vnode *dvp; 6070 6071 a = ap; 6072 dvp = a->a_dvp; 6073 vn_seqc_write_begin(dvp); 6074 } 6075 6076 void 6077 vop_whiteout_post(void *ap, int rc) 6078 { 6079 struct vop_whiteout_args *a; 6080 struct vnode *dvp; 6081 6082 a = ap; 6083 dvp = a->a_dvp; 6084 vn_seqc_write_end(dvp); 6085 } 6086 6087 void 6088 vop_deleteextattr_pre(void *ap) 6089 { 6090 struct vop_deleteextattr_args *a; 6091 struct vnode *vp; 6092 6093 a = ap; 6094 vp = a->a_vp; 6095 vn_seqc_write_begin(vp); 6096 } 6097 6098 void 6099 vop_deleteextattr_post(void *ap, int 
rc) 6100 { 6101 struct vop_deleteextattr_args *a; 6102 struct vnode *vp; 6103 6104 a = ap; 6105 vp = a->a_vp; 6106 vn_seqc_write_end(vp); 6107 if (!rc) 6108 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 6109 } 6110 6111 void 6112 vop_link_pre(void *ap) 6113 { 6114 struct vop_link_args *a; 6115 struct vnode *vp, *tdvp; 6116 6117 a = ap; 6118 vp = a->a_vp; 6119 tdvp = a->a_tdvp; 6120 vn_seqc_write_begin(vp); 6121 vn_seqc_write_begin(tdvp); 6122 } 6123 6124 void 6125 vop_link_post(void *ap, int rc) 6126 { 6127 struct vop_link_args *a; 6128 struct vnode *vp, *tdvp; 6129 6130 a = ap; 6131 vp = a->a_vp; 6132 tdvp = a->a_tdvp; 6133 vn_seqc_write_end(vp); 6134 vn_seqc_write_end(tdvp); 6135 if (!rc) { 6136 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6137 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6138 } 6139 } 6140 6141 void 6142 vop_mkdir_pre(void *ap) 6143 { 6144 struct vop_mkdir_args *a; 6145 struct vnode *dvp; 6146 6147 a = ap; 6148 dvp = a->a_dvp; 6149 vn_seqc_write_begin(dvp); 6150 } 6151 6152 void 6153 vop_mkdir_post(void *ap, int rc) 6154 { 6155 struct vop_mkdir_args *a; 6156 struct vnode *dvp; 6157 6158 a = ap; 6159 dvp = a->a_dvp; 6160 vn_seqc_write_end(dvp); 6161 if (!rc) 6162 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6163 } 6164 6165 #ifdef DEBUG_VFS_LOCKS 6166 void 6167 vop_mkdir_debugpost(void *ap, int rc) 6168 { 6169 struct vop_mkdir_args *a; 6170 6171 a = ap; 6172 if (!rc) 6173 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 6174 } 6175 #endif 6176 6177 void 6178 vop_mknod_pre(void *ap) 6179 { 6180 struct vop_mknod_args *a; 6181 struct vnode *dvp; 6182 6183 a = ap; 6184 dvp = a->a_dvp; 6185 vn_seqc_write_begin(dvp); 6186 } 6187 6188 void 6189 vop_mknod_post(void *ap, int rc) 6190 { 6191 struct vop_mknod_args *a; 6192 struct vnode *dvp; 6193 6194 a = ap; 6195 dvp = a->a_dvp; 6196 vn_seqc_write_end(dvp); 6197 if (!rc) 6198 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6199 } 6200 6201 void 6202 vop_reclaim_post(void *ap, int rc) 6203 { 6204 struct vop_reclaim_args *a; 6205 struct vnode *vp; 6206 6207 a = ap; 6208 vp = a->a_vp; 6209 ASSERT_VOP_IN_SEQC(vp); 6210 if (!rc) 6211 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 6212 } 6213 6214 void 6215 vop_remove_pre(void *ap) 6216 { 6217 struct vop_remove_args *a; 6218 struct vnode *dvp, *vp; 6219 6220 a = ap; 6221 dvp = a->a_dvp; 6222 vp = a->a_vp; 6223 vn_seqc_write_begin(dvp); 6224 vn_seqc_write_begin(vp); 6225 } 6226 6227 void 6228 vop_remove_post(void *ap, int rc) 6229 { 6230 struct vop_remove_args *a; 6231 struct vnode *dvp, *vp; 6232 6233 a = ap; 6234 dvp = a->a_dvp; 6235 vp = a->a_vp; 6236 vn_seqc_write_end(dvp); 6237 vn_seqc_write_end(vp); 6238 if (!rc) { 6239 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6240 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6241 } 6242 } 6243 6244 void 6245 vop_rename_post(void *ap, int rc) 6246 { 6247 struct vop_rename_args *a = ap; 6248 long hint; 6249 6250 if (!rc) { 6251 hint = NOTE_WRITE; 6252 if (a->a_fdvp == a->a_tdvp) { 6253 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6254 hint |= NOTE_LINK; 6255 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6256 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6257 } else { 6258 hint |= NOTE_EXTEND; 6259 if (a->a_fvp->v_type == VDIR) 6260 hint |= NOTE_LINK; 6261 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6262 6263 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6264 a->a_tvp->v_type == VDIR) 6265 hint &= ~NOTE_LINK; 6266 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6267 } 6268 6269 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6270 if (a->a_tvp) 6271 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6272 } 6273 if (a->a_tdvp != a->a_fdvp) 6274 
vdrop(a->a_fdvp); 6275 if (a->a_tvp != a->a_fvp) 6276 vdrop(a->a_fvp); 6277 vdrop(a->a_tdvp); 6278 if (a->a_tvp) 6279 vdrop(a->a_tvp); 6280 } 6281 6282 void 6283 vop_rmdir_pre(void *ap) 6284 { 6285 struct vop_rmdir_args *a; 6286 struct vnode *dvp, *vp; 6287 6288 a = ap; 6289 dvp = a->a_dvp; 6290 vp = a->a_vp; 6291 vn_seqc_write_begin(dvp); 6292 vn_seqc_write_begin(vp); 6293 } 6294 6295 void 6296 vop_rmdir_post(void *ap, int rc) 6297 { 6298 struct vop_rmdir_args *a; 6299 struct vnode *dvp, *vp; 6300 6301 a = ap; 6302 dvp = a->a_dvp; 6303 vp = a->a_vp; 6304 vn_seqc_write_end(dvp); 6305 vn_seqc_write_end(vp); 6306 if (!rc) { 6307 vp->v_vflag |= VV_UNLINKED; 6308 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6309 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6310 } 6311 } 6312 6313 void 6314 vop_setattr_pre(void *ap) 6315 { 6316 struct vop_setattr_args *a; 6317 struct vnode *vp; 6318 6319 a = ap; 6320 vp = a->a_vp; 6321 vn_seqc_write_begin(vp); 6322 } 6323 6324 void 6325 vop_setattr_post(void *ap, int rc) 6326 { 6327 struct vop_setattr_args *a; 6328 struct vnode *vp; 6329 6330 a = ap; 6331 vp = a->a_vp; 6332 vn_seqc_write_end(vp); 6333 if (!rc) 6334 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6335 } 6336 6337 void 6338 vop_setacl_pre(void *ap) 6339 { 6340 struct vop_setacl_args *a; 6341 struct vnode *vp; 6342 6343 a = ap; 6344 vp = a->a_vp; 6345 vn_seqc_write_begin(vp); 6346 } 6347 6348 void 6349 vop_setacl_post(void *ap, int rc __unused) 6350 { 6351 struct vop_setacl_args *a; 6352 struct vnode *vp; 6353 6354 a = ap; 6355 vp = a->a_vp; 6356 vn_seqc_write_end(vp); 6357 } 6358 6359 void 6360 vop_setextattr_pre(void *ap) 6361 { 6362 struct vop_setextattr_args *a; 6363 struct vnode *vp; 6364 6365 a = ap; 6366 vp = a->a_vp; 6367 vn_seqc_write_begin(vp); 6368 } 6369 6370 void 6371 vop_setextattr_post(void *ap, int rc) 6372 { 6373 struct vop_setextattr_args *a; 6374 struct vnode *vp; 6375 6376 a = ap; 6377 vp = a->a_vp; 6378 vn_seqc_write_end(vp); 6379 if (!rc) 6380 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6381 } 6382 6383 void 6384 vop_symlink_pre(void *ap) 6385 { 6386 struct vop_symlink_args *a; 6387 struct vnode *dvp; 6388 6389 a = ap; 6390 dvp = a->a_dvp; 6391 vn_seqc_write_begin(dvp); 6392 } 6393 6394 void 6395 vop_symlink_post(void *ap, int rc) 6396 { 6397 struct vop_symlink_args *a; 6398 struct vnode *dvp; 6399 6400 a = ap; 6401 dvp = a->a_dvp; 6402 vn_seqc_write_end(dvp); 6403 if (!rc) 6404 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6405 } 6406 6407 void 6408 vop_open_post(void *ap, int rc) 6409 { 6410 struct vop_open_args *a = ap; 6411 6412 if (!rc) 6413 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6414 } 6415 6416 void 6417 vop_close_post(void *ap, int rc) 6418 { 6419 struct vop_close_args *a = ap; 6420 6421 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6422 !VN_IS_DOOMED(a->a_vp))) { 6423 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 
6424 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6425 } 6426 } 6427 6428 void 6429 vop_read_post(void *ap, int rc) 6430 { 6431 struct vop_read_args *a = ap; 6432 6433 if (!rc) 6434 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6435 } 6436 6437 void 6438 vop_read_pgcache_post(void *ap, int rc) 6439 { 6440 struct vop_read_pgcache_args *a = ap; 6441 6442 if (!rc) 6443 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6444 } 6445 6446 void 6447 vop_readdir_post(void *ap, int rc) 6448 { 6449 struct vop_readdir_args *a = ap; 6450 6451 if (!rc) 6452 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6453 } 6454 6455 static struct knlist fs_knlist; 6456 6457 static void 6458 vfs_event_init(void *arg) 6459 { 6460 knlist_init_mtx(&fs_knlist, NULL); 6461 } 6462 /* XXX - correct order? */ 6463 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6464 6465 void 6466 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6467 { 6468 6469 KNOTE_UNLOCKED(&fs_knlist, event); 6470 } 6471 6472 static int filt_fsattach(struct knote *kn); 6473 static void filt_fsdetach(struct knote *kn); 6474 static int filt_fsevent(struct knote *kn, long hint); 6475 6476 struct filterops fs_filtops = { 6477 .f_isfd = 0, 6478 .f_attach = filt_fsattach, 6479 .f_detach = filt_fsdetach, 6480 .f_event = filt_fsevent 6481 }; 6482 6483 static int 6484 filt_fsattach(struct knote *kn) 6485 { 6486 6487 kn->kn_flags |= EV_CLEAR; 6488 knlist_add(&fs_knlist, kn, 0); 6489 return (0); 6490 } 6491 6492 static void 6493 filt_fsdetach(struct knote *kn) 6494 { 6495 6496 knlist_remove(&fs_knlist, kn, 0); 6497 } 6498 6499 static int 6500 filt_fsevent(struct knote *kn, long hint) 6501 { 6502 6503 kn->kn_fflags |= kn->kn_sfflags & hint; 6504 6505 return (kn->kn_fflags != 0); 6506 } 6507 6508 static int 6509 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6510 { 6511 struct vfsidctl vc; 6512 int error; 6513 struct mount *mp; 6514 6515 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6516 if (error) 6517 return (error); 6518 if (vc.vc_vers != VFS_CTL_VERS1) 6519 return (EINVAL); 6520 mp = vfs_getvfs(&vc.vc_fsid); 6521 if (mp == NULL) 6522 return (ENOENT); 6523 /* ensure that a specific sysctl goes to the right filesystem. */ 6524 if (strcmp(vc.vc_fstypename, "*") != 0 && 6525 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6526 vfs_rel(mp); 6527 return (EINVAL); 6528 } 6529 VCTLTOREQ(&vc, req); 6530 error = VFS_SYSCTL(mp, vc.vc_op, req); 6531 vfs_rel(mp); 6532 return (error); 6533 } 6534 6535 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6536 NULL, 0, sysctl_vfs_ctl, "", 6537 "Sysctl by fsid"); 6538 6539 /* 6540 * Function to initialize a va_filerev field sensibly. 6541 * XXX: Wouldn't a random number make a lot more sense ?? 
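 *
 * The returned value packs the boot-relative bintime into 64 bits: the
 * seconds in the upper half and the top 32 bits of the fraction in the
 * lower half, so (illustrative example) an uptime of exactly 2.5 seconds
 * would yield 0x0000000280000000.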
6542 */ 6543 u_quad_t 6544 init_va_filerev(void) 6545 { 6546 struct bintime bt; 6547 6548 getbinuptime(&bt); 6549 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6550 } 6551 6552 static int filt_vfsread(struct knote *kn, long hint); 6553 static int filt_vfswrite(struct knote *kn, long hint); 6554 static int filt_vfsvnode(struct knote *kn, long hint); 6555 static void filt_vfsdetach(struct knote *kn); 6556 static struct filterops vfsread_filtops = { 6557 .f_isfd = 1, 6558 .f_detach = filt_vfsdetach, 6559 .f_event = filt_vfsread 6560 }; 6561 static struct filterops vfswrite_filtops = { 6562 .f_isfd = 1, 6563 .f_detach = filt_vfsdetach, 6564 .f_event = filt_vfswrite 6565 }; 6566 static struct filterops vfsvnode_filtops = { 6567 .f_isfd = 1, 6568 .f_detach = filt_vfsdetach, 6569 .f_event = filt_vfsvnode 6570 }; 6571 6572 static void 6573 vfs_knllock(void *arg) 6574 { 6575 struct vnode *vp = arg; 6576 6577 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6578 } 6579 6580 static void 6581 vfs_knlunlock(void *arg) 6582 { 6583 struct vnode *vp = arg; 6584 6585 VOP_UNLOCK(vp); 6586 } 6587 6588 static void 6589 vfs_knl_assert_lock(void *arg, int what) 6590 { 6591 #ifdef DEBUG_VFS_LOCKS 6592 struct vnode *vp = arg; 6593 6594 if (what == LA_LOCKED) 6595 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6596 else 6597 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6598 #endif 6599 } 6600 6601 int 6602 vfs_kqfilter(struct vop_kqfilter_args *ap) 6603 { 6604 struct vnode *vp = ap->a_vp; 6605 struct knote *kn = ap->a_kn; 6606 struct knlist *knl; 6607 6608 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6609 kn->kn_filter != EVFILT_WRITE), 6610 ("READ/WRITE filter on a FIFO leaked through")); 6611 switch (kn->kn_filter) { 6612 case EVFILT_READ: 6613 kn->kn_fop = &vfsread_filtops; 6614 break; 6615 case EVFILT_WRITE: 6616 kn->kn_fop = &vfswrite_filtops; 6617 break; 6618 case EVFILT_VNODE: 6619 kn->kn_fop = &vfsvnode_filtops; 6620 break; 6621 default: 6622 return (EINVAL); 6623 } 6624 6625 kn->kn_hook = (caddr_t)vp; 6626 6627 v_addpollinfo(vp); 6628 if (vp->v_pollinfo == NULL) 6629 return (ENOMEM); 6630 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6631 vhold(vp); 6632 knlist_add(knl, kn, 0); 6633 6634 return (0); 6635 } 6636 6637 /* 6638 * Detach knote from vnode 6639 */ 6640 static void 6641 filt_vfsdetach(struct knote *kn) 6642 { 6643 struct vnode *vp = (struct vnode *)kn->kn_hook; 6644 6645 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6646 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6647 vdrop(vp); 6648 } 6649 6650 /*ARGSUSED*/ 6651 static int 6652 filt_vfsread(struct knote *kn, long hint) 6653 { 6654 struct vnode *vp = (struct vnode *)kn->kn_hook; 6655 off_t size; 6656 int res; 6657 6658 /* 6659 * filesystem is gone, so set the EOF flag and schedule 6660 * the knote for deletion. 
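 *
 * (The NOTE_REVOKE hint is posted by vop_reclaim_post() when the vnode is
 * reclaimed; a zero hint with a VBAD vnode covers a plain kevent scan of a
 * knote whose vnode was already reclaimed.  Otherwise kn_data below ends
 * up holding the number of bytes past the file's current offset.)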
6661 */ 6662 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6663 VI_LOCK(vp); 6664 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6665 VI_UNLOCK(vp); 6666 return (1); 6667 } 6668 6669 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6670 return (0); 6671 6672 VI_LOCK(vp); 6673 kn->kn_data = size - kn->kn_fp->f_offset; 6674 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6675 VI_UNLOCK(vp); 6676 return (res); 6677 } 6678 6679 /*ARGSUSED*/ 6680 static int 6681 filt_vfswrite(struct knote *kn, long hint) 6682 { 6683 struct vnode *vp = (struct vnode *)kn->kn_hook; 6684 6685 VI_LOCK(vp); 6686 6687 /* 6688 * filesystem is gone, so set the EOF flag and schedule 6689 * the knote for deletion. 6690 */ 6691 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6692 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6693 6694 kn->kn_data = 0; 6695 VI_UNLOCK(vp); 6696 return (1); 6697 } 6698 6699 static int 6700 filt_vfsvnode(struct knote *kn, long hint) 6701 { 6702 struct vnode *vp = (struct vnode *)kn->kn_hook; 6703 int res; 6704 6705 VI_LOCK(vp); 6706 if (kn->kn_sfflags & hint) 6707 kn->kn_fflags |= hint; 6708 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6709 kn->kn_flags |= EV_EOF; 6710 VI_UNLOCK(vp); 6711 return (1); 6712 } 6713 res = (kn->kn_fflags != 0); 6714 VI_UNLOCK(vp); 6715 return (res); 6716 } 6717 6718 int 6719 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6720 { 6721 int error; 6722 6723 if (dp->d_reclen > ap->a_uio->uio_resid) 6724 return (ENAMETOOLONG); 6725 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6726 if (error) { 6727 if (ap->a_ncookies != NULL) { 6728 if (ap->a_cookies != NULL) 6729 free(ap->a_cookies, M_TEMP); 6730 ap->a_cookies = NULL; 6731 *ap->a_ncookies = 0; 6732 } 6733 return (error); 6734 } 6735 if (ap->a_ncookies == NULL) 6736 return (0); 6737 6738 KASSERT(ap->a_cookies, 6739 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6740 6741 *ap->a_cookies = realloc(*ap->a_cookies, 6742 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6743 (*ap->a_cookies)[*ap->a_ncookies] = off; 6744 *ap->a_ncookies += 1; 6745 return (0); 6746 } 6747 6748 /* 6749 * The purpose of this routine is to remove granularity from accmode_t, 6750 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6751 * VADMIN and VAPPEND. 6752 * 6753 * If it returns 0, the caller is supposed to continue with the usual 6754 * access checks using 'accmode' as modified by this routine. If it 6755 * returns nonzero value, the caller is supposed to return that value 6756 * as errno. 6757 * 6758 * Note that after this routine runs, accmode may be zero. 6759 */ 6760 int 6761 vfs_unixify_accmode(accmode_t *accmode) 6762 { 6763 /* 6764 * There is no way to specify explicit "deny" rule using 6765 * file mode or POSIX.1e ACLs. 6766 */ 6767 if (*accmode & VEXPLICIT_DENY) { 6768 *accmode = 0; 6769 return (0); 6770 } 6771 6772 /* 6773 * None of these can be translated into usual access bits. 6774 * Also, the common case for NFSv4 ACLs is to not contain 6775 * either of these bits. Caller should check for VWRITE 6776 * on the containing directory instead. 6777 */ 6778 if (*accmode & (VDELETE_CHILD | VDELETE)) 6779 return (EPERM); 6780 6781 if (*accmode & VADMIN_PERMS) { 6782 *accmode &= ~VADMIN_PERMS; 6783 *accmode |= VADMIN; 6784 } 6785 6786 /* 6787 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6788 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
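 *
 * Worked example (illustrative only): a caller requesting
 * VREAD | VREAD_ACL | VSYNCHRONIZE is left with plain VREAD once the
 * mask below strips the bits that file modes and POSIX.1e ACLs cannot
 * express.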
6789 */ 6790 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6791 6792 return (0); 6793 } 6794 6795 /* 6796 * Clear out a doomed vnode (if any) and replace it with a new one as long 6797 * as the fs is not being unmounted. Return the root vnode to the caller. 6798 */ 6799 static int __noinline 6800 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6801 { 6802 struct vnode *vp; 6803 int error; 6804 6805 restart: 6806 if (mp->mnt_rootvnode != NULL) { 6807 MNT_ILOCK(mp); 6808 vp = mp->mnt_rootvnode; 6809 if (vp != NULL) { 6810 if (!VN_IS_DOOMED(vp)) { 6811 vrefact(vp); 6812 MNT_IUNLOCK(mp); 6813 error = vn_lock(vp, flags); 6814 if (error == 0) { 6815 *vpp = vp; 6816 return (0); 6817 } 6818 vrele(vp); 6819 goto restart; 6820 } 6821 /* 6822 * Clear the old one. 6823 */ 6824 mp->mnt_rootvnode = NULL; 6825 } 6826 MNT_IUNLOCK(mp); 6827 if (vp != NULL) { 6828 vfs_op_barrier_wait(mp); 6829 vrele(vp); 6830 } 6831 } 6832 error = VFS_CACHEDROOT(mp, flags, vpp); 6833 if (error != 0) 6834 return (error); 6835 if (mp->mnt_vfs_ops == 0) { 6836 MNT_ILOCK(mp); 6837 if (mp->mnt_vfs_ops != 0) { 6838 MNT_IUNLOCK(mp); 6839 return (0); 6840 } 6841 if (mp->mnt_rootvnode == NULL) { 6842 vrefact(*vpp); 6843 mp->mnt_rootvnode = *vpp; 6844 } else { 6845 if (mp->mnt_rootvnode != *vpp) { 6846 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6847 panic("%s: mismatch between vnode returned " 6848 " by VFS_CACHEDROOT and the one cached " 6849 " (%p != %p)", 6850 __func__, *vpp, mp->mnt_rootvnode); 6851 } 6852 } 6853 } 6854 MNT_IUNLOCK(mp); 6855 } 6856 return (0); 6857 } 6858 6859 int 6860 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6861 { 6862 struct mount_pcpu *mpcpu; 6863 struct vnode *vp; 6864 int error; 6865 6866 if (!vfs_op_thread_enter(mp, mpcpu)) 6867 return (vfs_cache_root_fallback(mp, flags, vpp)); 6868 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6869 if (vp == NULL || VN_IS_DOOMED(vp)) { 6870 vfs_op_thread_exit(mp, mpcpu); 6871 return (vfs_cache_root_fallback(mp, flags, vpp)); 6872 } 6873 vrefact(vp); 6874 vfs_op_thread_exit(mp, mpcpu); 6875 error = vn_lock(vp, flags); 6876 if (error != 0) { 6877 vrele(vp); 6878 return (vfs_cache_root_fallback(mp, flags, vpp)); 6879 } 6880 *vpp = vp; 6881 return (0); 6882 } 6883 6884 struct vnode * 6885 vfs_cache_root_clear(struct mount *mp) 6886 { 6887 struct vnode *vp; 6888 6889 /* 6890 * ops > 0 guarantees there is nobody who can see this vnode 6891 */ 6892 MPASS(mp->mnt_vfs_ops > 0); 6893 vp = mp->mnt_rootvnode; 6894 if (vp != NULL) 6895 vn_seqc_write_begin(vp); 6896 mp->mnt_rootvnode = NULL; 6897 return (vp); 6898 } 6899 6900 void 6901 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6902 { 6903 6904 MPASS(mp->mnt_vfs_ops > 0); 6905 vrefact(vp); 6906 mp->mnt_rootvnode = vp; 6907 } 6908 6909 /* 6910 * These are helper functions for filesystems to traverse all 6911 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6912 * 6913 * This interface replaces MNT_VNODE_FOREACH. 6914 */ 6915 6916 struct vnode * 6917 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6918 { 6919 struct vnode *vp; 6920 6921 maybe_yield(); 6922 MNT_ILOCK(mp); 6923 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6924 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6925 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6926 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
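 * The flag is re-checked under the vnode interlock immediately below, so
 * a vnode doomed between the two checks is still skipped; the unlocked
 * peek only avoids taking the interlock for vnodes already known to be
 * dead.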
*/ 6927 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6928 continue; 6929 VI_LOCK(vp); 6930 if (VN_IS_DOOMED(vp)) { 6931 VI_UNLOCK(vp); 6932 continue; 6933 } 6934 break; 6935 } 6936 if (vp == NULL) { 6937 __mnt_vnode_markerfree_all(mvp, mp); 6938 /* MNT_IUNLOCK(mp); -- done in above function */ 6939 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6940 return (NULL); 6941 } 6942 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6943 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6944 MNT_IUNLOCK(mp); 6945 return (vp); 6946 } 6947 6948 struct vnode * 6949 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6950 { 6951 struct vnode *vp; 6952 6953 *mvp = vn_alloc_marker(mp); 6954 MNT_ILOCK(mp); 6955 MNT_REF(mp); 6956 6957 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6958 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6959 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6960 continue; 6961 VI_LOCK(vp); 6962 if (VN_IS_DOOMED(vp)) { 6963 VI_UNLOCK(vp); 6964 continue; 6965 } 6966 break; 6967 } 6968 if (vp == NULL) { 6969 MNT_REL(mp); 6970 MNT_IUNLOCK(mp); 6971 vn_free_marker(*mvp); 6972 *mvp = NULL; 6973 return (NULL); 6974 } 6975 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6976 MNT_IUNLOCK(mp); 6977 return (vp); 6978 } 6979 6980 void 6981 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6982 { 6983 6984 if (*mvp == NULL) { 6985 MNT_IUNLOCK(mp); 6986 return; 6987 } 6988 6989 mtx_assert(MNT_MTX(mp), MA_OWNED); 6990 6991 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6992 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6993 MNT_REL(mp); 6994 MNT_IUNLOCK(mp); 6995 vn_free_marker(*mvp); 6996 *mvp = NULL; 6997 } 6998 6999 /* 7000 * These are helper functions for filesystems to traverse their 7001 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 7002 */ 7003 static void 7004 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7005 { 7006 7007 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7008 7009 MNT_ILOCK(mp); 7010 MNT_REL(mp); 7011 MNT_IUNLOCK(mp); 7012 vn_free_marker(*mvp); 7013 *mvp = NULL; 7014 } 7015 7016 /* 7017 * Relock the mp mount vnode list lock with the vp vnode interlock in the 7018 * conventional lock order during mnt_vnode_next_lazy iteration. 7019 * 7020 * On entry, the mount vnode list lock is held and the vnode interlock is not. 7021 * The list lock is dropped and reacquired. On success, both locks are held. 7022 * On failure, the mount vnode list lock is held but the vnode interlock is 7023 * not, and the procedure may have yielded. 7024 */ 7025 static bool 7026 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 7027 struct vnode *vp) 7028 { 7029 7030 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 7031 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 7032 ("%s: bad marker", __func__)); 7033 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 7034 ("%s: inappropriate vnode", __func__)); 7035 ASSERT_VI_UNLOCKED(vp, __func__); 7036 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7037 7038 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 7039 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 7040 7041 /* 7042 * Note we may be racing against vdrop which transitioned the hold 7043 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 7044 * if we are the only user after we get the interlock we will just 7045 * vdrop. 
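 * In that case refcount_release_if_not_last() below reports that we are
 * the last holder, we vdrop the reference we just took and return
 * failure, and the caller restarts the scan from the marker that was
 * already requeued in front of vp.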
7046 */ 7047 vhold(vp); 7048 mtx_unlock(&mp->mnt_listmtx); 7049 VI_LOCK(vp); 7050 if (VN_IS_DOOMED(vp)) { 7051 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 7052 goto out_lost; 7053 } 7054 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 7055 /* 7056 * There is nothing to do if we are the last user. 7057 */ 7058 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 7059 goto out_lost; 7060 mtx_lock(&mp->mnt_listmtx); 7061 return (true); 7062 out_lost: 7063 vdropl(vp); 7064 maybe_yield(); 7065 mtx_lock(&mp->mnt_listmtx); 7066 return (false); 7067 } 7068 7069 static struct vnode * 7070 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7071 void *cbarg) 7072 { 7073 struct vnode *vp; 7074 7075 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7076 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7077 restart: 7078 vp = TAILQ_NEXT(*mvp, v_lazylist); 7079 while (vp != NULL) { 7080 if (vp->v_type == VMARKER) { 7081 vp = TAILQ_NEXT(vp, v_lazylist); 7082 continue; 7083 } 7084 /* 7085 * See if we want to process the vnode. Note we may encounter a 7086 * long string of vnodes we don't care about and hog the list 7087 * as a result. Check for it and requeue the marker. 7088 */ 7089 VNPASS(!VN_IS_DOOMED(vp), vp); 7090 if (!cb(vp, cbarg)) { 7091 if (!should_yield()) { 7092 vp = TAILQ_NEXT(vp, v_lazylist); 7093 continue; 7094 } 7095 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 7096 v_lazylist); 7097 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 7098 v_lazylist); 7099 mtx_unlock(&mp->mnt_listmtx); 7100 kern_yield(PRI_USER); 7101 mtx_lock(&mp->mnt_listmtx); 7102 goto restart; 7103 } 7104 /* 7105 * Try-lock because this is the wrong lock order. 7106 */ 7107 if (!VI_TRYLOCK(vp) && 7108 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 7109 goto restart; 7110 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 7111 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 7112 ("alien vnode on the lazy list %p %p", vp, mp)); 7113 VNPASS(vp->v_mount == mp, vp); 7114 VNPASS(!VN_IS_DOOMED(vp), vp); 7115 break; 7116 } 7117 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7118 7119 /* Check if we are done */ 7120 if (vp == NULL) { 7121 mtx_unlock(&mp->mnt_listmtx); 7122 mnt_vnode_markerfree_lazy(mvp, mp); 7123 return (NULL); 7124 } 7125 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7126 mtx_unlock(&mp->mnt_listmtx); 7127 ASSERT_VI_LOCKED(vp, "lazy iter"); 7128 return (vp); 7129 } 7130 7131 struct vnode * 7132 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7133 void *cbarg) 7134 { 7135 7136 maybe_yield(); 7137 mtx_lock(&mp->mnt_listmtx); 7138 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7139 } 7140 7141 struct vnode * 7142 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7143 void *cbarg) 7144 { 7145 struct vnode *vp; 7146 7147 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7148 return (NULL); 7149 7150 *mvp = vn_alloc_marker(mp); 7151 MNT_ILOCK(mp); 7152 MNT_REF(mp); 7153 MNT_IUNLOCK(mp); 7154 7155 mtx_lock(&mp->mnt_listmtx); 7156 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7157 if (vp == NULL) { 7158 mtx_unlock(&mp->mnt_listmtx); 7159 mnt_vnode_markerfree_lazy(mvp, mp); 7160 return (NULL); 7161 } 7162 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7163 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7164 } 7165 7166 void 7167 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7168 { 7169 7170 if (*mvp == NULL) 7171 return; 7172 7173 mtx_lock(&mp->mnt_listmtx); 7174 
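	/*
	 * Unlink the marker under the list lock; mnt_vnode_markerfree_lazy()
	 * then drops the mount reference and frees it.
	 */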
TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7175 mtx_unlock(&mp->mnt_listmtx); 7176 mnt_vnode_markerfree_lazy(mvp, mp); 7177 } 7178 7179 int 7180 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 7181 { 7182 7183 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 7184 cnp->cn_flags &= ~NOEXECCHECK; 7185 return (0); 7186 } 7187 7188 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 7189 } 7190 7191 /* 7192 * Do not use this variant unless you have means other than the hold count 7193 * to prevent the vnode from getting freed. 7194 */ 7195 void 7196 vn_seqc_write_begin_locked(struct vnode *vp) 7197 { 7198 7199 ASSERT_VI_LOCKED(vp, __func__); 7200 VNPASS(vp->v_holdcnt > 0, vp); 7201 VNPASS(vp->v_seqc_users >= 0, vp); 7202 vp->v_seqc_users++; 7203 if (vp->v_seqc_users == 1) 7204 seqc_sleepable_write_begin(&vp->v_seqc); 7205 } 7206 7207 void 7208 vn_seqc_write_begin(struct vnode *vp) 7209 { 7210 7211 VI_LOCK(vp); 7212 vn_seqc_write_begin_locked(vp); 7213 VI_UNLOCK(vp); 7214 } 7215 7216 void 7217 vn_seqc_write_end_locked(struct vnode *vp) 7218 { 7219 7220 ASSERT_VI_LOCKED(vp, __func__); 7221 VNPASS(vp->v_seqc_users > 0, vp); 7222 vp->v_seqc_users--; 7223 if (vp->v_seqc_users == 0) 7224 seqc_sleepable_write_end(&vp->v_seqc); 7225 } 7226 7227 void 7228 vn_seqc_write_end(struct vnode *vp) 7229 { 7230 7231 VI_LOCK(vp); 7232 vn_seqc_write_end_locked(vp); 7233 VI_UNLOCK(vp); 7234 } 7235 7236 /* 7237 * Special case handling for allocating and freeing vnodes. 7238 * 7239 * The counter remains unchanged on free so that a doomed vnode will 7240 * keep testing as in modify as long as it is accessible with SMR. 7241 */ 7242 static void 7243 vn_seqc_init(struct vnode *vp) 7244 { 7245 7246 vp->v_seqc = 0; 7247 vp->v_seqc_users = 0; 7248 } 7249 7250 static void 7251 vn_seqc_write_end_free(struct vnode *vp) 7252 { 7253 7254 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7255 VNPASS(vp->v_seqc_users == 1, vp); 7256 } 7257 7258 void 7259 vn_irflag_set_locked(struct vnode *vp, short toset) 7260 { 7261 short flags; 7262 7263 ASSERT_VI_LOCKED(vp, __func__); 7264 flags = vn_irflag_read(vp); 7265 VNASSERT((flags & toset) == 0, vp, 7266 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7267 __func__, flags, toset)); 7268 atomic_store_short(&vp->v_irflag, flags | toset); 7269 } 7270 7271 void 7272 vn_irflag_set(struct vnode *vp, short toset) 7273 { 7274 7275 VI_LOCK(vp); 7276 vn_irflag_set_locked(vp, toset); 7277 VI_UNLOCK(vp); 7278 } 7279 7280 void 7281 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7282 { 7283 short flags; 7284 7285 ASSERT_VI_LOCKED(vp, __func__); 7286 flags = vn_irflag_read(vp); 7287 atomic_store_short(&vp->v_irflag, flags | toset); 7288 } 7289 7290 void 7291 vn_irflag_set_cond(struct vnode *vp, short toset) 7292 { 7293 7294 VI_LOCK(vp); 7295 vn_irflag_set_cond_locked(vp, toset); 7296 VI_UNLOCK(vp); 7297 } 7298 7299 void 7300 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7301 { 7302 short flags; 7303 7304 ASSERT_VI_LOCKED(vp, __func__); 7305 flags = vn_irflag_read(vp); 7306 VNASSERT((flags & tounset) == tounset, vp, 7307 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7308 __func__, flags, tounset)); 7309 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7310 } 7311 7312 void 7313 vn_irflag_unset(struct vnode *vp, short tounset) 7314 { 7315 7316 VI_LOCK(vp); 7317 vn_irflag_unset_locked(vp, tounset); 7318 VI_UNLOCK(vp); 7319 } 7320 7321 int 7322 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) 7323 { 7324 
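	/*
	 * VOP_GETATTR() is the authoritative source of the size; refuse to
	 * report anything that does not fit the signed off_t handed back to
	 * the caller.
	 */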
struct vattr vattr; 7325 int error; 7326 7327 ASSERT_VOP_LOCKED(vp, __func__); 7328 error = VOP_GETATTR(vp, &vattr, cred); 7329 if (__predict_true(error == 0)) { 7330 if (vattr.va_size <= OFF_MAX) 7331 *size = vattr.va_size; 7332 else 7333 error = EFBIG; 7334 } 7335 return (error); 7336 } 7337 7338 int 7339 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) 7340 { 7341 int error; 7342 7343 VOP_LOCK(vp, LK_SHARED); 7344 error = vn_getsize_locked(vp, size, cred); 7345 VOP_UNLOCK(vp); 7346 return (error); 7347 } 7348 7349 #ifdef INVARIANTS 7350 void 7351 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state) 7352 { 7353 7354 switch (vp->v_state) { 7355 case VSTATE_UNINITIALIZED: 7356 switch (state) { 7357 case VSTATE_CONSTRUCTED: 7358 case VSTATE_DESTROYING: 7359 return; 7360 default: 7361 break; 7362 } 7363 break; 7364 case VSTATE_CONSTRUCTED: 7365 ASSERT_VOP_ELOCKED(vp, __func__); 7366 switch (state) { 7367 case VSTATE_DESTROYING: 7368 return; 7369 default: 7370 break; 7371 } 7372 break; 7373 case VSTATE_DESTROYING: 7374 ASSERT_VOP_ELOCKED(vp, __func__); 7375 switch (state) { 7376 case VSTATE_DEAD: 7377 return; 7378 default: 7379 break; 7380 } 7381 break; 7382 case VSTATE_DEAD: 7383 switch (state) { 7384 case VSTATE_UNINITIALIZED: 7385 return; 7386 default: 7387 break; 7388 } 7389 break; 7390 } 7391 7392 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); 7393 panic("invalid state transition %d -> %d\n", vp->v_state, state); 7394 } 7395 #endif 7396
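/*
 * For reference, the transitions vn_set_state_validate() accepts form this
 * cycle (anything else panics under INVARIANTS):
 *
 *	VSTATE_UNINITIALIZED -> VSTATE_CONSTRUCTED or VSTATE_DESTROYING
 *	VSTATE_CONSTRUCTED   -> VSTATE_DESTROYING
 *	VSTATE_DESTROYING    -> VSTATE_DEAD
 *	VSTATE_DEAD          -> VSTATE_UNINITIALIZED
 */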