/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp, bool isvnlru);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");
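/*
 * Illustrative only: the nodes above surface as a userspace sysctl tree.
 * For example (OID names taken from the declarations in this file, the
 * printed values are hypothetical):
 *
 *	$ sysctl vfs.vnode.stats.count vfs.vnode.param.limit
 *	vfs.vnode.stats.count: 120000
 *	vfs.vnode.param.limit: 500000
 */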
/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
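/*
 * For illustration (using the MAXVNODES_MAX cap of 8M defined below):
 * 75% of 8388608 is 6291456, and 9% of that is about 566231, which is
 * where the "more than 566000 vnodes" figure above comes from.
 */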
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;

static u_long recycles_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets");

static u_long recycles_free_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t direct_recycles_free_count;
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
    &direct_recycles_free_count,
    "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so are only
 * delayed about a third the time that file data is delayed.  Thus, there
 * are SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
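/*
 * Worked example (values as configured below): SYNCER_MAXDELAY is 32, so
 * hashinit() in vntblinit() yields syncer_mask == 31.  A request scheduled
 * with a delay of 15 while syncer_delayno == 20 therefore lands in slot
 * (20 + 15) & 31 == 3, i.e. the queue index wraps around.
 */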
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static bool vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}
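/*
 * Illustrative usage (the handler above backs both OIDs declared below):
 *
 *	# sysctl kern.maxvnodes=1000000
 *	# sysctl vfs.vnode.param.limit
 *
 * A new limit takes effect immediately; vfs_hash_changesize() and
 * cache_changesize() resize the corresponding hash tables to match.
 */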
SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}
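/*
 * Illustrative only: the handler above is registered below as
 * debug.try_reclaim_vnode, so a reclaim of a particular vnode can be
 * forced by pathname, e.g.:
 *
 *	# sysctl debug.try_reclaim_vnode=/some/path
 *
 * EAGAIN is reported if the vnode was already being recycled.
 */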
static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define	vnsz2log	8
#ifndef	DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}
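/*
 * Context note: vnode_init() and vnode_fini() are passed to uma_zcreate()
 * in vntblinit() below as the zone's init/fini hooks, so they run when a
 * vnode enters or leaves the zone's backing storage rather than on every
 * allocation and free.  This is why a vnode cached by UMA still sits on
 * vnode_list until vnode_fini() removes it.
 */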
/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock (/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	       |
 *	       +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.
	 * If the thread doing the unmounting fails, it will clear the
	 * MNTK_UNMOUNT flag before waking us up, indicating that this mount
	 * point has survived the unmount attempt and vfs_busy should retry.
	 * Otherwise the unmounter thread will set the MNTK_REFEXPIRE flag in
	 * addition to MNTK_UNMOUNT, indicating that the mount point is about
	 * to be really destroyed.  vfs_busy needs to release its reference on
	 * the mount point in this case and return with ENOENT, telling the
	 * caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In worst case we may
 * get pointer to unmounted or even different filesystem, so we have to
 * check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
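/*
 * Illustrative only: the vfs.timestamp_precision knob above can be changed
 * at runtime, e.g. to request full nanosecond timestamps from
 * vfs_timestamp():
 *
 *	# sysctl vfs.timestamp_precision=3
 */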
/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *                       entries if this argument is true
 * @param trigger        Only reclaim vnodes with fewer than this many resident
 *                       pages.
 * @param target         How many vnodes to reclaim.
 * @return               The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		recycles_count++;
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_free_per_call = 10000;
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine (legacy)");
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
    &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_free_per_call)
		count = max_free_per_call;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled but it claims otherwise to the caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out whether
		 * writes are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp, isvnlru);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
 */
static int
vnlru_free_locked_direct(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_locked_vnlru(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_vnlru(int count)
{

	mtx_lock(&vnode_list_mtx);
	return (vnlru_free_locked_vnlru(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11;	/* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru got woken up due to vnode shortage");

#define	VNLRU_COUNT_SLOP 100

/*
 * The main freevnodes counter is only updated when a counter local to CPU
 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are conditionally
 * walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by significant margin in practice.
 */
#define	VNLRU_FREEVNODES_SLOP 126
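/*
 * Illustrative worked example (not a guarantee): each CPU folds its local
 * counter into the global one only once it reaches +/- VNLRU_FREEVNODES_SLOP,
 * so a local counter can silently drift by up to 125 in either direction.
 * With N CPUs the global "freevnodes" value may therefore lag the true count
 * by roughly N * 125 until vnlru_read_freevnodes() below decides to walk the
 * per-CPU counters.
 */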
static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes, rfreevnodes_old;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);
	rfreevnodes_old = atomic_load_long(&freevnodes_old);

	if (rfreevnodes > rfreevnodes_old)
		slop = rfreevnodes - rfreevnodes_old;
	else
		slop = rfreevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	CPU_FOREACH(cpu) {
		rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	atomic_store_long(&freevnodes_old, rfreevnodes);
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		vnlruproc_kicks++;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlru_read_freevnodes() > wantfreevnodes)
		return;

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

	if (vnlruproc_sig) {
		vnlruproc_sig = 0;
		wakeup(&vnlruproc_sig);
	}
	msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in presence of tons of free
 * vnodes, but without having to rewrite the machinery at this time.  Said
 * behavior boils down to continuously trying to reclaim all kinds of vnodes
 * (cycling through all levels of "force") when the count is transiently above
 * limit.  This happens a lot when all vnodes are used up and vn_alloc
 * speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per each tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds [it got further down with later
 * changes].
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling), the above only tries to get it out of
 * the way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
static long
vnlru_proc_light_pick(void)
{
	u_long rnumvnodes, rfreevnodes;

	if (vstir || vnlruproc_sig == 1)
		return (-1);

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = vnlru_read_freevnodes();

	/*
	 * vnode limit might have changed and now we may be at a significant
	 * excess.  Bail if we can't sort it out with free vnodes.
	 *
	 * Due to atomic updates the count can legitimately go above
	 * the limit for a short period, don't bother doing anything in
	 * that case.
	 */
	if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
		    rfreevnodes <= wantfreevnodes) {
			return (-1);
		}

		return (rnumvnodes - desiredvnodes);
	}

	/*
	 * Don't try to reach wantfreevnodes target if there are too few vnodes
	 * to begin with.
	 */
	if (rnumvnodes < wantfreevnodes) {
		return (0);
	}

	if (rfreevnodes < wantfreevnodes) {
		return (-1);
	}

	return (0);
}

static bool
vnlru_proc_light(void)
{
	long freecount;

	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);

	if (freecount != 0) {
		vnlru_free_vnlru(freecount);
	}

	mtx_lock(&vnode_list_mtx);
	vnlru_proc_sleep();
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (true);
}

static u_long uma_reclaim_calls;
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS,
    &uma_reclaim_calls, 0, "Number of calls to uma_reclaim");

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);

		if (force == 0 && vnlru_proc_light())
			continue;

		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
1765 */ 1766 if (rnumvnodes > desiredvnodes + 10) { 1767 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes); 1768 mtx_lock(&vnode_list_mtx); 1769 rnumvnodes = atomic_load_long(&numvnodes); 1770 } 1771 /* 1772 * Sleep if the vnode cache is in a good state. This is 1773 * when it is not over-full and has space for about a 4% 1774 * or 9% expansion (by growing its size or inexcessively 1775 * reducing its free list). Otherwise, try to reclaim 1776 * space for a 10% expansion. 1777 */ 1778 if (vstir && force == 0) { 1779 force = 1; 1780 vstir = false; 1781 } 1782 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1783 vnlru_proc_sleep(); 1784 continue; 1785 } 1786 rfreevnodes = vnlru_read_freevnodes(); 1787 1788 onumvnodes = rnumvnodes; 1789 /* 1790 * Calculate parameters for recycling. These are the same 1791 * throughout the loop to give some semblance of fairness. 1792 * The trigger point is to avoid recycling vnodes with lots 1793 * of resident pages. We aren't trying to free memory; we 1794 * are trying to recycle or at least free vnodes. 1795 */ 1796 if (rnumvnodes <= desiredvnodes) 1797 usevnodes = rnumvnodes - rfreevnodes; 1798 else 1799 usevnodes = rnumvnodes; 1800 if (usevnodes <= 0) 1801 usevnodes = 1; 1802 /* 1803 * The trigger value is chosen to give a conservatively 1804 * large value to ensure that it alone doesn't prevent 1805 * making progress. The value can easily be so large that 1806 * it is effectively infinite in some congested and 1807 * misconfigured cases, and this is necessary. Normally 1808 * it is about 8 to 100 (pages), which is quite large. 1809 */ 1810 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1811 if (force < 2) 1812 trigger = vsmalltrigger; 1813 reclaim_nc_src = force >= 3; 1814 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1815 target = target / 10 + 1; 1816 done = vlrureclaim(reclaim_nc_src, trigger, target); 1817 mtx_unlock(&vnode_list_mtx); 1818 /* 1819 * Total number of vnodes can transiently go slightly above the 1820 * limit (see vn_alloc_hard), no need to call uma_reclaim if 1821 * this happens. 1822 */ 1823 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes && 1824 numvnodes <= desiredvnodes) { 1825 uma_reclaim_calls++; 1826 uma_reclaim(UMA_RECLAIM_DRAIN); 1827 } 1828 if (done == 0) { 1829 if (force == 0 || force == 1) { 1830 force = 2; 1831 continue; 1832 } 1833 if (force == 2) { 1834 force = 3; 1835 continue; 1836 } 1837 want_reread = true; 1838 force = 0; 1839 vnlru_nowhere++; 1840 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1841 } else { 1842 want_reread = true; 1843 kern_yield(PRI_USER); 1844 } 1845 } 1846 } 1847 1848 static struct kproc_desc vnlru_kp = { 1849 "vnlru", 1850 vnlru_proc, 1851 &vnlruproc 1852 }; 1853 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1854 &vnlru_kp); 1855 1856 /* 1857 * Routines having to do with the management of the vnode table. 1858 */ 1859 1860 /* 1861 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1862 * before we actually vgone(). This function must be called with the vnode 1863 * held to prevent the vnode from being returned to the free list midway 1864 * through vgone(). 1865 */ 1866 static int 1867 vtryrecycle(struct vnode *vp, bool isvnlru) 1868 { 1869 struct mount *vnmp; 1870 1871 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1872 VNPASS(vp->v_holdcnt > 0, vp); 1873 /* 1874 * This vnode may found and locked via some other list, if so we 1875 * can't recycle it yet. 
1876 */ 1877 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1878 CTR2(KTR_VFS, 1879 "%s: impossible to recycle, vp %p lock is already held", 1880 __func__, vp); 1881 vdrop_recycle(vp); 1882 return (EWOULDBLOCK); 1883 } 1884 /* 1885 * Don't recycle if its filesystem is being suspended. 1886 */ 1887 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1888 VOP_UNLOCK(vp); 1889 CTR2(KTR_VFS, 1890 "%s: impossible to recycle, cannot start the write for %p", 1891 __func__, vp); 1892 vdrop_recycle(vp); 1893 return (EBUSY); 1894 } 1895 /* 1896 * If we got this far, we need to acquire the interlock and see if 1897 * anyone picked up this vnode from another list. If not, we will 1898 * mark it with DOOMED via vgonel() so that anyone who does find it 1899 * will skip over it. 1900 */ 1901 VI_LOCK(vp); 1902 if (vp->v_usecount) { 1903 VOP_UNLOCK(vp); 1904 vdropl_recycle(vp); 1905 vn_finished_write(vnmp); 1906 CTR2(KTR_VFS, 1907 "%s: impossible to recycle, %p is already referenced", 1908 __func__, vp); 1909 return (EBUSY); 1910 } 1911 if (!VN_IS_DOOMED(vp)) { 1912 if (isvnlru) 1913 recycles_free_count++; 1914 else 1915 counter_u64_add(direct_recycles_free_count, 1); 1916 vgonel(vp); 1917 } 1918 VOP_UNLOCK(vp); 1919 vdropl_recycle(vp); 1920 vn_finished_write(vnmp); 1921 return (0); 1922 } 1923 1924 /* 1925 * Allocate a new vnode. 1926 * 1927 * The operation never returns an error. Returning an error was disabled 1928 * in r145385 (dated 2005) with the following comment: 1929 * 1930 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1931 * 1932 * Given the age of this commit (almost 15 years at the time of writing this 1933 * comment) restoring the ability to fail requires a significant audit of 1934 * all codepaths. 1935 * 1936 * The routine can try to free a vnode or stall for up to 1 second waiting for 1937 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1938 */ 1939 static u_long vn_alloc_cyclecount; 1940 static u_long vn_alloc_sleeps; 1941 1942 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1943 "Number of times vnode allocation blocked waiting on vnlru"); 1944 1945 static struct vnode * __noinline 1946 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped) 1947 { 1948 u_long rfreevnodes; 1949 1950 if (bumped) { 1951 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) { 1952 atomic_subtract_long(&numvnodes, 1); 1953 bumped = false; 1954 } 1955 } 1956 1957 mtx_lock(&vnode_list_mtx); 1958 1959 if (vn_alloc_cyclecount != 0) { 1960 rnumvnodes = atomic_load_long(&numvnodes); 1961 if (rnumvnodes + 1 < desiredvnodes) { 1962 vn_alloc_cyclecount = 0; 1963 mtx_unlock(&vnode_list_mtx); 1964 goto alloc; 1965 } 1966 1967 rfreevnodes = vnlru_read_freevnodes(); 1968 if (rfreevnodes < wantfreevnodes) { 1969 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1970 vn_alloc_cyclecount = 0; 1971 vstir = true; 1972 } 1973 } else { 1974 vn_alloc_cyclecount = 0; 1975 } 1976 } 1977 1978 /* 1979 * Grow the vnode cache if it will not be above its target max 1980 * after growing. Otherwise, if the free list is nonempty, try 1981 * to reclaim 1 item from it before growing the cache (possibly 1982 * above its target max if the reclamation failed or is delayed). 1983 * Otherwise, wait for some space. In all cases, schedule 1984 * vnlru_proc() if we are getting short of space. The watermarks 1985 * should be chosen so that we never wait or even reclaim from 1986 * the free list to below its target minimum. 
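 *
 * As a rough sketch of the fallback order implemented below (not the exact
 * control flow):
 *
 *	if (vnlru_free_locked_direct(1) > 0)
 *		goto alloc;		// reclaimed one free vnode
 *	if (not suspending)
 *		kick vnlru and msleep("vlruwk", hz);	// wait for space
 * alloc:
 *	numvnodes++;
 *	uma_zalloc_smr(vnode_zone, M_WAITOK);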
1987 */ 1988 if (vnlru_free_locked_direct(1) > 0) 1989 goto alloc; 1990 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1991 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1992 /* 1993 * Wait for space for a new vnode. 1994 */ 1995 if (bumped) { 1996 atomic_subtract_long(&numvnodes, 1); 1997 bumped = false; 1998 } 1999 mtx_lock(&vnode_list_mtx); 2000 vnlru_kick_locked(); 2001 vn_alloc_sleeps++; 2002 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 2003 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 2004 vnlru_read_freevnodes() > 1) 2005 vnlru_free_locked_direct(1); 2006 else 2007 mtx_unlock(&vnode_list_mtx); 2008 } 2009 alloc: 2010 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 2011 if (!bumped) 2012 atomic_add_long(&numvnodes, 1); 2013 vnlru_kick_cond(); 2014 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2015 } 2016 2017 static struct vnode * 2018 vn_alloc(struct mount *mp) 2019 { 2020 u_long rnumvnodes; 2021 2022 if (__predict_false(vn_alloc_cyclecount != 0)) 2023 return (vn_alloc_hard(mp, 0, false)); 2024 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 2025 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 2026 return (vn_alloc_hard(mp, rnumvnodes, true)); 2027 } 2028 2029 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2030 } 2031 2032 static void 2033 vn_free(struct vnode *vp) 2034 { 2035 2036 atomic_subtract_long(&numvnodes, 1); 2037 uma_zfree_smr(vnode_zone, vp); 2038 } 2039 2040 /* 2041 * Return the next vnode from the free list. 2042 */ 2043 int 2044 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 2045 struct vnode **vpp) 2046 { 2047 struct vnode *vp; 2048 struct thread *td; 2049 struct lock_object *lo; 2050 2051 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 2052 2053 KASSERT(vops->registered, 2054 ("%s: not registered vector op %p\n", __func__, vops)); 2055 cache_validate_vop_vector(mp, vops); 2056 2057 td = curthread; 2058 if (td->td_vp_reserved != NULL) { 2059 vp = td->td_vp_reserved; 2060 td->td_vp_reserved = NULL; 2061 } else { 2062 vp = vn_alloc(mp); 2063 } 2064 counter_u64_add(vnodes_created, 1); 2065 2066 vn_set_state(vp, VSTATE_UNINITIALIZED); 2067 2068 /* 2069 * Locks are given the generic name "vnode" when created. 2070 * Follow the historic practice of using the filesystem 2071 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 2072 * 2073 * Locks live in a witness group keyed on their name. Thus, 2074 * when a lock is renamed, it must also move from the witness 2075 * group of its old name to the witness group of its new name. 2076 * 2077 * The change only needs to be made when the vnode moves 2078 * from one filesystem type to another. We ensure that each 2079 * filesystem use a single static name pointer for its tag so 2080 * that we can compare pointers rather than doing a strcmp(). 2081 */ 2082 lo = &vp->v_vnlock->lock_object; 2083 #ifdef WITNESS 2084 if (lo->lo_name != tag) { 2085 #endif 2086 lo->lo_name = tag; 2087 #ifdef WITNESS 2088 WITNESS_DESTROY(lo); 2089 WITNESS_INIT(lo, tag); 2090 } 2091 #endif 2092 /* 2093 * By default, don't allow shared locks unless filesystems opt-in. 2094 */ 2095 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 2096 /* 2097 * Finalize various vnode identity bits. 
2098 */ 2099 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2100 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2101 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2102 vp->v_type = VNON; 2103 vp->v_op = vops; 2104 vp->v_irflag = 0; 2105 v_init_counters(vp); 2106 vn_seqc_init(vp); 2107 vp->v_bufobj.bo_ops = &buf_ops_bio; 2108 #ifdef DIAGNOSTIC 2109 if (mp == NULL && vops != &dead_vnodeops) 2110 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2111 #endif 2112 #ifdef MAC 2113 mac_vnode_init(vp); 2114 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2115 mac_vnode_associate_singlelabel(mp, vp); 2116 #endif 2117 if (mp != NULL) { 2118 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2119 } 2120 2121 /* 2122 * For the filesystems which do not use vfs_hash_insert(), 2123 * still initialize v_hash to have vfs_hash_index() useful. 2124 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2125 * its own hashing. 2126 */ 2127 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2128 2129 *vpp = vp; 2130 return (0); 2131 } 2132 2133 void 2134 getnewvnode_reserve(void) 2135 { 2136 struct thread *td; 2137 2138 td = curthread; 2139 MPASS(td->td_vp_reserved == NULL); 2140 td->td_vp_reserved = vn_alloc(NULL); 2141 } 2142 2143 void 2144 getnewvnode_drop_reserve(void) 2145 { 2146 struct thread *td; 2147 2148 td = curthread; 2149 if (td->td_vp_reserved != NULL) { 2150 vn_free(td->td_vp_reserved); 2151 td->td_vp_reserved = NULL; 2152 } 2153 } 2154 2155 static void __noinline 2156 freevnode(struct vnode *vp) 2157 { 2158 struct bufobj *bo; 2159 2160 /* 2161 * The vnode has been marked for destruction, so free it. 2162 * 2163 * The vnode will be returned to the zone where it will 2164 * normally remain until it is needed for another vnode. We 2165 * need to cleanup (or verify that the cleanup has already 2166 * been done) any residual data left from its current use 2167 * so as not to contaminate the freshly allocated vnode. 2168 */ 2169 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2170 /* 2171 * Paired with vgone. 2172 */ 2173 vn_seqc_write_end_free(vp); 2174 2175 bo = &vp->v_bufobj; 2176 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2177 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2178 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2179 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2180 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2181 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2182 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2183 ("clean blk trie not empty")); 2184 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2185 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2186 ("dirty blk trie not empty")); 2187 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2188 ("Dangling rangelock waiters")); 2189 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2190 ("Leaked inactivation")); 2191 VI_UNLOCK(vp); 2192 cache_assert_no_entries(vp); 2193 2194 #ifdef MAC 2195 mac_vnode_destroy(vp); 2196 #endif 2197 if (vp->v_pollinfo != NULL) { 2198 /* 2199 * Use LK_NOWAIT to shut up witness about the lock. We may get 2200 * here while having another vnode locked when trying to 2201 * satisfy a lookup and needing to recycle. 
2202 */ 2203 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2204 destroy_vpollinfo(vp->v_pollinfo); 2205 VOP_UNLOCK(vp); 2206 vp->v_pollinfo = NULL; 2207 } 2208 vp->v_mountedhere = NULL; 2209 vp->v_unpcb = NULL; 2210 vp->v_rdev = NULL; 2211 vp->v_fifoinfo = NULL; 2212 vp->v_iflag = 0; 2213 vp->v_vflag = 0; 2214 bo->bo_flag = 0; 2215 vn_free(vp); 2216 } 2217 2218 /* 2219 * Delete from old mount point vnode list, if on one. 2220 */ 2221 static void 2222 delmntque(struct vnode *vp) 2223 { 2224 struct mount *mp; 2225 2226 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2227 2228 mp = vp->v_mount; 2229 MNT_ILOCK(mp); 2230 VI_LOCK(vp); 2231 vp->v_mount = NULL; 2232 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2233 ("bad mount point vnode list size")); 2234 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2235 mp->mnt_nvnodelistsize--; 2236 MNT_REL(mp); 2237 MNT_IUNLOCK(mp); 2238 /* 2239 * The caller expects the interlock to be still held. 2240 */ 2241 ASSERT_VI_LOCKED(vp, __func__); 2242 } 2243 2244 static int 2245 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2246 { 2247 2248 KASSERT(vp->v_mount == NULL, 2249 ("insmntque: vnode already on per mount vnode list")); 2250 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2251 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2252 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2253 } else { 2254 KASSERT(!dtr, 2255 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2256 __func__)); 2257 } 2258 2259 /* 2260 * We acquire the vnode interlock early to ensure that the 2261 * vnode cannot be recycled by another process releasing a 2262 * holdcnt on it before we get it on both the vnode list 2263 * and the active vnode list. The mount mutex protects only 2264 * manipulation of the vnode list and the vnode freelist 2265 * mutex protects only manipulation of the active vnode list. 2266 * Hence the need to hold the vnode interlock throughout. 2267 */ 2268 MNT_ILOCK(mp); 2269 VI_LOCK(vp); 2270 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2271 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2272 mp->mnt_nvnodelistsize == 0)) && 2273 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2274 VI_UNLOCK(vp); 2275 MNT_IUNLOCK(mp); 2276 if (dtr) { 2277 vp->v_data = NULL; 2278 vp->v_op = &dead_vnodeops; 2279 vgone(vp); 2280 vput(vp); 2281 } 2282 return (EBUSY); 2283 } 2284 vp->v_mount = mp; 2285 MNT_REF(mp); 2286 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2287 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2288 ("neg mount point vnode list size")); 2289 mp->mnt_nvnodelistsize++; 2290 VI_UNLOCK(vp); 2291 MNT_IUNLOCK(mp); 2292 return (0); 2293 } 2294 2295 /* 2296 * Insert into list of vnodes for the new mount point, if available. 2297 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2298 * leaves handling of the vnode to the caller. 2299 */ 2300 int 2301 insmntque(struct vnode *vp, struct mount *mp) 2302 { 2303 return (insmntque1_int(vp, mp, true)); 2304 } 2305 2306 int 2307 insmntque1(struct vnode *vp, struct mount *mp) 2308 { 2309 return (insmntque1_int(vp, mp, false)); 2310 } 2311 2312 /* 2313 * Flush out and invalidate all buffers associated with a bufobj 2314 * Called with the underlying object locked. 
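 *
 * Filesystems typically reach this through the vnode-level wrapper below,
 * e.g. (illustrative only):
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * which writes out dirty buffers first (V_SAVE) and then invalidates the
 * rest.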
2315 */ 2316 int 2317 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2318 { 2319 int error; 2320 2321 BO_LOCK(bo); 2322 if (flags & V_SAVE) { 2323 error = bufobj_wwait(bo, slpflag, slptimeo); 2324 if (error) { 2325 BO_UNLOCK(bo); 2326 return (error); 2327 } 2328 if (bo->bo_dirty.bv_cnt > 0) { 2329 BO_UNLOCK(bo); 2330 do { 2331 error = BO_SYNC(bo, MNT_WAIT); 2332 } while (error == ERELOOKUP); 2333 if (error != 0) 2334 return (error); 2335 BO_LOCK(bo); 2336 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2337 BO_UNLOCK(bo); 2338 return (EBUSY); 2339 } 2340 } 2341 } 2342 /* 2343 * If you alter this loop please notice that interlock is dropped and 2344 * reacquired in flushbuflist. Special care is needed to ensure that 2345 * no race conditions occur from this. 2346 */ 2347 do { 2348 error = flushbuflist(&bo->bo_clean, 2349 flags, bo, slpflag, slptimeo); 2350 if (error == 0 && !(flags & V_CLEANONLY)) 2351 error = flushbuflist(&bo->bo_dirty, 2352 flags, bo, slpflag, slptimeo); 2353 if (error != 0 && error != EAGAIN) { 2354 BO_UNLOCK(bo); 2355 return (error); 2356 } 2357 } while (error != 0); 2358 2359 /* 2360 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2361 * have write I/O in-progress but if there is a VM object then the 2362 * VM object can also have read-I/O in-progress. 2363 */ 2364 do { 2365 bufobj_wwait(bo, 0, 0); 2366 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2367 BO_UNLOCK(bo); 2368 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2369 BO_LOCK(bo); 2370 } 2371 } while (bo->bo_numoutput > 0); 2372 BO_UNLOCK(bo); 2373 2374 /* 2375 * Destroy the copy in the VM cache, too. 2376 */ 2377 if (bo->bo_object != NULL && 2378 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2379 VM_OBJECT_WLOCK(bo->bo_object); 2380 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2381 OBJPR_CLEANONLY : 0); 2382 VM_OBJECT_WUNLOCK(bo->bo_object); 2383 } 2384 2385 #ifdef INVARIANTS 2386 BO_LOCK(bo); 2387 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2388 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2389 bo->bo_clean.bv_cnt > 0)) 2390 panic("vinvalbuf: flush failed"); 2391 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2392 bo->bo_dirty.bv_cnt > 0) 2393 panic("vinvalbuf: flush dirty failed"); 2394 BO_UNLOCK(bo); 2395 #endif 2396 return (0); 2397 } 2398 2399 /* 2400 * Flush out and invalidate all buffers associated with a vnode. 2401 * Called with the underlying object locked. 2402 */ 2403 int 2404 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2405 { 2406 2407 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2408 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2409 if (vp->v_object != NULL && vp->v_object->handle != vp) 2410 return (0); 2411 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2412 } 2413 2414 /* 2415 * Flush out buffers on the specified list. 2416 * 2417 */ 2418 static int 2419 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2420 int slptimeo) 2421 { 2422 struct buf *bp, *nbp; 2423 int retval, error; 2424 daddr_t lblkno; 2425 b_xflags_t xflags; 2426 2427 ASSERT_BO_WLOCKED(bo); 2428 2429 retval = 0; 2430 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2431 /* 2432 * If we are flushing both V_NORMAL and V_ALT buffers then 2433 * do not skip any buffers. If we are flushing only V_NORMAL 2434 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2435 * flushing only V_ALT buffers then skip buffers not marked 2436 * as BX_ALTDATA. 2437 */ 2438 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2439 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2440 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2441 continue; 2442 } 2443 if (nbp != NULL) { 2444 lblkno = nbp->b_lblkno; 2445 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2446 } 2447 retval = EAGAIN; 2448 error = BUF_TIMELOCK(bp, 2449 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2450 "flushbuf", slpflag, slptimeo); 2451 if (error) { 2452 BO_LOCK(bo); 2453 return (error != ENOLCK ? error : EAGAIN); 2454 } 2455 KASSERT(bp->b_bufobj == bo, 2456 ("bp %p wrong b_bufobj %p should be %p", 2457 bp, bp->b_bufobj, bo)); 2458 /* 2459 * XXX Since there are no node locks for NFS, I 2460 * believe there is a slight chance that a delayed 2461 * write will occur while sleeping just above, so 2462 * check for it. 2463 */ 2464 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2465 (flags & V_SAVE)) { 2466 bremfree(bp); 2467 bp->b_flags |= B_ASYNC; 2468 bwrite(bp); 2469 BO_LOCK(bo); 2470 return (EAGAIN); /* XXX: why not loop ? */ 2471 } 2472 bremfree(bp); 2473 bp->b_flags |= (B_INVAL | B_RELBUF); 2474 bp->b_flags &= ~B_ASYNC; 2475 brelse(bp); 2476 BO_LOCK(bo); 2477 if (nbp == NULL) 2478 break; 2479 nbp = gbincore(bo, lblkno); 2480 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2481 != xflags) 2482 break; /* nbp invalid */ 2483 } 2484 return (retval); 2485 } 2486 2487 int 2488 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2489 { 2490 struct buf *bp; 2491 int error; 2492 daddr_t lblkno; 2493 2494 ASSERT_BO_LOCKED(bo); 2495 2496 for (lblkno = startn;;) { 2497 again: 2498 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2499 if (bp == NULL || bp->b_lblkno >= endn || 2500 bp->b_lblkno < startn) 2501 break; 2502 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2503 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2504 if (error != 0) { 2505 BO_RLOCK(bo); 2506 if (error == ENOLCK) 2507 goto again; 2508 return (error); 2509 } 2510 KASSERT(bp->b_bufobj == bo, 2511 ("bp %p wrong b_bufobj %p should be %p", 2512 bp, bp->b_bufobj, bo)); 2513 lblkno = bp->b_lblkno + 1; 2514 if ((bp->b_flags & B_MANAGED) == 0) 2515 bremfree(bp); 2516 bp->b_flags |= B_RELBUF; 2517 /* 2518 * In the VMIO case, use the B_NOREUSE flag to hint that the 2519 * pages backing each buffer in the range are unlikely to be 2520 * reused. Dirty buffers will have the hint applied once 2521 * they've been written. 2522 */ 2523 if ((bp->b_flags & B_VMIO) != 0) 2524 bp->b_flags |= B_NOREUSE; 2525 brelse(bp); 2526 BO_RLOCK(bo); 2527 } 2528 return (0); 2529 } 2530 2531 /* 2532 * Truncate a file's buffer and pages to a specified length. This 2533 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2534 * sync activity. 2535 */ 2536 int 2537 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2538 { 2539 struct buf *bp, *nbp; 2540 struct bufobj *bo; 2541 daddr_t startlbn; 2542 2543 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2544 vp, blksize, (uintmax_t)length); 2545 2546 /* 2547 * Round up to the *next* lbn. 
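 *
 * For example (illustrative numbers): with blksize 16384, a length of 16385
 * gives startlbn = howmany(16385, 16384) = 2, so the partially valid block
 * at lbn 1 is kept and blocks from lbn 2 onwards are invalidated.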
2548 */ 2549 startlbn = howmany(length, blksize); 2550 2551 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2552 2553 bo = &vp->v_bufobj; 2554 restart_unlocked: 2555 BO_LOCK(bo); 2556 2557 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2558 ; 2559 2560 if (length > 0) { 2561 restartsync: 2562 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2563 if (bp->b_lblkno > 0) 2564 continue; 2565 /* 2566 * Since we hold the vnode lock this should only 2567 * fail if we're racing with the buf daemon. 2568 */ 2569 if (BUF_LOCK(bp, 2570 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2571 BO_LOCKPTR(bo)) == ENOLCK) 2572 goto restart_unlocked; 2573 2574 VNASSERT((bp->b_flags & B_DELWRI), vp, 2575 ("buf(%p) on dirty queue without DELWRI", bp)); 2576 2577 bremfree(bp); 2578 bawrite(bp); 2579 BO_LOCK(bo); 2580 goto restartsync; 2581 } 2582 } 2583 2584 bufobj_wwait(bo, 0, 0); 2585 BO_UNLOCK(bo); 2586 vnode_pager_setsize(vp, length); 2587 2588 return (0); 2589 } 2590 2591 /* 2592 * Invalidate the cached pages of a file's buffer within the range of block 2593 * numbers [startlbn, endlbn). 2594 */ 2595 void 2596 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2597 int blksize) 2598 { 2599 struct bufobj *bo; 2600 off_t start, end; 2601 2602 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2603 2604 start = blksize * startlbn; 2605 end = blksize * endlbn; 2606 2607 bo = &vp->v_bufobj; 2608 BO_LOCK(bo); 2609 MPASS(blksize == bo->bo_bsize); 2610 2611 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2612 ; 2613 2614 BO_UNLOCK(bo); 2615 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2616 } 2617 2618 static int 2619 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2620 daddr_t startlbn, daddr_t endlbn) 2621 { 2622 struct buf *bp, *nbp; 2623 bool anyfreed; 2624 2625 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2626 ASSERT_BO_LOCKED(bo); 2627 2628 do { 2629 anyfreed = false; 2630 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2631 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2632 continue; 2633 if (BUF_LOCK(bp, 2634 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2635 BO_LOCKPTR(bo)) == ENOLCK) { 2636 BO_LOCK(bo); 2637 return (EAGAIN); 2638 } 2639 2640 bremfree(bp); 2641 bp->b_flags |= B_INVAL | B_RELBUF; 2642 bp->b_flags &= ~B_ASYNC; 2643 brelse(bp); 2644 anyfreed = true; 2645 2646 BO_LOCK(bo); 2647 if (nbp != NULL && 2648 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2649 nbp->b_vp != vp || 2650 (nbp->b_flags & B_DELWRI) != 0)) 2651 return (EAGAIN); 2652 } 2653 2654 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2655 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2656 continue; 2657 if (BUF_LOCK(bp, 2658 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2659 BO_LOCKPTR(bo)) == ENOLCK) { 2660 BO_LOCK(bo); 2661 return (EAGAIN); 2662 } 2663 bremfree(bp); 2664 bp->b_flags |= B_INVAL | B_RELBUF; 2665 bp->b_flags &= ~B_ASYNC; 2666 brelse(bp); 2667 anyfreed = true; 2668 2669 BO_LOCK(bo); 2670 if (nbp != NULL && 2671 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2672 (nbp->b_vp != vp) || 2673 (nbp->b_flags & B_DELWRI) == 0)) 2674 return (EAGAIN); 2675 } 2676 } while (anyfreed); 2677 return (0); 2678 } 2679 2680 static void 2681 buf_vlist_remove(struct buf *bp) 2682 { 2683 struct bufv *bv; 2684 b_xflags_t flags; 2685 2686 flags = bp->b_xflags; 2687 2688 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2689 ASSERT_BO_WLOCKED(bp->b_bufobj); 2690 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2691 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2692 ("%s: buffer %p has invalid queue state", __func__, bp)); 2693 2694 if ((flags & BX_VNDIRTY) != 0) 2695 bv = &bp->b_bufobj->bo_dirty; 2696 else 2697 bv = &bp->b_bufobj->bo_clean; 2698 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2699 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2700 bv->bv_cnt--; 2701 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2702 } 2703 2704 /* 2705 * Add the buffer to the sorted clean or dirty block list. 2706 * 2707 * NOTE: xflags is passed as a constant, optimizing this inline function! 2708 */ 2709 static void 2710 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2711 { 2712 struct bufv *bv; 2713 struct buf *n; 2714 int error; 2715 2716 ASSERT_BO_WLOCKED(bo); 2717 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2718 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2719 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2720 ("dead bo %p", bo)); 2721 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2722 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2723 bp->b_xflags |= xflags; 2724 if (xflags & BX_VNDIRTY) 2725 bv = &bo->bo_dirty; 2726 else 2727 bv = &bo->bo_clean; 2728 2729 /* 2730 * Keep the list ordered. Optimize empty list insertion. Assume 2731 * we tend to grow at the tail so lookup_le should usually be cheaper 2732 * than _ge. 2733 */ 2734 if (bv->bv_cnt == 0 || 2735 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2736 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2737 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2738 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2739 else 2740 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2741 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2742 if (error) 2743 panic("buf_vlist_add: Preallocated nodes insufficient."); 2744 bv->bv_cnt++; 2745 } 2746 2747 /* 2748 * Look up a buffer using the buffer tries. 2749 */ 2750 struct buf * 2751 gbincore(struct bufobj *bo, daddr_t lblkno) 2752 { 2753 struct buf *bp; 2754 2755 ASSERT_BO_LOCKED(bo); 2756 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2757 if (bp != NULL) 2758 return (bp); 2759 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2760 } 2761 2762 /* 2763 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2764 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2765 * stability of the result. Like other lockless lookups, the found buf may 2766 * already be invalid by the time this function returns. 2767 */ 2768 struct buf * 2769 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2770 { 2771 struct buf *bp; 2772 2773 ASSERT_BO_UNLOCKED(bo); 2774 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2775 if (bp != NULL) 2776 return (bp); 2777 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2778 } 2779 2780 /* 2781 * Associate a buffer with a vnode. 2782 */ 2783 void 2784 bgetvp(struct vnode *vp, struct buf *bp) 2785 { 2786 struct bufobj *bo; 2787 2788 bo = &vp->v_bufobj; 2789 ASSERT_BO_WLOCKED(bo); 2790 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2791 2792 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2793 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2794 ("bgetvp: bp already attached! %p", bp)); 2795 2796 vhold(vp); 2797 bp->b_vp = vp; 2798 bp->b_bufobj = bo; 2799 /* 2800 * Insert onto list for new vnode. 
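 *
 * A freshly associated buffer always starts out on the clean list
 * (BX_VNCLEAN); reassignbuf() later moves it to the dirty list once
 * B_DELWRI is set.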
2801 */ 2802 buf_vlist_add(bp, bo, BX_VNCLEAN); 2803 } 2804 2805 /* 2806 * Disassociate a buffer from a vnode. 2807 */ 2808 void 2809 brelvp(struct buf *bp) 2810 { 2811 struct bufobj *bo; 2812 struct vnode *vp; 2813 2814 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2815 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2816 2817 /* 2818 * Delete from old vnode list, if on one. 2819 */ 2820 vp = bp->b_vp; /* XXX */ 2821 bo = bp->b_bufobj; 2822 BO_LOCK(bo); 2823 buf_vlist_remove(bp); 2824 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2825 bo->bo_flag &= ~BO_ONWORKLST; 2826 mtx_lock(&sync_mtx); 2827 LIST_REMOVE(bo, bo_synclist); 2828 syncer_worklist_len--; 2829 mtx_unlock(&sync_mtx); 2830 } 2831 bp->b_vp = NULL; 2832 bp->b_bufobj = NULL; 2833 BO_UNLOCK(bo); 2834 vdrop(vp); 2835 } 2836 2837 /* 2838 * Add an item to the syncer work queue. 2839 */ 2840 static void 2841 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2842 { 2843 int slot; 2844 2845 ASSERT_BO_WLOCKED(bo); 2846 2847 mtx_lock(&sync_mtx); 2848 if (bo->bo_flag & BO_ONWORKLST) 2849 LIST_REMOVE(bo, bo_synclist); 2850 else { 2851 bo->bo_flag |= BO_ONWORKLST; 2852 syncer_worklist_len++; 2853 } 2854 2855 if (delay > syncer_maxdelay - 2) 2856 delay = syncer_maxdelay - 2; 2857 slot = (syncer_delayno + delay) & syncer_mask; 2858 2859 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2860 mtx_unlock(&sync_mtx); 2861 } 2862 2863 static int 2864 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2865 { 2866 int error, len; 2867 2868 mtx_lock(&sync_mtx); 2869 len = syncer_worklist_len - sync_vnode_count; 2870 mtx_unlock(&sync_mtx); 2871 error = SYSCTL_OUT(req, &len, sizeof(len)); 2872 return (error); 2873 } 2874 2875 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2876 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2877 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2878 2879 static struct proc *updateproc; 2880 static void sched_sync(void); 2881 static struct kproc_desc up_kp = { 2882 "syncer", 2883 sched_sync, 2884 &updateproc 2885 }; 2886 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2887 2888 static int 2889 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2890 { 2891 struct vnode *vp; 2892 struct mount *mp; 2893 2894 *bo = LIST_FIRST(slp); 2895 if (*bo == NULL) 2896 return (0); 2897 vp = bo2vnode(*bo); 2898 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2899 return (1); 2900 /* 2901 * We use vhold in case the vnode does not 2902 * successfully sync. vhold prevents the vnode from 2903 * going away when we unlock the sync_mtx so that 2904 * we can acquire the vnode interlock. 2905 */ 2906 vholdl(vp); 2907 mtx_unlock(&sync_mtx); 2908 VI_UNLOCK(vp); 2909 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2910 vdrop(vp); 2911 mtx_lock(&sync_mtx); 2912 return (*bo == LIST_FIRST(slp)); 2913 } 2914 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2915 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2916 ("suspended mp syncing vp %p", vp)); 2917 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2918 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2919 VOP_UNLOCK(vp); 2920 vn_finished_write(mp); 2921 BO_LOCK(*bo); 2922 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2923 /* 2924 * Put us back on the worklist. The worklist 2925 * routine will remove us from our current 2926 * position and then add us back in at a later 2927 * position. 
2928 */ 2929 vn_syncer_add_to_worklist(*bo, syncdelay); 2930 } 2931 BO_UNLOCK(*bo); 2932 vdrop(vp); 2933 mtx_lock(&sync_mtx); 2934 return (0); 2935 } 2936 2937 static int first_printf = 1; 2938 2939 /* 2940 * System filesystem synchronizer daemon. 2941 */ 2942 static void 2943 sched_sync(void) 2944 { 2945 struct synclist *next, *slp; 2946 struct bufobj *bo; 2947 long starttime; 2948 struct thread *td = curthread; 2949 int last_work_seen; 2950 int net_worklist_len; 2951 int syncer_final_iter; 2952 int error; 2953 2954 last_work_seen = 0; 2955 syncer_final_iter = 0; 2956 syncer_state = SYNCER_RUNNING; 2957 starttime = time_uptime; 2958 td->td_pflags |= TDP_NORUNNINGBUF; 2959 2960 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2961 SHUTDOWN_PRI_LAST); 2962 2963 mtx_lock(&sync_mtx); 2964 for (;;) { 2965 if (syncer_state == SYNCER_FINAL_DELAY && 2966 syncer_final_iter == 0) { 2967 mtx_unlock(&sync_mtx); 2968 kproc_suspend_check(td->td_proc); 2969 mtx_lock(&sync_mtx); 2970 } 2971 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2972 if (syncer_state != SYNCER_RUNNING && 2973 starttime != time_uptime) { 2974 if (first_printf) { 2975 printf("\nSyncing disks, vnodes remaining... "); 2976 first_printf = 0; 2977 } 2978 printf("%d ", net_worklist_len); 2979 } 2980 starttime = time_uptime; 2981 2982 /* 2983 * Push files whose dirty time has expired. Be careful 2984 * of interrupt race on slp queue. 2985 * 2986 * Skip over empty worklist slots when shutting down. 2987 */ 2988 do { 2989 slp = &syncer_workitem_pending[syncer_delayno]; 2990 syncer_delayno += 1; 2991 if (syncer_delayno == syncer_maxdelay) 2992 syncer_delayno = 0; 2993 next = &syncer_workitem_pending[syncer_delayno]; 2994 /* 2995 * If the worklist has wrapped since the 2996 * it was emptied of all but syncer vnodes, 2997 * switch to the FINAL_DELAY state and run 2998 * for one more second. 2999 */ 3000 if (syncer_state == SYNCER_SHUTTING_DOWN && 3001 net_worklist_len == 0 && 3002 last_work_seen == syncer_delayno) { 3003 syncer_state = SYNCER_FINAL_DELAY; 3004 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 3005 } 3006 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 3007 syncer_worklist_len > 0); 3008 3009 /* 3010 * Keep track of the last time there was anything 3011 * on the worklist other than syncer vnodes. 3012 * Return to the SHUTTING_DOWN state if any 3013 * new work appears. 3014 */ 3015 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 3016 last_work_seen = syncer_delayno; 3017 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 3018 syncer_state = SYNCER_SHUTTING_DOWN; 3019 while (!LIST_EMPTY(slp)) { 3020 error = sync_vnode(slp, &bo, td); 3021 if (error == 1) { 3022 LIST_REMOVE(bo, bo_synclist); 3023 LIST_INSERT_HEAD(next, bo, bo_synclist); 3024 continue; 3025 } 3026 3027 if (first_printf == 0) { 3028 /* 3029 * Drop the sync mutex, because some watchdog 3030 * drivers need to sleep while patting 3031 */ 3032 mtx_unlock(&sync_mtx); 3033 wdog_kern_pat(WD_LASTVAL); 3034 mtx_lock(&sync_mtx); 3035 } 3036 } 3037 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 3038 syncer_final_iter--; 3039 /* 3040 * The variable rushjob allows the kernel to speed up the 3041 * processing of the filesystem syncer process. A rushjob 3042 * value of N tells the filesystem syncer to process the next 3043 * N seconds worth of work on its queue ASAP. 
Currently rushjob 3044 * is used by the soft update code to speed up the filesystem 3045 * syncer process when the incore state is getting so far 3046 * ahead of the disk that the kernel memory pool is being 3047 * threatened with exhaustion. 3048 */ 3049 if (rushjob > 0) { 3050 rushjob -= 1; 3051 continue; 3052 } 3053 /* 3054 * Just sleep for a short period of time between 3055 * iterations when shutting down to allow some I/O 3056 * to happen. 3057 * 3058 * If it has taken us less than a second to process the 3059 * current work, then wait. Otherwise start right over 3060 * again. We can still lose time if any single round 3061 * takes more than two seconds, but it does not really 3062 * matter as we are just trying to generally pace the 3063 * filesystem activity. 3064 */ 3065 if (syncer_state != SYNCER_RUNNING || 3066 time_uptime == starttime) { 3067 thread_lock(td); 3068 sched_prio(td, PPAUSE); 3069 thread_unlock(td); 3070 } 3071 if (syncer_state != SYNCER_RUNNING) 3072 cv_timedwait(&sync_wakeup, &sync_mtx, 3073 hz / SYNCER_SHUTDOWN_SPEEDUP); 3074 else if (time_uptime == starttime) 3075 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3076 } 3077 } 3078 3079 /* 3080 * Request the syncer daemon to speed up its work. 3081 * We never push it to speed up more than half of its 3082 * normal turn time, otherwise it could take over the cpu. 3083 */ 3084 int 3085 speedup_syncer(void) 3086 { 3087 int ret = 0; 3088 3089 mtx_lock(&sync_mtx); 3090 if (rushjob < syncdelay / 2) { 3091 rushjob += 1; 3092 stat_rush_requests += 1; 3093 ret = 1; 3094 } 3095 mtx_unlock(&sync_mtx); 3096 cv_broadcast(&sync_wakeup); 3097 return (ret); 3098 } 3099 3100 /* 3101 * Tell the syncer to speed up its work and run though its work 3102 * list several times, then tell it to shut down. 3103 */ 3104 static void 3105 syncer_shutdown(void *arg, int howto) 3106 { 3107 3108 if (howto & RB_NOSYNC) 3109 return; 3110 mtx_lock(&sync_mtx); 3111 syncer_state = SYNCER_SHUTTING_DOWN; 3112 rushjob = 0; 3113 mtx_unlock(&sync_mtx); 3114 cv_broadcast(&sync_wakeup); 3115 kproc_shutdown(arg, howto); 3116 } 3117 3118 void 3119 syncer_suspend(void) 3120 { 3121 3122 syncer_shutdown(updateproc, 0); 3123 } 3124 3125 void 3126 syncer_resume(void) 3127 { 3128 3129 mtx_lock(&sync_mtx); 3130 first_printf = 1; 3131 syncer_state = SYNCER_RUNNING; 3132 mtx_unlock(&sync_mtx); 3133 cv_broadcast(&sync_wakeup); 3134 kproc_resume(updateproc); 3135 } 3136 3137 /* 3138 * Move the buffer between the clean and dirty lists of its vnode. 3139 */ 3140 void 3141 reassignbuf(struct buf *bp) 3142 { 3143 struct vnode *vp; 3144 struct bufobj *bo; 3145 int delay; 3146 #ifdef INVARIANTS 3147 struct bufv *bv; 3148 #endif 3149 3150 vp = bp->b_vp; 3151 bo = bp->b_bufobj; 3152 3153 KASSERT((bp->b_flags & B_PAGING) == 0, 3154 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3155 3156 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3157 bp, bp->b_vp, bp->b_flags); 3158 3159 BO_LOCK(bo); 3160 buf_vlist_remove(bp); 3161 3162 /* 3163 * If dirty, put on list of dirty buffers; otherwise insert onto list 3164 * of clean buffers. 
3165 */ 3166 if (bp->b_flags & B_DELWRI) { 3167 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3168 switch (vp->v_type) { 3169 case VDIR: 3170 delay = dirdelay; 3171 break; 3172 case VCHR: 3173 delay = metadelay; 3174 break; 3175 default: 3176 delay = filedelay; 3177 } 3178 vn_syncer_add_to_worklist(bo, delay); 3179 } 3180 buf_vlist_add(bp, bo, BX_VNDIRTY); 3181 } else { 3182 buf_vlist_add(bp, bo, BX_VNCLEAN); 3183 3184 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3185 mtx_lock(&sync_mtx); 3186 LIST_REMOVE(bo, bo_synclist); 3187 syncer_worklist_len--; 3188 mtx_unlock(&sync_mtx); 3189 bo->bo_flag &= ~BO_ONWORKLST; 3190 } 3191 } 3192 #ifdef INVARIANTS 3193 bv = &bo->bo_clean; 3194 bp = TAILQ_FIRST(&bv->bv_hd); 3195 KASSERT(bp == NULL || bp->b_bufobj == bo, 3196 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3197 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3198 KASSERT(bp == NULL || bp->b_bufobj == bo, 3199 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3200 bv = &bo->bo_dirty; 3201 bp = TAILQ_FIRST(&bv->bv_hd); 3202 KASSERT(bp == NULL || bp->b_bufobj == bo, 3203 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3204 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3205 KASSERT(bp == NULL || bp->b_bufobj == bo, 3206 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3207 #endif 3208 BO_UNLOCK(bo); 3209 } 3210 3211 static void 3212 v_init_counters(struct vnode *vp) 3213 { 3214 3215 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3216 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3217 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3218 3219 refcount_init(&vp->v_holdcnt, 1); 3220 refcount_init(&vp->v_usecount, 1); 3221 } 3222 3223 /* 3224 * Grab a particular vnode from the free list, increment its 3225 * reference count and lock it. VIRF_DOOMED is set if the vnode 3226 * is being destroyed. Only callers who specify LK_RETRY will 3227 * see doomed vnodes. If inactive processing was delayed in 3228 * vput try to do it here. 3229 * 3230 * usecount is manipulated using atomics without holding any locks. 3231 * 3232 * holdcnt can be manipulated using atomics without holding any locks, 3233 * except when transitioning 1<->0, in which case the interlock is held. 3234 * 3235 * Consumers which don't guarantee liveness of the vnode can use SMR to 3236 * try to get a reference. Note this operation can fail since the vnode 3237 * may be awaiting getting freed by the time they get to it. 
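 *
 * A lockless-lookup consumer is expected to use it roughly as follows
 * (a sketch; the lookup itself is whatever SMR-protected structure the
 * caller walks, and lkflags are the caller's lock flags):
 *
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<restart the lookup>;
 *	error = vget_finish(vp, lkflags, vs);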
3238 */ 3239 enum vgetstate 3240 vget_prep_smr(struct vnode *vp) 3241 { 3242 enum vgetstate vs; 3243 3244 VFS_SMR_ASSERT_ENTERED(); 3245 3246 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3247 vs = VGET_USECOUNT; 3248 } else { 3249 if (vhold_smr(vp)) 3250 vs = VGET_HOLDCNT; 3251 else 3252 vs = VGET_NONE; 3253 } 3254 return (vs); 3255 } 3256 3257 enum vgetstate 3258 vget_prep(struct vnode *vp) 3259 { 3260 enum vgetstate vs; 3261 3262 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3263 vs = VGET_USECOUNT; 3264 } else { 3265 vhold(vp); 3266 vs = VGET_HOLDCNT; 3267 } 3268 return (vs); 3269 } 3270 3271 void 3272 vget_abort(struct vnode *vp, enum vgetstate vs) 3273 { 3274 3275 switch (vs) { 3276 case VGET_USECOUNT: 3277 vrele(vp); 3278 break; 3279 case VGET_HOLDCNT: 3280 vdrop(vp); 3281 break; 3282 default: 3283 __assert_unreachable(); 3284 } 3285 } 3286 3287 int 3288 vget(struct vnode *vp, int flags) 3289 { 3290 enum vgetstate vs; 3291 3292 vs = vget_prep(vp); 3293 return (vget_finish(vp, flags, vs)); 3294 } 3295 3296 int 3297 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3298 { 3299 int error; 3300 3301 if ((flags & LK_INTERLOCK) != 0) 3302 ASSERT_VI_LOCKED(vp, __func__); 3303 else 3304 ASSERT_VI_UNLOCKED(vp, __func__); 3305 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3306 VNPASS(vp->v_holdcnt > 0, vp); 3307 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3308 3309 error = vn_lock(vp, flags); 3310 if (__predict_false(error != 0)) { 3311 vget_abort(vp, vs); 3312 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3313 vp); 3314 return (error); 3315 } 3316 3317 vget_finish_ref(vp, vs); 3318 return (0); 3319 } 3320 3321 void 3322 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3323 { 3324 int old; 3325 3326 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3327 VNPASS(vp->v_holdcnt > 0, vp); 3328 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3329 3330 if (vs == VGET_USECOUNT) 3331 return; 3332 3333 /* 3334 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3335 * the vnode around. Otherwise someone else lended their hold count and 3336 * we have to drop ours. 3337 */ 3338 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3339 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3340 if (old != 0) { 3341 #ifdef INVARIANTS 3342 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3343 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3344 #else 3345 refcount_release(&vp->v_holdcnt); 3346 #endif 3347 } 3348 } 3349 3350 void 3351 vref(struct vnode *vp) 3352 { 3353 enum vgetstate vs; 3354 3355 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3356 vs = vget_prep(vp); 3357 vget_finish_ref(vp, vs); 3358 } 3359 3360 void 3361 vrefact(struct vnode *vp) 3362 { 3363 3364 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3365 #ifdef INVARIANTS 3366 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3367 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3368 #else 3369 refcount_acquire(&vp->v_usecount); 3370 #endif 3371 } 3372 3373 void 3374 vlazy(struct vnode *vp) 3375 { 3376 struct mount *mp; 3377 3378 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3379 3380 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3381 return; 3382 /* 3383 * We may get here for inactive routines after the vnode got doomed. 
3384 */ 3385 if (VN_IS_DOOMED(vp)) 3386 return; 3387 mp = vp->v_mount; 3388 mtx_lock(&mp->mnt_listmtx); 3389 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3390 vp->v_mflag |= VMP_LAZYLIST; 3391 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3392 mp->mnt_lazyvnodelistsize++; 3393 } 3394 mtx_unlock(&mp->mnt_listmtx); 3395 } 3396 3397 static void 3398 vunlazy(struct vnode *vp) 3399 { 3400 struct mount *mp; 3401 3402 ASSERT_VI_LOCKED(vp, __func__); 3403 VNPASS(!VN_IS_DOOMED(vp), vp); 3404 3405 mp = vp->v_mount; 3406 mtx_lock(&mp->mnt_listmtx); 3407 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3408 /* 3409 * Don't remove the vnode from the lazy list if another thread 3410 * has increased the hold count. It may have re-enqueued the 3411 * vnode to the lazy list and is now responsible for its 3412 * removal. 3413 */ 3414 if (vp->v_holdcnt == 0) { 3415 vp->v_mflag &= ~VMP_LAZYLIST; 3416 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3417 mp->mnt_lazyvnodelistsize--; 3418 } 3419 mtx_unlock(&mp->mnt_listmtx); 3420 } 3421 3422 /* 3423 * This routine is only meant to be called from vgonel prior to dooming 3424 * the vnode. 3425 */ 3426 static void 3427 vunlazy_gone(struct vnode *vp) 3428 { 3429 struct mount *mp; 3430 3431 ASSERT_VOP_ELOCKED(vp, __func__); 3432 ASSERT_VI_LOCKED(vp, __func__); 3433 VNPASS(!VN_IS_DOOMED(vp), vp); 3434 3435 if (vp->v_mflag & VMP_LAZYLIST) { 3436 mp = vp->v_mount; 3437 mtx_lock(&mp->mnt_listmtx); 3438 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3439 vp->v_mflag &= ~VMP_LAZYLIST; 3440 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3441 mp->mnt_lazyvnodelistsize--; 3442 mtx_unlock(&mp->mnt_listmtx); 3443 } 3444 } 3445 3446 static void 3447 vdefer_inactive(struct vnode *vp) 3448 { 3449 3450 ASSERT_VI_LOCKED(vp, __func__); 3451 VNPASS(vp->v_holdcnt > 0, vp); 3452 if (VN_IS_DOOMED(vp)) { 3453 vdropl(vp); 3454 return; 3455 } 3456 if (vp->v_iflag & VI_DEFINACT) { 3457 VNPASS(vp->v_holdcnt > 1, vp); 3458 vdropl(vp); 3459 return; 3460 } 3461 if (vp->v_usecount > 0) { 3462 vp->v_iflag &= ~VI_OWEINACT; 3463 vdropl(vp); 3464 return; 3465 } 3466 vlazy(vp); 3467 vp->v_iflag |= VI_DEFINACT; 3468 VI_UNLOCK(vp); 3469 atomic_add_long(&deferred_inact, 1); 3470 } 3471 3472 static void 3473 vdefer_inactive_unlocked(struct vnode *vp) 3474 { 3475 3476 VI_LOCK(vp); 3477 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3478 vdropl(vp); 3479 return; 3480 } 3481 vdefer_inactive(vp); 3482 } 3483 3484 enum vput_op { VRELE, VPUT, VUNREF }; 3485 3486 /* 3487 * Handle ->v_usecount transitioning to 0. 3488 * 3489 * By releasing the last usecount we take ownership of the hold count which 3490 * provides liveness of the vnode, meaning we have to vdrop. 3491 * 3492 * For all vnodes we may need to perform inactive processing. It requires an 3493 * exclusive lock on the vnode, while it is legal to call here with only a 3494 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3495 * inactive processing gets deferred to the syncer. 3496 * 3497 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3498 * on the lock being held all the way until VOP_INACTIVE. This in particular 3499 * happens with UFS which adds half-constructed vnodes to the hash, where they 3500 * can be found by other code. 
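 *
 * For reference, the entry points below only get here once the last use
 * reference is dropped, roughly:
 *
 *	if (!refcount_release(&vp->v_usecount))
 *		return;			// others still hold use references
 *	vput_final(vp, VRELE);		// or VPUT / VUNREF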
3501 */ 3502 static void 3503 vput_final(struct vnode *vp, enum vput_op func) 3504 { 3505 int error; 3506 bool want_unlock; 3507 3508 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3509 VNPASS(vp->v_holdcnt > 0, vp); 3510 3511 VI_LOCK(vp); 3512 3513 /* 3514 * By the time we got here someone else might have transitioned 3515 * the count back to > 0. 3516 */ 3517 if (vp->v_usecount > 0) 3518 goto out; 3519 3520 /* 3521 * If the vnode is doomed vgone already performed inactive processing 3522 * (if needed). 3523 */ 3524 if (VN_IS_DOOMED(vp)) 3525 goto out; 3526 3527 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3528 goto out; 3529 3530 if (vp->v_iflag & VI_DOINGINACT) 3531 goto out; 3532 3533 /* 3534 * Locking operations here will drop the interlock and possibly the 3535 * vnode lock, opening a window where the vnode can get doomed all the 3536 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3537 * perform inactive. 3538 */ 3539 vp->v_iflag |= VI_OWEINACT; 3540 want_unlock = false; 3541 error = 0; 3542 switch (func) { 3543 case VRELE: 3544 switch (VOP_ISLOCKED(vp)) { 3545 case LK_EXCLUSIVE: 3546 break; 3547 case LK_EXCLOTHER: 3548 case 0: 3549 want_unlock = true; 3550 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3551 VI_LOCK(vp); 3552 break; 3553 default: 3554 /* 3555 * The lock has at least one sharer, but we have no way 3556 * to conclude whether this is us. Play it safe and 3557 * defer processing. 3558 */ 3559 error = EAGAIN; 3560 break; 3561 } 3562 break; 3563 case VPUT: 3564 want_unlock = true; 3565 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3566 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3567 LK_NOWAIT); 3568 VI_LOCK(vp); 3569 } 3570 break; 3571 case VUNREF: 3572 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3573 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3574 VI_LOCK(vp); 3575 } 3576 break; 3577 } 3578 if (error == 0) { 3579 if (func == VUNREF) { 3580 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3581 ("recursive vunref")); 3582 vp->v_vflag |= VV_UNREF; 3583 } 3584 for (;;) { 3585 error = vinactive(vp); 3586 if (want_unlock) 3587 VOP_UNLOCK(vp); 3588 if (error != ERELOOKUP || !want_unlock) 3589 break; 3590 VOP_LOCK(vp, LK_EXCLUSIVE); 3591 } 3592 if (func == VUNREF) 3593 vp->v_vflag &= ~VV_UNREF; 3594 vdropl(vp); 3595 } else { 3596 vdefer_inactive(vp); 3597 } 3598 return; 3599 out: 3600 if (func == VPUT) 3601 VOP_UNLOCK(vp); 3602 vdropl(vp); 3603 } 3604 3605 /* 3606 * Decrement ->v_usecount for a vnode. 3607 * 3608 * Releasing the last use count requires additional processing, see vput_final 3609 * above for details. 3610 * 3611 * Comment above each variant denotes lock state on entry and exit. 
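 *
 * In short:
 *
 *	vrele():  in: any lock state,	out: unchanged
 *	vput():   in: locked,		out: unlocked
 *	vunref(): in: locked,		out: still locked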
3612 */ 3613 3614 /* 3615 * in: any 3616 * out: same as passed in 3617 */ 3618 void 3619 vrele(struct vnode *vp) 3620 { 3621 3622 ASSERT_VI_UNLOCKED(vp, __func__); 3623 if (!refcount_release(&vp->v_usecount)) 3624 return; 3625 vput_final(vp, VRELE); 3626 } 3627 3628 /* 3629 * in: locked 3630 * out: unlocked 3631 */ 3632 void 3633 vput(struct vnode *vp) 3634 { 3635 3636 ASSERT_VOP_LOCKED(vp, __func__); 3637 ASSERT_VI_UNLOCKED(vp, __func__); 3638 if (!refcount_release(&vp->v_usecount)) { 3639 VOP_UNLOCK(vp); 3640 return; 3641 } 3642 vput_final(vp, VPUT); 3643 } 3644 3645 /* 3646 * in: locked 3647 * out: locked 3648 */ 3649 void 3650 vunref(struct vnode *vp) 3651 { 3652 3653 ASSERT_VOP_LOCKED(vp, __func__); 3654 ASSERT_VI_UNLOCKED(vp, __func__); 3655 if (!refcount_release(&vp->v_usecount)) 3656 return; 3657 vput_final(vp, VUNREF); 3658 } 3659 3660 void 3661 vhold(struct vnode *vp) 3662 { 3663 int old; 3664 3665 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3666 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3667 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3668 ("%s: wrong hold count %d", __func__, old)); 3669 if (old == 0) 3670 vfs_freevnodes_dec(); 3671 } 3672 3673 void 3674 vholdnz(struct vnode *vp) 3675 { 3676 3677 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3678 #ifdef INVARIANTS 3679 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3680 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3681 ("%s: wrong hold count %d", __func__, old)); 3682 #else 3683 atomic_add_int(&vp->v_holdcnt, 1); 3684 #endif 3685 } 3686 3687 /* 3688 * Grab a hold count unless the vnode is freed. 3689 * 3690 * Only use this routine if vfs smr is the only protection you have against 3691 * freeing the vnode. 3692 * 3693 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3694 * is not set. After the flag is set the vnode becomes immutable to anyone but 3695 * the thread which managed to set the flag. 3696 * 3697 * It may be tempting to replace the loop with: 3698 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3699 * if (count & VHOLD_NO_SMR) { 3700 * backpedal and error out; 3701 * } 3702 * 3703 * However, while this is more performant, it hinders debugging by eliminating 3704 * the previously mentioned invariant. 3705 */ 3706 bool 3707 vhold_smr(struct vnode *vp) 3708 { 3709 int count; 3710 3711 VFS_SMR_ASSERT_ENTERED(); 3712 3713 count = atomic_load_int(&vp->v_holdcnt); 3714 for (;;) { 3715 if (count & VHOLD_NO_SMR) { 3716 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3717 ("non-zero hold count with flags %d\n", count)); 3718 return (false); 3719 } 3720 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3721 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3722 if (count == 0) 3723 vfs_freevnodes_dec(); 3724 return (true); 3725 } 3726 } 3727 } 3728 3729 /* 3730 * Hold a free vnode for recycling. 3731 * 3732 * Note: vnode_init references this comment. 3733 * 3734 * Attempts to recycle only need the global vnode list lock and have no use for 3735 * SMR. 3736 * 3737 * However, vnodes get inserted into the global list before they get fully 3738 * initialized and stay there until UMA decides to free the memory. This in 3739 * particular means the target can be found before it becomes usable and after 3740 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3741 * VHOLD_NO_SMR. 3742 * 3743 * Note: the vnode may gain more references after we transition the count 0->1. 
3744 */ 3745 static bool 3746 vhold_recycle_free(struct vnode *vp) 3747 { 3748 int count; 3749 3750 mtx_assert(&vnode_list_mtx, MA_OWNED); 3751 3752 count = atomic_load_int(&vp->v_holdcnt); 3753 for (;;) { 3754 if (count & VHOLD_NO_SMR) { 3755 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3756 ("non-zero hold count with flags %d\n", count)); 3757 return (false); 3758 } 3759 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3760 if (count > 0) { 3761 return (false); 3762 } 3763 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3764 vfs_freevnodes_dec(); 3765 return (true); 3766 } 3767 } 3768 } 3769 3770 static void __noinline 3771 vdbatch_process(struct vdbatch *vd) 3772 { 3773 struct vnode *vp; 3774 int i; 3775 3776 mtx_assert(&vd->lock, MA_OWNED); 3777 MPASS(curthread->td_pinned > 0); 3778 MPASS(vd->index == VDBATCH_SIZE); 3779 3780 /* 3781 * Attempt to requeue the passed batch, but give up easily. 3782 * 3783 * Despite batching the mechanism is prone to transient *significant* 3784 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3785 * if multiple CPUs get here (one real-world example is highly parallel 3786 * do-nothing make , which will stat *tons* of vnodes). Since it is 3787 * quasi-LRU (read: not that great even if fully honoured) just dodge 3788 * the problem. Parties which don't like it are welcome to implement 3789 * something better. 3790 */ 3791 critical_enter(); 3792 if (mtx_trylock(&vnode_list_mtx)) { 3793 for (i = 0; i < VDBATCH_SIZE; i++) { 3794 vp = vd->tab[i]; 3795 vd->tab[i] = NULL; 3796 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3797 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3798 MPASS(vp->v_dbatchcpu != NOCPU); 3799 vp->v_dbatchcpu = NOCPU; 3800 } 3801 mtx_unlock(&vnode_list_mtx); 3802 } else { 3803 counter_u64_add(vnode_skipped_requeues, 1); 3804 3805 for (i = 0; i < VDBATCH_SIZE; i++) { 3806 vp = vd->tab[i]; 3807 vd->tab[i] = NULL; 3808 MPASS(vp->v_dbatchcpu != NOCPU); 3809 vp->v_dbatchcpu = NOCPU; 3810 } 3811 } 3812 vd->index = 0; 3813 critical_exit(); 3814 } 3815 3816 static void 3817 vdbatch_enqueue(struct vnode *vp) 3818 { 3819 struct vdbatch *vd; 3820 3821 ASSERT_VI_LOCKED(vp, __func__); 3822 VNPASS(!VN_IS_DOOMED(vp), vp); 3823 3824 if (vp->v_dbatchcpu != NOCPU) { 3825 VI_UNLOCK(vp); 3826 return; 3827 } 3828 3829 sched_pin(); 3830 vd = DPCPU_PTR(vd); 3831 mtx_lock(&vd->lock); 3832 MPASS(vd->index < VDBATCH_SIZE); 3833 MPASS(vd->tab[vd->index] == NULL); 3834 /* 3835 * A hack: we depend on being pinned so that we know what to put in 3836 * ->v_dbatchcpu. 3837 */ 3838 vp->v_dbatchcpu = curcpu; 3839 vd->tab[vd->index] = vp; 3840 vd->index++; 3841 VI_UNLOCK(vp); 3842 if (vd->index == VDBATCH_SIZE) 3843 vdbatch_process(vd); 3844 mtx_unlock(&vd->lock); 3845 sched_unpin(); 3846 } 3847 3848 /* 3849 * This routine must only be called for vnodes which are about to be 3850 * deallocated. Supporting dequeue for arbitrary vndoes would require 3851 * validating that the locked batch matches. 
3852 */ 3853 static void 3854 vdbatch_dequeue(struct vnode *vp) 3855 { 3856 struct vdbatch *vd; 3857 int i; 3858 short cpu; 3859 3860 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3861 3862 cpu = vp->v_dbatchcpu; 3863 if (cpu == NOCPU) 3864 return; 3865 3866 vd = DPCPU_ID_PTR(cpu, vd); 3867 mtx_lock(&vd->lock); 3868 for (i = 0; i < vd->index; i++) { 3869 if (vd->tab[i] != vp) 3870 continue; 3871 vp->v_dbatchcpu = NOCPU; 3872 vd->index--; 3873 vd->tab[i] = vd->tab[vd->index]; 3874 vd->tab[vd->index] = NULL; 3875 break; 3876 } 3877 mtx_unlock(&vd->lock); 3878 /* 3879 * Either we dequeued the vnode above or the target CPU beat us to it. 3880 */ 3881 MPASS(vp->v_dbatchcpu == NOCPU); 3882 } 3883 3884 /* 3885 * Drop the hold count of the vnode. If this is the last reference to 3886 * the vnode we place it on the free list unless it has been vgone'd 3887 * (marked VIRF_DOOMED) in which case we will free it. 3888 * 3889 * Because the vnode vm object keeps a hold reference on the vnode if 3890 * there is at least one resident non-cached page, the vnode cannot 3891 * leave the active list without the page cleanup done. 3892 */ 3893 static void __noinline 3894 vdropl_final(struct vnode *vp) 3895 { 3896 3897 ASSERT_VI_LOCKED(vp, __func__); 3898 VNPASS(VN_IS_DOOMED(vp), vp); 3899 /* 3900 * Set the VHOLD_NO_SMR flag. 3901 * 3902 * We may be racing against vhold_smr. If they win we can just pretend 3903 * we never got this far, they will vdrop later. 3904 */ 3905 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3906 vfs_freevnodes_inc(); 3907 VI_UNLOCK(vp); 3908 /* 3909 * We lost the aforementioned race. Any subsequent access is 3910 * invalid as they might have managed to vdropl on their own. 3911 */ 3912 return; 3913 } 3914 /* 3915 * Don't bump freevnodes as this one is going away. 3916 */ 3917 freevnode(vp); 3918 } 3919 3920 void 3921 vdrop(struct vnode *vp) 3922 { 3923 3924 ASSERT_VI_UNLOCKED(vp, __func__); 3925 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3926 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3927 return; 3928 VI_LOCK(vp); 3929 vdropl(vp); 3930 } 3931 3932 static void __always_inline 3933 vdropl_impl(struct vnode *vp, bool enqueue) 3934 { 3935 3936 ASSERT_VI_LOCKED(vp, __func__); 3937 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3938 if (!refcount_release(&vp->v_holdcnt)) { 3939 VI_UNLOCK(vp); 3940 return; 3941 } 3942 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3943 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3944 if (VN_IS_DOOMED(vp)) { 3945 vdropl_final(vp); 3946 return; 3947 } 3948 3949 vfs_freevnodes_inc(); 3950 if (vp->v_mflag & VMP_LAZYLIST) { 3951 vunlazy(vp); 3952 } 3953 3954 if (!enqueue) { 3955 VI_UNLOCK(vp); 3956 return; 3957 } 3958 3959 /* 3960 * Also unlocks the interlock. We can't assert on it as we 3961 * released our hold and by now the vnode might have been 3962 * freed. 3963 */ 3964 vdbatch_enqueue(vp); 3965 } 3966 3967 void 3968 vdropl(struct vnode *vp) 3969 { 3970 3971 vdropl_impl(vp, true); 3972 } 3973 3974 /* 3975 * vdrop a vnode when recycling 3976 * 3977 * This is a special case routine only to be used when recycling; it differs from 3978 * regular vdrop by not requeueing the vnode on the LRU. 3979 * 3980 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3981 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3982 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3983 * loop which can last for as long as writes are frozen.
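 *
 * A minimal sketch of the intended use (hypothetical caller; assumes the
 * hold reference was obtained with vhold_recycle_free() and the interlock
 * is held, as vdropl_recycle() requires):
 *
 *	VI_LOCK(vp);
 *	... conclude the vnode cannot be recycled after all ...
 *	vdropl_recycle(vp);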
3984 */ 3985 static void 3986 vdropl_recycle(struct vnode *vp) 3987 { 3988 3989 vdropl_impl(vp, false); 3990 } 3991 3992 static void 3993 vdrop_recycle(struct vnode *vp) 3994 { 3995 3996 VI_LOCK(vp); 3997 vdropl_recycle(vp); 3998 } 3999 4000 /* 4001 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 4002 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 4003 */ 4004 static int 4005 vinactivef(struct vnode *vp) 4006 { 4007 struct vm_object *obj; 4008 int error; 4009 4010 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4011 ASSERT_VI_LOCKED(vp, "vinactive"); 4012 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 4013 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4014 vp->v_iflag |= VI_DOINGINACT; 4015 vp->v_iflag &= ~VI_OWEINACT; 4016 VI_UNLOCK(vp); 4017 /* 4018 * Before moving off the active list, we must be sure that any 4019 * modified pages are converted into the vnode's dirty 4020 * buffers, since these will no longer be checked once the 4021 * vnode is on the inactive list. 4022 * 4023 * The write-out of the dirty pages is asynchronous. At the 4024 * point that VOP_INACTIVE() is called, there could still be 4025 * pending I/O and dirty pages in the object. 4026 */ 4027 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4028 vm_object_mightbedirty(obj)) { 4029 VM_OBJECT_WLOCK(obj); 4030 vm_object_page_clean(obj, 0, 0, 0); 4031 VM_OBJECT_WUNLOCK(obj); 4032 } 4033 error = VOP_INACTIVE(vp); 4034 VI_LOCK(vp); 4035 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 4036 vp->v_iflag &= ~VI_DOINGINACT; 4037 return (error); 4038 } 4039 4040 int 4041 vinactive(struct vnode *vp) 4042 { 4043 4044 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4045 ASSERT_VI_LOCKED(vp, "vinactive"); 4046 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4047 4048 if ((vp->v_iflag & VI_OWEINACT) == 0) 4049 return (0); 4050 if (vp->v_iflag & VI_DOINGINACT) 4051 return (0); 4052 if (vp->v_usecount > 0) { 4053 vp->v_iflag &= ~VI_OWEINACT; 4054 return (0); 4055 } 4056 return (vinactivef(vp)); 4057 } 4058 4059 /* 4060 * Remove any vnodes in the vnode table belonging to mount point mp. 4061 * 4062 * If FORCECLOSE is not specified, there should not be any active ones, 4063 * return error if any are found (nb: this is a user error, not a 4064 * system error). If FORCECLOSE is specified, detach any active vnodes 4065 * that are found. 4066 * 4067 * If WRITECLOSE is set, only flush out regular file vnodes open for 4068 * writing. 4069 * 4070 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4071 * 4072 * `rootrefs' specifies the base reference count for the root vnode 4073 * of this filesystem. The root vnode is considered busy if its 4074 * v_usecount exceeds this value. On a successful return, vflush(, td) 4075 * will call vrele() on the root vnode exactly rootrefs times. 4076 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4077 * be zero. 4078 */ 4079 #ifdef DIAGNOSTIC 4080 static int busyprt = 0; /* print out busy vnodes */ 4081 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4082 #endif 4083 4084 int 4085 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4086 { 4087 struct vnode *vp, *mvp, *rootvp = NULL; 4088 struct vattr vattr; 4089 int busy = 0, error; 4090 4091 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4092 rootrefs, flags); 4093 if (rootrefs > 0) { 4094 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4095 ("vflush: bad args")); 4096 /* 4097 * Get the filesystem root vnode. 
We can vput() it 4098 * immediately, since with rootrefs > 0, it won't go away. 4099 */ 4100 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 4101 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 4102 __func__, error); 4103 return (error); 4104 } 4105 vput(rootvp); 4106 } 4107 loop: 4108 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 4109 vholdl(vp); 4110 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 4111 if (error) { 4112 vdrop(vp); 4113 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4114 goto loop; 4115 } 4116 /* 4117 * Skip over vnodes marked VV_SYSTEM. 4118 */ 4119 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 4120 VOP_UNLOCK(vp); 4121 vdrop(vp); 4122 continue; 4123 } 4124 /* 4125 * If WRITECLOSE is set, flush out unlinked but still open 4126 * files (even if open only for reading) and regular file 4127 * vnodes open for writing. 4128 */ 4129 if (flags & WRITECLOSE) { 4130 if (vp->v_object != NULL) { 4131 VM_OBJECT_WLOCK(vp->v_object); 4132 vm_object_page_clean(vp->v_object, 0, 0, 0); 4133 VM_OBJECT_WUNLOCK(vp->v_object); 4134 } 4135 do { 4136 error = VOP_FSYNC(vp, MNT_WAIT, td); 4137 } while (error == ERELOOKUP); 4138 if (error != 0) { 4139 VOP_UNLOCK(vp); 4140 vdrop(vp); 4141 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4142 return (error); 4143 } 4144 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 4145 VI_LOCK(vp); 4146 4147 if ((vp->v_type == VNON || 4148 (error == 0 && vattr.va_nlink > 0)) && 4149 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 4150 VOP_UNLOCK(vp); 4151 vdropl(vp); 4152 continue; 4153 } 4154 } else 4155 VI_LOCK(vp); 4156 /* 4157 * With v_usecount == 0, all we need to do is clear out the 4158 * vnode data structures and we are done. 4159 * 4160 * If FORCECLOSE is set, forcibly close the vnode. 4161 */ 4162 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 4163 vgonel(vp); 4164 } else { 4165 busy++; 4166 #ifdef DIAGNOSTIC 4167 if (busyprt) 4168 vn_printf(vp, "vflush: busy vnode "); 4169 #endif 4170 } 4171 VOP_UNLOCK(vp); 4172 vdropl(vp); 4173 } 4174 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 4175 /* 4176 * If just the root vnode is busy, and if its refcount 4177 * is equal to `rootrefs', then go ahead and kill it. 4178 */ 4179 VI_LOCK(rootvp); 4180 KASSERT(busy > 0, ("vflush: not busy")); 4181 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4182 ("vflush: usecount %d < rootrefs %d", 4183 rootvp->v_usecount, rootrefs)); 4184 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4185 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4186 vgone(rootvp); 4187 VOP_UNLOCK(rootvp); 4188 busy = 0; 4189 } else 4190 VI_UNLOCK(rootvp); 4191 } 4192 if (busy) { 4193 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4194 busy); 4195 return (EBUSY); 4196 } 4197 for (; rootrefs > 0; rootrefs--) 4198 vrele(rootvp); 4199 return (0); 4200 } 4201 4202 /* 4203 * Recycle an unused vnode to the front of the free list. 4204 */ 4205 int 4206 vrecycle(struct vnode *vp) 4207 { 4208 int recycled; 4209 4210 VI_LOCK(vp); 4211 recycled = vrecyclel(vp); 4212 VI_UNLOCK(vp); 4213 return (recycled); 4214 } 4215 4216 /* 4217 * vrecycle, with the vp interlock held. 4218 */ 4219 int 4220 vrecyclel(struct vnode *vp) 4221 { 4222 int recycled; 4223 4224 ASSERT_VOP_ELOCKED(vp, __func__); 4225 ASSERT_VI_LOCKED(vp, __func__); 4226 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4227 recycled = 0; 4228 if (vp->v_usecount == 0) { 4229 recycled = 1; 4230 vgonel(vp); 4231 } 4232 return (recycled); 4233 } 4234 4235 /* 4236 * Eliminate all activity associated with a vnode 4237 * in preparation for reuse.
4238 */ 4239 void 4240 vgone(struct vnode *vp) 4241 { 4242 VI_LOCK(vp); 4243 vgonel(vp); 4244 VI_UNLOCK(vp); 4245 } 4246 4247 /* 4248 * Notify upper mounts about reclaimed or unlinked vnode. 4249 */ 4250 void 4251 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4252 { 4253 struct mount *mp; 4254 struct mount_upper_node *ump; 4255 4256 mp = atomic_load_ptr(&vp->v_mount); 4257 if (mp == NULL) 4258 return; 4259 if (TAILQ_EMPTY(&mp->mnt_notify)) 4260 return; 4261 4262 MNT_ILOCK(mp); 4263 mp->mnt_upper_pending++; 4264 KASSERT(mp->mnt_upper_pending > 0, 4265 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4266 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4267 MNT_IUNLOCK(mp); 4268 switch (event) { 4269 case VFS_NOTIFY_UPPER_RECLAIM: 4270 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4271 break; 4272 case VFS_NOTIFY_UPPER_UNLINK: 4273 VFS_UNLINK_LOWERVP(ump->mp, vp); 4274 break; 4275 } 4276 MNT_ILOCK(mp); 4277 } 4278 mp->mnt_upper_pending--; 4279 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4280 mp->mnt_upper_pending == 0) { 4281 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4282 wakeup(&mp->mnt_uppers); 4283 } 4284 MNT_IUNLOCK(mp); 4285 } 4286 4287 /* 4288 * vgone, with the vp interlock held. 4289 */ 4290 static void 4291 vgonel(struct vnode *vp) 4292 { 4293 struct thread *td; 4294 struct mount *mp; 4295 vm_object_t object; 4296 bool active, doinginact, oweinact; 4297 4298 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4299 ASSERT_VI_LOCKED(vp, "vgonel"); 4300 VNASSERT(vp->v_holdcnt, vp, 4301 ("vgonel: vp %p has no reference.", vp)); 4302 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4303 td = curthread; 4304 4305 /* 4306 * Don't vgonel if we're already doomed. 4307 */ 4308 if (VN_IS_DOOMED(vp)) { 4309 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4310 vn_get_state(vp) == VSTATE_DEAD, vp); 4311 return; 4312 } 4313 /* 4314 * Paired with freevnode. 4315 */ 4316 vn_seqc_write_begin_locked(vp); 4317 vunlazy_gone(vp); 4318 vn_irflag_set_locked(vp, VIRF_DOOMED); 4319 vn_set_state(vp, VSTATE_DESTROYING); 4320 4321 /* 4322 * Check to see if the vnode is in use. If so, we have to 4323 * call VOP_CLOSE() and VOP_INACTIVE(). 4324 * 4325 * It could be that VOP_INACTIVE() requested reclamation, in 4326 * which case we should avoid recursion, so check 4327 * VI_DOINGINACT. This is not precise but good enough. 4328 */ 4329 active = vp->v_usecount > 0; 4330 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4331 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4332 4333 /* 4334 * If we need to do inactive VI_OWEINACT will be set. 4335 */ 4336 if (vp->v_iflag & VI_DEFINACT) { 4337 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4338 vp->v_iflag &= ~VI_DEFINACT; 4339 vdropl(vp); 4340 } else { 4341 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4342 VI_UNLOCK(vp); 4343 } 4344 cache_purge_vgone(vp); 4345 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4346 4347 /* 4348 * If purging an active vnode, it must be closed and 4349 * deactivated before being reclaimed. 4350 */ 4351 if (active) 4352 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4353 if (!doinginact) { 4354 do { 4355 if (oweinact || active) { 4356 VI_LOCK(vp); 4357 vinactivef(vp); 4358 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4359 VI_UNLOCK(vp); 4360 } 4361 } while (oweinact); 4362 } 4363 if (vp->v_type == VSOCK) 4364 vfs_unp_reclaim(vp); 4365 4366 /* 4367 * Clean out any buffers associated with the vnode. 4368 * If the flush fails, just toss the buffers. 
4369 */ 4370 mp = NULL; 4371 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4372 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4373 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4374 while (vinvalbuf(vp, 0, 0, 0) != 0) 4375 ; 4376 } 4377 4378 BO_LOCK(&vp->v_bufobj); 4379 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4380 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4381 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4382 vp->v_bufobj.bo_clean.bv_cnt == 0, 4383 ("vp %p bufobj not invalidated", vp)); 4384 4385 /* 4386 * For VMIO bufobj, BO_DEAD is set later, or in 4387 * vm_object_terminate() after the object's page queue is 4388 * flushed. 4389 */ 4390 object = vp->v_bufobj.bo_object; 4391 if (object == NULL) 4392 vp->v_bufobj.bo_flag |= BO_DEAD; 4393 BO_UNLOCK(&vp->v_bufobj); 4394 4395 /* 4396 * Handle the VM part. Tmpfs handles v_object on its own (the 4397 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4398 * should not touch the object borrowed from the lower vnode 4399 * (the handle check). 4400 */ 4401 if (object != NULL && object->type == OBJT_VNODE && 4402 object->handle == vp) 4403 vnode_destroy_vobject(vp); 4404 4405 /* 4406 * Reclaim the vnode. 4407 */ 4408 if (VOP_RECLAIM(vp)) 4409 panic("vgone: cannot reclaim"); 4410 if (mp != NULL) 4411 vn_finished_secondary_write(mp); 4412 VNASSERT(vp->v_object == NULL, vp, 4413 ("vop_reclaim left v_object vp=%p", vp)); 4414 /* 4415 * Clear the advisory locks and wake up waiting threads. 4416 */ 4417 if (vp->v_lockf != NULL) { 4418 (void)VOP_ADVLOCKPURGE(vp); 4419 vp->v_lockf = NULL; 4420 } 4421 /* 4422 * Delete from old mount point vnode list. 4423 */ 4424 if (vp->v_mount == NULL) { 4425 VI_LOCK(vp); 4426 } else { 4427 delmntque(vp); 4428 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4429 } 4430 /* 4431 * Done with purge, reset to the standard lock and invalidate 4432 * the vnode. 4433 */ 4434 vp->v_vnlock = &vp->v_lock; 4435 vp->v_op = &dead_vnodeops; 4436 vp->v_type = VBAD; 4437 vn_set_state(vp, VSTATE_DEAD); 4438 } 4439 4440 /* 4441 * Print out a description of a vnode. 4442 */ 4443 static const char *const vtypename[] = { 4444 [VNON] = "VNON", 4445 [VREG] = "VREG", 4446 [VDIR] = "VDIR", 4447 [VBLK] = "VBLK", 4448 [VCHR] = "VCHR", 4449 [VLNK] = "VLNK", 4450 [VSOCK] = "VSOCK", 4451 [VFIFO] = "VFIFO", 4452 [VBAD] = "VBAD", 4453 [VMARKER] = "VMARKER", 4454 }; 4455 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4456 "vnode type name not added to vtypename"); 4457 4458 static const char *const vstatename[] = { 4459 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4460 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4461 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4462 [VSTATE_DEAD] = "VSTATE_DEAD", 4463 }; 4464 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4465 "vnode state name not added to vstatename"); 4466 4467 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4468 "new hold count flag not added to vn_printf"); 4469 4470 void 4471 vn_printf(struct vnode *vp, const char *fmt, ...) 
4472 { 4473 va_list ap; 4474 char buf[256], buf2[16]; 4475 u_long flags; 4476 u_int holdcnt; 4477 short irflag; 4478 4479 va_start(ap, fmt); 4480 vprintf(fmt, ap); 4481 va_end(ap); 4482 printf("%p: ", (void *)vp); 4483 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4484 vstatename[vp->v_state], vp->v_op); 4485 holdcnt = atomic_load_int(&vp->v_holdcnt); 4486 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4487 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4488 vp->v_seqc_users); 4489 switch (vp->v_type) { 4490 case VDIR: 4491 printf(" mountedhere %p\n", vp->v_mountedhere); 4492 break; 4493 case VCHR: 4494 printf(" rdev %p\n", vp->v_rdev); 4495 break; 4496 case VSOCK: 4497 printf(" socket %p\n", vp->v_unpcb); 4498 break; 4499 case VFIFO: 4500 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4501 break; 4502 default: 4503 printf("\n"); 4504 break; 4505 } 4506 buf[0] = '\0'; 4507 buf[1] = '\0'; 4508 if (holdcnt & VHOLD_NO_SMR) 4509 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4510 printf(" hold count flags (%s)\n", buf + 1); 4511 4512 buf[0] = '\0'; 4513 buf[1] = '\0'; 4514 irflag = vn_irflag_read(vp); 4515 if (irflag & VIRF_DOOMED) 4516 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4517 if (irflag & VIRF_PGREAD) 4518 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4519 if (irflag & VIRF_MOUNTPOINT) 4520 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4521 if (irflag & VIRF_TEXT_REF) 4522 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4523 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4524 if (flags != 0) { 4525 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4526 strlcat(buf, buf2, sizeof(buf)); 4527 } 4528 if (vp->v_vflag & VV_ROOT) 4529 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4530 if (vp->v_vflag & VV_ISTTY) 4531 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4532 if (vp->v_vflag & VV_NOSYNC) 4533 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4534 if (vp->v_vflag & VV_ETERNALDEV) 4535 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4536 if (vp->v_vflag & VV_CACHEDLABEL) 4537 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4538 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4539 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4540 if (vp->v_vflag & VV_COPYONWRITE) 4541 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4542 if (vp->v_vflag & VV_SYSTEM) 4543 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4544 if (vp->v_vflag & VV_PROCDEP) 4545 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4546 if (vp->v_vflag & VV_DELETED) 4547 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4548 if (vp->v_vflag & VV_MD) 4549 strlcat(buf, "|VV_MD", sizeof(buf)); 4550 if (vp->v_vflag & VV_FORCEINSMQ) 4551 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4552 if (vp->v_vflag & VV_READLINK) 4553 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4554 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4555 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4556 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4557 if (flags != 0) { 4558 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4559 strlcat(buf, buf2, sizeof(buf)); 4560 } 4561 if (vp->v_iflag & VI_MOUNT) 4562 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4563 if (vp->v_iflag & VI_DOINGINACT) 4564 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4565 if (vp->v_iflag & VI_OWEINACT) 4566 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4567 if (vp->v_iflag & VI_DEFINACT) 4568 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4569 if (vp->v_iflag & VI_FOPENING) 4570 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4571 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4572 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4573 if (flags != 0) { 4574 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4575 strlcat(buf, buf2, sizeof(buf)); 4576 } 4577 if (vp->v_mflag & VMP_LAZYLIST) 4578 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4579 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4580 if (flags != 0) { 4581 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4582 strlcat(buf, buf2, sizeof(buf)); 4583 } 4584 printf(" flags (%s)", buf + 1); 4585 if (mtx_owned(VI_MTX(vp))) 4586 printf(" VI_LOCKed"); 4587 printf("\n"); 4588 if (vp->v_object != NULL) 4589 printf(" v_object %p ref %d pages %d " 4590 "cleanbuf %d dirtybuf %d\n", 4591 vp->v_object, vp->v_object->ref_count, 4592 vp->v_object->resident_page_count, 4593 vp->v_bufobj.bo_clean.bv_cnt, 4594 vp->v_bufobj.bo_dirty.bv_cnt); 4595 printf(" "); 4596 lockmgr_printinfo(vp->v_vnlock); 4597 if (vp->v_data != NULL) 4598 VOP_PRINT(vp); 4599 } 4600 4601 #ifdef DDB 4602 /* 4603 * List all of the locked vnodes in the system. 4604 * Called when debugging the kernel. 4605 */ 4606 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4607 { 4608 struct mount *mp; 4609 struct vnode *vp; 4610 4611 /* 4612 * Note: because this is DDB, we can't obey the locking semantics 4613 * for these structures, which means we could catch an inconsistent 4614 * state and dereference a nasty pointer. Not much to be done 4615 * about that. 4616 */ 4617 db_printf("Locked vnodes\n"); 4618 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4619 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4620 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4621 vn_printf(vp, "vnode "); 4622 } 4623 } 4624 } 4625 4626 /* 4627 * Show details about the given vnode. 4628 */ 4629 DB_SHOW_COMMAND(vnode, db_show_vnode) 4630 { 4631 struct vnode *vp; 4632 4633 if (!have_addr) 4634 return; 4635 vp = (struct vnode *)addr; 4636 vn_printf(vp, "vnode "); 4637 } 4638 4639 /* 4640 * Show details about the given mount point. 4641 */ 4642 DB_SHOW_COMMAND(mount, db_show_mount) 4643 { 4644 struct mount *mp; 4645 struct vfsopt *opt; 4646 struct statfs *sp; 4647 struct vnode *vp; 4648 char buf[512]; 4649 uint64_t mflags; 4650 u_int flags; 4651 4652 if (!have_addr) { 4653 /* No address given, print short info about all mount points. 
*/ 4654 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4655 db_printf("%p %s on %s (%s)\n", mp, 4656 mp->mnt_stat.f_mntfromname, 4657 mp->mnt_stat.f_mntonname, 4658 mp->mnt_stat.f_fstypename); 4659 if (db_pager_quit) 4660 break; 4661 } 4662 db_printf("\nMore info: show mount <addr>\n"); 4663 return; 4664 } 4665 4666 mp = (struct mount *)addr; 4667 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4668 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4669 4670 buf[0] = '\0'; 4671 mflags = mp->mnt_flag; 4672 #define MNT_FLAG(flag) do { \ 4673 if (mflags & (flag)) { \ 4674 if (buf[0] != '\0') \ 4675 strlcat(buf, ", ", sizeof(buf)); \ 4676 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4677 mflags &= ~(flag); \ 4678 } \ 4679 } while (0) 4680 MNT_FLAG(MNT_RDONLY); 4681 MNT_FLAG(MNT_SYNCHRONOUS); 4682 MNT_FLAG(MNT_NOEXEC); 4683 MNT_FLAG(MNT_NOSUID); 4684 MNT_FLAG(MNT_NFS4ACLS); 4685 MNT_FLAG(MNT_UNION); 4686 MNT_FLAG(MNT_ASYNC); 4687 MNT_FLAG(MNT_SUIDDIR); 4688 MNT_FLAG(MNT_SOFTDEP); 4689 MNT_FLAG(MNT_NOSYMFOLLOW); 4690 MNT_FLAG(MNT_GJOURNAL); 4691 MNT_FLAG(MNT_MULTILABEL); 4692 MNT_FLAG(MNT_ACLS); 4693 MNT_FLAG(MNT_NOATIME); 4694 MNT_FLAG(MNT_NOCLUSTERR); 4695 MNT_FLAG(MNT_NOCLUSTERW); 4696 MNT_FLAG(MNT_SUJ); 4697 MNT_FLAG(MNT_EXRDONLY); 4698 MNT_FLAG(MNT_EXPORTED); 4699 MNT_FLAG(MNT_DEFEXPORTED); 4700 MNT_FLAG(MNT_EXPORTANON); 4701 MNT_FLAG(MNT_EXKERB); 4702 MNT_FLAG(MNT_EXPUBLIC); 4703 MNT_FLAG(MNT_LOCAL); 4704 MNT_FLAG(MNT_QUOTA); 4705 MNT_FLAG(MNT_ROOTFS); 4706 MNT_FLAG(MNT_USER); 4707 MNT_FLAG(MNT_IGNORE); 4708 MNT_FLAG(MNT_UPDATE); 4709 MNT_FLAG(MNT_DELEXPORT); 4710 MNT_FLAG(MNT_RELOAD); 4711 MNT_FLAG(MNT_FORCE); 4712 MNT_FLAG(MNT_SNAPSHOT); 4713 MNT_FLAG(MNT_BYFSID); 4714 #undef MNT_FLAG 4715 if (mflags != 0) { 4716 if (buf[0] != '\0') 4717 strlcat(buf, ", ", sizeof(buf)); 4718 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4719 "0x%016jx", mflags); 4720 } 4721 db_printf(" mnt_flag = %s\n", buf); 4722 4723 buf[0] = '\0'; 4724 flags = mp->mnt_kern_flag; 4725 #define MNT_KERN_FLAG(flag) do { \ 4726 if (flags & (flag)) { \ 4727 if (buf[0] != '\0') \ 4728 strlcat(buf, ", ", sizeof(buf)); \ 4729 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4730 flags &= ~(flag); \ 4731 } \ 4732 } while (0) 4733 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4734 MNT_KERN_FLAG(MNTK_ASYNC); 4735 MNT_KERN_FLAG(MNTK_SOFTDEP); 4736 MNT_KERN_FLAG(MNTK_NOMSYNC); 4737 MNT_KERN_FLAG(MNTK_DRAINING); 4738 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4739 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4740 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4741 MNT_KERN_FLAG(MNTK_NO_IOPF); 4742 MNT_KERN_FLAG(MNTK_RECURSE); 4743 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4744 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4745 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4746 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4747 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4748 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4749 MNT_KERN_FLAG(MNTK_NOASYNC); 4750 MNT_KERN_FLAG(MNTK_UNMOUNT); 4751 MNT_KERN_FLAG(MNTK_MWAIT); 4752 MNT_KERN_FLAG(MNTK_SUSPEND); 4753 MNT_KERN_FLAG(MNTK_SUSPEND2); 4754 MNT_KERN_FLAG(MNTK_SUSPENDED); 4755 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4756 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4757 #undef MNT_KERN_FLAG 4758 if (flags != 0) { 4759 if (buf[0] != '\0') 4760 strlcat(buf, ", ", sizeof(buf)); 4761 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4762 "0x%08x", flags); 4763 } 4764 db_printf(" mnt_kern_flag = %s\n", buf); 4765 4766 db_printf(" mnt_opt = "); 4767 opt = TAILQ_FIRST(mp->mnt_opt); 4768 if (opt != NULL) { 4769 db_printf("%s", opt->name); 4770 opt = TAILQ_NEXT(opt, link); 4771 while (opt != 
NULL) { 4772 db_printf(", %s", opt->name); 4773 opt = TAILQ_NEXT(opt, link); 4774 } 4775 } 4776 db_printf("\n"); 4777 4778 sp = &mp->mnt_stat; 4779 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4780 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4781 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4782 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4783 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4784 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4785 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4786 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4787 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4788 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4789 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4790 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4791 4792 db_printf(" mnt_cred = { uid=%u ruid=%u", 4793 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4794 if (jailed(mp->mnt_cred)) 4795 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4796 db_printf(" }\n"); 4797 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4798 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4799 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4800 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4801 db_printf(" mnt_lazyvnodelistsize = %d\n", 4802 mp->mnt_lazyvnodelistsize); 4803 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4804 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4805 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4806 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4807 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4808 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4809 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4810 db_printf(" mnt_secondary_accwrites = %d\n", 4811 mp->mnt_secondary_accwrites); 4812 db_printf(" mnt_gjprovider = %s\n", 4813 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4814 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4815 4816 db_printf("\n\nList of active vnodes\n"); 4817 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4818 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4819 vn_printf(vp, "vnode "); 4820 if (db_pager_quit) 4821 break; 4822 } 4823 } 4824 db_printf("\n\nList of inactive vnodes\n"); 4825 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4826 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4827 vn_printf(vp, "vnode "); 4828 if (db_pager_quit) 4829 break; 4830 } 4831 } 4832 } 4833 #endif /* DDB */ 4834 4835 /* 4836 * Fill in a struct xvfsconf based on a struct vfsconf. 4837 */ 4838 static int 4839 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4840 { 4841 struct xvfsconf xvfsp; 4842 4843 bzero(&xvfsp, sizeof(xvfsp)); 4844 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4845 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4846 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4847 xvfsp.vfc_flags = vfsp->vfc_flags; 4848 /* 4849 * These are unused in userland, we keep them 4850 * to not break binary compatibility. 
4851 */ 4852 xvfsp.vfc_vfsops = NULL; 4853 xvfsp.vfc_next = NULL; 4854 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4855 } 4856 4857 #ifdef COMPAT_FREEBSD32 4858 struct xvfsconf32 { 4859 uint32_t vfc_vfsops; 4860 char vfc_name[MFSNAMELEN]; 4861 int32_t vfc_typenum; 4862 int32_t vfc_refcount; 4863 int32_t vfc_flags; 4864 uint32_t vfc_next; 4865 }; 4866 4867 static int 4868 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4869 { 4870 struct xvfsconf32 xvfsp; 4871 4872 bzero(&xvfsp, sizeof(xvfsp)); 4873 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4874 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4875 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4876 xvfsp.vfc_flags = vfsp->vfc_flags; 4877 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4878 } 4879 #endif 4880 4881 /* 4882 * Top level filesystem related information gathering. 4883 */ 4884 static int 4885 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4886 { 4887 struct vfsconf *vfsp; 4888 int error; 4889 4890 error = 0; 4891 vfsconf_slock(); 4892 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4893 #ifdef COMPAT_FREEBSD32 4894 if (req->flags & SCTL_MASK32) 4895 error = vfsconf2x32(req, vfsp); 4896 else 4897 #endif 4898 error = vfsconf2x(req, vfsp); 4899 if (error) 4900 break; 4901 } 4902 vfsconf_sunlock(); 4903 return (error); 4904 } 4905 4906 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4907 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4908 "S,xvfsconf", "List of all configured filesystems"); 4909 4910 #ifndef BURN_BRIDGES 4911 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4912 4913 static int 4914 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4915 { 4916 int *name = (int *)arg1 - 1; /* XXX */ 4917 u_int namelen = arg2 + 1; /* XXX */ 4918 struct vfsconf *vfsp; 4919 4920 log(LOG_WARNING, "userland calling deprecated sysctl, " 4921 "please rebuild world\n"); 4922 4923 #if 1 || defined(COMPAT_PRELITE2) 4924 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4925 if (namelen == 1) 4926 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4927 #endif 4928 4929 switch (name[1]) { 4930 case VFS_MAXTYPENUM: 4931 if (namelen != 2) 4932 return (ENOTDIR); 4933 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4934 case VFS_CONF: 4935 if (namelen != 3) 4936 return (ENOTDIR); /* overloaded */ 4937 vfsconf_slock(); 4938 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4939 if (vfsp->vfc_typenum == name[2]) 4940 break; 4941 } 4942 vfsconf_sunlock(); 4943 if (vfsp == NULL) 4944 return (EOPNOTSUPP); 4945 #ifdef COMPAT_FREEBSD32 4946 if (req->flags & SCTL_MASK32) 4947 return (vfsconf2x32(req, vfsp)); 4948 else 4949 #endif 4950 return (vfsconf2x(req, vfsp)); 4951 } 4952 return (EOPNOTSUPP); 4953 } 4954 4955 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4956 CTLFLAG_MPSAFE, vfs_sysctl, 4957 "Generic filesystem"); 4958 4959 #if 1 || defined(COMPAT_PRELITE2) 4960 4961 static int 4962 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4963 { 4964 int error; 4965 struct vfsconf *vfsp; 4966 struct ovfsconf ovfs; 4967 4968 vfsconf_slock(); 4969 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4970 bzero(&ovfs, sizeof(ovfs)); 4971 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4972 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4973 ovfs.vfc_index = vfsp->vfc_typenum; 4974 ovfs.vfc_refcount = vfsp->vfc_refcount; 4975 ovfs.vfc_flags = vfsp->vfc_flags; 4976 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4977 if (error != 0) { 4978 vfsconf_sunlock(); 4979 return (error); 4980 } 4981 } 4982 vfsconf_sunlock(); 4983 return (0); 4984 } 4985 4986 #endif /* 1 || COMPAT_PRELITE2 */ 4987 #endif /* !BURN_BRIDGES */ 4988 4989 static void 4990 unmount_or_warn(struct mount *mp) 4991 { 4992 int error; 4993 4994 error = dounmount(mp, MNT_FORCE, curthread); 4995 if (error != 0) { 4996 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4997 if (error == EBUSY) 4998 printf("BUSY)\n"); 4999 else 5000 printf("%d)\n", error); 5001 } 5002 } 5003 5004 /* 5005 * Unmount all filesystems. The list is traversed in reverse order 5006 * of mounting to avoid dependencies. 5007 */ 5008 void 5009 vfs_unmountall(void) 5010 { 5011 struct mount *mp, *tmp; 5012 5013 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 5014 5015 /* 5016 * Since this only runs when rebooting, it is not interlocked. 5017 */ 5018 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 5019 vfs_ref(mp); 5020 5021 /* 5022 * Forcibly unmounting "/dev" before "/" would prevent clean 5023 * unmount of the latter. 
5024 */ 5025 if (mp == rootdevmp) 5026 continue; 5027 5028 unmount_or_warn(mp); 5029 } 5030 5031 if (rootdevmp != NULL) 5032 unmount_or_warn(rootdevmp); 5033 } 5034 5035 static void 5036 vfs_deferred_inactive(struct vnode *vp, int lkflags) 5037 { 5038 5039 ASSERT_VI_LOCKED(vp, __func__); 5040 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 5041 if ((vp->v_iflag & VI_OWEINACT) == 0) { 5042 vdropl(vp); 5043 return; 5044 } 5045 if (vn_lock(vp, lkflags) == 0) { 5046 VI_LOCK(vp); 5047 vinactive(vp); 5048 VOP_UNLOCK(vp); 5049 vdropl(vp); 5050 return; 5051 } 5052 vdefer_inactive_unlocked(vp); 5053 } 5054 5055 static int 5056 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5057 { 5058 5059 return (vp->v_iflag & VI_DEFINACT); 5060 } 5061 5062 static void __noinline 5063 vfs_periodic_inactive(struct mount *mp, int flags) 5064 { 5065 struct vnode *vp, *mvp; 5066 int lkflags; 5067 5068 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5069 if (flags != MNT_WAIT) 5070 lkflags |= LK_NOWAIT; 5071 5072 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5073 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5074 VI_UNLOCK(vp); 5075 continue; 5076 } 5077 vp->v_iflag &= ~VI_DEFINACT; 5078 vfs_deferred_inactive(vp, lkflags); 5079 } 5080 } 5081 5082 static inline bool 5083 vfs_want_msync(struct vnode *vp) 5084 { 5085 struct vm_object *obj; 5086 5087 /* 5088 * This test may be performed without any locks held. 5089 * We rely on vm_object's type stability. 5090 */ 5091 if (vp->v_vflag & VV_NOSYNC) 5092 return (false); 5093 obj = vp->v_object; 5094 return (obj != NULL && vm_object_mightbedirty(obj)); 5095 } 5096 5097 static int 5098 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5099 { 5100 5101 if (vp->v_vflag & VV_NOSYNC) 5102 return (false); 5103 if (vp->v_iflag & VI_DEFINACT) 5104 return (true); 5105 return (vfs_want_msync(vp)); 5106 } 5107 5108 static void __noinline 5109 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5110 { 5111 struct vnode *vp, *mvp; 5112 struct vm_object *obj; 5113 int lkflags, objflags; 5114 bool seen_defer; 5115 5116 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5117 if (flags != MNT_WAIT) { 5118 lkflags |= LK_NOWAIT; 5119 objflags = OBJPC_NOSYNC; 5120 } else { 5121 objflags = OBJPC_SYNC; 5122 } 5123 5124 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5125 seen_defer = false; 5126 if (vp->v_iflag & VI_DEFINACT) { 5127 vp->v_iflag &= ~VI_DEFINACT; 5128 seen_defer = true; 5129 } 5130 if (!vfs_want_msync(vp)) { 5131 if (seen_defer) 5132 vfs_deferred_inactive(vp, lkflags); 5133 else 5134 VI_UNLOCK(vp); 5135 continue; 5136 } 5137 if (vget(vp, lkflags) == 0) { 5138 obj = vp->v_object; 5139 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 5140 VM_OBJECT_WLOCK(obj); 5141 vm_object_page_clean(obj, 0, 0, objflags); 5142 VM_OBJECT_WUNLOCK(obj); 5143 } 5144 vput(vp); 5145 if (seen_defer) 5146 vdrop(vp); 5147 } else { 5148 if (seen_defer) 5149 vdefer_inactive_unlocked(vp); 5150 } 5151 } 5152 } 5153 5154 void 5155 vfs_periodic(struct mount *mp, int flags) 5156 { 5157 5158 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5159 5160 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5161 vfs_periodic_inactive(mp, flags); 5162 else 5163 vfs_periodic_msync_inactive(mp, flags); 5164 } 5165 5166 static void 5167 destroy_vpollinfo_free(struct vpollinfo *vi) 5168 { 5169 5170 knlist_destroy(&vi->vpi_selinfo.si_note); 5171 mtx_destroy(&vi->vpi_lock); 5172 free(vi, M_VNODEPOLL); 5173 } 5174 5175 static void 5176 destroy_vpollinfo(struct vpollinfo 
*vi) 5177 { 5178 5179 knlist_clear(&vi->vpi_selinfo.si_note, 1); 5180 seldrain(&vi->vpi_selinfo); 5181 destroy_vpollinfo_free(vi); 5182 } 5183 5184 /* 5185 * Initialize per-vnode helper structure to hold poll-related state. 5186 */ 5187 void 5188 v_addpollinfo(struct vnode *vp) 5189 { 5190 struct vpollinfo *vi; 5191 5192 if (vp->v_pollinfo != NULL) 5193 return; 5194 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 5195 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 5196 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 5197 vfs_knlunlock, vfs_knl_assert_lock); 5198 VI_LOCK(vp); 5199 if (vp->v_pollinfo != NULL) { 5200 VI_UNLOCK(vp); 5201 destroy_vpollinfo_free(vi); 5202 return; 5203 } 5204 vp->v_pollinfo = vi; 5205 VI_UNLOCK(vp); 5206 } 5207 5208 /* 5209 * Record a process's interest in events which might happen to 5210 * a vnode. Because poll uses the historic select-style interface 5211 * internally, this routine serves as both the ``check for any 5212 * pending events'' and the ``record my interest in future events'' 5213 * functions. (These are done together, while the lock is held, 5214 * to avoid race conditions.) 5215 */ 5216 int 5217 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 5218 { 5219 5220 v_addpollinfo(vp); 5221 mtx_lock(&vp->v_pollinfo->vpi_lock); 5222 if (vp->v_pollinfo->vpi_revents & events) { 5223 /* 5224 * This leaves events we are not interested 5225 * in available for the other process which 5226 * presumably had requested them 5227 * (otherwise they would never have been 5228 * recorded). 5229 */ 5230 events &= vp->v_pollinfo->vpi_revents; 5231 vp->v_pollinfo->vpi_revents &= ~events; 5232 5233 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5234 return (events); 5235 } 5236 vp->v_pollinfo->vpi_events |= events; 5237 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5238 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5239 return (0); 5240 } 5241 5242 /* 5243 * Routine to create and manage a filesystem syncer vnode. 5244 */ 5245 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5246 static int sync_fsync(struct vop_fsync_args *); 5247 static int sync_inactive(struct vop_inactive_args *); 5248 static int sync_reclaim(struct vop_reclaim_args *); 5249 5250 static struct vop_vector sync_vnodeops = { 5251 .vop_bypass = VOP_EOPNOTSUPP, 5252 .vop_close = sync_close, 5253 .vop_fsync = sync_fsync, 5254 .vop_getwritemount = vop_stdgetwritemount, 5255 .vop_inactive = sync_inactive, 5256 .vop_need_inactive = vop_stdneed_inactive, 5257 .vop_reclaim = sync_reclaim, 5258 .vop_lock1 = vop_stdlock, 5259 .vop_unlock = vop_stdunlock, 5260 .vop_islocked = vop_stdislocked, 5261 .vop_fplookup_vexec = VOP_EAGAIN, 5262 .vop_fplookup_symlink = VOP_EAGAIN, 5263 }; 5264 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5265 5266 /* 5267 * Create a new filesystem syncer vnode for the specified mount point.
5268 */ 5269 void 5270 vfs_allocate_syncvnode(struct mount *mp) 5271 { 5272 struct vnode *vp; 5273 struct bufobj *bo; 5274 static long start, incr, next; 5275 int error; 5276 5277 /* Allocate a new vnode */ 5278 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5279 if (error != 0) 5280 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5281 vp->v_type = VNON; 5282 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5283 vp->v_vflag |= VV_FORCEINSMQ; 5284 error = insmntque1(vp, mp); 5285 if (error != 0) 5286 panic("vfs_allocate_syncvnode: insmntque() failed"); 5287 vp->v_vflag &= ~VV_FORCEINSMQ; 5288 vn_set_state(vp, VSTATE_CONSTRUCTED); 5289 VOP_UNLOCK(vp); 5290 /* 5291 * Place the vnode onto the syncer worklist. We attempt to 5292 * scatter them about on the list so that they will go off 5293 * at evenly distributed times even if all the filesystems 5294 * are mounted at once. 5295 */ 5296 next += incr; 5297 if (next == 0 || next > syncer_maxdelay) { 5298 start /= 2; 5299 incr /= 2; 5300 if (start == 0) { 5301 start = syncer_maxdelay / 2; 5302 incr = syncer_maxdelay; 5303 } 5304 next = start; 5305 } 5306 bo = &vp->v_bufobj; 5307 BO_LOCK(bo); 5308 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5309 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5310 mtx_lock(&sync_mtx); 5311 sync_vnode_count++; 5312 if (mp->mnt_syncer == NULL) { 5313 mp->mnt_syncer = vp; 5314 vp = NULL; 5315 } 5316 mtx_unlock(&sync_mtx); 5317 BO_UNLOCK(bo); 5318 if (vp != NULL) { 5319 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5320 vgone(vp); 5321 vput(vp); 5322 } 5323 } 5324 5325 void 5326 vfs_deallocate_syncvnode(struct mount *mp) 5327 { 5328 struct vnode *vp; 5329 5330 mtx_lock(&sync_mtx); 5331 vp = mp->mnt_syncer; 5332 if (vp != NULL) 5333 mp->mnt_syncer = NULL; 5334 mtx_unlock(&sync_mtx); 5335 if (vp != NULL) 5336 vrele(vp); 5337 } 5338 5339 /* 5340 * Do a lazy sync of the filesystem. 5341 */ 5342 static int 5343 sync_fsync(struct vop_fsync_args *ap) 5344 { 5345 struct vnode *syncvp = ap->a_vp; 5346 struct mount *mp = syncvp->v_mount; 5347 int error, save; 5348 struct bufobj *bo; 5349 5350 /* 5351 * We only need to do something if this is a lazy evaluation. 5352 */ 5353 if (ap->a_waitfor != MNT_LAZY) 5354 return (0); 5355 5356 /* 5357 * Move ourselves to the back of the sync list. 5358 */ 5359 bo = &syncvp->v_bufobj; 5360 BO_LOCK(bo); 5361 vn_syncer_add_to_worklist(bo, syncdelay); 5362 BO_UNLOCK(bo); 5363 5364 /* 5365 * Walk the list of vnodes pushing all that are dirty and 5366 * not already on the sync list. 5367 */ 5368 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5369 return (0); 5370 VOP_UNLOCK(syncvp); 5371 save = curthread_pflags_set(TDP_SYNCIO); 5372 /* 5373 * The filesystem at hand may be idle with free vnodes stored in the 5374 * batch. Return them instead of letting them stay there indefinitely. 5375 */ 5376 vfs_periodic(mp, MNT_NOWAIT); 5377 error = VFS_SYNC(mp, MNT_LAZY); 5378 curthread_pflags_restore(save); 5379 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5380 vfs_unbusy(mp); 5381 return (error); 5382 } 5383 5384 /* 5385 * The syncer vnode is no longer referenced. 5386 */ 5387 static int 5388 sync_inactive(struct vop_inactive_args *ap) 5389 { 5390 5391 vgone(ap->a_vp); 5392 return (0); 5393 } 5394 5395 /* 5396 * The syncer vnode is no longer needed and is being decommissioned. 5397 * 5398 * Modifications to the worklist must be protected by sync_mtx.
5399 */ 5400 static int 5401 sync_reclaim(struct vop_reclaim_args *ap) 5402 { 5403 struct vnode *vp = ap->a_vp; 5404 struct bufobj *bo; 5405 5406 bo = &vp->v_bufobj; 5407 BO_LOCK(bo); 5408 mtx_lock(&sync_mtx); 5409 if (vp->v_mount->mnt_syncer == vp) 5410 vp->v_mount->mnt_syncer = NULL; 5411 if (bo->bo_flag & BO_ONWORKLST) { 5412 LIST_REMOVE(bo, bo_synclist); 5413 syncer_worklist_len--; 5414 sync_vnode_count--; 5415 bo->bo_flag &= ~BO_ONWORKLST; 5416 } 5417 mtx_unlock(&sync_mtx); 5418 BO_UNLOCK(bo); 5419 5420 return (0); 5421 } 5422 5423 int 5424 vn_need_pageq_flush(struct vnode *vp) 5425 { 5426 struct vm_object *obj; 5427 5428 obj = vp->v_object; 5429 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5430 vm_object_mightbedirty(obj)); 5431 } 5432 5433 /* 5434 * Check if vnode represents a disk device 5435 */ 5436 bool 5437 vn_isdisk_error(struct vnode *vp, int *errp) 5438 { 5439 int error; 5440 5441 if (vp->v_type != VCHR) { 5442 error = ENOTBLK; 5443 goto out; 5444 } 5445 error = 0; 5446 dev_lock(); 5447 if (vp->v_rdev == NULL) 5448 error = ENXIO; 5449 else if (vp->v_rdev->si_devsw == NULL) 5450 error = ENXIO; 5451 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5452 error = ENOTBLK; 5453 dev_unlock(); 5454 out: 5455 *errp = error; 5456 return (error == 0); 5457 } 5458 5459 bool 5460 vn_isdisk(struct vnode *vp) 5461 { 5462 int error; 5463 5464 return (vn_isdisk_error(vp, &error)); 5465 } 5466 5467 /* 5468 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5469 * the comment above cache_fplookup for details. 5470 */ 5471 int 5472 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5473 { 5474 int error; 5475 5476 VFS_SMR_ASSERT_ENTERED(); 5477 5478 /* Check the owner. */ 5479 if (cred->cr_uid == file_uid) { 5480 if (file_mode & S_IXUSR) 5481 return (0); 5482 goto out_error; 5483 } 5484 5485 /* Otherwise, check the groups (first match) */ 5486 if (groupmember(file_gid, cred)) { 5487 if (file_mode & S_IXGRP) 5488 return (0); 5489 goto out_error; 5490 } 5491 5492 /* Otherwise, check everyone else. */ 5493 if (file_mode & S_IXOTH) 5494 return (0); 5495 out_error: 5496 /* 5497 * Permission check failed, but it is possible denial will get overwritten 5498 * (e.g., when root is traversing through a 700 directory owned by someone 5499 * else). 5500 * 5501 * vaccess() calls priv_check_cred which in turn can descend into MAC 5502 * modules overriding this result. It's quite unclear what semantics 5503 * are allowed for them to operate, thus for safety we don't call them 5504 * from within the SMR section. This also means if any such modules 5505 * are present, we have to let the regular lookup decide. 5506 */ 5507 error = priv_check_cred_vfs_lookup_nomac(cred); 5508 switch (error) { 5509 case 0: 5510 return (0); 5511 case EAGAIN: 5512 /* 5513 * MAC modules present. 5514 */ 5515 return (EAGAIN); 5516 case EPERM: 5517 return (EACCES); 5518 default: 5519 return (error); 5520 } 5521 } 5522 5523 /* 5524 * Common filesystem object access control check routine. Accepts a 5525 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5526 * Returns 0 on success, or an errno on failure.
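 *
 * A hedged illustration (not taken from any particular filesystem) of how a
 * VOP_ACCESS implementation typically ends up calling this routine, where
 * "node" stands for the filesystem's in-memory inode (hypothetical name):
 *
 *	return (vaccess(vp->v_type, node->mode, node->uid, node->gid,
 *	    ap->a_accmode, ap->a_cred));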
5527 */ 5528 int 5529 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5530 accmode_t accmode, struct ucred *cred) 5531 { 5532 accmode_t dac_granted; 5533 accmode_t priv_granted; 5534 5535 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5536 ("invalid bit in accmode")); 5537 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5538 ("VAPPEND without VWRITE")); 5539 5540 /* 5541 * Look for a normal, non-privileged way to access the file/directory 5542 * as requested. If it exists, go with that. 5543 */ 5544 5545 dac_granted = 0; 5546 5547 /* Check the owner. */ 5548 if (cred->cr_uid == file_uid) { 5549 dac_granted |= VADMIN; 5550 if (file_mode & S_IXUSR) 5551 dac_granted |= VEXEC; 5552 if (file_mode & S_IRUSR) 5553 dac_granted |= VREAD; 5554 if (file_mode & S_IWUSR) 5555 dac_granted |= (VWRITE | VAPPEND); 5556 5557 if ((accmode & dac_granted) == accmode) 5558 return (0); 5559 5560 goto privcheck; 5561 } 5562 5563 /* Otherwise, check the groups (first match) */ 5564 if (groupmember(file_gid, cred)) { 5565 if (file_mode & S_IXGRP) 5566 dac_granted |= VEXEC; 5567 if (file_mode & S_IRGRP) 5568 dac_granted |= VREAD; 5569 if (file_mode & S_IWGRP) 5570 dac_granted |= (VWRITE | VAPPEND); 5571 5572 if ((accmode & dac_granted) == accmode) 5573 return (0); 5574 5575 goto privcheck; 5576 } 5577 5578 /* Otherwise, check everyone else. */ 5579 if (file_mode & S_IXOTH) 5580 dac_granted |= VEXEC; 5581 if (file_mode & S_IROTH) 5582 dac_granted |= VREAD; 5583 if (file_mode & S_IWOTH) 5584 dac_granted |= (VWRITE | VAPPEND); 5585 if ((accmode & dac_granted) == accmode) 5586 return (0); 5587 5588 privcheck: 5589 /* 5590 * Build a privilege mask to determine if the set of privileges 5591 * satisfies the requirements when combined with the granted mask 5592 * from above. For each privilege, if the privilege is required, 5593 * bitwise or the request type onto the priv_granted mask. 5594 */ 5595 priv_granted = 0; 5596 5597 if (type == VDIR) { 5598 /* 5599 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5600 * requests, instead of PRIV_VFS_EXEC. 5601 */ 5602 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5603 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5604 priv_granted |= VEXEC; 5605 } else { 5606 /* 5607 * Ensure that at least one execute bit is on. Otherwise, 5608 * a privileged user will always succeed, and we don't want 5609 * this to happen unless the file really is executable. 5610 */ 5611 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5612 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5613 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5614 priv_granted |= VEXEC; 5615 } 5616 5617 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5618 !priv_check_cred(cred, PRIV_VFS_READ)) 5619 priv_granted |= VREAD; 5620 5621 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5622 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5623 priv_granted |= (VWRITE | VAPPEND); 5624 5625 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5626 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5627 priv_granted |= VADMIN; 5628 5629 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5630 return (0); 5631 } 5632 5633 return ((accmode & VADMIN) ? EPERM : EACCES); 5634 } 5635 5636 /* 5637 * Credential check based on process requesting service, and per-attribute 5638 * permissions. 
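 *
 * Illustrative only: a filesystem's VOP_SETEXTATTR handler would typically
 * gate the operation along these lines (argument names follow the standard
 * vop_setextattr_args layout):
 *
 *	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, ap->a_cred,
 *	    ap->a_td, VWRITE);
 *	if (error != 0)
 *		return (error);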
5639 */ 5640 int 5641 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5642 struct thread *td, accmode_t accmode) 5643 { 5644 5645 /* 5646 * Kernel-invoked always succeeds. 5647 */ 5648 if (cred == NOCRED) 5649 return (0); 5650 5651 /* 5652 * Do not allow privileged processes in jail to directly manipulate 5653 * system attributes. 5654 */ 5655 switch (attrnamespace) { 5656 case EXTATTR_NAMESPACE_SYSTEM: 5657 /* Potentially should be: return (EPERM); */ 5658 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5659 case EXTATTR_NAMESPACE_USER: 5660 return (VOP_ACCESS(vp, accmode, cred, td)); 5661 default: 5662 return (EPERM); 5663 } 5664 } 5665 5666 #ifdef DEBUG_VFS_LOCKS 5667 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5668 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5669 "Drop into debugger on lock violation"); 5670 5671 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5672 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5673 0, "Check for interlock across VOPs"); 5674 5675 int vfs_badlock_print = 1; /* Print lock violations. */ 5676 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5677 0, "Print lock violations"); 5678 5679 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5680 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5681 0, "Print vnode details on lock violations"); 5682 5683 #ifdef KDB 5684 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5685 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5686 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5687 #endif 5688 5689 static void 5690 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5691 { 5692 5693 #ifdef KDB 5694 if (vfs_badlock_backtrace) 5695 kdb_backtrace(); 5696 #endif 5697 if (vfs_badlock_vnode) 5698 vn_printf(vp, "vnode "); 5699 if (vfs_badlock_print) 5700 printf("%s: %p %s\n", str, (void *)vp, msg); 5701 if (vfs_badlock_ddb) 5702 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5703 } 5704 5705 void 5706 assert_vi_locked(struct vnode *vp, const char *str) 5707 { 5708 5709 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5710 vfs_badlock("interlock is not locked but should be", str, vp); 5711 } 5712 5713 void 5714 assert_vi_unlocked(struct vnode *vp, const char *str) 5715 { 5716 5717 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5718 vfs_badlock("interlock is locked but should not be", str, vp); 5719 } 5720 5721 void 5722 assert_vop_locked(struct vnode *vp, const char *str) 5723 { 5724 if (KERNEL_PANICKED() || vp == NULL) 5725 return; 5726 5727 #ifdef WITNESS 5728 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5729 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5730 #else 5731 int locked = VOP_ISLOCKED(vp); 5732 if (locked == 0 || locked == LK_EXCLOTHER) 5733 #endif 5734 vfs_badlock("is not locked but should be", str, vp); 5735 } 5736 5737 void 5738 assert_vop_unlocked(struct vnode *vp, const char *str) 5739 { 5740 if (KERNEL_PANICKED() || vp == NULL) 5741 return; 5742 5743 #ifdef WITNESS 5744 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5745 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5746 #else 5747 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5748 #endif 5749 vfs_badlock("is locked but should not be", str, vp); 5750 } 5751 5752 void 5753 assert_vop_elocked(struct vnode *vp, const char *str) 5754 { 5755 if (KERNEL_PANICKED() || vp == 
NULL) 5756 return; 5757 5758 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5759 vfs_badlock("is not exclusive locked but should be", str, vp); 5760 } 5761 #endif /* DEBUG_VFS_LOCKS */ 5762 5763 void 5764 vop_rename_fail(struct vop_rename_args *ap) 5765 { 5766 5767 if (ap->a_tvp != NULL) 5768 vput(ap->a_tvp); 5769 if (ap->a_tdvp == ap->a_tvp) 5770 vrele(ap->a_tdvp); 5771 else 5772 vput(ap->a_tdvp); 5773 vrele(ap->a_fdvp); 5774 vrele(ap->a_fvp); 5775 } 5776 5777 void 5778 vop_rename_pre(void *ap) 5779 { 5780 struct vop_rename_args *a = ap; 5781 5782 #ifdef DEBUG_VFS_LOCKS 5783 if (a->a_tvp) 5784 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5785 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5786 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5787 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5788 5789 /* Check the source (from). */ 5790 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5791 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5792 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5793 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5794 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5795 5796 /* Check the target. */ 5797 if (a->a_tvp) 5798 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5799 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5800 #endif 5801 /* 5802 * It may be tempting to add vn_seqc_write_begin/end calls here and 5803 * in vop_rename_post but that's not going to work out since some 5804 * filesystems relookup vnodes mid-rename. This is probably a bug. 5805 * 5806 * For now filesystems are expected to do the relevant calls after they 5807 * decide what vnodes to operate on. 5808 */ 5809 if (a->a_tdvp != a->a_fdvp) 5810 vhold(a->a_fdvp); 5811 if (a->a_tvp != a->a_fvp) 5812 vhold(a->a_fvp); 5813 vhold(a->a_tdvp); 5814 if (a->a_tvp) 5815 vhold(a->a_tvp); 5816 } 5817 5818 #ifdef DEBUG_VFS_LOCKS 5819 void 5820 vop_fplookup_vexec_debugpre(void *ap __unused) 5821 { 5822 5823 VFS_SMR_ASSERT_ENTERED(); 5824 } 5825 5826 void 5827 vop_fplookup_vexec_debugpost(void *ap, int rc) 5828 { 5829 struct vop_fplookup_vexec_args *a; 5830 struct vnode *vp; 5831 5832 a = ap; 5833 vp = a->a_vp; 5834 5835 VFS_SMR_ASSERT_ENTERED(); 5836 if (rc == EOPNOTSUPP) 5837 VNPASS(VN_IS_DOOMED(vp), vp); 5838 } 5839 5840 void 5841 vop_fplookup_symlink_debugpre(void *ap __unused) 5842 { 5843 5844 VFS_SMR_ASSERT_ENTERED(); 5845 } 5846 5847 void 5848 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5849 { 5850 5851 VFS_SMR_ASSERT_ENTERED(); 5852 } 5853 5854 static void 5855 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5856 { 5857 if (vp->v_type == VCHR) 5858 ; 5859 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5860 ASSERT_VOP_LOCKED(vp, name); 5861 else 5862 ASSERT_VOP_ELOCKED(vp, name); 5863 } 5864 5865 void 5866 vop_fsync_debugpre(void *a) 5867 { 5868 struct vop_fsync_args *ap; 5869 5870 ap = a; 5871 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5872 } 5873 5874 void 5875 vop_fsync_debugpost(void *a, int rc __unused) 5876 { 5877 struct vop_fsync_args *ap; 5878 5879 ap = a; 5880 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5881 } 5882 5883 void 5884 vop_fdatasync_debugpre(void *a) 5885 { 5886 struct vop_fdatasync_args *ap; 5887 5888 ap = a; 5889 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5890 } 5891 5892 void 5893 vop_fdatasync_debugpost(void *a, int rc __unused) 5894 { 5895 struct vop_fdatasync_args *ap; 5896 5897 ap = a; 5898 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5899 } 5900 5901 void 5902 
vop_strategy_debugpre(void *ap) 5903 { 5904 struct vop_strategy_args *a; 5905 struct buf *bp; 5906 5907 a = ap; 5908 bp = a->a_bp; 5909 5910 /* 5911 * Cluster ops lock their component buffers but not the IO container. 5912 */ 5913 if ((bp->b_flags & B_CLUSTER) != 0) 5914 return; 5915 5916 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5917 if (vfs_badlock_print) 5918 printf( 5919 "VOP_STRATEGY: bp is not locked but should be\n"); 5920 if (vfs_badlock_ddb) 5921 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5922 } 5923 } 5924 5925 void 5926 vop_lock_debugpre(void *ap) 5927 { 5928 struct vop_lock1_args *a = ap; 5929 5930 if ((a->a_flags & LK_INTERLOCK) == 0) 5931 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5932 else 5933 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5934 } 5935 5936 void 5937 vop_lock_debugpost(void *ap, int rc) 5938 { 5939 struct vop_lock1_args *a = ap; 5940 5941 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5942 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5943 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5944 } 5945 5946 void 5947 vop_unlock_debugpre(void *ap) 5948 { 5949 struct vop_unlock_args *a = ap; 5950 struct vnode *vp = a->a_vp; 5951 5952 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5953 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5954 } 5955 5956 void 5957 vop_need_inactive_debugpre(void *ap) 5958 { 5959 struct vop_need_inactive_args *a = ap; 5960 5961 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5962 } 5963 5964 void 5965 vop_need_inactive_debugpost(void *ap, int rc) 5966 { 5967 struct vop_need_inactive_args *a = ap; 5968 5969 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5970 } 5971 #endif 5972 5973 void 5974 vop_create_pre(void *ap) 5975 { 5976 struct vop_create_args *a; 5977 struct vnode *dvp; 5978 5979 a = ap; 5980 dvp = a->a_dvp; 5981 vn_seqc_write_begin(dvp); 5982 } 5983 5984 void 5985 vop_create_post(void *ap, int rc) 5986 { 5987 struct vop_create_args *a; 5988 struct vnode *dvp; 5989 5990 a = ap; 5991 dvp = a->a_dvp; 5992 vn_seqc_write_end(dvp); 5993 if (!rc) 5994 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5995 } 5996 5997 void 5998 vop_whiteout_pre(void *ap) 5999 { 6000 struct vop_whiteout_args *a; 6001 struct vnode *dvp; 6002 6003 a = ap; 6004 dvp = a->a_dvp; 6005 vn_seqc_write_begin(dvp); 6006 } 6007 6008 void 6009 vop_whiteout_post(void *ap, int rc) 6010 { 6011 struct vop_whiteout_args *a; 6012 struct vnode *dvp; 6013 6014 a = ap; 6015 dvp = a->a_dvp; 6016 vn_seqc_write_end(dvp); 6017 } 6018 6019 void 6020 vop_deleteextattr_pre(void *ap) 6021 { 6022 struct vop_deleteextattr_args *a; 6023 struct vnode *vp; 6024 6025 a = ap; 6026 vp = a->a_vp; 6027 vn_seqc_write_begin(vp); 6028 } 6029 6030 void 6031 vop_deleteextattr_post(void *ap, int rc) 6032 { 6033 struct vop_deleteextattr_args *a; 6034 struct vnode *vp; 6035 6036 a = ap; 6037 vp = a->a_vp; 6038 vn_seqc_write_end(vp); 6039 if (!rc) 6040 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 6041 } 6042 6043 void 6044 vop_link_pre(void *ap) 6045 { 6046 struct vop_link_args *a; 6047 struct vnode *vp, *tdvp; 6048 6049 a = ap; 6050 vp = a->a_vp; 6051 tdvp = a->a_tdvp; 6052 vn_seqc_write_begin(vp); 6053 vn_seqc_write_begin(tdvp); 6054 } 6055 6056 void 6057 vop_link_post(void *ap, int rc) 6058 { 6059 struct vop_link_args *a; 6060 struct vnode *vp, *tdvp; 6061 6062 a = ap; 6063 vp = a->a_vp; 6064 tdvp = a->a_tdvp; 6065 vn_seqc_write_end(vp); 6066 vn_seqc_write_end(tdvp); 6067 if (!rc) { 6068 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6069 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6070 } 6071 } 6072 6073 void 6074 vop_mkdir_pre(void *ap) 6075 { 6076 struct 
vop_mkdir_args *a; 6077 struct vnode *dvp; 6078 6079 a = ap; 6080 dvp = a->a_dvp; 6081 vn_seqc_write_begin(dvp); 6082 } 6083 6084 void 6085 vop_mkdir_post(void *ap, int rc) 6086 { 6087 struct vop_mkdir_args *a; 6088 struct vnode *dvp; 6089 6090 a = ap; 6091 dvp = a->a_dvp; 6092 vn_seqc_write_end(dvp); 6093 if (!rc) 6094 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6095 } 6096 6097 #ifdef DEBUG_VFS_LOCKS 6098 void 6099 vop_mkdir_debugpost(void *ap, int rc) 6100 { 6101 struct vop_mkdir_args *a; 6102 6103 a = ap; 6104 if (!rc) 6105 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 6106 } 6107 #endif 6108 6109 void 6110 vop_mknod_pre(void *ap) 6111 { 6112 struct vop_mknod_args *a; 6113 struct vnode *dvp; 6114 6115 a = ap; 6116 dvp = a->a_dvp; 6117 vn_seqc_write_begin(dvp); 6118 } 6119 6120 void 6121 vop_mknod_post(void *ap, int rc) 6122 { 6123 struct vop_mknod_args *a; 6124 struct vnode *dvp; 6125 6126 a = ap; 6127 dvp = a->a_dvp; 6128 vn_seqc_write_end(dvp); 6129 if (!rc) 6130 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6131 } 6132 6133 void 6134 vop_reclaim_post(void *ap, int rc) 6135 { 6136 struct vop_reclaim_args *a; 6137 struct vnode *vp; 6138 6139 a = ap; 6140 vp = a->a_vp; 6141 ASSERT_VOP_IN_SEQC(vp); 6142 if (!rc) 6143 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 6144 } 6145 6146 void 6147 vop_remove_pre(void *ap) 6148 { 6149 struct vop_remove_args *a; 6150 struct vnode *dvp, *vp; 6151 6152 a = ap; 6153 dvp = a->a_dvp; 6154 vp = a->a_vp; 6155 vn_seqc_write_begin(dvp); 6156 vn_seqc_write_begin(vp); 6157 } 6158 6159 void 6160 vop_remove_post(void *ap, int rc) 6161 { 6162 struct vop_remove_args *a; 6163 struct vnode *dvp, *vp; 6164 6165 a = ap; 6166 dvp = a->a_dvp; 6167 vp = a->a_vp; 6168 vn_seqc_write_end(dvp); 6169 vn_seqc_write_end(vp); 6170 if (!rc) { 6171 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6172 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6173 } 6174 } 6175 6176 void 6177 vop_rename_post(void *ap, int rc) 6178 { 6179 struct vop_rename_args *a = ap; 6180 long hint; 6181 6182 if (!rc) { 6183 hint = NOTE_WRITE; 6184 if (a->a_fdvp == a->a_tdvp) { 6185 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6186 hint |= NOTE_LINK; 6187 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6188 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6189 } else { 6190 hint |= NOTE_EXTEND; 6191 if (a->a_fvp->v_type == VDIR) 6192 hint |= NOTE_LINK; 6193 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6194 6195 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6196 a->a_tvp->v_type == VDIR) 6197 hint &= ~NOTE_LINK; 6198 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6199 } 6200 6201 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6202 if (a->a_tvp) 6203 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6204 } 6205 if (a->a_tdvp != a->a_fdvp) 6206 vdrop(a->a_fdvp); 6207 if (a->a_tvp != a->a_fvp) 6208 vdrop(a->a_fvp); 6209 vdrop(a->a_tdvp); 6210 if (a->a_tvp) 6211 vdrop(a->a_tvp); 6212 } 6213 6214 void 6215 vop_rmdir_pre(void *ap) 6216 { 6217 struct vop_rmdir_args *a; 6218 struct vnode *dvp, *vp; 6219 6220 a = ap; 6221 dvp = a->a_dvp; 6222 vp = a->a_vp; 6223 vn_seqc_write_begin(dvp); 6224 vn_seqc_write_begin(vp); 6225 } 6226 6227 void 6228 vop_rmdir_post(void *ap, int rc) 6229 { 6230 struct vop_rmdir_args *a; 6231 struct vnode *dvp, *vp; 6232 6233 a = ap; 6234 dvp = a->a_dvp; 6235 vp = a->a_vp; 6236 vn_seqc_write_end(dvp); 6237 vn_seqc_write_end(vp); 6238 if (!rc) { 6239 vp->v_vflag |= VV_UNLINKED; 6240 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6241 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6242 } 6243 } 6244 6245 void 6246 vop_setattr_pre(void *ap) 6247 { 6248 struct vop_setattr_args *a; 6249 
struct vnode *vp; 6250 6251 a = ap; 6252 vp = a->a_vp; 6253 vn_seqc_write_begin(vp); 6254 } 6255 6256 void 6257 vop_setattr_post(void *ap, int rc) 6258 { 6259 struct vop_setattr_args *a; 6260 struct vnode *vp; 6261 6262 a = ap; 6263 vp = a->a_vp; 6264 vn_seqc_write_end(vp); 6265 if (!rc) 6266 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6267 } 6268 6269 void 6270 vop_setacl_pre(void *ap) 6271 { 6272 struct vop_setacl_args *a; 6273 struct vnode *vp; 6274 6275 a = ap; 6276 vp = a->a_vp; 6277 vn_seqc_write_begin(vp); 6278 } 6279 6280 void 6281 vop_setacl_post(void *ap, int rc __unused) 6282 { 6283 struct vop_setacl_args *a; 6284 struct vnode *vp; 6285 6286 a = ap; 6287 vp = a->a_vp; 6288 vn_seqc_write_end(vp); 6289 } 6290 6291 void 6292 vop_setextattr_pre(void *ap) 6293 { 6294 struct vop_setextattr_args *a; 6295 struct vnode *vp; 6296 6297 a = ap; 6298 vp = a->a_vp; 6299 vn_seqc_write_begin(vp); 6300 } 6301 6302 void 6303 vop_setextattr_post(void *ap, int rc) 6304 { 6305 struct vop_setextattr_args *a; 6306 struct vnode *vp; 6307 6308 a = ap; 6309 vp = a->a_vp; 6310 vn_seqc_write_end(vp); 6311 if (!rc) 6312 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6313 } 6314 6315 void 6316 vop_symlink_pre(void *ap) 6317 { 6318 struct vop_symlink_args *a; 6319 struct vnode *dvp; 6320 6321 a = ap; 6322 dvp = a->a_dvp; 6323 vn_seqc_write_begin(dvp); 6324 } 6325 6326 void 6327 vop_symlink_post(void *ap, int rc) 6328 { 6329 struct vop_symlink_args *a; 6330 struct vnode *dvp; 6331 6332 a = ap; 6333 dvp = a->a_dvp; 6334 vn_seqc_write_end(dvp); 6335 if (!rc) 6336 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6337 } 6338 6339 void 6340 vop_open_post(void *ap, int rc) 6341 { 6342 struct vop_open_args *a = ap; 6343 6344 if (!rc) 6345 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6346 } 6347 6348 void 6349 vop_close_post(void *ap, int rc) 6350 { 6351 struct vop_close_args *a = ap; 6352 6353 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6354 !VN_IS_DOOMED(a->a_vp))) { 6355 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6356 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6357 } 6358 } 6359 6360 void 6361 vop_read_post(void *ap, int rc) 6362 { 6363 struct vop_read_args *a = ap; 6364 6365 if (!rc) 6366 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6367 } 6368 6369 void 6370 vop_read_pgcache_post(void *ap, int rc) 6371 { 6372 struct vop_read_pgcache_args *a = ap; 6373 6374 if (!rc) 6375 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6376 } 6377 6378 void 6379 vop_readdir_post(void *ap, int rc) 6380 { 6381 struct vop_readdir_args *a = ap; 6382 6383 if (!rc) 6384 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6385 } 6386 6387 static struct knlist fs_knlist; 6388 6389 static void 6390 vfs_event_init(void *arg) 6391 { 6392 knlist_init_mtx(&fs_knlist, NULL); 6393 } 6394 /* XXX - correct order? 
*/ 6395 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6396 6397 void 6398 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6399 { 6400 6401 KNOTE_UNLOCKED(&fs_knlist, event); 6402 } 6403 6404 static int filt_fsattach(struct knote *kn); 6405 static void filt_fsdetach(struct knote *kn); 6406 static int filt_fsevent(struct knote *kn, long hint); 6407 6408 struct filterops fs_filtops = { 6409 .f_isfd = 0, 6410 .f_attach = filt_fsattach, 6411 .f_detach = filt_fsdetach, 6412 .f_event = filt_fsevent 6413 }; 6414 6415 static int 6416 filt_fsattach(struct knote *kn) 6417 { 6418 6419 kn->kn_flags |= EV_CLEAR; 6420 knlist_add(&fs_knlist, kn, 0); 6421 return (0); 6422 } 6423 6424 static void 6425 filt_fsdetach(struct knote *kn) 6426 { 6427 6428 knlist_remove(&fs_knlist, kn, 0); 6429 } 6430 6431 static int 6432 filt_fsevent(struct knote *kn, long hint) 6433 { 6434 6435 kn->kn_fflags |= kn->kn_sfflags & hint; 6436 6437 return (kn->kn_fflags != 0); 6438 } 6439 6440 static int 6441 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6442 { 6443 struct vfsidctl vc; 6444 int error; 6445 struct mount *mp; 6446 6447 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6448 if (error) 6449 return (error); 6450 if (vc.vc_vers != VFS_CTL_VERS1) 6451 return (EINVAL); 6452 mp = vfs_getvfs(&vc.vc_fsid); 6453 if (mp == NULL) 6454 return (ENOENT); 6455 /* ensure that a specific sysctl goes to the right filesystem. */ 6456 if (strcmp(vc.vc_fstypename, "*") != 0 && 6457 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6458 vfs_rel(mp); 6459 return (EINVAL); 6460 } 6461 VCTLTOREQ(&vc, req); 6462 error = VFS_SYSCTL(mp, vc.vc_op, req); 6463 vfs_rel(mp); 6464 return (error); 6465 } 6466 6467 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6468 NULL, 0, sysctl_vfs_ctl, "", 6469 "Sysctl by fsid"); 6470 6471 /* 6472 * Function to initialize a va_filerev field sensibly. 6473 * XXX: Wouldn't a random number make a lot more sense ?? 
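 *
 * For illustration: the value packs the boot-relative uptime, seconds in
 * the upper 32 bits and the top half of the bintime fraction in the lower
 * 32 bits.  At an uptime of 5.25s, bt.sec == 5 and
 * bt.frac == 0x4000000000000000, so the result is
 * (5 << 32) | (0x4000000000000000 >> 32) == 0x0000000540000000.
 * The uptime is monotonic, so later initializations within a single boot
 * always yield larger starting values.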
6474 */ 6475 u_quad_t 6476 init_va_filerev(void) 6477 { 6478 struct bintime bt; 6479 6480 getbinuptime(&bt); 6481 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6482 } 6483 6484 static int filt_vfsread(struct knote *kn, long hint); 6485 static int filt_vfswrite(struct knote *kn, long hint); 6486 static int filt_vfsvnode(struct knote *kn, long hint); 6487 static void filt_vfsdetach(struct knote *kn); 6488 static struct filterops vfsread_filtops = { 6489 .f_isfd = 1, 6490 .f_detach = filt_vfsdetach, 6491 .f_event = filt_vfsread 6492 }; 6493 static struct filterops vfswrite_filtops = { 6494 .f_isfd = 1, 6495 .f_detach = filt_vfsdetach, 6496 .f_event = filt_vfswrite 6497 }; 6498 static struct filterops vfsvnode_filtops = { 6499 .f_isfd = 1, 6500 .f_detach = filt_vfsdetach, 6501 .f_event = filt_vfsvnode 6502 }; 6503 6504 static void 6505 vfs_knllock(void *arg) 6506 { 6507 struct vnode *vp = arg; 6508 6509 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6510 } 6511 6512 static void 6513 vfs_knlunlock(void *arg) 6514 { 6515 struct vnode *vp = arg; 6516 6517 VOP_UNLOCK(vp); 6518 } 6519 6520 static void 6521 vfs_knl_assert_lock(void *arg, int what) 6522 { 6523 #ifdef DEBUG_VFS_LOCKS 6524 struct vnode *vp = arg; 6525 6526 if (what == LA_LOCKED) 6527 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6528 else 6529 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6530 #endif 6531 } 6532 6533 int 6534 vfs_kqfilter(struct vop_kqfilter_args *ap) 6535 { 6536 struct vnode *vp = ap->a_vp; 6537 struct knote *kn = ap->a_kn; 6538 struct knlist *knl; 6539 6540 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6541 kn->kn_filter != EVFILT_WRITE), 6542 ("READ/WRITE filter on a FIFO leaked through")); 6543 switch (kn->kn_filter) { 6544 case EVFILT_READ: 6545 kn->kn_fop = &vfsread_filtops; 6546 break; 6547 case EVFILT_WRITE: 6548 kn->kn_fop = &vfswrite_filtops; 6549 break; 6550 case EVFILT_VNODE: 6551 kn->kn_fop = &vfsvnode_filtops; 6552 break; 6553 default: 6554 return (EINVAL); 6555 } 6556 6557 kn->kn_hook = (caddr_t)vp; 6558 6559 v_addpollinfo(vp); 6560 if (vp->v_pollinfo == NULL) 6561 return (ENOMEM); 6562 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6563 vhold(vp); 6564 knlist_add(knl, kn, 0); 6565 6566 return (0); 6567 } 6568 6569 /* 6570 * Detach knote from vnode 6571 */ 6572 static void 6573 filt_vfsdetach(struct knote *kn) 6574 { 6575 struct vnode *vp = (struct vnode *)kn->kn_hook; 6576 6577 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6578 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6579 vdrop(vp); 6580 } 6581 6582 /*ARGSUSED*/ 6583 static int 6584 filt_vfsread(struct knote *kn, long hint) 6585 { 6586 struct vnode *vp = (struct vnode *)kn->kn_hook; 6587 off_t size; 6588 int res; 6589 6590 /* 6591 * filesystem is gone, so set the EOF flag and schedule 6592 * the knote for deletion. 
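	 * A hint of 0 with a VBAD vnode is treated the same way: it means
	 * the knote is being re-evaluated after the vnode has already been
	 * reclaimed.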
6593 */ 6594 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6595 VI_LOCK(vp); 6596 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6597 VI_UNLOCK(vp); 6598 return (1); 6599 } 6600 6601 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6602 return (0); 6603 6604 VI_LOCK(vp); 6605 kn->kn_data = size - kn->kn_fp->f_offset; 6606 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6607 VI_UNLOCK(vp); 6608 return (res); 6609 } 6610 6611 /*ARGSUSED*/ 6612 static int 6613 filt_vfswrite(struct knote *kn, long hint) 6614 { 6615 struct vnode *vp = (struct vnode *)kn->kn_hook; 6616 6617 VI_LOCK(vp); 6618 6619 /* 6620 * filesystem is gone, so set the EOF flag and schedule 6621 * the knote for deletion. 6622 */ 6623 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6624 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6625 6626 kn->kn_data = 0; 6627 VI_UNLOCK(vp); 6628 return (1); 6629 } 6630 6631 static int 6632 filt_vfsvnode(struct knote *kn, long hint) 6633 { 6634 struct vnode *vp = (struct vnode *)kn->kn_hook; 6635 int res; 6636 6637 VI_LOCK(vp); 6638 if (kn->kn_sfflags & hint) 6639 kn->kn_fflags |= hint; 6640 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6641 kn->kn_flags |= EV_EOF; 6642 VI_UNLOCK(vp); 6643 return (1); 6644 } 6645 res = (kn->kn_fflags != 0); 6646 VI_UNLOCK(vp); 6647 return (res); 6648 } 6649 6650 int 6651 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6652 { 6653 int error; 6654 6655 if (dp->d_reclen > ap->a_uio->uio_resid) 6656 return (ENAMETOOLONG); 6657 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6658 if (error) { 6659 if (ap->a_ncookies != NULL) { 6660 if (ap->a_cookies != NULL) 6661 free(ap->a_cookies, M_TEMP); 6662 ap->a_cookies = NULL; 6663 *ap->a_ncookies = 0; 6664 } 6665 return (error); 6666 } 6667 if (ap->a_ncookies == NULL) 6668 return (0); 6669 6670 KASSERT(ap->a_cookies, 6671 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6672 6673 *ap->a_cookies = realloc(*ap->a_cookies, 6674 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6675 (*ap->a_cookies)[*ap->a_ncookies] = off; 6676 *ap->a_ncookies += 1; 6677 return (0); 6678 } 6679 6680 /* 6681 * The purpose of this routine is to remove granularity from accmode_t, 6682 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6683 * VADMIN and VAPPEND. 6684 * 6685 * If it returns 0, the caller is supposed to continue with the usual 6686 * access checks using 'accmode' as modified by this routine. If it 6687 * returns nonzero value, the caller is supposed to return that value 6688 * as errno. 6689 * 6690 * Note that after this routine runs, accmode may be zero. 6691 */ 6692 int 6693 vfs_unixify_accmode(accmode_t *accmode) 6694 { 6695 /* 6696 * There is no way to specify explicit "deny" rule using 6697 * file mode or POSIX.1e ACLs. 6698 */ 6699 if (*accmode & VEXPLICIT_DENY) { 6700 *accmode = 0; 6701 return (0); 6702 } 6703 6704 /* 6705 * None of these can be translated into usual access bits. 6706 * Also, the common case for NFSv4 ACLs is to not contain 6707 * either of these bits. Caller should check for VWRITE 6708 * on the containing directory instead. 6709 */ 6710 if (*accmode & (VDELETE_CHILD | VDELETE)) 6711 return (EPERM); 6712 6713 if (*accmode & VADMIN_PERMS) { 6714 *accmode &= ~VADMIN_PERMS; 6715 *accmode |= VADMIN; 6716 } 6717 6718 /* 6719 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6720 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
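	 *
	 * As a hypothetical example, a request for VREAD | VWRITE_ACL |
	 * VREAD_ATTRIBUTES would normally leave this routine as
	 * VREAD | VADMIN: VWRITE_ACL is folded into VADMIN by the
	 * VADMIN_PERMS clause above, and VREAD_ATTRIBUTES is among the
	 * VSTAT_PERMS bits cleared below.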
6721 */ 6722 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6723 6724 return (0); 6725 } 6726 6727 /* 6728 * Clear out a doomed vnode (if any) and replace it with a new one as long 6729 * as the fs is not being unmounted. Return the root vnode to the caller. 6730 */ 6731 static int __noinline 6732 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6733 { 6734 struct vnode *vp; 6735 int error; 6736 6737 restart: 6738 if (mp->mnt_rootvnode != NULL) { 6739 MNT_ILOCK(mp); 6740 vp = mp->mnt_rootvnode; 6741 if (vp != NULL) { 6742 if (!VN_IS_DOOMED(vp)) { 6743 vrefact(vp); 6744 MNT_IUNLOCK(mp); 6745 error = vn_lock(vp, flags); 6746 if (error == 0) { 6747 *vpp = vp; 6748 return (0); 6749 } 6750 vrele(vp); 6751 goto restart; 6752 } 6753 /* 6754 * Clear the old one. 6755 */ 6756 mp->mnt_rootvnode = NULL; 6757 } 6758 MNT_IUNLOCK(mp); 6759 if (vp != NULL) { 6760 vfs_op_barrier_wait(mp); 6761 vrele(vp); 6762 } 6763 } 6764 error = VFS_CACHEDROOT(mp, flags, vpp); 6765 if (error != 0) 6766 return (error); 6767 if (mp->mnt_vfs_ops == 0) { 6768 MNT_ILOCK(mp); 6769 if (mp->mnt_vfs_ops != 0) { 6770 MNT_IUNLOCK(mp); 6771 return (0); 6772 } 6773 if (mp->mnt_rootvnode == NULL) { 6774 vrefact(*vpp); 6775 mp->mnt_rootvnode = *vpp; 6776 } else { 6777 if (mp->mnt_rootvnode != *vpp) { 6778 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6779 panic("%s: mismatch between vnode returned " 6780 " by VFS_CACHEDROOT and the one cached " 6781 " (%p != %p)", 6782 __func__, *vpp, mp->mnt_rootvnode); 6783 } 6784 } 6785 } 6786 MNT_IUNLOCK(mp); 6787 } 6788 return (0); 6789 } 6790 6791 int 6792 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6793 { 6794 struct mount_pcpu *mpcpu; 6795 struct vnode *vp; 6796 int error; 6797 6798 if (!vfs_op_thread_enter(mp, mpcpu)) 6799 return (vfs_cache_root_fallback(mp, flags, vpp)); 6800 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6801 if (vp == NULL || VN_IS_DOOMED(vp)) { 6802 vfs_op_thread_exit(mp, mpcpu); 6803 return (vfs_cache_root_fallback(mp, flags, vpp)); 6804 } 6805 vrefact(vp); 6806 vfs_op_thread_exit(mp, mpcpu); 6807 error = vn_lock(vp, flags); 6808 if (error != 0) { 6809 vrele(vp); 6810 return (vfs_cache_root_fallback(mp, flags, vpp)); 6811 } 6812 *vpp = vp; 6813 return (0); 6814 } 6815 6816 struct vnode * 6817 vfs_cache_root_clear(struct mount *mp) 6818 { 6819 struct vnode *vp; 6820 6821 /* 6822 * ops > 0 guarantees there is nobody who can see this vnode 6823 */ 6824 MPASS(mp->mnt_vfs_ops > 0); 6825 vp = mp->mnt_rootvnode; 6826 if (vp != NULL) 6827 vn_seqc_write_begin(vp); 6828 mp->mnt_rootvnode = NULL; 6829 return (vp); 6830 } 6831 6832 void 6833 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6834 { 6835 6836 MPASS(mp->mnt_vfs_ops > 0); 6837 vrefact(vp); 6838 mp->mnt_rootvnode = vp; 6839 } 6840 6841 /* 6842 * These are helper functions for filesystems to traverse all 6843 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6844 * 6845 * This interface replaces MNT_VNODE_FOREACH. 6846 */ 6847 6848 struct vnode * 6849 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6850 { 6851 struct vnode *vp; 6852 6853 maybe_yield(); 6854 MNT_ILOCK(mp); 6855 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6856 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6857 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6858 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
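		 * The flag is checked again below with the vnode interlock
		 * held before the vnode is returned to the caller.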
*/ 6859 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6860 continue; 6861 VI_LOCK(vp); 6862 if (VN_IS_DOOMED(vp)) { 6863 VI_UNLOCK(vp); 6864 continue; 6865 } 6866 break; 6867 } 6868 if (vp == NULL) { 6869 __mnt_vnode_markerfree_all(mvp, mp); 6870 /* MNT_IUNLOCK(mp); -- done in above function */ 6871 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6872 return (NULL); 6873 } 6874 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6875 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6876 MNT_IUNLOCK(mp); 6877 return (vp); 6878 } 6879 6880 struct vnode * 6881 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6882 { 6883 struct vnode *vp; 6884 6885 *mvp = vn_alloc_marker(mp); 6886 MNT_ILOCK(mp); 6887 MNT_REF(mp); 6888 6889 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6890 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6891 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6892 continue; 6893 VI_LOCK(vp); 6894 if (VN_IS_DOOMED(vp)) { 6895 VI_UNLOCK(vp); 6896 continue; 6897 } 6898 break; 6899 } 6900 if (vp == NULL) { 6901 MNT_REL(mp); 6902 MNT_IUNLOCK(mp); 6903 vn_free_marker(*mvp); 6904 *mvp = NULL; 6905 return (NULL); 6906 } 6907 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6908 MNT_IUNLOCK(mp); 6909 return (vp); 6910 } 6911 6912 void 6913 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6914 { 6915 6916 if (*mvp == NULL) { 6917 MNT_IUNLOCK(mp); 6918 return; 6919 } 6920 6921 mtx_assert(MNT_MTX(mp), MA_OWNED); 6922 6923 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6924 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6925 MNT_REL(mp); 6926 MNT_IUNLOCK(mp); 6927 vn_free_marker(*mvp); 6928 *mvp = NULL; 6929 } 6930 6931 /* 6932 * These are helper functions for filesystems to traverse their 6933 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6934 */ 6935 static void 6936 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6937 { 6938 6939 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6940 6941 MNT_ILOCK(mp); 6942 MNT_REL(mp); 6943 MNT_IUNLOCK(mp); 6944 vn_free_marker(*mvp); 6945 *mvp = NULL; 6946 } 6947 6948 /* 6949 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6950 * conventional lock order during mnt_vnode_next_lazy iteration. 6951 * 6952 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6953 * The list lock is dropped and reacquired. On success, both locks are held. 6954 * On failure, the mount vnode list lock is held but the vnode interlock is 6955 * not, and the procedure may have yielded. 6956 */ 6957 static bool 6958 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6959 struct vnode *vp) 6960 { 6961 6962 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6963 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6964 ("%s: bad marker", __func__)); 6965 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6966 ("%s: inappropriate vnode", __func__)); 6967 ASSERT_VI_UNLOCKED(vp, __func__); 6968 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6969 6970 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6971 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6972 6973 /* 6974 * Note we may be racing against vdrop which transitioned the hold 6975 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6976 * if we are the only user after we get the interlock we will just 6977 * vdrop. 
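	 * That is also why refcount_release_if_not_last() is used below: if
	 * the temporary hold taken here turns out to be the last reference,
	 * the relock attempt is abandoned and the vnode is dropped instead.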
6978 */ 6979 vhold(vp); 6980 mtx_unlock(&mp->mnt_listmtx); 6981 VI_LOCK(vp); 6982 if (VN_IS_DOOMED(vp)) { 6983 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6984 goto out_lost; 6985 } 6986 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6987 /* 6988 * There is nothing to do if we are the last user. 6989 */ 6990 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6991 goto out_lost; 6992 mtx_lock(&mp->mnt_listmtx); 6993 return (true); 6994 out_lost: 6995 vdropl(vp); 6996 maybe_yield(); 6997 mtx_lock(&mp->mnt_listmtx); 6998 return (false); 6999 } 7000 7001 static struct vnode * 7002 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7003 void *cbarg) 7004 { 7005 struct vnode *vp; 7006 7007 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 7008 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 7009 restart: 7010 vp = TAILQ_NEXT(*mvp, v_lazylist); 7011 while (vp != NULL) { 7012 if (vp->v_type == VMARKER) { 7013 vp = TAILQ_NEXT(vp, v_lazylist); 7014 continue; 7015 } 7016 /* 7017 * See if we want to process the vnode. Note we may encounter a 7018 * long string of vnodes we don't care about and hog the list 7019 * as a result. Check for it and requeue the marker. 7020 */ 7021 VNPASS(!VN_IS_DOOMED(vp), vp); 7022 if (!cb(vp, cbarg)) { 7023 if (!should_yield()) { 7024 vp = TAILQ_NEXT(vp, v_lazylist); 7025 continue; 7026 } 7027 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 7028 v_lazylist); 7029 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 7030 v_lazylist); 7031 mtx_unlock(&mp->mnt_listmtx); 7032 kern_yield(PRI_USER); 7033 mtx_lock(&mp->mnt_listmtx); 7034 goto restart; 7035 } 7036 /* 7037 * Try-lock because this is the wrong lock order. 7038 */ 7039 if (!VI_TRYLOCK(vp) && 7040 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 7041 goto restart; 7042 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 7043 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 7044 ("alien vnode on the lazy list %p %p", vp, mp)); 7045 VNPASS(vp->v_mount == mp, vp); 7046 VNPASS(!VN_IS_DOOMED(vp), vp); 7047 break; 7048 } 7049 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7050 7051 /* Check if we are done */ 7052 if (vp == NULL) { 7053 mtx_unlock(&mp->mnt_listmtx); 7054 mnt_vnode_markerfree_lazy(mvp, mp); 7055 return (NULL); 7056 } 7057 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7058 mtx_unlock(&mp->mnt_listmtx); 7059 ASSERT_VI_LOCKED(vp, "lazy iter"); 7060 return (vp); 7061 } 7062 7063 struct vnode * 7064 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7065 void *cbarg) 7066 { 7067 7068 maybe_yield(); 7069 mtx_lock(&mp->mnt_listmtx); 7070 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7071 } 7072 7073 struct vnode * 7074 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7075 void *cbarg) 7076 { 7077 struct vnode *vp; 7078 7079 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7080 return (NULL); 7081 7082 *mvp = vn_alloc_marker(mp); 7083 MNT_ILOCK(mp); 7084 MNT_REF(mp); 7085 MNT_IUNLOCK(mp); 7086 7087 mtx_lock(&mp->mnt_listmtx); 7088 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7089 if (vp == NULL) { 7090 mtx_unlock(&mp->mnt_listmtx); 7091 mnt_vnode_markerfree_lazy(mvp, mp); 7092 return (NULL); 7093 } 7094 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7095 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7096 } 7097 7098 void 7099 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7100 { 7101 7102 if (*mvp == NULL) 7103 return; 7104 7105 mtx_lock(&mp->mnt_listmtx); 7106 
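	/*
	 * The lazy vnode list is protected by mnt_listmtx; unhook the
	 * marker under that lock before freeing it.
	 */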
TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7107 mtx_unlock(&mp->mnt_listmtx); 7108 mnt_vnode_markerfree_lazy(mvp, mp); 7109 } 7110 7111 int 7112 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 7113 { 7114 7115 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 7116 cnp->cn_flags &= ~NOEXECCHECK; 7117 return (0); 7118 } 7119 7120 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 7121 } 7122 7123 /* 7124 * Do not use this variant unless you have means other than the hold count 7125 * to prevent the vnode from getting freed. 7126 */ 7127 void 7128 vn_seqc_write_begin_locked(struct vnode *vp) 7129 { 7130 7131 ASSERT_VI_LOCKED(vp, __func__); 7132 VNPASS(vp->v_holdcnt > 0, vp); 7133 VNPASS(vp->v_seqc_users >= 0, vp); 7134 vp->v_seqc_users++; 7135 if (vp->v_seqc_users == 1) 7136 seqc_sleepable_write_begin(&vp->v_seqc); 7137 } 7138 7139 void 7140 vn_seqc_write_begin(struct vnode *vp) 7141 { 7142 7143 VI_LOCK(vp); 7144 vn_seqc_write_begin_locked(vp); 7145 VI_UNLOCK(vp); 7146 } 7147 7148 void 7149 vn_seqc_write_end_locked(struct vnode *vp) 7150 { 7151 7152 ASSERT_VI_LOCKED(vp, __func__); 7153 VNPASS(vp->v_seqc_users > 0, vp); 7154 vp->v_seqc_users--; 7155 if (vp->v_seqc_users == 0) 7156 seqc_sleepable_write_end(&vp->v_seqc); 7157 } 7158 7159 void 7160 vn_seqc_write_end(struct vnode *vp) 7161 { 7162 7163 VI_LOCK(vp); 7164 vn_seqc_write_end_locked(vp); 7165 VI_UNLOCK(vp); 7166 } 7167 7168 /* 7169 * Special case handling for allocating and freeing vnodes. 7170 * 7171 * The counter remains unchanged on free so that a doomed vnode will 7172 * keep testing as in modify as long as it is accessible with SMR. 7173 */ 7174 static void 7175 vn_seqc_init(struct vnode *vp) 7176 { 7177 7178 vp->v_seqc = 0; 7179 vp->v_seqc_users = 0; 7180 } 7181 7182 static void 7183 vn_seqc_write_end_free(struct vnode *vp) 7184 { 7185 7186 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7187 VNPASS(vp->v_seqc_users == 1, vp); 7188 } 7189 7190 void 7191 vn_irflag_set_locked(struct vnode *vp, short toset) 7192 { 7193 short flags; 7194 7195 ASSERT_VI_LOCKED(vp, __func__); 7196 flags = vn_irflag_read(vp); 7197 VNASSERT((flags & toset) == 0, vp, 7198 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7199 __func__, flags, toset)); 7200 atomic_store_short(&vp->v_irflag, flags | toset); 7201 } 7202 7203 void 7204 vn_irflag_set(struct vnode *vp, short toset) 7205 { 7206 7207 VI_LOCK(vp); 7208 vn_irflag_set_locked(vp, toset); 7209 VI_UNLOCK(vp); 7210 } 7211 7212 void 7213 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7214 { 7215 short flags; 7216 7217 ASSERT_VI_LOCKED(vp, __func__); 7218 flags = vn_irflag_read(vp); 7219 atomic_store_short(&vp->v_irflag, flags | toset); 7220 } 7221 7222 void 7223 vn_irflag_set_cond(struct vnode *vp, short toset) 7224 { 7225 7226 VI_LOCK(vp); 7227 vn_irflag_set_cond_locked(vp, toset); 7228 VI_UNLOCK(vp); 7229 } 7230 7231 void 7232 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7233 { 7234 short flags; 7235 7236 ASSERT_VI_LOCKED(vp, __func__); 7237 flags = vn_irflag_read(vp); 7238 VNASSERT((flags & tounset) == tounset, vp, 7239 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7240 __func__, flags, tounset)); 7241 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7242 } 7243 7244 void 7245 vn_irflag_unset(struct vnode *vp, short tounset) 7246 { 7247 7248 VI_LOCK(vp); 7249 vn_irflag_unset_locked(vp, tounset); 7250 VI_UNLOCK(vp); 7251 } 7252 7253 int 7254 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) 7255 { 7256 
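	/*
	 * Query the filesystem for the current attributes and report the
	 * size, failing with EFBIG if va_size does not fit in an off_t.
	 */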
struct vattr vattr; 7257 int error; 7258 7259 ASSERT_VOP_LOCKED(vp, __func__); 7260 error = VOP_GETATTR(vp, &vattr, cred); 7261 if (__predict_true(error == 0)) { 7262 if (vattr.va_size <= OFF_MAX) 7263 *size = vattr.va_size; 7264 else 7265 error = EFBIG; 7266 } 7267 return (error); 7268 } 7269 7270 int 7271 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) 7272 { 7273 int error; 7274 7275 VOP_LOCK(vp, LK_SHARED); 7276 error = vn_getsize_locked(vp, size, cred); 7277 VOP_UNLOCK(vp); 7278 return (error); 7279 } 7280 7281 #ifdef INVARIANTS 7282 void 7283 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state) 7284 { 7285 7286 switch (vp->v_state) { 7287 case VSTATE_UNINITIALIZED: 7288 switch (state) { 7289 case VSTATE_CONSTRUCTED: 7290 case VSTATE_DESTROYING: 7291 return; 7292 default: 7293 break; 7294 } 7295 break; 7296 case VSTATE_CONSTRUCTED: 7297 ASSERT_VOP_ELOCKED(vp, __func__); 7298 switch (state) { 7299 case VSTATE_DESTROYING: 7300 return; 7301 default: 7302 break; 7303 } 7304 break; 7305 case VSTATE_DESTROYING: 7306 ASSERT_VOP_ELOCKED(vp, __func__); 7307 switch (state) { 7308 case VSTATE_DEAD: 7309 return; 7310 default: 7311 break; 7312 } 7313 break; 7314 case VSTATE_DEAD: 7315 switch (state) { 7316 case VSTATE_UNINITIALIZED: 7317 return; 7318 default: 7319 break; 7320 } 7321 break; 7322 } 7323 7324 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); 7325 panic("invalid state transition %d -> %d\n", vp->v_state, state); 7326 } 7327 #endif 7328
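
/*
 * For reference, the transitions accepted by vn_set_state_validate() above
 * are:
 *
 *	UNINITIALIZED -> CONSTRUCTED
 *	UNINITIALIZED -> DESTROYING
 *	CONSTRUCTED   -> DESTROYING	(vnode exclusively locked)
 *	DESTROYING    -> DEAD		(vnode exclusively locked)
 *	DEAD          -> UNINITIALIZED
 *
 * Anything else is reported via vn_printf() and panics.
 */

/*
 * Illustrative sketch (hypothetical filesystem code, not part of this
 * file) of how the MNT_VNODE_FOREACH_ALL() helpers defined earlier are
 * typically used to visit every vnode of a mount:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT) != 0)
 *			continue;
 *		... operate on the locked, referenced vnode ...
 *		vput(vp);
 *	}
 *
 * Each vnode is handed to the loop body with its interlock held; the body
 * must either drop the interlock or pass it on via LK_INTERLOCK, and may
 * use MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp) to end the iteration early.
 */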