1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 #include "opt_ddb.h" 45 #include "opt_watchdog.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/asan.h> 50 #include <sys/bio.h> 51 #include <sys/buf.h> 52 #include <sys/capsicum.h> 53 #include <sys/condvar.h> 54 #include <sys/conf.h> 55 #include <sys/counter.h> 56 #include <sys/dirent.h> 57 #include <sys/event.h> 58 #include <sys/eventhandler.h> 59 #include <sys/extattr.h> 60 #include <sys/file.h> 61 #include <sys/fcntl.h> 62 #include <sys/jail.h> 63 #include <sys/kdb.h> 64 #include <sys/kernel.h> 65 #include <sys/kthread.h> 66 #include <sys/ktr.h> 67 #include <sys/limits.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smr.h> 80 #include <sys/smp.h> 81 #include <sys/stat.h> 82 #include <sys/sysctl.h> 83 #include <sys/syslog.h> 84 #include <sys/vmmeter.h> 85 #include <sys/vnode.h> 86 #include <sys/watchdog.h> 87 88 #include <machine/stdarg.h> 89 90 #include <security/mac/mac_framework.h> 91 92 #include <vm/vm.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_extern.h> 95 #include <vm/pmap.h> 96 #include <vm/vm_map.h> 97 #include <vm/vm_page.h> 98 #include <vm/vm_kern.h> 99 #include <vm/uma.h> 100 101 #if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS)) 102 #error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS 103 #endif 104 105 #ifdef DDB 106 #include <ddb/ddb.h> 107 #endif 108 109 static void delmntque(struct vnode *vp); 110 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 111 int slpflag, int slptimeo); 112 static void syncer_shutdown(void *arg, int howto); 113 static int vtryrecycle(struct vnode *vp); 114 static void v_init_counters(struct vnode *); 115 static void vn_seqc_init(struct vnode *); 116 static void vn_seqc_write_end_free(struct vnode *vp); 117 static void vgonel(struct vnode *); 118 static bool vhold_recycle_free(struct vnode *); 119 static void vdropl_recycle(struct vnode *vp); 120 static void vdrop_recycle(struct vnode *vp); 121 static void vfs_knllock(void *arg); 122 static void vfs_knlunlock(void *arg); 123 static void vfs_knl_assert_lock(void *arg, int what); 124 static void destroy_vpollinfo(struct vpollinfo *vi); 125 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 126 daddr_t startlbn, daddr_t endlbn); 127 static void vnlru_recalc(void); 128 129 static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 130 "vnode configuration and statistics"); 131 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 132 "vnode configuration"); 133 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 134 "vnode statistics"); 135 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 136 "vnode recycling"); 137 138 /* 139 * Number of vnodes in existence. Increased whenever getnewvnode() 140 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 
141 */ 142 static u_long __exclusive_cache_line numvnodes; 143 144 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 145 "Number of vnodes in existence (legacy)"); 146 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0, 147 "Number of vnodes in existence"); 148 149 static counter_u64_t vnodes_created; 150 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 151 "Number of vnodes created by getnewvnode (legacy)"); 152 SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created, 153 "Number of vnodes created by getnewvnode"); 154 155 /* 156 * Conversion tables for conversion from vnode types to inode formats 157 * and back. 158 */ 159 __enum_uint8(vtype) iftovt_tab[16] = { 160 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 161 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 162 }; 163 int vttoif_tab[10] = { 164 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 165 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 166 }; 167 168 /* 169 * List of allocates vnodes in the system. 170 */ 171 static TAILQ_HEAD(freelst, vnode) vnode_list; 172 static struct vnode *vnode_list_free_marker; 173 static struct vnode *vnode_list_reclaim_marker; 174 175 /* 176 * "Free" vnode target. Free vnodes are rarely completely free, but are 177 * just ones that are cheap to recycle. Usually they are for files which 178 * have been stat'd but not read; these usually have inode and namecache 179 * data attached to them. This target is the preferred minimum size of a 180 * sub-cache consisting mostly of such files. The system balances the size 181 * of this sub-cache with its complement to try to prevent either from 182 * thrashing while the other is relatively inactive. The targets express 183 * a preference for the best balance. 184 * 185 * "Above" this target there are 2 further targets (watermarks) related 186 * to recyling of free vnodes. In the best-operating case, the cache is 187 * exactly full, the free list has size between vlowat and vhiwat above the 188 * free target, and recycling from it and normal use maintains this state. 189 * Sometimes the free list is below vlowat or even empty, but this state 190 * is even better for immediate use provided the cache is not full. 191 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 192 * ones) to reach one of these states. The watermarks are currently hard- 193 * coded as 4% and 9% of the available space higher. These and the default 194 * of 25% for wantfreevnodes are too large if the memory size is large. 195 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 196 * whenever vnlru_proc() becomes active. 
197 */ 198 static long wantfreevnodes; 199 static long __exclusive_cache_line freevnodes; 200 static long freevnodes_old; 201 202 static counter_u64_t recycles_count; 203 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 204 "Number of vnodes recycled to meet vnode cache targets (legacy)"); 205 SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 206 "Number of vnodes recycled to meet vnode cache targets"); 207 208 static counter_u64_t recycles_free_count; 209 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 210 "Number of free vnodes recycled to meet vnode cache targets (legacy)"); 211 SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 212 "Number of free vnodes recycled to meet vnode cache targets"); 213 214 static counter_u64_t vnode_skipped_requeues; 215 SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues, 216 "Number of times LRU requeue was skipped due to lock contention"); 217 218 static u_long deferred_inact; 219 SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, 220 &deferred_inact, 0, "Number of times inactive processing was deferred"); 221 222 /* To keep more than one thread at a time from running vfs_getnewfsid */ 223 static struct mtx mntid_mtx; 224 225 /* 226 * Lock for any access to the following: 227 * vnode_list 228 * numvnodes 229 * freevnodes 230 */ 231 static struct mtx __exclusive_cache_line vnode_list_mtx; 232 233 /* Publicly exported FS */ 234 struct nfs_public nfs_pub; 235 236 static uma_zone_t buf_trie_zone; 237 static smr_t buf_trie_smr; 238 239 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 240 static uma_zone_t vnode_zone; 241 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 242 243 __read_frequently smr_t vfs_smr; 244 245 /* 246 * The workitem queue. 247 * 248 * It is useful to delay writes of file data and filesystem metadata 249 * for tens of seconds so that quickly created and deleted files need 250 * not waste disk bandwidth being created and removed. To realize this, 251 * we append vnodes to a "workitem" queue. When running with a soft 252 * updates implementation, most pending metadata dependencies should 253 * not wait for more than a few seconds. Thus, mounted on block devices 254 * are delayed only about a half the time that file data is delayed. 255 * Similarly, directory updates are more critical, so are only delayed 256 * about a third the time that file data is delayed. Thus, there are 257 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 258 * one each second (driven off the filesystem syncer process). The 259 * syncer_delayno variable indicates the next queue that is to be processed. 
260 * Items that need to be processed soon are placed in this queue: 261 * 262 * syncer_workitem_pending[syncer_delayno] 263 * 264 * A delay of fifteen seconds is done by placing the request fifteen 265 * entries later in the queue: 266 * 267 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 268 * 269 */ 270 static int syncer_delayno; 271 static long syncer_mask; 272 LIST_HEAD(synclist, bufobj); 273 static struct synclist *syncer_workitem_pending; 274 /* 275 * The sync_mtx protects: 276 * bo->bo_synclist 277 * sync_vnode_count 278 * syncer_delayno 279 * syncer_state 280 * syncer_workitem_pending 281 * syncer_worklist_len 282 * rushjob 283 */ 284 static struct mtx sync_mtx; 285 static struct cv sync_wakeup; 286 287 #define SYNCER_MAXDELAY 32 288 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 289 static int syncdelay = 30; /* max time to delay syncing data */ 290 static int filedelay = 30; /* time to delay syncing files */ 291 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 292 "Time to delay syncing files (in seconds)"); 293 static int dirdelay = 29; /* time to delay syncing directories */ 294 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 295 "Time to delay syncing directories (in seconds)"); 296 static int metadelay = 28; /* time to delay syncing metadata */ 297 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 298 "Time to delay syncing metadata (in seconds)"); 299 static int rushjob; /* number of slots to run ASAP */ 300 static int stat_rush_requests; /* number of times I/O speeded up */ 301 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 302 "Number of times I/O speeded up (rush requests)"); 303 304 #define VDBATCH_SIZE 8 305 struct vdbatch { 306 u_int index; 307 struct mtx lock; 308 struct vnode *tab[VDBATCH_SIZE]; 309 }; 310 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 311 312 static void vdbatch_dequeue(struct vnode *vp); 313 314 /* 315 * When shutting down the syncer, run it at four times normal speed. 316 */ 317 #define SYNCER_SHUTDOWN_SPEEDUP 4 318 static int sync_vnode_count; 319 static int syncer_worklist_len; 320 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 321 syncer_state; 322 323 /* Target for maximum number of vnodes. */ 324 u_long desiredvnodes; 325 static u_long gapvnodes; /* gap between wanted and desired */ 326 static u_long vhiwat; /* enough extras after expansion */ 327 static u_long vlowat; /* minimal extras before expansion */ 328 static bool vstir; /* nonzero to stir non-free vnodes */ 329 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 330 331 static u_long vnlru_read_freevnodes(void); 332 333 /* 334 * Note that no attempt is made to sanitize these parameters. 335 */ 336 static int 337 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 338 { 339 u_long val; 340 int error; 341 342 val = desiredvnodes; 343 error = sysctl_handle_long(oidp, &val, 0, req); 344 if (error != 0 || req->newptr == NULL) 345 return (error); 346 347 if (val == desiredvnodes) 348 return (0); 349 mtx_lock(&vnode_list_mtx); 350 desiredvnodes = val; 351 wantfreevnodes = desiredvnodes / 4; 352 vnlru_recalc(); 353 mtx_unlock(&vnode_list_mtx); 354 /* 355 * XXX There is no protection against multiple threads changing 356 * desiredvnodes at the same time. Locking above only helps vnlru and 357 * getnewvnode. 
358 */ 359 vfs_hash_changesize(desiredvnodes); 360 cache_changesize(desiredvnodes); 361 return (0); 362 } 363 364 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 365 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 366 "LU", "Target for maximum number of vnodes (legacy)"); 367 SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit, 368 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 369 "LU", "Target for maximum number of vnodes"); 370 371 static int 372 sysctl_freevnodes(SYSCTL_HANDLER_ARGS) 373 { 374 u_long rfreevnodes; 375 376 rfreevnodes = vnlru_read_freevnodes(); 377 return (sysctl_handle_long(oidp, &rfreevnodes, 0, req)); 378 } 379 380 SYSCTL_PROC(_vfs, OID_AUTO, freevnodes, 381 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes, 382 "LU", "Number of \"free\" vnodes (legacy)"); 383 SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free, 384 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes, 385 "LU", "Number of \"free\" vnodes"); 386 387 static int 388 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 389 { 390 u_long val; 391 int error; 392 393 val = wantfreevnodes; 394 error = sysctl_handle_long(oidp, &val, 0, req); 395 if (error != 0 || req->newptr == NULL) 396 return (error); 397 398 if (val == wantfreevnodes) 399 return (0); 400 mtx_lock(&vnode_list_mtx); 401 wantfreevnodes = val; 402 vnlru_recalc(); 403 mtx_unlock(&vnode_list_mtx); 404 return (0); 405 } 406 407 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 408 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 409 "LU", "Target for minimum number of \"free\" vnodes (legacy)"); 410 SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree, 411 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 412 "LU", "Target for minimum number of \"free\" vnodes"); 413 414 static int vnlru_nowhere; 415 SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS, 416 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 417 418 static int 419 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 420 { 421 struct vnode *vp; 422 struct nameidata nd; 423 char *buf; 424 unsigned long ndflags; 425 int error; 426 427 if (req->newptr == NULL) 428 return (EINVAL); 429 if (req->newlen >= PATH_MAX) 430 return (E2BIG); 431 432 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 433 error = SYSCTL_IN(req, buf, req->newlen); 434 if (error != 0) 435 goto out; 436 437 buf[req->newlen] = '\0'; 438 439 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1; 440 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf); 441 if ((error = namei(&nd)) != 0) 442 goto out; 443 vp = nd.ni_vp; 444 445 if (VN_IS_DOOMED(vp)) { 446 /* 447 * This vnode is being recycled. Return != 0 to let the caller 448 * know that the sysctl had no effect. 
Return EAGAIN because a 449 * subsequent call will likely succeed (since namei will create 450 * a new vnode if necessary) 451 */ 452 error = EAGAIN; 453 goto putvnode; 454 } 455 456 counter_u64_add(recycles_count, 1); 457 vgone(vp); 458 putvnode: 459 vput(vp); 460 NDFREE_PNBUF(&nd); 461 out: 462 free(buf, M_TEMP); 463 return (error); 464 } 465 466 static int 467 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 468 { 469 struct thread *td = curthread; 470 struct vnode *vp; 471 struct file *fp; 472 int error; 473 int fd; 474 475 if (req->newptr == NULL) 476 return (EBADF); 477 478 error = sysctl_handle_int(oidp, &fd, 0, req); 479 if (error != 0) 480 return (error); 481 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 482 if (error != 0) 483 return (error); 484 vp = fp->f_vnode; 485 486 error = vn_lock(vp, LK_EXCLUSIVE); 487 if (error != 0) 488 goto drop; 489 490 counter_u64_add(recycles_count, 1); 491 vgone(vp); 492 VOP_UNLOCK(vp); 493 drop: 494 fdrop(fp, td); 495 return (error); 496 } 497 498 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 499 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 500 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 501 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 502 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 503 sysctl_ftry_reclaim_vnode, "I", 504 "Try to reclaim a vnode by its file descriptor"); 505 506 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 507 #define vnsz2log 8 508 #ifndef DEBUG_LOCKS 509 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log && 510 sizeof(struct vnode) < 1UL << (vnsz2log + 1), 511 "vnsz2log needs to be updated"); 512 #endif 513 514 /* 515 * Support for the bufobj clean & dirty pctrie. 516 */ 517 static void * 518 buf_trie_alloc(struct pctrie *ptree) 519 { 520 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 521 } 522 523 static void 524 buf_trie_free(struct pctrie *ptree, void *node) 525 { 526 uma_zfree_smr(buf_trie_zone, node); 527 } 528 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 529 buf_trie_smr); 530 531 /* 532 * Initialize the vnode management data structures. 533 * 534 * Reevaluate the following cap on the number of vnodes after the physical 535 * memory size exceeds 512GB. In the limit, as the physical memory size 536 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 
537 */ 538 #ifndef MAXVNODES_MAX 539 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 540 #endif 541 542 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 543 544 static struct vnode * 545 vn_alloc_marker(struct mount *mp) 546 { 547 struct vnode *vp; 548 549 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 550 vp->v_type = VMARKER; 551 vp->v_mount = mp; 552 553 return (vp); 554 } 555 556 static void 557 vn_free_marker(struct vnode *vp) 558 { 559 560 MPASS(vp->v_type == VMARKER); 561 free(vp, M_VNODE_MARKER); 562 } 563 564 #ifdef KASAN 565 static int 566 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 567 { 568 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 569 return (0); 570 } 571 572 static void 573 vnode_dtor(void *mem, int size, void *arg __unused) 574 { 575 size_t end1, end2, off1, off2; 576 577 _Static_assert(offsetof(struct vnode, v_vnodelist) < 578 offsetof(struct vnode, v_dbatchcpu), 579 "KASAN marks require updating"); 580 581 off1 = offsetof(struct vnode, v_vnodelist); 582 off2 = offsetof(struct vnode, v_dbatchcpu); 583 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 584 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 585 586 /* 587 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 588 * after the vnode has been freed. Try to get some KASAN coverage by 589 * marking everything except those two fields as invalid. Because 590 * KASAN's tracking is not byte-granular, any preceding fields sharing 591 * the same 8-byte aligned word must also be marked valid. 592 */ 593 594 /* Handle the area from the start until v_vnodelist... */ 595 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 596 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 597 598 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 599 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 600 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 601 if (off2 > off1) 602 kasan_mark((void *)((char *)mem + off1), off2 - off1, 603 off2 - off1, KASAN_UMA_FREED); 604 605 /* ... and finally the area from v_dbatchcpu to the end. */ 606 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 607 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 608 KASAN_UMA_FREED); 609 } 610 #endif /* KASAN */ 611 612 /* 613 * Initialize a vnode as it first enters the zone. 614 */ 615 static int 616 vnode_init(void *mem, int size, int flags) 617 { 618 struct vnode *vp; 619 620 vp = mem; 621 bzero(vp, size); 622 /* 623 * Setup locks. 624 */ 625 vp->v_vnlock = &vp->v_lock; 626 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 627 /* 628 * By default, don't allow shared locks unless filesystems opt-in. 629 */ 630 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 631 LK_NOSHARE | LK_IS_VNODE); 632 /* 633 * Initialize bufobj. 634 */ 635 bufobj_init(&vp->v_bufobj, vp); 636 /* 637 * Initialize namecache. 638 */ 639 cache_vnode_init(vp); 640 /* 641 * Initialize rangelocks. 642 */ 643 rangelock_init(&vp->v_rl); 644 645 vp->v_dbatchcpu = NOCPU; 646 647 vp->v_state = VSTATE_DEAD; 648 649 /* 650 * Check vhold_recycle_free for an explanation. 651 */ 652 vp->v_holdcnt = VHOLD_NO_SMR; 653 vp->v_type = VNON; 654 mtx_lock(&vnode_list_mtx); 655 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 656 mtx_unlock(&vnode_list_mtx); 657 return (0); 658 } 659 660 /* 661 * Free a vnode when it is cleared from the zone. 
662 */ 663 static void 664 vnode_fini(void *mem, int size) 665 { 666 struct vnode *vp; 667 struct bufobj *bo; 668 669 vp = mem; 670 vdbatch_dequeue(vp); 671 mtx_lock(&vnode_list_mtx); 672 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 673 mtx_unlock(&vnode_list_mtx); 674 rangelock_destroy(&vp->v_rl); 675 lockdestroy(vp->v_vnlock); 676 mtx_destroy(&vp->v_interlock); 677 bo = &vp->v_bufobj; 678 rw_destroy(BO_LOCKPTR(bo)); 679 680 kasan_mark(mem, size, size, 0); 681 } 682 683 /* 684 * Provide the size of NFS nclnode and NFS fh for calculation of the 685 * vnode memory consumption. The size is specified directly to 686 * eliminate dependency on NFS-private header. 687 * 688 * Other filesystems may use bigger or smaller (like UFS and ZFS) 689 * private inode data, but the NFS-based estimation is ample enough. 690 * Still, we care about differences in the size between 64- and 32-bit 691 * platforms. 692 * 693 * Namecache structure size is heuristically 694 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 695 */ 696 #ifdef _LP64 697 #define NFS_NCLNODE_SZ (528 + 64) 698 #define NC_SZ 148 699 #else 700 #define NFS_NCLNODE_SZ (360 + 32) 701 #define NC_SZ 92 702 #endif 703 704 static void 705 vntblinit(void *dummy __unused) 706 { 707 struct vdbatch *vd; 708 uma_ctor ctor; 709 uma_dtor dtor; 710 int cpu, physvnodes, virtvnodes; 711 712 /* 713 * Desiredvnodes is a function of the physical memory size and the 714 * kernel's heap size. Generally speaking, it scales with the 715 * physical memory size. The ratio of desiredvnodes to the physical 716 * memory size is 1:16 until desiredvnodes exceeds 98,304. 717 * Thereafter, the 718 * marginal ratio of desiredvnodes to the physical memory size is 719 * 1:64. However, desiredvnodes is limited by the kernel's heap 720 * size. The memory required by desiredvnodes vnodes and vm objects 721 * must not exceed 1/10th of the kernel's heap size. 722 */ 723 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 724 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 725 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 726 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 727 desiredvnodes = min(physvnodes, virtvnodes); 728 if (desiredvnodes > MAXVNODES_MAX) { 729 if (bootverbose) 730 printf("Reducing kern.maxvnodes %lu -> %lu\n", 731 desiredvnodes, MAXVNODES_MAX); 732 desiredvnodes = MAXVNODES_MAX; 733 } 734 wantfreevnodes = desiredvnodes / 4; 735 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 736 TAILQ_INIT(&vnode_list); 737 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 738 /* 739 * The lock is taken to appease WITNESS. 740 */ 741 mtx_lock(&vnode_list_mtx); 742 vnlru_recalc(); 743 mtx_unlock(&vnode_list_mtx); 744 vnode_list_free_marker = vn_alloc_marker(NULL); 745 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 746 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 747 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 748 749 #ifdef KASAN 750 ctor = vnode_ctor; 751 dtor = vnode_dtor; 752 #else 753 ctor = NULL; 754 dtor = NULL; 755 #endif 756 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 757 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 758 uma_zone_set_smr(vnode_zone, vfs_smr); 759 760 /* 761 * Preallocate enough nodes to support one-per buf so that 762 * we can not fail an insert. reassignbuf() callers can not 763 * tolerate the insertion failure. 
764 */ 765 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 766 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 767 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 768 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 769 uma_prealloc(buf_trie_zone, nbuf); 770 771 vnodes_created = counter_u64_alloc(M_WAITOK); 772 recycles_count = counter_u64_alloc(M_WAITOK); 773 recycles_free_count = counter_u64_alloc(M_WAITOK); 774 vnode_skipped_requeues = counter_u64_alloc(M_WAITOK); 775 776 /* 777 * Initialize the filesystem syncer. 778 */ 779 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 780 &syncer_mask); 781 syncer_maxdelay = syncer_mask + 1; 782 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 783 cv_init(&sync_wakeup, "syncer"); 784 785 CPU_FOREACH(cpu) { 786 vd = DPCPU_ID_PTR((cpu), vd); 787 bzero(vd, sizeof(*vd)); 788 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 789 } 790 } 791 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 792 793 /* 794 * Mark a mount point as busy. Used to synchronize access and to delay 795 * unmounting. Eventually, mountlist_mtx is not released on failure. 796 * 797 * vfs_busy() is a custom lock, it can block the caller. 798 * vfs_busy() only sleeps if the unmount is active on the mount point. 799 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 800 * vnode belonging to mp. 801 * 802 * Lookup uses vfs_busy() to traverse mount points. 803 * root fs var fs 804 * / vnode lock A / vnode lock (/var) D 805 * /var vnode lock B /log vnode lock(/var/log) E 806 * vfs_busy lock C vfs_busy lock F 807 * 808 * Within each file system, the lock order is C->A->B and F->D->E. 809 * 810 * When traversing across mounts, the system follows that lock order: 811 * 812 * C->A->B 813 * | 814 * +->F->D->E 815 * 816 * The lookup() process for namei("/var") illustrates the process: 817 * 1. VOP_LOOKUP() obtains B while A is held 818 * 2. vfs_busy() obtains a shared lock on F while A and B are held 819 * 3. vput() releases lock on B 820 * 4. vput() releases lock on A 821 * 5. VFS_ROOT() obtains lock on D while shared lock on F is held 822 * 6. vfs_unbusy() releases shared lock on F 823 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 824 * Attempt to lock A (instead of vp_crossmp) while D is held would 825 * violate the global order, causing deadlocks. 826 * 827 * dounmount() locks B while F is drained. Note that for stacked 828 * filesystems, D and B in the example above may be the same lock, 829 * which introdues potential lock order reversal deadlock between 830 * dounmount() and step 5 above. These filesystems may avoid the LOR 831 * by setting VV_CROSSLOCK on the covered vnode so that lock B will 832 * remain held until after step 5. 833 */ 834 int 835 vfs_busy(struct mount *mp, int flags) 836 { 837 struct mount_pcpu *mpcpu; 838 839 MPASS((flags & ~MBF_MASK) == 0); 840 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 841 842 if (vfs_op_thread_enter(mp, mpcpu)) { 843 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 844 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 845 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 846 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 847 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 848 vfs_op_thread_exit(mp, mpcpu); 849 if (flags & MBF_MNTLSTLOCK) 850 mtx_unlock(&mountlist_mtx); 851 return (0); 852 } 853 854 MNT_ILOCK(mp); 855 vfs_assert_mount_counters(mp); 856 MNT_REF(mp); 857 /* 858 * If mount point is currently being unmounted, sleep until the 859 * mount point fate is decided. 
If thread doing the unmounting fails, 860 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 861 * that this mount point has survived the unmount attempt and vfs_busy 862 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 863 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 864 * about to be really destroyed. vfs_busy needs to release its 865 * reference on the mount point in this case and return with ENOENT, 866 * telling the caller the mount it tried to busy is no longer valid. 867 */ 868 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 869 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), 870 ("%s: non-empty upper mount list with pending unmount", 871 __func__)); 872 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 873 MNT_REL(mp); 874 MNT_IUNLOCK(mp); 875 CTR1(KTR_VFS, "%s: failed busying before sleeping", 876 __func__); 877 return (ENOENT); 878 } 879 if (flags & MBF_MNTLSTLOCK) 880 mtx_unlock(&mountlist_mtx); 881 mp->mnt_kern_flag |= MNTK_MWAIT; 882 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 883 if (flags & MBF_MNTLSTLOCK) 884 mtx_lock(&mountlist_mtx); 885 MNT_ILOCK(mp); 886 } 887 if (flags & MBF_MNTLSTLOCK) 888 mtx_unlock(&mountlist_mtx); 889 mp->mnt_lockref++; 890 MNT_IUNLOCK(mp); 891 return (0); 892 } 893 894 /* 895 * Free a busy filesystem. 896 */ 897 void 898 vfs_unbusy(struct mount *mp) 899 { 900 struct mount_pcpu *mpcpu; 901 int c; 902 903 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 904 905 if (vfs_op_thread_enter(mp, mpcpu)) { 906 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 907 vfs_mp_count_sub_pcpu(mpcpu, lockref, 1); 908 vfs_mp_count_sub_pcpu(mpcpu, ref, 1); 909 vfs_op_thread_exit(mp, mpcpu); 910 return; 911 } 912 913 MNT_ILOCK(mp); 914 vfs_assert_mount_counters(mp); 915 MNT_REL(mp); 916 c = --mp->mnt_lockref; 917 if (mp->mnt_vfs_ops == 0) { 918 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 919 MNT_IUNLOCK(mp); 920 return; 921 } 922 if (c < 0) 923 vfs_dump_mount_counters(mp); 924 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 925 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 926 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 927 mp->mnt_kern_flag &= ~MNTK_DRAINING; 928 wakeup(&mp->mnt_lockref); 929 } 930 MNT_IUNLOCK(mp); 931 } 932 933 /* 934 * Lookup a mount point by filesystem identifier. 935 */ 936 struct mount * 937 vfs_getvfs(fsid_t *fsid) 938 { 939 struct mount *mp; 940 941 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 942 mtx_lock(&mountlist_mtx); 943 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 944 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 945 vfs_ref(mp); 946 mtx_unlock(&mountlist_mtx); 947 return (mp); 948 } 949 } 950 mtx_unlock(&mountlist_mtx); 951 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 952 return ((struct mount *) 0); 953 } 954 955 /* 956 * Lookup a mount point by filesystem identifier, busying it before 957 * returning. 958 * 959 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 960 * cache for popular filesystem identifiers. The cache is lockess, using 961 * the fact that struct mount's are never freed. In worst case we may 962 * get pointer to unmounted or even different filesystem, so we have to 963 * check what we got, and go slow way if so. 
964 */ 965 struct mount * 966 vfs_busyfs(fsid_t *fsid) 967 { 968 #define FSID_CACHE_SIZE 256 969 typedef struct mount * volatile vmp_t; 970 static vmp_t cache[FSID_CACHE_SIZE]; 971 struct mount *mp; 972 int error; 973 uint32_t hash; 974 975 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 976 hash = fsid->val[0] ^ fsid->val[1]; 977 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 978 mp = cache[hash]; 979 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 980 goto slow; 981 if (vfs_busy(mp, 0) != 0) { 982 cache[hash] = NULL; 983 goto slow; 984 } 985 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 986 return (mp); 987 else 988 vfs_unbusy(mp); 989 990 slow: 991 mtx_lock(&mountlist_mtx); 992 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 993 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 994 error = vfs_busy(mp, MBF_MNTLSTLOCK); 995 if (error) { 996 cache[hash] = NULL; 997 mtx_unlock(&mountlist_mtx); 998 return (NULL); 999 } 1000 cache[hash] = mp; 1001 return (mp); 1002 } 1003 } 1004 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 1005 mtx_unlock(&mountlist_mtx); 1006 return ((struct mount *) 0); 1007 } 1008 1009 /* 1010 * Check if a user can access privileged mount options. 1011 */ 1012 int 1013 vfs_suser(struct mount *mp, struct thread *td) 1014 { 1015 int error; 1016 1017 if (jailed(td->td_ucred)) { 1018 /* 1019 * If the jail of the calling thread lacks permission for 1020 * this type of file system, deny immediately. 1021 */ 1022 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 1023 return (EPERM); 1024 1025 /* 1026 * If the file system was mounted outside the jail of the 1027 * calling thread, deny immediately. 1028 */ 1029 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 1030 return (EPERM); 1031 } 1032 1033 /* 1034 * If file system supports delegated administration, we don't check 1035 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 1036 * by the file system itself. 1037 * If this is not the user that did original mount, we check for 1038 * the PRIV_VFS_MOUNT_OWNER privilege. 1039 */ 1040 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 1041 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 1042 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 1043 return (error); 1044 } 1045 return (0); 1046 } 1047 1048 /* 1049 * Get a new unique fsid. Try to make its val[0] unique, since this value 1050 * will be used to create fake device numbers for stat(). Also try (but 1051 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1052 * support 16-bit device numbers. We end up with unique val[0]'s for the 1053 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1054 * 1055 * Keep in mind that several mounts may be running in parallel. Starting 1056 * the search one past where the previous search terminated is both a 1057 * micro-optimization and a defense against returning the same fsid to 1058 * different mounts. 
1059 */ 1060 void 1061 vfs_getnewfsid(struct mount *mp) 1062 { 1063 static uint16_t mntid_base; 1064 struct mount *nmp; 1065 fsid_t tfsid; 1066 int mtype; 1067 1068 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1069 mtx_lock(&mntid_mtx); 1070 mtype = mp->mnt_vfc->vfc_typenum; 1071 tfsid.val[1] = mtype; 1072 mtype = (mtype & 0xFF) << 24; 1073 for (;;) { 1074 tfsid.val[0] = makedev(255, 1075 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1076 mntid_base++; 1077 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1078 break; 1079 vfs_rel(nmp); 1080 } 1081 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1082 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1083 mtx_unlock(&mntid_mtx); 1084 } 1085 1086 /* 1087 * Knob to control the precision of file timestamps: 1088 * 1089 * 0 = seconds only; nanoseconds zeroed. 1090 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1091 * 2 = seconds and nanoseconds, truncated to microseconds. 1092 * >=3 = seconds and nanoseconds, maximum precision. 1093 */ 1094 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1095 1096 static int timestamp_precision = TSP_USEC; 1097 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1098 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1099 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1100 "3+: sec + ns (max. precision))"); 1101 1102 /* 1103 * Get a current timestamp. 1104 */ 1105 void 1106 vfs_timestamp(struct timespec *tsp) 1107 { 1108 struct timeval tv; 1109 1110 switch (timestamp_precision) { 1111 case TSP_SEC: 1112 tsp->tv_sec = time_second; 1113 tsp->tv_nsec = 0; 1114 break; 1115 case TSP_HZ: 1116 getnanotime(tsp); 1117 break; 1118 case TSP_USEC: 1119 microtime(&tv); 1120 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1121 break; 1122 case TSP_NSEC: 1123 default: 1124 nanotime(tsp); 1125 break; 1126 } 1127 } 1128 1129 /* 1130 * Set vnode attributes to VNOVAL 1131 */ 1132 void 1133 vattr_null(struct vattr *vap) 1134 { 1135 1136 vap->va_type = VNON; 1137 vap->va_size = VNOVAL; 1138 vap->va_bytes = VNOVAL; 1139 vap->va_mode = VNOVAL; 1140 vap->va_nlink = VNOVAL; 1141 vap->va_uid = VNOVAL; 1142 vap->va_gid = VNOVAL; 1143 vap->va_fsid = VNOVAL; 1144 vap->va_fileid = VNOVAL; 1145 vap->va_blocksize = VNOVAL; 1146 vap->va_rdev = VNOVAL; 1147 vap->va_atime.tv_sec = VNOVAL; 1148 vap->va_atime.tv_nsec = VNOVAL; 1149 vap->va_mtime.tv_sec = VNOVAL; 1150 vap->va_mtime.tv_nsec = VNOVAL; 1151 vap->va_ctime.tv_sec = VNOVAL; 1152 vap->va_ctime.tv_nsec = VNOVAL; 1153 vap->va_birthtime.tv_sec = VNOVAL; 1154 vap->va_birthtime.tv_nsec = VNOVAL; 1155 vap->va_flags = VNOVAL; 1156 vap->va_gen = VNOVAL; 1157 vap->va_vaflags = 0; 1158 } 1159 1160 /* 1161 * Try to reduce the total number of vnodes. 1162 * 1163 * This routine (and its user) are buggy in at least the following ways: 1164 * - all parameters were picked years ago when RAM sizes were significantly 1165 * smaller 1166 * - it can pick vnodes based on pages used by the vm object, but filesystems 1167 * like ZFS don't use it making the pick broken 1168 * - since ZFS has its own aging policy it gets partially combated by this one 1169 * - a dedicated method should be provided for filesystems to let them decide 1170 * whether the vnode should be recycled 1171 * 1172 * This routine is called when we have too many vnodes. It attempts 1173 * to free <count> vnodes and will potentially free vnodes that still 1174 * have VM backing store (VM backing store is typically the cause 1175 * of a vnode blowout so we want to do this). Therefore, this operation 1176 * is not considered cheap. 
1177 * 1178 * A number of conditions may prevent a vnode from being reclaimed. 1179 * the buffer cache may have references on the vnode, a directory 1180 * vnode may still have references due to the namei cache representing 1181 * underlying files, or the vnode may be in active use. It is not 1182 * desirable to reuse such vnodes. These conditions may cause the 1183 * number of vnodes to reach some minimum value regardless of what 1184 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 1185 * 1186 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1187 * entries if this argument is strue 1188 * @param trigger Only reclaim vnodes with fewer than this many resident 1189 * pages. 1190 * @param target How many vnodes to reclaim. 1191 * @return The number of vnodes that were reclaimed. 1192 */ 1193 static int 1194 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1195 { 1196 struct vnode *vp, *mvp; 1197 struct mount *mp; 1198 struct vm_object *object; 1199 u_long done; 1200 bool retried; 1201 1202 mtx_assert(&vnode_list_mtx, MA_OWNED); 1203 1204 retried = false; 1205 done = 0; 1206 1207 mvp = vnode_list_reclaim_marker; 1208 restart: 1209 vp = mvp; 1210 while (done < target) { 1211 vp = TAILQ_NEXT(vp, v_vnodelist); 1212 if (__predict_false(vp == NULL)) 1213 break; 1214 1215 if (__predict_false(vp->v_type == VMARKER)) 1216 continue; 1217 1218 /* 1219 * If it's been deconstructed already, it's still 1220 * referenced, or it exceeds the trigger, skip it. 1221 * Also skip free vnodes. We are trying to make space 1222 * to expand the free list, not reduce it. 1223 */ 1224 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1225 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1226 goto next_iter; 1227 1228 if (vp->v_type == VBAD || vp->v_type == VNON) 1229 goto next_iter; 1230 1231 object = atomic_load_ptr(&vp->v_object); 1232 if (object == NULL || object->resident_page_count > trigger) { 1233 goto next_iter; 1234 } 1235 1236 /* 1237 * Handle races against vnode allocation. Filesystems lock the 1238 * vnode some time after it gets returned from getnewvnode, 1239 * despite type and hold count being manipulated earlier. 1240 * Resorting to checking v_mount restores guarantees present 1241 * before the global list was reworked to contain all vnodes. 
1242 */ 1243 if (!VI_TRYLOCK(vp)) 1244 goto next_iter; 1245 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1246 VI_UNLOCK(vp); 1247 goto next_iter; 1248 } 1249 if (vp->v_mount == NULL) { 1250 VI_UNLOCK(vp); 1251 goto next_iter; 1252 } 1253 vholdl(vp); 1254 VI_UNLOCK(vp); 1255 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1256 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1257 mtx_unlock(&vnode_list_mtx); 1258 1259 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1260 vdrop_recycle(vp); 1261 goto next_iter_unlocked; 1262 } 1263 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1264 vdrop_recycle(vp); 1265 vn_finished_write(mp); 1266 goto next_iter_unlocked; 1267 } 1268 1269 VI_LOCK(vp); 1270 if (vp->v_usecount > 0 || 1271 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1272 (vp->v_object != NULL && vp->v_object->handle == vp && 1273 vp->v_object->resident_page_count > trigger)) { 1274 VOP_UNLOCK(vp); 1275 vdropl_recycle(vp); 1276 vn_finished_write(mp); 1277 goto next_iter_unlocked; 1278 } 1279 counter_u64_add(recycles_count, 1); 1280 vgonel(vp); 1281 VOP_UNLOCK(vp); 1282 vdropl_recycle(vp); 1283 vn_finished_write(mp); 1284 done++; 1285 next_iter_unlocked: 1286 maybe_yield(); 1287 mtx_lock(&vnode_list_mtx); 1288 goto restart; 1289 next_iter: 1290 MPASS(vp->v_type != VMARKER); 1291 if (!should_yield()) 1292 continue; 1293 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1294 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1295 mtx_unlock(&vnode_list_mtx); 1296 kern_yield(PRI_USER); 1297 mtx_lock(&vnode_list_mtx); 1298 goto restart; 1299 } 1300 if (done == 0 && !retried) { 1301 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1302 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1303 retried = true; 1304 goto restart; 1305 } 1306 return (done); 1307 } 1308 1309 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1310 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1311 0, 1312 "limit on vnode free requests per call to the vnlru_free routine"); 1313 1314 /* 1315 * Attempt to reduce the free list by the requested amount. 1316 */ 1317 static int 1318 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1319 { 1320 struct vnode *vp; 1321 struct mount *mp; 1322 int ocount; 1323 bool retried; 1324 1325 mtx_assert(&vnode_list_mtx, MA_OWNED); 1326 if (count > max_vnlru_free) 1327 count = max_vnlru_free; 1328 if (count == 0) { 1329 mtx_unlock(&vnode_list_mtx); 1330 return (0); 1331 } 1332 ocount = count; 1333 retried = false; 1334 vp = mvp; 1335 for (;;) { 1336 vp = TAILQ_NEXT(vp, v_vnodelist); 1337 if (__predict_false(vp == NULL)) { 1338 /* 1339 * The free vnode marker can be past eligible vnodes: 1340 * 1. if vdbatch_process trylock failed 1341 * 2. if vtryrecycle failed 1342 * 1343 * If so, start the scan from scratch. 1344 */ 1345 if (!retried && vnlru_read_freevnodes() > 0) { 1346 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1347 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1348 vp = mvp; 1349 retried = true; 1350 continue; 1351 } 1352 1353 /* 1354 * Give up 1355 */ 1356 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1357 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1358 mtx_unlock(&vnode_list_mtx); 1359 break; 1360 } 1361 if (__predict_false(vp->v_type == VMARKER)) 1362 continue; 1363 if (vp->v_holdcnt > 0) 1364 continue; 1365 /* 1366 * Don't recycle if our vnode is from different type 1367 * of mount point. 
Note that mp is type-safe, the 1368 * check does not reach unmapped address even if 1369 * vnode is reclaimed. 1370 */ 1371 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1372 mp->mnt_op != mnt_op) { 1373 continue; 1374 } 1375 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1376 continue; 1377 } 1378 if (!vhold_recycle_free(vp)) 1379 continue; 1380 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1381 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1382 mtx_unlock(&vnode_list_mtx); 1383 /* 1384 * FIXME: ignores the return value, meaning it may be nothing 1385 * got recycled but it claims otherwise to the caller. 1386 * 1387 * Originally the value started being ignored in 2005 with 1388 * 114a1006a8204aa156e1f9ad6476cdff89cada7f . 1389 * 1390 * Respecting the value can run into significant stalls if most 1391 * vnodes belong to one file system and it has writes 1392 * suspended. In presence of many threads and millions of 1393 * vnodes they keep contending on the vnode_list_mtx lock only 1394 * to find vnodes they can't recycle. 1395 * 1396 * The solution would be to pre-check if the vnode is likely to 1397 * be recycle-able, but it needs to happen with the 1398 * vnode_list_mtx lock held. This runs into a problem where 1399 * VOP_GETWRITEMOUNT (currently needed to find out about if 1400 * writes are frozen) can take locks which LOR against it. 1401 * 1402 * Check nullfs for one example (null_getwritemount). 1403 */ 1404 vtryrecycle(vp); 1405 count--; 1406 if (count == 0) { 1407 break; 1408 } 1409 mtx_lock(&vnode_list_mtx); 1410 vp = mvp; 1411 } 1412 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1413 return (ocount - count); 1414 } 1415 1416 /* 1417 * XXX: returns without vnode_list_mtx locked! 1418 */ 1419 static int 1420 vnlru_free_locked(int count) 1421 { 1422 int ret; 1423 1424 mtx_assert(&vnode_list_mtx, MA_OWNED); 1425 ret = vnlru_free_impl(count, NULL, vnode_list_free_marker); 1426 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1427 return (ret); 1428 } 1429 1430 void 1431 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1432 { 1433 1434 MPASS(mnt_op != NULL); 1435 MPASS(mvp != NULL); 1436 VNPASS(mvp->v_type == VMARKER, mvp); 1437 mtx_lock(&vnode_list_mtx); 1438 vnlru_free_impl(count, mnt_op, mvp); 1439 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1440 } 1441 1442 struct vnode * 1443 vnlru_alloc_marker(void) 1444 { 1445 struct vnode *mvp; 1446 1447 mvp = vn_alloc_marker(NULL); 1448 mtx_lock(&vnode_list_mtx); 1449 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1450 mtx_unlock(&vnode_list_mtx); 1451 return (mvp); 1452 } 1453 1454 void 1455 vnlru_free_marker(struct vnode *mvp) 1456 { 1457 mtx_lock(&vnode_list_mtx); 1458 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1459 mtx_unlock(&vnode_list_mtx); 1460 vn_free_marker(mvp); 1461 } 1462 1463 static void 1464 vnlru_recalc(void) 1465 { 1466 1467 mtx_assert(&vnode_list_mtx, MA_OWNED); 1468 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1469 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1470 vlowat = vhiwat / 2; 1471 } 1472 1473 /* 1474 * Attempt to recycle vnodes in a context that is always safe to block. 1475 * Calling vlrurecycle() from the bowels of filesystem code has some 1476 * interesting deadlock problems. 
1477 */ 1478 static struct proc *vnlruproc; 1479 static int vnlruproc_sig; 1480 static u_long vnlruproc_kicks; 1481 1482 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0, 1483 "Number of times vnlru got woken up due to vnode shortage"); 1484 1485 /* 1486 * The main freevnodes counter is only updated when a counter local to CPU 1487 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP. CPUs are conditionally 1488 * walked to compute a more accurate total. 1489 * 1490 * Note: the actual value at any given moment can still exceed slop, but it 1491 * should not be by significant margin in practice. 1492 */ 1493 #define VNLRU_FREEVNODES_SLOP 126 1494 1495 static void __noinline 1496 vfs_freevnodes_rollup(int8_t *lfreevnodes) 1497 { 1498 1499 atomic_add_long(&freevnodes, *lfreevnodes); 1500 *lfreevnodes = 0; 1501 critical_exit(); 1502 } 1503 1504 static __inline void 1505 vfs_freevnodes_inc(void) 1506 { 1507 int8_t *lfreevnodes; 1508 1509 critical_enter(); 1510 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1511 (*lfreevnodes)++; 1512 if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP)) 1513 vfs_freevnodes_rollup(lfreevnodes); 1514 else 1515 critical_exit(); 1516 } 1517 1518 static __inline void 1519 vfs_freevnodes_dec(void) 1520 { 1521 int8_t *lfreevnodes; 1522 1523 critical_enter(); 1524 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1525 (*lfreevnodes)--; 1526 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP)) 1527 vfs_freevnodes_rollup(lfreevnodes); 1528 else 1529 critical_exit(); 1530 } 1531 1532 static u_long 1533 vnlru_read_freevnodes(void) 1534 { 1535 long slop, rfreevnodes, rfreevnodes_old; 1536 int cpu; 1537 1538 rfreevnodes = atomic_load_long(&freevnodes); 1539 rfreevnodes_old = atomic_load_long(&freevnodes_old); 1540 1541 if (rfreevnodes > rfreevnodes_old) 1542 slop = rfreevnodes - rfreevnodes_old; 1543 else 1544 slop = rfreevnodes_old - rfreevnodes; 1545 if (slop < VNLRU_FREEVNODES_SLOP) 1546 return (rfreevnodes >= 0 ? rfreevnodes : 0); 1547 CPU_FOREACH(cpu) { 1548 rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes; 1549 } 1550 atomic_store_long(&freevnodes_old, rfreevnodes); 1551 return (freevnodes_old >= 0 ? 
freevnodes_old : 0); 1552 } 1553 1554 static bool 1555 vnlru_under(u_long rnumvnodes, u_long limit) 1556 { 1557 u_long rfreevnodes, space; 1558 1559 if (__predict_false(rnumvnodes > desiredvnodes)) 1560 return (true); 1561 1562 space = desiredvnodes - rnumvnodes; 1563 if (space < limit) { 1564 rfreevnodes = vnlru_read_freevnodes(); 1565 if (rfreevnodes > wantfreevnodes) 1566 space += rfreevnodes - wantfreevnodes; 1567 } 1568 return (space < limit); 1569 } 1570 1571 static void 1572 vnlru_kick_locked(void) 1573 { 1574 1575 mtx_assert(&vnode_list_mtx, MA_OWNED); 1576 if (vnlruproc_sig == 0) { 1577 vnlruproc_sig = 1; 1578 vnlruproc_kicks++; 1579 wakeup(vnlruproc); 1580 } 1581 } 1582 1583 static void 1584 vnlru_kick_cond(void) 1585 { 1586 1587 if (vnlruproc_sig) 1588 return; 1589 mtx_lock(&vnode_list_mtx); 1590 vnlru_kick_locked(); 1591 mtx_unlock(&vnode_list_mtx); 1592 } 1593 1594 static void 1595 vnlru_proc(void) 1596 { 1597 u_long rnumvnodes, rfreevnodes, target; 1598 unsigned long onumvnodes; 1599 int done, force, trigger, usevnodes; 1600 bool reclaim_nc_src, want_reread; 1601 1602 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1603 SHUTDOWN_PRI_FIRST); 1604 1605 force = 0; 1606 want_reread = false; 1607 for (;;) { 1608 kproc_suspend_check(vnlruproc); 1609 mtx_lock(&vnode_list_mtx); 1610 rnumvnodes = atomic_load_long(&numvnodes); 1611 1612 if (want_reread) { 1613 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1614 want_reread = false; 1615 } 1616 1617 /* 1618 * If numvnodes is too large (due to desiredvnodes being 1619 * adjusted using its sysctl, or emergency growth), first 1620 * try to reduce it by discarding from the free list. 1621 */ 1622 if (rnumvnodes > desiredvnodes) { 1623 vnlru_free_locked(rnumvnodes - desiredvnodes); 1624 mtx_lock(&vnode_list_mtx); 1625 rnumvnodes = atomic_load_long(&numvnodes); 1626 } 1627 /* 1628 * Sleep if the vnode cache is in a good state. This is 1629 * when it is not over-full and has space for about a 4% 1630 * or 9% expansion (by growing its size or inexcessively 1631 * reducing its free list). Otherwise, try to reclaim 1632 * space for a 10% expansion. 1633 */ 1634 if (vstir && force == 0) { 1635 force = 1; 1636 vstir = false; 1637 } 1638 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1639 vnlruproc_sig = 0; 1640 wakeup(&vnlruproc_sig); 1641 msleep(vnlruproc, &vnode_list_mtx, 1642 PVFS|PDROP, "vlruwt", hz); 1643 continue; 1644 } 1645 rfreevnodes = vnlru_read_freevnodes(); 1646 1647 onumvnodes = rnumvnodes; 1648 /* 1649 * Calculate parameters for recycling. These are the same 1650 * throughout the loop to give some semblance of fairness. 1651 * The trigger point is to avoid recycling vnodes with lots 1652 * of resident pages. We aren't trying to free memory; we 1653 * are trying to recycle or at least free vnodes. 1654 */ 1655 if (rnumvnodes <= desiredvnodes) 1656 usevnodes = rnumvnodes - rfreevnodes; 1657 else 1658 usevnodes = rnumvnodes; 1659 if (usevnodes <= 0) 1660 usevnodes = 1; 1661 /* 1662 * The trigger value is chosen to give a conservatively 1663 * large value to ensure that it alone doesn't prevent 1664 * making progress. The value can easily be so large that 1665 * it is effectively infinite in some congested and 1666 * misconfigured cases, and this is necessary. Normally 1667 * it is about 8 to 100 (pages), which is quite large. 
1668 */ 1669 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1670 if (force < 2) 1671 trigger = vsmalltrigger; 1672 reclaim_nc_src = force >= 3; 1673 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1674 target = target / 10 + 1; 1675 done = vlrureclaim(reclaim_nc_src, trigger, target); 1676 mtx_unlock(&vnode_list_mtx); 1677 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1678 uma_reclaim(UMA_RECLAIM_DRAIN); 1679 if (done == 0) { 1680 if (force == 0 || force == 1) { 1681 force = 2; 1682 continue; 1683 } 1684 if (force == 2) { 1685 force = 3; 1686 continue; 1687 } 1688 want_reread = true; 1689 force = 0; 1690 vnlru_nowhere++; 1691 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1692 } else { 1693 want_reread = true; 1694 kern_yield(PRI_USER); 1695 } 1696 } 1697 } 1698 1699 static struct kproc_desc vnlru_kp = { 1700 "vnlru", 1701 vnlru_proc, 1702 &vnlruproc 1703 }; 1704 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1705 &vnlru_kp); 1706 1707 /* 1708 * Routines having to do with the management of the vnode table. 1709 */ 1710 1711 /* 1712 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1713 * before we actually vgone(). This function must be called with the vnode 1714 * held to prevent the vnode from being returned to the free list midway 1715 * through vgone(). 1716 */ 1717 static int 1718 vtryrecycle(struct vnode *vp) 1719 { 1720 struct mount *vnmp; 1721 1722 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1723 VNPASS(vp->v_holdcnt > 0, vp); 1724 /* 1725 * This vnode may found and locked via some other list, if so we 1726 * can't recycle it yet. 1727 */ 1728 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1729 CTR2(KTR_VFS, 1730 "%s: impossible to recycle, vp %p lock is already held", 1731 __func__, vp); 1732 vdrop_recycle(vp); 1733 return (EWOULDBLOCK); 1734 } 1735 /* 1736 * Don't recycle if its filesystem is being suspended. 1737 */ 1738 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1739 VOP_UNLOCK(vp); 1740 CTR2(KTR_VFS, 1741 "%s: impossible to recycle, cannot start the write for %p", 1742 __func__, vp); 1743 vdrop_recycle(vp); 1744 return (EBUSY); 1745 } 1746 /* 1747 * If we got this far, we need to acquire the interlock and see if 1748 * anyone picked up this vnode from another list. If not, we will 1749 * mark it with DOOMED via vgonel() so that anyone who does find it 1750 * will skip over it. 1751 */ 1752 VI_LOCK(vp); 1753 if (vp->v_usecount) { 1754 VOP_UNLOCK(vp); 1755 vdropl_recycle(vp); 1756 vn_finished_write(vnmp); 1757 CTR2(KTR_VFS, 1758 "%s: impossible to recycle, %p is already referenced", 1759 __func__, vp); 1760 return (EBUSY); 1761 } 1762 if (!VN_IS_DOOMED(vp)) { 1763 counter_u64_add(recycles_free_count, 1); 1764 vgonel(vp); 1765 } 1766 VOP_UNLOCK(vp); 1767 vdropl_recycle(vp); 1768 vn_finished_write(vnmp); 1769 return (0); 1770 } 1771 1772 /* 1773 * Allocate a new vnode. 1774 * 1775 * The operation never returns an error. Returning an error was disabled 1776 * in r145385 (dated 2005) with the following comment: 1777 * 1778 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1779 * 1780 * Given the age of this commit (almost 15 years at the time of writing this 1781 * comment) restoring the ability to fail requires a significant audit of 1782 * all codepaths. 1783 * 1784 * The routine can try to free a vnode or stall for up to 1 second waiting for 1785 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 
1786 */ 1787 static u_long vn_alloc_cyclecount; 1788 static u_long vn_alloc_sleeps; 1789 1790 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1791 "Number of times vnode allocation blocked waiting on vnlru"); 1792 1793 static struct vnode * __noinline 1794 vn_alloc_hard(struct mount *mp) 1795 { 1796 u_long rnumvnodes, rfreevnodes; 1797 1798 mtx_lock(&vnode_list_mtx); 1799 rnumvnodes = atomic_load_long(&numvnodes); 1800 if (rnumvnodes + 1 < desiredvnodes) { 1801 vn_alloc_cyclecount = 0; 1802 mtx_unlock(&vnode_list_mtx); 1803 goto alloc; 1804 } 1805 rfreevnodes = vnlru_read_freevnodes(); 1806 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1807 vn_alloc_cyclecount = 0; 1808 vstir = true; 1809 } 1810 /* 1811 * Grow the vnode cache if it will not be above its target max 1812 * after growing. Otherwise, if the free list is nonempty, try 1813 * to reclaim 1 item from it before growing the cache (possibly 1814 * above its target max if the reclamation failed or is delayed). 1815 * Otherwise, wait for some space. In all cases, schedule 1816 * vnlru_proc() if we are getting short of space. The watermarks 1817 * should be chosen so that we never wait or even reclaim from 1818 * the free list to below its target minimum. 1819 */ 1820 if (vnlru_free_locked(1) > 0) 1821 goto alloc; 1822 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1823 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1824 /* 1825 * Wait for space for a new vnode. 1826 */ 1827 mtx_lock(&vnode_list_mtx); 1828 vnlru_kick_locked(); 1829 vn_alloc_sleeps++; 1830 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1831 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1832 vnlru_read_freevnodes() > 1) 1833 vnlru_free_locked(1); 1834 else 1835 mtx_unlock(&vnode_list_mtx); 1836 } 1837 alloc: 1838 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1839 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1840 if (vnlru_under(rnumvnodes, vlowat)) 1841 vnlru_kick_cond(); 1842 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1843 } 1844 1845 static struct vnode * 1846 vn_alloc(struct mount *mp) 1847 { 1848 u_long rnumvnodes; 1849 1850 if (__predict_false(vn_alloc_cyclecount != 0)) 1851 return (vn_alloc_hard(mp)); 1852 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1853 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 1854 atomic_subtract_long(&numvnodes, 1); 1855 return (vn_alloc_hard(mp)); 1856 } 1857 1858 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1859 } 1860 1861 static void 1862 vn_free(struct vnode *vp) 1863 { 1864 1865 atomic_subtract_long(&numvnodes, 1); 1866 uma_zfree_smr(vnode_zone, vp); 1867 } 1868 1869 /* 1870 * Return the next vnode from the free list. 1871 */ 1872 int 1873 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1874 struct vnode **vpp) 1875 { 1876 struct vnode *vp; 1877 struct thread *td; 1878 struct lock_object *lo; 1879 1880 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1881 1882 KASSERT(vops->registered, 1883 ("%s: not registered vector op %p\n", __func__, vops)); 1884 cache_validate_vop_vector(mp, vops); 1885 1886 td = curthread; 1887 if (td->td_vp_reserved != NULL) { 1888 vp = td->td_vp_reserved; 1889 td->td_vp_reserved = NULL; 1890 } else { 1891 vp = vn_alloc(mp); 1892 } 1893 counter_u64_add(vnodes_created, 1); 1894 1895 vn_set_state(vp, VSTATE_UNINITIALIZED); 1896 1897 /* 1898 * Locks are given the generic name "vnode" when created. 
1899 * Follow the historic practice of using the filesystem 1900 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1901 * 1902 * Locks live in a witness group keyed on their name. Thus, 1903 * when a lock is renamed, it must also move from the witness 1904 * group of its old name to the witness group of its new name. 1905 * 1906 * The change only needs to be made when the vnode moves 1907 * from one filesystem type to another. We ensure that each 1908 * filesystem use a single static name pointer for its tag so 1909 * that we can compare pointers rather than doing a strcmp(). 1910 */ 1911 lo = &vp->v_vnlock->lock_object; 1912 #ifdef WITNESS 1913 if (lo->lo_name != tag) { 1914 #endif 1915 lo->lo_name = tag; 1916 #ifdef WITNESS 1917 WITNESS_DESTROY(lo); 1918 WITNESS_INIT(lo, tag); 1919 } 1920 #endif 1921 /* 1922 * By default, don't allow shared locks unless filesystems opt-in. 1923 */ 1924 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1925 /* 1926 * Finalize various vnode identity bits. 1927 */ 1928 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1929 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1930 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1931 vp->v_type = VNON; 1932 vp->v_op = vops; 1933 vp->v_irflag = 0; 1934 v_init_counters(vp); 1935 vn_seqc_init(vp); 1936 vp->v_bufobj.bo_ops = &buf_ops_bio; 1937 #ifdef DIAGNOSTIC 1938 if (mp == NULL && vops != &dead_vnodeops) 1939 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1940 #endif 1941 #ifdef MAC 1942 mac_vnode_init(vp); 1943 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1944 mac_vnode_associate_singlelabel(mp, vp); 1945 #endif 1946 if (mp != NULL) { 1947 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1948 } 1949 1950 /* 1951 * For the filesystems which do not use vfs_hash_insert(), 1952 * still initialize v_hash to have vfs_hash_index() useful. 1953 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1954 * its own hashing. 1955 */ 1956 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1957 1958 *vpp = vp; 1959 return (0); 1960 } 1961 1962 void 1963 getnewvnode_reserve(void) 1964 { 1965 struct thread *td; 1966 1967 td = curthread; 1968 MPASS(td->td_vp_reserved == NULL); 1969 td->td_vp_reserved = vn_alloc(NULL); 1970 } 1971 1972 void 1973 getnewvnode_drop_reserve(void) 1974 { 1975 struct thread *td; 1976 1977 td = curthread; 1978 if (td->td_vp_reserved != NULL) { 1979 vn_free(td->td_vp_reserved); 1980 td->td_vp_reserved = NULL; 1981 } 1982 } 1983 1984 static void __noinline 1985 freevnode(struct vnode *vp) 1986 { 1987 struct bufobj *bo; 1988 1989 /* 1990 * The vnode has been marked for destruction, so free it. 1991 * 1992 * The vnode will be returned to the zone where it will 1993 * normally remain until it is needed for another vnode. We 1994 * need to cleanup (or verify that the cleanup has already 1995 * been done) any residual data left from its current use 1996 * so as not to contaminate the freshly allocated vnode. 1997 */ 1998 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1999 /* 2000 * Paired with vgone. 
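	 * vgonel() begins the v_seqc write with vn_seqc_write_begin_locked();
	 * vn_seqc_write_end_free() below ends it just before the memory is
	 * handed back to the zone.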
2001 */ 2002 vn_seqc_write_end_free(vp); 2003 2004 bo = &vp->v_bufobj; 2005 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2006 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2007 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2008 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2009 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2010 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2011 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2012 ("clean blk trie not empty")); 2013 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2014 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2015 ("dirty blk trie not empty")); 2016 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2017 ("Dangling rangelock waiters")); 2018 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2019 ("Leaked inactivation")); 2020 VI_UNLOCK(vp); 2021 cache_assert_no_entries(vp); 2022 2023 #ifdef MAC 2024 mac_vnode_destroy(vp); 2025 #endif 2026 if (vp->v_pollinfo != NULL) { 2027 /* 2028 * Use LK_NOWAIT to shut up witness about the lock. We may get 2029 * here while having another vnode locked when trying to 2030 * satisfy a lookup and needing to recycle. 2031 */ 2032 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2033 destroy_vpollinfo(vp->v_pollinfo); 2034 VOP_UNLOCK(vp); 2035 vp->v_pollinfo = NULL; 2036 } 2037 vp->v_mountedhere = NULL; 2038 vp->v_unpcb = NULL; 2039 vp->v_rdev = NULL; 2040 vp->v_fifoinfo = NULL; 2041 vp->v_iflag = 0; 2042 vp->v_vflag = 0; 2043 bo->bo_flag = 0; 2044 vn_free(vp); 2045 } 2046 2047 /* 2048 * Delete from old mount point vnode list, if on one. 2049 */ 2050 static void 2051 delmntque(struct vnode *vp) 2052 { 2053 struct mount *mp; 2054 2055 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2056 2057 mp = vp->v_mount; 2058 MNT_ILOCK(mp); 2059 VI_LOCK(vp); 2060 vp->v_mount = NULL; 2061 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2062 ("bad mount point vnode list size")); 2063 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2064 mp->mnt_nvnodelistsize--; 2065 MNT_REL(mp); 2066 MNT_IUNLOCK(mp); 2067 /* 2068 * The caller expects the interlock to be still held. 2069 */ 2070 ASSERT_VI_LOCKED(vp, __func__); 2071 } 2072 2073 static int 2074 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2075 { 2076 2077 KASSERT(vp->v_mount == NULL, 2078 ("insmntque: vnode already on per mount vnode list")); 2079 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2080 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2081 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2082 } else { 2083 KASSERT(!dtr, 2084 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2085 __func__)); 2086 } 2087 2088 /* 2089 * We acquire the vnode interlock early to ensure that the 2090 * vnode cannot be recycled by another process releasing a 2091 * holdcnt on it before we get it on both the vnode list 2092 * and the active vnode list. The mount mutex protects only 2093 * manipulation of the vnode list and the vnode freelist 2094 * mutex protects only manipulation of the active vnode list. 2095 * Hence the need to hold the vnode interlock throughout. 
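	 *
	 * The resulting lock order, as a sketch of the code below, is:
	 *
	 *	MNT_ILOCK(mp);
	 *	VI_LOCK(vp);
	 *	... put vp on mp's vnode list ...
	 *	VI_UNLOCK(vp);
	 *	MNT_IUNLOCK(mp);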
2096 */ 2097 MNT_ILOCK(mp); 2098 VI_LOCK(vp); 2099 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2100 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2101 mp->mnt_nvnodelistsize == 0)) && 2102 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2103 VI_UNLOCK(vp); 2104 MNT_IUNLOCK(mp); 2105 if (dtr) { 2106 vp->v_data = NULL; 2107 vp->v_op = &dead_vnodeops; 2108 vgone(vp); 2109 vput(vp); 2110 } 2111 return (EBUSY); 2112 } 2113 vp->v_mount = mp; 2114 MNT_REF(mp); 2115 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2116 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2117 ("neg mount point vnode list size")); 2118 mp->mnt_nvnodelistsize++; 2119 VI_UNLOCK(vp); 2120 MNT_IUNLOCK(mp); 2121 return (0); 2122 } 2123 2124 /* 2125 * Insert into list of vnodes for the new mount point, if available. 2126 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2127 * leaves handling of the vnode to the caller. 2128 */ 2129 int 2130 insmntque(struct vnode *vp, struct mount *mp) 2131 { 2132 return (insmntque1_int(vp, mp, true)); 2133 } 2134 2135 int 2136 insmntque1(struct vnode *vp, struct mount *mp) 2137 { 2138 return (insmntque1_int(vp, mp, false)); 2139 } 2140 2141 /* 2142 * Flush out and invalidate all buffers associated with a bufobj 2143 * Called with the underlying object locked. 2144 */ 2145 int 2146 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2147 { 2148 int error; 2149 2150 BO_LOCK(bo); 2151 if (flags & V_SAVE) { 2152 error = bufobj_wwait(bo, slpflag, slptimeo); 2153 if (error) { 2154 BO_UNLOCK(bo); 2155 return (error); 2156 } 2157 if (bo->bo_dirty.bv_cnt > 0) { 2158 BO_UNLOCK(bo); 2159 do { 2160 error = BO_SYNC(bo, MNT_WAIT); 2161 } while (error == ERELOOKUP); 2162 if (error != 0) 2163 return (error); 2164 BO_LOCK(bo); 2165 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2166 BO_UNLOCK(bo); 2167 return (EBUSY); 2168 } 2169 } 2170 } 2171 /* 2172 * If you alter this loop please notice that interlock is dropped and 2173 * reacquired in flushbuflist. Special care is needed to ensure that 2174 * no race conditions occur from this. 2175 */ 2176 do { 2177 error = flushbuflist(&bo->bo_clean, 2178 flags, bo, slpflag, slptimeo); 2179 if (error == 0 && !(flags & V_CLEANONLY)) 2180 error = flushbuflist(&bo->bo_dirty, 2181 flags, bo, slpflag, slptimeo); 2182 if (error != 0 && error != EAGAIN) { 2183 BO_UNLOCK(bo); 2184 return (error); 2185 } 2186 } while (error != 0); 2187 2188 /* 2189 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2190 * have write I/O in-progress but if there is a VM object then the 2191 * VM object can also have read-I/O in-progress. 2192 */ 2193 do { 2194 bufobj_wwait(bo, 0, 0); 2195 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2196 BO_UNLOCK(bo); 2197 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2198 BO_LOCK(bo); 2199 } 2200 } while (bo->bo_numoutput > 0); 2201 BO_UNLOCK(bo); 2202 2203 /* 2204 * Destroy the copy in the VM cache, too. 2205 */ 2206 if (bo->bo_object != NULL && 2207 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2208 VM_OBJECT_WLOCK(bo->bo_object); 2209 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 
2210 OBJPR_CLEANONLY : 0); 2211 VM_OBJECT_WUNLOCK(bo->bo_object); 2212 } 2213 2214 #ifdef INVARIANTS 2215 BO_LOCK(bo); 2216 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2217 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2218 bo->bo_clean.bv_cnt > 0)) 2219 panic("vinvalbuf: flush failed"); 2220 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2221 bo->bo_dirty.bv_cnt > 0) 2222 panic("vinvalbuf: flush dirty failed"); 2223 BO_UNLOCK(bo); 2224 #endif 2225 return (0); 2226 } 2227 2228 /* 2229 * Flush out and invalidate all buffers associated with a vnode. 2230 * Called with the underlying object locked. 2231 */ 2232 int 2233 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2234 { 2235 2236 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2237 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2238 if (vp->v_object != NULL && vp->v_object->handle != vp) 2239 return (0); 2240 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2241 } 2242 2243 /* 2244 * Flush out buffers on the specified list. 2245 * 2246 */ 2247 static int 2248 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2249 int slptimeo) 2250 { 2251 struct buf *bp, *nbp; 2252 int retval, error; 2253 daddr_t lblkno; 2254 b_xflags_t xflags; 2255 2256 ASSERT_BO_WLOCKED(bo); 2257 2258 retval = 0; 2259 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2260 /* 2261 * If we are flushing both V_NORMAL and V_ALT buffers then 2262 * do not skip any buffers. If we are flushing only V_NORMAL 2263 * buffers then skip buffers marked as BX_ALTDATA. If we are 2264 * flushing only V_ALT buffers then skip buffers not marked 2265 * as BX_ALTDATA. 2266 */ 2267 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2268 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2269 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2270 continue; 2271 } 2272 if (nbp != NULL) { 2273 lblkno = nbp->b_lblkno; 2274 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2275 } 2276 retval = EAGAIN; 2277 error = BUF_TIMELOCK(bp, 2278 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2279 "flushbuf", slpflag, slptimeo); 2280 if (error) { 2281 BO_LOCK(bo); 2282 return (error != ENOLCK ? error : EAGAIN); 2283 } 2284 KASSERT(bp->b_bufobj == bo, 2285 ("bp %p wrong b_bufobj %p should be %p", 2286 bp, bp->b_bufobj, bo)); 2287 /* 2288 * XXX Since there are no node locks for NFS, I 2289 * believe there is a slight chance that a delayed 2290 * write will occur while sleeping just above, so 2291 * check for it. 2292 */ 2293 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2294 (flags & V_SAVE)) { 2295 bremfree(bp); 2296 bp->b_flags |= B_ASYNC; 2297 bwrite(bp); 2298 BO_LOCK(bo); 2299 return (EAGAIN); /* XXX: why not loop ? 
*/ 2300 } 2301 bremfree(bp); 2302 bp->b_flags |= (B_INVAL | B_RELBUF); 2303 bp->b_flags &= ~B_ASYNC; 2304 brelse(bp); 2305 BO_LOCK(bo); 2306 if (nbp == NULL) 2307 break; 2308 nbp = gbincore(bo, lblkno); 2309 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2310 != xflags) 2311 break; /* nbp invalid */ 2312 } 2313 return (retval); 2314 } 2315 2316 int 2317 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2318 { 2319 struct buf *bp; 2320 int error; 2321 daddr_t lblkno; 2322 2323 ASSERT_BO_LOCKED(bo); 2324 2325 for (lblkno = startn;;) { 2326 again: 2327 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2328 if (bp == NULL || bp->b_lblkno >= endn || 2329 bp->b_lblkno < startn) 2330 break; 2331 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2332 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2333 if (error != 0) { 2334 BO_RLOCK(bo); 2335 if (error == ENOLCK) 2336 goto again; 2337 return (error); 2338 } 2339 KASSERT(bp->b_bufobj == bo, 2340 ("bp %p wrong b_bufobj %p should be %p", 2341 bp, bp->b_bufobj, bo)); 2342 lblkno = bp->b_lblkno + 1; 2343 if ((bp->b_flags & B_MANAGED) == 0) 2344 bremfree(bp); 2345 bp->b_flags |= B_RELBUF; 2346 /* 2347 * In the VMIO case, use the B_NOREUSE flag to hint that the 2348 * pages backing each buffer in the range are unlikely to be 2349 * reused. Dirty buffers will have the hint applied once 2350 * they've been written. 2351 */ 2352 if ((bp->b_flags & B_VMIO) != 0) 2353 bp->b_flags |= B_NOREUSE; 2354 brelse(bp); 2355 BO_RLOCK(bo); 2356 } 2357 return (0); 2358 } 2359 2360 /* 2361 * Truncate a file's buffer and pages to a specified length. This 2362 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2363 * sync activity. 2364 */ 2365 int 2366 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2367 { 2368 struct buf *bp, *nbp; 2369 struct bufobj *bo; 2370 daddr_t startlbn; 2371 2372 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2373 vp, blksize, (uintmax_t)length); 2374 2375 /* 2376 * Round up to the *next* lbn. 2377 */ 2378 startlbn = howmany(length, blksize); 2379 2380 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2381 2382 bo = &vp->v_bufobj; 2383 restart_unlocked: 2384 BO_LOCK(bo); 2385 2386 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2387 ; 2388 2389 if (length > 0) { 2390 restartsync: 2391 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2392 if (bp->b_lblkno > 0) 2393 continue; 2394 /* 2395 * Since we hold the vnode lock this should only 2396 * fail if we're racing with the buf daemon. 2397 */ 2398 if (BUF_LOCK(bp, 2399 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2400 BO_LOCKPTR(bo)) == ENOLCK) 2401 goto restart_unlocked; 2402 2403 VNASSERT((bp->b_flags & B_DELWRI), vp, 2404 ("buf(%p) on dirty queue without DELWRI", bp)); 2405 2406 bremfree(bp); 2407 bawrite(bp); 2408 BO_LOCK(bo); 2409 goto restartsync; 2410 } 2411 } 2412 2413 bufobj_wwait(bo, 0, 0); 2414 BO_UNLOCK(bo); 2415 vnode_pager_setsize(vp, length); 2416 2417 return (0); 2418 } 2419 2420 /* 2421 * Invalidate the cached pages of a file's buffer within the range of block 2422 * numbers [startlbn, endlbn). 
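 *
 * For example, a filesystem that wants to drop the cached buffers and pages
 * backing logical blocks [10, 20) of a vnode with a 32KB block size might
 * call (a sketch; blksize must match the bufobj's bo_bsize):
 *
 *	v_inval_buf_range(vp, 10, 20, 32768);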
2423 */ 2424 void 2425 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2426 int blksize) 2427 { 2428 struct bufobj *bo; 2429 off_t start, end; 2430 2431 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2432 2433 start = blksize * startlbn; 2434 end = blksize * endlbn; 2435 2436 bo = &vp->v_bufobj; 2437 BO_LOCK(bo); 2438 MPASS(blksize == bo->bo_bsize); 2439 2440 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2441 ; 2442 2443 BO_UNLOCK(bo); 2444 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2445 } 2446 2447 static int 2448 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2449 daddr_t startlbn, daddr_t endlbn) 2450 { 2451 struct buf *bp, *nbp; 2452 bool anyfreed; 2453 2454 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2455 ASSERT_BO_LOCKED(bo); 2456 2457 do { 2458 anyfreed = false; 2459 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2460 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2461 continue; 2462 if (BUF_LOCK(bp, 2463 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2464 BO_LOCKPTR(bo)) == ENOLCK) { 2465 BO_LOCK(bo); 2466 return (EAGAIN); 2467 } 2468 2469 bremfree(bp); 2470 bp->b_flags |= B_INVAL | B_RELBUF; 2471 bp->b_flags &= ~B_ASYNC; 2472 brelse(bp); 2473 anyfreed = true; 2474 2475 BO_LOCK(bo); 2476 if (nbp != NULL && 2477 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2478 nbp->b_vp != vp || 2479 (nbp->b_flags & B_DELWRI) != 0)) 2480 return (EAGAIN); 2481 } 2482 2483 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2484 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2485 continue; 2486 if (BUF_LOCK(bp, 2487 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2488 BO_LOCKPTR(bo)) == ENOLCK) { 2489 BO_LOCK(bo); 2490 return (EAGAIN); 2491 } 2492 bremfree(bp); 2493 bp->b_flags |= B_INVAL | B_RELBUF; 2494 bp->b_flags &= ~B_ASYNC; 2495 brelse(bp); 2496 anyfreed = true; 2497 2498 BO_LOCK(bo); 2499 if (nbp != NULL && 2500 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2501 (nbp->b_vp != vp) || 2502 (nbp->b_flags & B_DELWRI) == 0)) 2503 return (EAGAIN); 2504 } 2505 } while (anyfreed); 2506 return (0); 2507 } 2508 2509 static void 2510 buf_vlist_remove(struct buf *bp) 2511 { 2512 struct bufv *bv; 2513 b_xflags_t flags; 2514 2515 flags = bp->b_xflags; 2516 2517 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2518 ASSERT_BO_WLOCKED(bp->b_bufobj); 2519 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2520 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2521 ("%s: buffer %p has invalid queue state", __func__, bp)); 2522 2523 if ((flags & BX_VNDIRTY) != 0) 2524 bv = &bp->b_bufobj->bo_dirty; 2525 else 2526 bv = &bp->b_bufobj->bo_clean; 2527 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2528 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2529 bv->bv_cnt--; 2530 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2531 } 2532 2533 /* 2534 * Add the buffer to the sorted clean or dirty block list. 2535 * 2536 * NOTE: xflags is passed as a constant, optimizing this inline function! 
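 *
 * Callers in this file pass the selector directly, e.g.:
 *
 *	buf_vlist_add(bp, bo, BX_VNCLEAN);	(bgetvp)
 *	buf_vlist_add(bp, bo, BX_VNDIRTY);	(reassignbuf, delayed write case)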
2537 */ 2538 static void 2539 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2540 { 2541 struct bufv *bv; 2542 struct buf *n; 2543 int error; 2544 2545 ASSERT_BO_WLOCKED(bo); 2546 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2547 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2548 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2549 ("dead bo %p", bo)); 2550 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2551 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2552 bp->b_xflags |= xflags; 2553 if (xflags & BX_VNDIRTY) 2554 bv = &bo->bo_dirty; 2555 else 2556 bv = &bo->bo_clean; 2557 2558 /* 2559 * Keep the list ordered. Optimize empty list insertion. Assume 2560 * we tend to grow at the tail so lookup_le should usually be cheaper 2561 * than _ge. 2562 */ 2563 if (bv->bv_cnt == 0 || 2564 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2565 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2566 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2567 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2568 else 2569 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2570 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2571 if (error) 2572 panic("buf_vlist_add: Preallocated nodes insufficient."); 2573 bv->bv_cnt++; 2574 } 2575 2576 /* 2577 * Look up a buffer using the buffer tries. 2578 */ 2579 struct buf * 2580 gbincore(struct bufobj *bo, daddr_t lblkno) 2581 { 2582 struct buf *bp; 2583 2584 ASSERT_BO_LOCKED(bo); 2585 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2586 if (bp != NULL) 2587 return (bp); 2588 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2589 } 2590 2591 /* 2592 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2593 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2594 * stability of the result. Like other lockless lookups, the found buf may 2595 * already be invalid by the time this function returns. 2596 */ 2597 struct buf * 2598 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2599 { 2600 struct buf *bp; 2601 2602 ASSERT_BO_UNLOCKED(bo); 2603 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2604 if (bp != NULL) 2605 return (bp); 2606 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2607 } 2608 2609 /* 2610 * Associate a buffer with a vnode. 2611 */ 2612 void 2613 bgetvp(struct vnode *vp, struct buf *bp) 2614 { 2615 struct bufobj *bo; 2616 2617 bo = &vp->v_bufobj; 2618 ASSERT_BO_WLOCKED(bo); 2619 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2620 2621 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2622 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2623 ("bgetvp: bp already attached! %p", bp)); 2624 2625 vhold(vp); 2626 bp->b_vp = vp; 2627 bp->b_bufobj = bo; 2628 /* 2629 * Insert onto list for new vnode. 2630 */ 2631 buf_vlist_add(bp, bo, BX_VNCLEAN); 2632 } 2633 2634 /* 2635 * Disassociate a buffer from a vnode. 2636 */ 2637 void 2638 brelvp(struct buf *bp) 2639 { 2640 struct bufobj *bo; 2641 struct vnode *vp; 2642 2643 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2644 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2645 2646 /* 2647 * Delete from old vnode list, if on one. 
2648 */ 2649 vp = bp->b_vp; /* XXX */ 2650 bo = bp->b_bufobj; 2651 BO_LOCK(bo); 2652 buf_vlist_remove(bp); 2653 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2654 bo->bo_flag &= ~BO_ONWORKLST; 2655 mtx_lock(&sync_mtx); 2656 LIST_REMOVE(bo, bo_synclist); 2657 syncer_worklist_len--; 2658 mtx_unlock(&sync_mtx); 2659 } 2660 bp->b_vp = NULL; 2661 bp->b_bufobj = NULL; 2662 BO_UNLOCK(bo); 2663 vdrop(vp); 2664 } 2665 2666 /* 2667 * Add an item to the syncer work queue. 2668 */ 2669 static void 2670 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2671 { 2672 int slot; 2673 2674 ASSERT_BO_WLOCKED(bo); 2675 2676 mtx_lock(&sync_mtx); 2677 if (bo->bo_flag & BO_ONWORKLST) 2678 LIST_REMOVE(bo, bo_synclist); 2679 else { 2680 bo->bo_flag |= BO_ONWORKLST; 2681 syncer_worklist_len++; 2682 } 2683 2684 if (delay > syncer_maxdelay - 2) 2685 delay = syncer_maxdelay - 2; 2686 slot = (syncer_delayno + delay) & syncer_mask; 2687 2688 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2689 mtx_unlock(&sync_mtx); 2690 } 2691 2692 static int 2693 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2694 { 2695 int error, len; 2696 2697 mtx_lock(&sync_mtx); 2698 len = syncer_worklist_len - sync_vnode_count; 2699 mtx_unlock(&sync_mtx); 2700 error = SYSCTL_OUT(req, &len, sizeof(len)); 2701 return (error); 2702 } 2703 2704 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2705 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2706 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2707 2708 static struct proc *updateproc; 2709 static void sched_sync(void); 2710 static struct kproc_desc up_kp = { 2711 "syncer", 2712 sched_sync, 2713 &updateproc 2714 }; 2715 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2716 2717 static int 2718 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2719 { 2720 struct vnode *vp; 2721 struct mount *mp; 2722 2723 *bo = LIST_FIRST(slp); 2724 if (*bo == NULL) 2725 return (0); 2726 vp = bo2vnode(*bo); 2727 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2728 return (1); 2729 /* 2730 * We use vhold in case the vnode does not 2731 * successfully sync. vhold prevents the vnode from 2732 * going away when we unlock the sync_mtx so that 2733 * we can acquire the vnode interlock. 2734 */ 2735 vholdl(vp); 2736 mtx_unlock(&sync_mtx); 2737 VI_UNLOCK(vp); 2738 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2739 vdrop(vp); 2740 mtx_lock(&sync_mtx); 2741 return (*bo == LIST_FIRST(slp)); 2742 } 2743 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2744 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2745 ("suspended mp syncing vp %p", vp)); 2746 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2747 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2748 VOP_UNLOCK(vp); 2749 vn_finished_write(mp); 2750 BO_LOCK(*bo); 2751 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2752 /* 2753 * Put us back on the worklist. The worklist 2754 * routine will remove us from our current 2755 * position and then add us back in at a later 2756 * position. 2757 */ 2758 vn_syncer_add_to_worklist(*bo, syncdelay); 2759 } 2760 BO_UNLOCK(*bo); 2761 vdrop(vp); 2762 mtx_lock(&sync_mtx); 2763 return (0); 2764 } 2765 2766 static int first_printf = 1; 2767 2768 /* 2769 * System filesystem synchronizer daemon. 
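 *
 * The syncer walks a wheel of worklist slots: roughly once a second it
 * advances syncer_delayno and flushes the bufobjs found in that slot, so a
 * bufobj queued with vn_syncer_add_to_worklist(bo, delay) is normally
 * written out about `delay' seconds later.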
2770 */ 2771 static void 2772 sched_sync(void) 2773 { 2774 struct synclist *next, *slp; 2775 struct bufobj *bo; 2776 long starttime; 2777 struct thread *td = curthread; 2778 int last_work_seen; 2779 int net_worklist_len; 2780 int syncer_final_iter; 2781 int error; 2782 2783 last_work_seen = 0; 2784 syncer_final_iter = 0; 2785 syncer_state = SYNCER_RUNNING; 2786 starttime = time_uptime; 2787 td->td_pflags |= TDP_NORUNNINGBUF; 2788 2789 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2790 SHUTDOWN_PRI_LAST); 2791 2792 mtx_lock(&sync_mtx); 2793 for (;;) { 2794 if (syncer_state == SYNCER_FINAL_DELAY && 2795 syncer_final_iter == 0) { 2796 mtx_unlock(&sync_mtx); 2797 kproc_suspend_check(td->td_proc); 2798 mtx_lock(&sync_mtx); 2799 } 2800 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2801 if (syncer_state != SYNCER_RUNNING && 2802 starttime != time_uptime) { 2803 if (first_printf) { 2804 printf("\nSyncing disks, vnodes remaining... "); 2805 first_printf = 0; 2806 } 2807 printf("%d ", net_worklist_len); 2808 } 2809 starttime = time_uptime; 2810 2811 /* 2812 * Push files whose dirty time has expired. Be careful 2813 * of interrupt race on slp queue. 2814 * 2815 * Skip over empty worklist slots when shutting down. 2816 */ 2817 do { 2818 slp = &syncer_workitem_pending[syncer_delayno]; 2819 syncer_delayno += 1; 2820 if (syncer_delayno == syncer_maxdelay) 2821 syncer_delayno = 0; 2822 next = &syncer_workitem_pending[syncer_delayno]; 2823 /* 2824 * If the worklist has wrapped since the 2825 * it was emptied of all but syncer vnodes, 2826 * switch to the FINAL_DELAY state and run 2827 * for one more second. 2828 */ 2829 if (syncer_state == SYNCER_SHUTTING_DOWN && 2830 net_worklist_len == 0 && 2831 last_work_seen == syncer_delayno) { 2832 syncer_state = SYNCER_FINAL_DELAY; 2833 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2834 } 2835 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2836 syncer_worklist_len > 0); 2837 2838 /* 2839 * Keep track of the last time there was anything 2840 * on the worklist other than syncer vnodes. 2841 * Return to the SHUTTING_DOWN state if any 2842 * new work appears. 2843 */ 2844 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2845 last_work_seen = syncer_delayno; 2846 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2847 syncer_state = SYNCER_SHUTTING_DOWN; 2848 while (!LIST_EMPTY(slp)) { 2849 error = sync_vnode(slp, &bo, td); 2850 if (error == 1) { 2851 LIST_REMOVE(bo, bo_synclist); 2852 LIST_INSERT_HEAD(next, bo, bo_synclist); 2853 continue; 2854 } 2855 2856 if (first_printf == 0) { 2857 /* 2858 * Drop the sync mutex, because some watchdog 2859 * drivers need to sleep while patting 2860 */ 2861 mtx_unlock(&sync_mtx); 2862 wdog_kern_pat(WD_LASTVAL); 2863 mtx_lock(&sync_mtx); 2864 } 2865 } 2866 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2867 syncer_final_iter--; 2868 /* 2869 * The variable rushjob allows the kernel to speed up the 2870 * processing of the filesystem syncer process. A rushjob 2871 * value of N tells the filesystem syncer to process the next 2872 * N seconds worth of work on its queue ASAP. Currently rushjob 2873 * is used by the soft update code to speed up the filesystem 2874 * syncer process when the incore state is getting so far 2875 * ahead of the disk that the kernel memory pool is being 2876 * threatened with exhaustion. 
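		 *
		 * For example, speedup_syncer() below bumps rushjob by one,
		 * making this loop process the next slot immediately instead
		 * of sleeping for the remainder of the second.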
2877 */ 2878 if (rushjob > 0) { 2879 rushjob -= 1; 2880 continue; 2881 } 2882 /* 2883 * Just sleep for a short period of time between 2884 * iterations when shutting down to allow some I/O 2885 * to happen. 2886 * 2887 * If it has taken us less than a second to process the 2888 * current work, then wait. Otherwise start right over 2889 * again. We can still lose time if any single round 2890 * takes more than two seconds, but it does not really 2891 * matter as we are just trying to generally pace the 2892 * filesystem activity. 2893 */ 2894 if (syncer_state != SYNCER_RUNNING || 2895 time_uptime == starttime) { 2896 thread_lock(td); 2897 sched_prio(td, PPAUSE); 2898 thread_unlock(td); 2899 } 2900 if (syncer_state != SYNCER_RUNNING) 2901 cv_timedwait(&sync_wakeup, &sync_mtx, 2902 hz / SYNCER_SHUTDOWN_SPEEDUP); 2903 else if (time_uptime == starttime) 2904 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2905 } 2906 } 2907 2908 /* 2909 * Request the syncer daemon to speed up its work. 2910 * We never push it to speed up more than half of its 2911 * normal turn time, otherwise it could take over the cpu. 2912 */ 2913 int 2914 speedup_syncer(void) 2915 { 2916 int ret = 0; 2917 2918 mtx_lock(&sync_mtx); 2919 if (rushjob < syncdelay / 2) { 2920 rushjob += 1; 2921 stat_rush_requests += 1; 2922 ret = 1; 2923 } 2924 mtx_unlock(&sync_mtx); 2925 cv_broadcast(&sync_wakeup); 2926 return (ret); 2927 } 2928 2929 /* 2930 * Tell the syncer to speed up its work and run though its work 2931 * list several times, then tell it to shut down. 2932 */ 2933 static void 2934 syncer_shutdown(void *arg, int howto) 2935 { 2936 2937 if (howto & RB_NOSYNC) 2938 return; 2939 mtx_lock(&sync_mtx); 2940 syncer_state = SYNCER_SHUTTING_DOWN; 2941 rushjob = 0; 2942 mtx_unlock(&sync_mtx); 2943 cv_broadcast(&sync_wakeup); 2944 kproc_shutdown(arg, howto); 2945 } 2946 2947 void 2948 syncer_suspend(void) 2949 { 2950 2951 syncer_shutdown(updateproc, 0); 2952 } 2953 2954 void 2955 syncer_resume(void) 2956 { 2957 2958 mtx_lock(&sync_mtx); 2959 first_printf = 1; 2960 syncer_state = SYNCER_RUNNING; 2961 mtx_unlock(&sync_mtx); 2962 cv_broadcast(&sync_wakeup); 2963 kproc_resume(updateproc); 2964 } 2965 2966 /* 2967 * Move the buffer between the clean and dirty lists of its vnode. 2968 */ 2969 void 2970 reassignbuf(struct buf *bp) 2971 { 2972 struct vnode *vp; 2973 struct bufobj *bo; 2974 int delay; 2975 #ifdef INVARIANTS 2976 struct bufv *bv; 2977 #endif 2978 2979 vp = bp->b_vp; 2980 bo = bp->b_bufobj; 2981 2982 KASSERT((bp->b_flags & B_PAGING) == 0, 2983 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2984 2985 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2986 bp, bp->b_vp, bp->b_flags); 2987 2988 BO_LOCK(bo); 2989 buf_vlist_remove(bp); 2990 2991 /* 2992 * If dirty, put on list of dirty buffers; otherwise insert onto list 2993 * of clean buffers. 
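	 * A dirty buffer also puts the bufobj on the syncer worklist, with a
	 * delay chosen by vnode type (dirdelay, metadelay or filedelay);
	 * conversely, moving the last dirty buffer back to the clean list
	 * takes the bufobj off the worklist below.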
2994 */ 2995 if (bp->b_flags & B_DELWRI) { 2996 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2997 switch (vp->v_type) { 2998 case VDIR: 2999 delay = dirdelay; 3000 break; 3001 case VCHR: 3002 delay = metadelay; 3003 break; 3004 default: 3005 delay = filedelay; 3006 } 3007 vn_syncer_add_to_worklist(bo, delay); 3008 } 3009 buf_vlist_add(bp, bo, BX_VNDIRTY); 3010 } else { 3011 buf_vlist_add(bp, bo, BX_VNCLEAN); 3012 3013 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3014 mtx_lock(&sync_mtx); 3015 LIST_REMOVE(bo, bo_synclist); 3016 syncer_worklist_len--; 3017 mtx_unlock(&sync_mtx); 3018 bo->bo_flag &= ~BO_ONWORKLST; 3019 } 3020 } 3021 #ifdef INVARIANTS 3022 bv = &bo->bo_clean; 3023 bp = TAILQ_FIRST(&bv->bv_hd); 3024 KASSERT(bp == NULL || bp->b_bufobj == bo, 3025 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3026 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3027 KASSERT(bp == NULL || bp->b_bufobj == bo, 3028 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3029 bv = &bo->bo_dirty; 3030 bp = TAILQ_FIRST(&bv->bv_hd); 3031 KASSERT(bp == NULL || bp->b_bufobj == bo, 3032 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3033 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3034 KASSERT(bp == NULL || bp->b_bufobj == bo, 3035 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3036 #endif 3037 BO_UNLOCK(bo); 3038 } 3039 3040 static void 3041 v_init_counters(struct vnode *vp) 3042 { 3043 3044 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3045 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3046 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3047 3048 refcount_init(&vp->v_holdcnt, 1); 3049 refcount_init(&vp->v_usecount, 1); 3050 } 3051 3052 /* 3053 * Grab a particular vnode from the free list, increment its 3054 * reference count and lock it. VIRF_DOOMED is set if the vnode 3055 * is being destroyed. Only callers who specify LK_RETRY will 3056 * see doomed vnodes. If inactive processing was delayed in 3057 * vput try to do it here. 3058 * 3059 * usecount is manipulated using atomics without holding any locks. 3060 * 3061 * holdcnt can be manipulated using atomics without holding any locks, 3062 * except when transitioning 1<->0, in which case the interlock is held. 3063 * 3064 * Consumers which don't guarantee liveness of the vnode can use SMR to 3065 * try to get a reference. Note this operation can fail since the vnode 3066 * may be awaiting getting freed by the time they get to it. 
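 *
 * A typical lockless consumer looks roughly like this (a sketch; lkflags
 * stands for whatever vn_lock() flags the caller needs):
 *
 *	vfs_smr_enter();
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		fall back to a locked lookup;
 *	else
 *		error = vget_finish(vp, lkflags, vs);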
3067 */ 3068 enum vgetstate 3069 vget_prep_smr(struct vnode *vp) 3070 { 3071 enum vgetstate vs; 3072 3073 VFS_SMR_ASSERT_ENTERED(); 3074 3075 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3076 vs = VGET_USECOUNT; 3077 } else { 3078 if (vhold_smr(vp)) 3079 vs = VGET_HOLDCNT; 3080 else 3081 vs = VGET_NONE; 3082 } 3083 return (vs); 3084 } 3085 3086 enum vgetstate 3087 vget_prep(struct vnode *vp) 3088 { 3089 enum vgetstate vs; 3090 3091 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3092 vs = VGET_USECOUNT; 3093 } else { 3094 vhold(vp); 3095 vs = VGET_HOLDCNT; 3096 } 3097 return (vs); 3098 } 3099 3100 void 3101 vget_abort(struct vnode *vp, enum vgetstate vs) 3102 { 3103 3104 switch (vs) { 3105 case VGET_USECOUNT: 3106 vrele(vp); 3107 break; 3108 case VGET_HOLDCNT: 3109 vdrop(vp); 3110 break; 3111 default: 3112 __assert_unreachable(); 3113 } 3114 } 3115 3116 int 3117 vget(struct vnode *vp, int flags) 3118 { 3119 enum vgetstate vs; 3120 3121 vs = vget_prep(vp); 3122 return (vget_finish(vp, flags, vs)); 3123 } 3124 3125 int 3126 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3127 { 3128 int error; 3129 3130 if ((flags & LK_INTERLOCK) != 0) 3131 ASSERT_VI_LOCKED(vp, __func__); 3132 else 3133 ASSERT_VI_UNLOCKED(vp, __func__); 3134 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3135 VNPASS(vp->v_holdcnt > 0, vp); 3136 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3137 3138 error = vn_lock(vp, flags); 3139 if (__predict_false(error != 0)) { 3140 vget_abort(vp, vs); 3141 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3142 vp); 3143 return (error); 3144 } 3145 3146 vget_finish_ref(vp, vs); 3147 return (0); 3148 } 3149 3150 void 3151 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3152 { 3153 int old; 3154 3155 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3156 VNPASS(vp->v_holdcnt > 0, vp); 3157 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3158 3159 if (vs == VGET_USECOUNT) 3160 return; 3161 3162 /* 3163 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3164 * the vnode around. Otherwise someone else lended their hold count and 3165 * we have to drop ours. 3166 */ 3167 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3168 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3169 if (old != 0) { 3170 #ifdef INVARIANTS 3171 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3172 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3173 #else 3174 refcount_release(&vp->v_holdcnt); 3175 #endif 3176 } 3177 } 3178 3179 void 3180 vref(struct vnode *vp) 3181 { 3182 enum vgetstate vs; 3183 3184 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3185 vs = vget_prep(vp); 3186 vget_finish_ref(vp, vs); 3187 } 3188 3189 void 3190 vrefact(struct vnode *vp) 3191 { 3192 3193 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3194 #ifdef INVARIANTS 3195 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3196 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3197 #else 3198 refcount_acquire(&vp->v_usecount); 3199 #endif 3200 } 3201 3202 void 3203 vlazy(struct vnode *vp) 3204 { 3205 struct mount *mp; 3206 3207 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3208 3209 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3210 return; 3211 /* 3212 * We may get here for inactive routines after the vnode got doomed. 
3213 */ 3214 if (VN_IS_DOOMED(vp)) 3215 return; 3216 mp = vp->v_mount; 3217 mtx_lock(&mp->mnt_listmtx); 3218 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3219 vp->v_mflag |= VMP_LAZYLIST; 3220 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3221 mp->mnt_lazyvnodelistsize++; 3222 } 3223 mtx_unlock(&mp->mnt_listmtx); 3224 } 3225 3226 static void 3227 vunlazy(struct vnode *vp) 3228 { 3229 struct mount *mp; 3230 3231 ASSERT_VI_LOCKED(vp, __func__); 3232 VNPASS(!VN_IS_DOOMED(vp), vp); 3233 3234 mp = vp->v_mount; 3235 mtx_lock(&mp->mnt_listmtx); 3236 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3237 /* 3238 * Don't remove the vnode from the lazy list if another thread 3239 * has increased the hold count. It may have re-enqueued the 3240 * vnode to the lazy list and is now responsible for its 3241 * removal. 3242 */ 3243 if (vp->v_holdcnt == 0) { 3244 vp->v_mflag &= ~VMP_LAZYLIST; 3245 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3246 mp->mnt_lazyvnodelistsize--; 3247 } 3248 mtx_unlock(&mp->mnt_listmtx); 3249 } 3250 3251 /* 3252 * This routine is only meant to be called from vgonel prior to dooming 3253 * the vnode. 3254 */ 3255 static void 3256 vunlazy_gone(struct vnode *vp) 3257 { 3258 struct mount *mp; 3259 3260 ASSERT_VOP_ELOCKED(vp, __func__); 3261 ASSERT_VI_LOCKED(vp, __func__); 3262 VNPASS(!VN_IS_DOOMED(vp), vp); 3263 3264 if (vp->v_mflag & VMP_LAZYLIST) { 3265 mp = vp->v_mount; 3266 mtx_lock(&mp->mnt_listmtx); 3267 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3268 vp->v_mflag &= ~VMP_LAZYLIST; 3269 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3270 mp->mnt_lazyvnodelistsize--; 3271 mtx_unlock(&mp->mnt_listmtx); 3272 } 3273 } 3274 3275 static void 3276 vdefer_inactive(struct vnode *vp) 3277 { 3278 3279 ASSERT_VI_LOCKED(vp, __func__); 3280 VNPASS(vp->v_holdcnt > 0, vp); 3281 if (VN_IS_DOOMED(vp)) { 3282 vdropl(vp); 3283 return; 3284 } 3285 if (vp->v_iflag & VI_DEFINACT) { 3286 VNPASS(vp->v_holdcnt > 1, vp); 3287 vdropl(vp); 3288 return; 3289 } 3290 if (vp->v_usecount > 0) { 3291 vp->v_iflag &= ~VI_OWEINACT; 3292 vdropl(vp); 3293 return; 3294 } 3295 vlazy(vp); 3296 vp->v_iflag |= VI_DEFINACT; 3297 VI_UNLOCK(vp); 3298 atomic_add_long(&deferred_inact, 1); 3299 } 3300 3301 static void 3302 vdefer_inactive_unlocked(struct vnode *vp) 3303 { 3304 3305 VI_LOCK(vp); 3306 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3307 vdropl(vp); 3308 return; 3309 } 3310 vdefer_inactive(vp); 3311 } 3312 3313 enum vput_op { VRELE, VPUT, VUNREF }; 3314 3315 /* 3316 * Handle ->v_usecount transitioning to 0. 3317 * 3318 * By releasing the last usecount we take ownership of the hold count which 3319 * provides liveness of the vnode, meaning we have to vdrop. 3320 * 3321 * For all vnodes we may need to perform inactive processing. It requires an 3322 * exclusive lock on the vnode, while it is legal to call here with only a 3323 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3324 * inactive processing gets deferred to the syncer. 3325 * 3326 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3327 * on the lock being held all the way until VOP_INACTIVE. This in particular 3328 * happens with UFS which adds half-constructed vnodes to the hash, where they 3329 * can be found by other code. 
3330 */ 3331 static void 3332 vput_final(struct vnode *vp, enum vput_op func) 3333 { 3334 int error; 3335 bool want_unlock; 3336 3337 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3338 VNPASS(vp->v_holdcnt > 0, vp); 3339 3340 VI_LOCK(vp); 3341 3342 /* 3343 * By the time we got here someone else might have transitioned 3344 * the count back to > 0. 3345 */ 3346 if (vp->v_usecount > 0) 3347 goto out; 3348 3349 /* 3350 * If the vnode is doomed vgone already performed inactive processing 3351 * (if needed). 3352 */ 3353 if (VN_IS_DOOMED(vp)) 3354 goto out; 3355 3356 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3357 goto out; 3358 3359 if (vp->v_iflag & VI_DOINGINACT) 3360 goto out; 3361 3362 /* 3363 * Locking operations here will drop the interlock and possibly the 3364 * vnode lock, opening a window where the vnode can get doomed all the 3365 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3366 * perform inactive. 3367 */ 3368 vp->v_iflag |= VI_OWEINACT; 3369 want_unlock = false; 3370 error = 0; 3371 switch (func) { 3372 case VRELE: 3373 switch (VOP_ISLOCKED(vp)) { 3374 case LK_EXCLUSIVE: 3375 break; 3376 case LK_EXCLOTHER: 3377 case 0: 3378 want_unlock = true; 3379 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3380 VI_LOCK(vp); 3381 break; 3382 default: 3383 /* 3384 * The lock has at least one sharer, but we have no way 3385 * to conclude whether this is us. Play it safe and 3386 * defer processing. 3387 */ 3388 error = EAGAIN; 3389 break; 3390 } 3391 break; 3392 case VPUT: 3393 want_unlock = true; 3394 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3395 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3396 LK_NOWAIT); 3397 VI_LOCK(vp); 3398 } 3399 break; 3400 case VUNREF: 3401 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3402 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3403 VI_LOCK(vp); 3404 } 3405 break; 3406 } 3407 if (error == 0) { 3408 if (func == VUNREF) { 3409 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3410 ("recursive vunref")); 3411 vp->v_vflag |= VV_UNREF; 3412 } 3413 for (;;) { 3414 error = vinactive(vp); 3415 if (want_unlock) 3416 VOP_UNLOCK(vp); 3417 if (error != ERELOOKUP || !want_unlock) 3418 break; 3419 VOP_LOCK(vp, LK_EXCLUSIVE); 3420 } 3421 if (func == VUNREF) 3422 vp->v_vflag &= ~VV_UNREF; 3423 vdropl(vp); 3424 } else { 3425 vdefer_inactive(vp); 3426 } 3427 return; 3428 out: 3429 if (func == VPUT) 3430 VOP_UNLOCK(vp); 3431 vdropl(vp); 3432 } 3433 3434 /* 3435 * Decrement ->v_usecount for a vnode. 3436 * 3437 * Releasing the last use count requires additional processing, see vput_final 3438 * above for details. 3439 * 3440 * Comment above each variant denotes lock state on entry and exit. 
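 *
 * In summary:
 *
 *	vrele()  - in: any     out: unchanged
 *	vput()   - in: locked  out: unlocked
 *	vunref() - in: locked  out: locked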
3441 */ 3442 3443 /* 3444 * in: any 3445 * out: same as passed in 3446 */ 3447 void 3448 vrele(struct vnode *vp) 3449 { 3450 3451 ASSERT_VI_UNLOCKED(vp, __func__); 3452 if (!refcount_release(&vp->v_usecount)) 3453 return; 3454 vput_final(vp, VRELE); 3455 } 3456 3457 /* 3458 * in: locked 3459 * out: unlocked 3460 */ 3461 void 3462 vput(struct vnode *vp) 3463 { 3464 3465 ASSERT_VOP_LOCKED(vp, __func__); 3466 ASSERT_VI_UNLOCKED(vp, __func__); 3467 if (!refcount_release(&vp->v_usecount)) { 3468 VOP_UNLOCK(vp); 3469 return; 3470 } 3471 vput_final(vp, VPUT); 3472 } 3473 3474 /* 3475 * in: locked 3476 * out: locked 3477 */ 3478 void 3479 vunref(struct vnode *vp) 3480 { 3481 3482 ASSERT_VOP_LOCKED(vp, __func__); 3483 ASSERT_VI_UNLOCKED(vp, __func__); 3484 if (!refcount_release(&vp->v_usecount)) 3485 return; 3486 vput_final(vp, VUNREF); 3487 } 3488 3489 void 3490 vhold(struct vnode *vp) 3491 { 3492 int old; 3493 3494 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3495 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3496 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3497 ("%s: wrong hold count %d", __func__, old)); 3498 if (old == 0) 3499 vfs_freevnodes_dec(); 3500 } 3501 3502 void 3503 vholdnz(struct vnode *vp) 3504 { 3505 3506 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3507 #ifdef INVARIANTS 3508 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3509 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3510 ("%s: wrong hold count %d", __func__, old)); 3511 #else 3512 atomic_add_int(&vp->v_holdcnt, 1); 3513 #endif 3514 } 3515 3516 /* 3517 * Grab a hold count unless the vnode is freed. 3518 * 3519 * Only use this routine if vfs smr is the only protection you have against 3520 * freeing the vnode. 3521 * 3522 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3523 * is not set. After the flag is set the vnode becomes immutable to anyone but 3524 * the thread which managed to set the flag. 3525 * 3526 * It may be tempting to replace the loop with: 3527 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3528 * if (count & VHOLD_NO_SMR) { 3529 * backpedal and error out; 3530 * } 3531 * 3532 * However, while this is more performant, it hinders debugging by eliminating 3533 * the previously mentioned invariant. 3534 */ 3535 bool 3536 vhold_smr(struct vnode *vp) 3537 { 3538 int count; 3539 3540 VFS_SMR_ASSERT_ENTERED(); 3541 3542 count = atomic_load_int(&vp->v_holdcnt); 3543 for (;;) { 3544 if (count & VHOLD_NO_SMR) { 3545 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3546 ("non-zero hold count with flags %d\n", count)); 3547 return (false); 3548 } 3549 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3550 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3551 if (count == 0) 3552 vfs_freevnodes_dec(); 3553 return (true); 3554 } 3555 } 3556 } 3557 3558 /* 3559 * Hold a free vnode for recycling. 3560 * 3561 * Note: vnode_init references this comment. 3562 * 3563 * Attempts to recycle only need the global vnode list lock and have no use for 3564 * SMR. 3565 * 3566 * However, vnodes get inserted into the global list before they get fully 3567 * initialized and stay there until UMA decides to free the memory. This in 3568 * particular means the target can be found before it becomes usable and after 3569 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3570 * VHOLD_NO_SMR. 3571 * 3572 * Note: the vnode may gain more references after we transition the count 0->1. 
3573 */ 3574 static bool 3575 vhold_recycle_free(struct vnode *vp) 3576 { 3577 int count; 3578 3579 mtx_assert(&vnode_list_mtx, MA_OWNED); 3580 3581 count = atomic_load_int(&vp->v_holdcnt); 3582 for (;;) { 3583 if (count & VHOLD_NO_SMR) { 3584 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3585 ("non-zero hold count with flags %d\n", count)); 3586 return (false); 3587 } 3588 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3589 if (count > 0) { 3590 return (false); 3591 } 3592 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3593 vfs_freevnodes_dec(); 3594 return (true); 3595 } 3596 } 3597 } 3598 3599 static void __noinline 3600 vdbatch_process(struct vdbatch *vd) 3601 { 3602 struct vnode *vp; 3603 int i; 3604 3605 mtx_assert(&vd->lock, MA_OWNED); 3606 MPASS(curthread->td_pinned > 0); 3607 MPASS(vd->index == VDBATCH_SIZE); 3608 3609 /* 3610 * Attempt to requeue the passed batch, but give up easily. 3611 * 3612 * Despite batching the mechanism is prone to transient *significant* 3613 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3614 * if multiple CPUs get here (one real-world example is highly parallel 3615 * do-nothing make , which will stat *tons* of vnodes). Since it is 3616 * quasi-LRU (read: not that great even if fully honoured) just dodge 3617 * the problem. Parties which don't like it are welcome to implement 3618 * something better. 3619 */ 3620 critical_enter(); 3621 if (mtx_trylock(&vnode_list_mtx)) { 3622 for (i = 0; i < VDBATCH_SIZE; i++) { 3623 vp = vd->tab[i]; 3624 vd->tab[i] = NULL; 3625 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3626 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3627 MPASS(vp->v_dbatchcpu != NOCPU); 3628 vp->v_dbatchcpu = NOCPU; 3629 } 3630 mtx_unlock(&vnode_list_mtx); 3631 } else { 3632 counter_u64_add(vnode_skipped_requeues, 1); 3633 3634 for (i = 0; i < VDBATCH_SIZE; i++) { 3635 vp = vd->tab[i]; 3636 vd->tab[i] = NULL; 3637 MPASS(vp->v_dbatchcpu != NOCPU); 3638 vp->v_dbatchcpu = NOCPU; 3639 } 3640 } 3641 vd->index = 0; 3642 critical_exit(); 3643 } 3644 3645 static void 3646 vdbatch_enqueue(struct vnode *vp) 3647 { 3648 struct vdbatch *vd; 3649 3650 ASSERT_VI_LOCKED(vp, __func__); 3651 VNPASS(!VN_IS_DOOMED(vp), vp); 3652 3653 if (vp->v_dbatchcpu != NOCPU) { 3654 VI_UNLOCK(vp); 3655 return; 3656 } 3657 3658 sched_pin(); 3659 vd = DPCPU_PTR(vd); 3660 mtx_lock(&vd->lock); 3661 MPASS(vd->index < VDBATCH_SIZE); 3662 MPASS(vd->tab[vd->index] == NULL); 3663 /* 3664 * A hack: we depend on being pinned so that we know what to put in 3665 * ->v_dbatchcpu. 3666 */ 3667 vp->v_dbatchcpu = curcpu; 3668 vd->tab[vd->index] = vp; 3669 vd->index++; 3670 VI_UNLOCK(vp); 3671 if (vd->index == VDBATCH_SIZE) 3672 vdbatch_process(vd); 3673 mtx_unlock(&vd->lock); 3674 sched_unpin(); 3675 } 3676 3677 /* 3678 * This routine must only be called for vnodes which are about to be 3679 * deallocated. Supporting dequeue for arbitrary vndoes would require 3680 * validating that the locked batch matches. 
3681 */ 3682 static void 3683 vdbatch_dequeue(struct vnode *vp) 3684 { 3685 struct vdbatch *vd; 3686 int i; 3687 short cpu; 3688 3689 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3690 3691 cpu = vp->v_dbatchcpu; 3692 if (cpu == NOCPU) 3693 return; 3694 3695 vd = DPCPU_ID_PTR(cpu, vd); 3696 mtx_lock(&vd->lock); 3697 for (i = 0; i < vd->index; i++) { 3698 if (vd->tab[i] != vp) 3699 continue; 3700 vp->v_dbatchcpu = NOCPU; 3701 vd->index--; 3702 vd->tab[i] = vd->tab[vd->index]; 3703 vd->tab[vd->index] = NULL; 3704 break; 3705 } 3706 mtx_unlock(&vd->lock); 3707 /* 3708 * Either we dequeued the vnode above or the target CPU beat us to it. 3709 */ 3710 MPASS(vp->v_dbatchcpu == NOCPU); 3711 } 3712 3713 /* 3714 * Drop the hold count of the vnode. If this is the last reference to 3715 * the vnode we place it on the free list unless it has been vgone'd 3716 * (marked VIRF_DOOMED) in which case we will free it. 3717 * 3718 * Because the vnode vm object keeps a hold reference on the vnode if 3719 * there is at least one resident non-cached page, the vnode cannot 3720 * leave the active list without the page cleanup done. 3721 */ 3722 static void __noinline 3723 vdropl_final(struct vnode *vp) 3724 { 3725 3726 ASSERT_VI_LOCKED(vp, __func__); 3727 VNPASS(VN_IS_DOOMED(vp), vp); 3728 /* 3729 * Set the VHOLD_NO_SMR flag. 3730 * 3731 * We may be racing against vhold_smr. If they win we can just pretend 3732 * we never got this far, they will vdrop later. 3733 */ 3734 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3735 vfs_freevnodes_inc(); 3736 VI_UNLOCK(vp); 3737 /* 3738 * We lost the aforementioned race. Any subsequent access is 3739 * invalid as they might have managed to vdropl on their own. 3740 */ 3741 return; 3742 } 3743 /* 3744 * Don't bump freevnodes as this one is going away. 3745 */ 3746 freevnode(vp); 3747 } 3748 3749 void 3750 vdrop(struct vnode *vp) 3751 { 3752 3753 ASSERT_VI_UNLOCKED(vp, __func__); 3754 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3755 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3756 return; 3757 VI_LOCK(vp); 3758 vdropl(vp); 3759 } 3760 3761 static void __always_inline 3762 vdropl_impl(struct vnode *vp, bool enqueue) 3763 { 3764 3765 ASSERT_VI_LOCKED(vp, __func__); 3766 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3767 if (!refcount_release(&vp->v_holdcnt)) { 3768 VI_UNLOCK(vp); 3769 return; 3770 } 3771 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3772 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3773 if (VN_IS_DOOMED(vp)) { 3774 vdropl_final(vp); 3775 return; 3776 } 3777 3778 vfs_freevnodes_inc(); 3779 if (vp->v_mflag & VMP_LAZYLIST) { 3780 vunlazy(vp); 3781 } 3782 3783 if (!enqueue) { 3784 VI_UNLOCK(vp); 3785 return; 3786 } 3787 3788 /* 3789 * Also unlocks the interlock. We can't assert on it as we 3790 * released our hold and by now the vnode might have been 3791 * freed. 3792 */ 3793 vdbatch_enqueue(vp); 3794 } 3795 3796 void 3797 vdropl(struct vnode *vp) 3798 { 3799 3800 vdropl_impl(vp, true); 3801 } 3802 3803 /* 3804 * vdrop a vnode when recycling 3805 * 3806 * This is a special case routine only to be used when recycling, differs from 3807 * regular vdrop by not requeieing the vnode on LRU. 3808 * 3809 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3810 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3811 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3812 * loop which can last for as long as writes are frozen. 
3813 */ 3814 static void 3815 vdropl_recycle(struct vnode *vp) 3816 { 3817 3818 vdropl_impl(vp, false); 3819 } 3820 3821 static void 3822 vdrop_recycle(struct vnode *vp) 3823 { 3824 3825 VI_LOCK(vp); 3826 vdropl_recycle(vp); 3827 } 3828 3829 /* 3830 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3831 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3832 */ 3833 static int 3834 vinactivef(struct vnode *vp) 3835 { 3836 struct vm_object *obj; 3837 int error; 3838 3839 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3840 ASSERT_VI_LOCKED(vp, "vinactive"); 3841 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 3842 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3843 vp->v_iflag |= VI_DOINGINACT; 3844 vp->v_iflag &= ~VI_OWEINACT; 3845 VI_UNLOCK(vp); 3846 /* 3847 * Before moving off the active list, we must be sure that any 3848 * modified pages are converted into the vnode's dirty 3849 * buffers, since these will no longer be checked once the 3850 * vnode is on the inactive list. 3851 * 3852 * The write-out of the dirty pages is asynchronous. At the 3853 * point that VOP_INACTIVE() is called, there could still be 3854 * pending I/O and dirty pages in the object. 3855 */ 3856 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3857 vm_object_mightbedirty(obj)) { 3858 VM_OBJECT_WLOCK(obj); 3859 vm_object_page_clean(obj, 0, 0, 0); 3860 VM_OBJECT_WUNLOCK(obj); 3861 } 3862 error = VOP_INACTIVE(vp); 3863 VI_LOCK(vp); 3864 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 3865 vp->v_iflag &= ~VI_DOINGINACT; 3866 return (error); 3867 } 3868 3869 int 3870 vinactive(struct vnode *vp) 3871 { 3872 3873 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3874 ASSERT_VI_LOCKED(vp, "vinactive"); 3875 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3876 3877 if ((vp->v_iflag & VI_OWEINACT) == 0) 3878 return (0); 3879 if (vp->v_iflag & VI_DOINGINACT) 3880 return (0); 3881 if (vp->v_usecount > 0) { 3882 vp->v_iflag &= ~VI_OWEINACT; 3883 return (0); 3884 } 3885 return (vinactivef(vp)); 3886 } 3887 3888 /* 3889 * Remove any vnodes in the vnode table belonging to mount point mp. 3890 * 3891 * If FORCECLOSE is not specified, there should not be any active ones, 3892 * return error if any are found (nb: this is a user error, not a 3893 * system error). If FORCECLOSE is specified, detach any active vnodes 3894 * that are found. 3895 * 3896 * If WRITECLOSE is set, only flush out regular file vnodes open for 3897 * writing. 3898 * 3899 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3900 * 3901 * `rootrefs' specifies the base reference count for the root vnode 3902 * of this filesystem. The root vnode is considered busy if its 3903 * v_usecount exceeds this value. On a successful return, vflush(, td) 3904 * will call vrele() on the root vnode exactly rootrefs times. 3905 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3906 * be zero. 3907 */ 3908 #ifdef DIAGNOSTIC 3909 static int busyprt = 0; /* print out busy vnodes */ 3910 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3911 #endif 3912 3913 int 3914 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3915 { 3916 struct vnode *vp, *mvp, *rootvp = NULL; 3917 struct vattr vattr; 3918 int busy = 0, error; 3919 3920 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3921 rootrefs, flags); 3922 if (rootrefs > 0) { 3923 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3924 ("vflush: bad args")); 3925 /* 3926 * Get the filesystem root vnode. 
We can vput() it 3927 immediately, since with rootrefs > 0, it won't go away. 3928 */ 3929 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3930 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3931 __func__, error); 3932 return (error); 3933 } 3934 vput(rootvp); 3935 } 3936 loop: 3937 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3938 vholdl(vp); 3939 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3940 if (error) { 3941 vdrop(vp); 3942 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3943 goto loop; 3944 } 3945 /* 3946 * Skip over vnodes marked VV_SYSTEM. 3947 */ 3948 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3949 VOP_UNLOCK(vp); 3950 vdrop(vp); 3951 continue; 3952 } 3953 /* 3954 * If WRITECLOSE is set, flush out unlinked but still open 3955 * files (even if open only for reading) and regular file 3956 * vnodes open for writing. 3957 */ 3958 if (flags & WRITECLOSE) { 3959 if (vp->v_object != NULL) { 3960 VM_OBJECT_WLOCK(vp->v_object); 3961 vm_object_page_clean(vp->v_object, 0, 0, 0); 3962 VM_OBJECT_WUNLOCK(vp->v_object); 3963 } 3964 do { 3965 error = VOP_FSYNC(vp, MNT_WAIT, td); 3966 } while (error == ERELOOKUP); 3967 if (error != 0) { 3968 VOP_UNLOCK(vp); 3969 vdrop(vp); 3970 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3971 return (error); 3972 } 3973 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3974 VI_LOCK(vp); 3975 3976 if ((vp->v_type == VNON || 3977 (error == 0 && vattr.va_nlink > 0)) && 3978 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3979 VOP_UNLOCK(vp); 3980 vdropl(vp); 3981 continue; 3982 } 3983 } else 3984 VI_LOCK(vp); 3985 /* 3986 * With v_usecount == 0, all we need to do is clear out the 3987 * vnode data structures and we are done. 3988 * 3989 * If FORCECLOSE is set, forcibly close the vnode. 3990 */ 3991 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3992 vgonel(vp); 3993 } else { 3994 busy++; 3995 #ifdef DIAGNOSTIC 3996 if (busyprt) 3997 vn_printf(vp, "vflush: busy vnode "); 3998 #endif 3999 } 4000 VOP_UNLOCK(vp); 4001 vdropl(vp); 4002 } 4003 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 4004 /* 4005 * If just the root vnode is busy, and if its refcount 4006 * is equal to `rootrefs', then go ahead and kill it. 4007 */ 4008 VI_LOCK(rootvp); 4009 KASSERT(busy > 0, ("vflush: not busy")); 4010 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4011 ("vflush: usecount %d < rootrefs %d", 4012 rootvp->v_usecount, rootrefs)); 4013 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4014 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4015 vgone(rootvp); 4016 VOP_UNLOCK(rootvp); 4017 busy = 0; 4018 } else 4019 VI_UNLOCK(rootvp); 4020 } 4021 if (busy) { 4022 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4023 busy); 4024 return (EBUSY); 4025 } 4026 for (; rootrefs > 0; rootrefs--) 4027 vrele(rootvp); 4028 return (0); 4029 } 4030 4031 /* 4032 * Recycle an unused vnode to the front of the free list. 4033 */ 4034 int 4035 vrecycle(struct vnode *vp) 4036 { 4037 int recycled; 4038 4039 VI_LOCK(vp); 4040 recycled = vrecyclel(vp); 4041 VI_UNLOCK(vp); 4042 return (recycled); 4043 } 4044 4045 /* 4046 * vrecycle, with the vp interlock held. 4047 */ 4048 int 4049 vrecyclel(struct vnode *vp) 4050 { 4051 int recycled; 4052 4053 ASSERT_VOP_ELOCKED(vp, __func__); 4054 ASSERT_VI_LOCKED(vp, __func__); 4055 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4056 recycled = 0; 4057 if (vp->v_usecount == 0) { 4058 recycled = 1; 4059 vgonel(vp); 4060 } 4061 return (recycled); 4062 } 4063 4064 /* 4065 * Eliminate all activity associated with a vnode 4066 * in preparation for reuse.
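 * vgone() below takes the vnode interlock and hands off to vgonel(), which performs the actual reclamation.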
4067 */ 4068 void 4069 vgone(struct vnode *vp) 4070 { 4071 VI_LOCK(vp); 4072 vgonel(vp); 4073 VI_UNLOCK(vp); 4074 } 4075 4076 /* 4077 * Notify upper mounts about reclaimed or unlinked vnode. 4078 */ 4079 void 4080 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4081 { 4082 struct mount *mp; 4083 struct mount_upper_node *ump; 4084 4085 mp = atomic_load_ptr(&vp->v_mount); 4086 if (mp == NULL) 4087 return; 4088 if (TAILQ_EMPTY(&mp->mnt_notify)) 4089 return; 4090 4091 MNT_ILOCK(mp); 4092 mp->mnt_upper_pending++; 4093 KASSERT(mp->mnt_upper_pending > 0, 4094 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4095 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4096 MNT_IUNLOCK(mp); 4097 switch (event) { 4098 case VFS_NOTIFY_UPPER_RECLAIM: 4099 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4100 break; 4101 case VFS_NOTIFY_UPPER_UNLINK: 4102 VFS_UNLINK_LOWERVP(ump->mp, vp); 4103 break; 4104 } 4105 MNT_ILOCK(mp); 4106 } 4107 mp->mnt_upper_pending--; 4108 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4109 mp->mnt_upper_pending == 0) { 4110 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4111 wakeup(&mp->mnt_uppers); 4112 } 4113 MNT_IUNLOCK(mp); 4114 } 4115 4116 /* 4117 * vgone, with the vp interlock held. 4118 */ 4119 static void 4120 vgonel(struct vnode *vp) 4121 { 4122 struct thread *td; 4123 struct mount *mp; 4124 vm_object_t object; 4125 bool active, doinginact, oweinact; 4126 4127 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4128 ASSERT_VI_LOCKED(vp, "vgonel"); 4129 VNASSERT(vp->v_holdcnt, vp, 4130 ("vgonel: vp %p has no reference.", vp)); 4131 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4132 td = curthread; 4133 4134 /* 4135 * Don't vgonel if we're already doomed. 4136 */ 4137 if (VN_IS_DOOMED(vp)) { 4138 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4139 vn_get_state(vp) == VSTATE_DEAD, vp); 4140 return; 4141 } 4142 /* 4143 * Paired with freevnode. 4144 */ 4145 vn_seqc_write_begin_locked(vp); 4146 vunlazy_gone(vp); 4147 vn_irflag_set_locked(vp, VIRF_DOOMED); 4148 vn_set_state(vp, VSTATE_DESTROYING); 4149 4150 /* 4151 * Check to see if the vnode is in use. If so, we have to 4152 * call VOP_CLOSE() and VOP_INACTIVE(). 4153 * 4154 * It could be that VOP_INACTIVE() requested reclamation, in 4155 * which case we should avoid recursion, so check 4156 * VI_DOINGINACT. This is not precise but good enough. 4157 */ 4158 active = vp->v_usecount > 0; 4159 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4160 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4161 4162 /* 4163 * If we need to do inactive VI_OWEINACT will be set. 4164 */ 4165 if (vp->v_iflag & VI_DEFINACT) { 4166 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4167 vp->v_iflag &= ~VI_DEFINACT; 4168 vdropl(vp); 4169 } else { 4170 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4171 VI_UNLOCK(vp); 4172 } 4173 cache_purge_vgone(vp); 4174 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4175 4176 /* 4177 * If purging an active vnode, it must be closed and 4178 * deactivated before being reclaimed. 4179 */ 4180 if (active) 4181 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4182 if (!doinginact) { 4183 do { 4184 if (oweinact || active) { 4185 VI_LOCK(vp); 4186 vinactivef(vp); 4187 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4188 VI_UNLOCK(vp); 4189 } 4190 } while (oweinact); 4191 } 4192 if (vp->v_type == VSOCK) 4193 vfs_unp_reclaim(vp); 4194 4195 /* 4196 * Clean out any buffers associated with the vnode. 4197 * If the flush fails, just toss the buffers. 
4198 */ 4199 mp = NULL; 4200 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4201 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4202 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4203 while (vinvalbuf(vp, 0, 0, 0) != 0) 4204 ; 4205 } 4206 4207 BO_LOCK(&vp->v_bufobj); 4208 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4209 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4210 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4211 vp->v_bufobj.bo_clean.bv_cnt == 0, 4212 ("vp %p bufobj not invalidated", vp)); 4213 4214 /* 4215 * For VMIO bufobj, BO_DEAD is set later, or in 4216 * vm_object_terminate() after the object's page queue is 4217 * flushed. 4218 */ 4219 object = vp->v_bufobj.bo_object; 4220 if (object == NULL) 4221 vp->v_bufobj.bo_flag |= BO_DEAD; 4222 BO_UNLOCK(&vp->v_bufobj); 4223 4224 /* 4225 * Handle the VM part. Tmpfs handles v_object on its own (the 4226 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4227 * should not touch the object borrowed from the lower vnode 4228 * (the handle check). 4229 */ 4230 if (object != NULL && object->type == OBJT_VNODE && 4231 object->handle == vp) 4232 vnode_destroy_vobject(vp); 4233 4234 /* 4235 * Reclaim the vnode. 4236 */ 4237 if (VOP_RECLAIM(vp)) 4238 panic("vgone: cannot reclaim"); 4239 if (mp != NULL) 4240 vn_finished_secondary_write(mp); 4241 VNASSERT(vp->v_object == NULL, vp, 4242 ("vop_reclaim left v_object vp=%p", vp)); 4243 /* 4244 * Clear the advisory locks and wake up waiting threads. 4245 */ 4246 if (vp->v_lockf != NULL) { 4247 (void)VOP_ADVLOCKPURGE(vp); 4248 vp->v_lockf = NULL; 4249 } 4250 /* 4251 * Delete from old mount point vnode list. 4252 */ 4253 if (vp->v_mount == NULL) { 4254 VI_LOCK(vp); 4255 } else { 4256 delmntque(vp); 4257 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4258 } 4259 /* 4260 * Done with purge, reset to the standard lock and invalidate 4261 * the vnode. 4262 */ 4263 vp->v_vnlock = &vp->v_lock; 4264 vp->v_op = &dead_vnodeops; 4265 vp->v_type = VBAD; 4266 vn_set_state(vp, VSTATE_DEAD); 4267 } 4268 4269 /* 4270 * Print out a description of a vnode. 4271 */ 4272 static const char *const vtypename[] = { 4273 [VNON] = "VNON", 4274 [VREG] = "VREG", 4275 [VDIR] = "VDIR", 4276 [VBLK] = "VBLK", 4277 [VCHR] = "VCHR", 4278 [VLNK] = "VLNK", 4279 [VSOCK] = "VSOCK", 4280 [VFIFO] = "VFIFO", 4281 [VBAD] = "VBAD", 4282 [VMARKER] = "VMARKER", 4283 }; 4284 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4285 "vnode type name not added to vtypename"); 4286 4287 static const char *const vstatename[] = { 4288 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4289 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4290 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4291 [VSTATE_DEAD] = "VSTATE_DEAD", 4292 }; 4293 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4294 "vnode state name not added to vstatename"); 4295 4296 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4297 "new hold count flag not added to vn_printf"); 4298 4299 void 4300 vn_printf(struct vnode *vp, const char *fmt, ...) 
4301 { 4302 va_list ap; 4303 char buf[256], buf2[16]; 4304 u_long flags; 4305 u_int holdcnt; 4306 short irflag; 4307 4308 va_start(ap, fmt); 4309 vprintf(fmt, ap); 4310 va_end(ap); 4311 printf("%p: ", (void *)vp); 4312 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4313 vstatename[vp->v_state], vp->v_op); 4314 holdcnt = atomic_load_int(&vp->v_holdcnt); 4315 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4316 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4317 vp->v_seqc_users); 4318 switch (vp->v_type) { 4319 case VDIR: 4320 printf(" mountedhere %p\n", vp->v_mountedhere); 4321 break; 4322 case VCHR: 4323 printf(" rdev %p\n", vp->v_rdev); 4324 break; 4325 case VSOCK: 4326 printf(" socket %p\n", vp->v_unpcb); 4327 break; 4328 case VFIFO: 4329 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4330 break; 4331 default: 4332 printf("\n"); 4333 break; 4334 } 4335 buf[0] = '\0'; 4336 buf[1] = '\0'; 4337 if (holdcnt & VHOLD_NO_SMR) 4338 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4339 printf(" hold count flags (%s)\n", buf + 1); 4340 4341 buf[0] = '\0'; 4342 buf[1] = '\0'; 4343 irflag = vn_irflag_read(vp); 4344 if (irflag & VIRF_DOOMED) 4345 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4346 if (irflag & VIRF_PGREAD) 4347 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4348 if (irflag & VIRF_MOUNTPOINT) 4349 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4350 if (irflag & VIRF_TEXT_REF) 4351 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4352 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4353 if (flags != 0) { 4354 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4355 strlcat(buf, buf2, sizeof(buf)); 4356 } 4357 if (vp->v_vflag & VV_ROOT) 4358 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4359 if (vp->v_vflag & VV_ISTTY) 4360 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4361 if (vp->v_vflag & VV_NOSYNC) 4362 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4363 if (vp->v_vflag & VV_ETERNALDEV) 4364 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4365 if (vp->v_vflag & VV_CACHEDLABEL) 4366 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4367 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4368 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4369 if (vp->v_vflag & VV_COPYONWRITE) 4370 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4371 if (vp->v_vflag & VV_SYSTEM) 4372 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4373 if (vp->v_vflag & VV_PROCDEP) 4374 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4375 if (vp->v_vflag & VV_DELETED) 4376 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4377 if (vp->v_vflag & VV_MD) 4378 strlcat(buf, "|VV_MD", sizeof(buf)); 4379 if (vp->v_vflag & VV_FORCEINSMQ) 4380 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4381 if (vp->v_vflag & VV_READLINK) 4382 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4383 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4384 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4385 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4386 if (flags != 0) { 4387 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4388 strlcat(buf, buf2, sizeof(buf)); 4389 } 4390 if (vp->v_iflag & VI_MOUNT) 4391 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4392 if (vp->v_iflag & VI_DOINGINACT) 4393 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4394 if (vp->v_iflag & VI_OWEINACT) 4395 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4396 if (vp->v_iflag & VI_DEFINACT) 4397 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4398 if (vp->v_iflag & VI_FOPENING) 4399 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4400 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4401 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4402 if (flags != 0) { 4403 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4404 strlcat(buf, buf2, sizeof(buf)); 4405 } 4406 if (vp->v_mflag & VMP_LAZYLIST) 4407 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4408 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4409 if (flags != 0) { 4410 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4411 strlcat(buf, buf2, sizeof(buf)); 4412 } 4413 printf(" flags (%s)", buf + 1); 4414 if (mtx_owned(VI_MTX(vp))) 4415 printf(" VI_LOCKed"); 4416 printf("\n"); 4417 if (vp->v_object != NULL) 4418 printf(" v_object %p ref %d pages %d " 4419 "cleanbuf %d dirtybuf %d\n", 4420 vp->v_object, vp->v_object->ref_count, 4421 vp->v_object->resident_page_count, 4422 vp->v_bufobj.bo_clean.bv_cnt, 4423 vp->v_bufobj.bo_dirty.bv_cnt); 4424 printf(" "); 4425 lockmgr_printinfo(vp->v_vnlock); 4426 if (vp->v_data != NULL) 4427 VOP_PRINT(vp); 4428 } 4429 4430 #ifdef DDB 4431 /* 4432 * List all of the locked vnodes in the system. 4433 * Called when debugging the kernel. 4434 */ 4435 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4436 { 4437 struct mount *mp; 4438 struct vnode *vp; 4439 4440 /* 4441 * Note: because this is DDB, we can't obey the locking semantics 4442 * for these structures, which means we could catch an inconsistent 4443 * state and dereference a nasty pointer. Not much to be done 4444 * about that. 4445 */ 4446 db_printf("Locked vnodes\n"); 4447 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4448 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4449 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4450 vn_printf(vp, "vnode "); 4451 } 4452 } 4453 } 4454 4455 /* 4456 * Show details about the given vnode. 4457 */ 4458 DB_SHOW_COMMAND(vnode, db_show_vnode) 4459 { 4460 struct vnode *vp; 4461 4462 if (!have_addr) 4463 return; 4464 vp = (struct vnode *)addr; 4465 vn_printf(vp, "vnode "); 4466 } 4467 4468 /* 4469 * Show details about the given mount point. 4470 */ 4471 DB_SHOW_COMMAND(mount, db_show_mount) 4472 { 4473 struct mount *mp; 4474 struct vfsopt *opt; 4475 struct statfs *sp; 4476 struct vnode *vp; 4477 char buf[512]; 4478 uint64_t mflags; 4479 u_int flags; 4480 4481 if (!have_addr) { 4482 /* No address given, print short info about all mount points. 
*/ 4483 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4484 db_printf("%p %s on %s (%s)\n", mp, 4485 mp->mnt_stat.f_mntfromname, 4486 mp->mnt_stat.f_mntonname, 4487 mp->mnt_stat.f_fstypename); 4488 if (db_pager_quit) 4489 break; 4490 } 4491 db_printf("\nMore info: show mount <addr>\n"); 4492 return; 4493 } 4494 4495 mp = (struct mount *)addr; 4496 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4497 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4498 4499 buf[0] = '\0'; 4500 mflags = mp->mnt_flag; 4501 #define MNT_FLAG(flag) do { \ 4502 if (mflags & (flag)) { \ 4503 if (buf[0] != '\0') \ 4504 strlcat(buf, ", ", sizeof(buf)); \ 4505 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4506 mflags &= ~(flag); \ 4507 } \ 4508 } while (0) 4509 MNT_FLAG(MNT_RDONLY); 4510 MNT_FLAG(MNT_SYNCHRONOUS); 4511 MNT_FLAG(MNT_NOEXEC); 4512 MNT_FLAG(MNT_NOSUID); 4513 MNT_FLAG(MNT_NFS4ACLS); 4514 MNT_FLAG(MNT_UNION); 4515 MNT_FLAG(MNT_ASYNC); 4516 MNT_FLAG(MNT_SUIDDIR); 4517 MNT_FLAG(MNT_SOFTDEP); 4518 MNT_FLAG(MNT_NOSYMFOLLOW); 4519 MNT_FLAG(MNT_GJOURNAL); 4520 MNT_FLAG(MNT_MULTILABEL); 4521 MNT_FLAG(MNT_ACLS); 4522 MNT_FLAG(MNT_NOATIME); 4523 MNT_FLAG(MNT_NOCLUSTERR); 4524 MNT_FLAG(MNT_NOCLUSTERW); 4525 MNT_FLAG(MNT_SUJ); 4526 MNT_FLAG(MNT_EXRDONLY); 4527 MNT_FLAG(MNT_EXPORTED); 4528 MNT_FLAG(MNT_DEFEXPORTED); 4529 MNT_FLAG(MNT_EXPORTANON); 4530 MNT_FLAG(MNT_EXKERB); 4531 MNT_FLAG(MNT_EXPUBLIC); 4532 MNT_FLAG(MNT_LOCAL); 4533 MNT_FLAG(MNT_QUOTA); 4534 MNT_FLAG(MNT_ROOTFS); 4535 MNT_FLAG(MNT_USER); 4536 MNT_FLAG(MNT_IGNORE); 4537 MNT_FLAG(MNT_UPDATE); 4538 MNT_FLAG(MNT_DELEXPORT); 4539 MNT_FLAG(MNT_RELOAD); 4540 MNT_FLAG(MNT_FORCE); 4541 MNT_FLAG(MNT_SNAPSHOT); 4542 MNT_FLAG(MNT_BYFSID); 4543 #undef MNT_FLAG 4544 if (mflags != 0) { 4545 if (buf[0] != '\0') 4546 strlcat(buf, ", ", sizeof(buf)); 4547 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4548 "0x%016jx", mflags); 4549 } 4550 db_printf(" mnt_flag = %s\n", buf); 4551 4552 buf[0] = '\0'; 4553 flags = mp->mnt_kern_flag; 4554 #define MNT_KERN_FLAG(flag) do { \ 4555 if (flags & (flag)) { \ 4556 if (buf[0] != '\0') \ 4557 strlcat(buf, ", ", sizeof(buf)); \ 4558 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4559 flags &= ~(flag); \ 4560 } \ 4561 } while (0) 4562 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4563 MNT_KERN_FLAG(MNTK_ASYNC); 4564 MNT_KERN_FLAG(MNTK_SOFTDEP); 4565 MNT_KERN_FLAG(MNTK_NOMSYNC); 4566 MNT_KERN_FLAG(MNTK_DRAINING); 4567 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4568 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4569 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4570 MNT_KERN_FLAG(MNTK_NO_IOPF); 4571 MNT_KERN_FLAG(MNTK_RECURSE); 4572 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4573 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4574 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4575 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4576 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4577 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4578 MNT_KERN_FLAG(MNTK_NOASYNC); 4579 MNT_KERN_FLAG(MNTK_UNMOUNT); 4580 MNT_KERN_FLAG(MNTK_MWAIT); 4581 MNT_KERN_FLAG(MNTK_SUSPEND); 4582 MNT_KERN_FLAG(MNTK_SUSPEND2); 4583 MNT_KERN_FLAG(MNTK_SUSPENDED); 4584 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4585 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4586 #undef MNT_KERN_FLAG 4587 if (flags != 0) { 4588 if (buf[0] != '\0') 4589 strlcat(buf, ", ", sizeof(buf)); 4590 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4591 "0x%08x", flags); 4592 } 4593 db_printf(" mnt_kern_flag = %s\n", buf); 4594 4595 db_printf(" mnt_opt = "); 4596 opt = TAILQ_FIRST(mp->mnt_opt); 4597 if (opt != NULL) { 4598 db_printf("%s", opt->name); 4599 opt = TAILQ_NEXT(opt, link); 4600 while (opt != 
NULL) { 4601 db_printf(", %s", opt->name); 4602 opt = TAILQ_NEXT(opt, link); 4603 } 4604 } 4605 db_printf("\n"); 4606 4607 sp = &mp->mnt_stat; 4608 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4609 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4610 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4611 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4612 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4613 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4614 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4615 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4616 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4617 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4618 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4619 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4620 4621 db_printf(" mnt_cred = { uid=%u ruid=%u", 4622 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4623 if (jailed(mp->mnt_cred)) 4624 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4625 db_printf(" }\n"); 4626 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4627 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4628 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4629 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4630 db_printf(" mnt_lazyvnodelistsize = %d\n", 4631 mp->mnt_lazyvnodelistsize); 4632 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4633 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4634 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4635 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4636 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4637 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4638 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4639 db_printf(" mnt_secondary_accwrites = %d\n", 4640 mp->mnt_secondary_accwrites); 4641 db_printf(" mnt_gjprovider = %s\n", 4642 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4643 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4644 4645 db_printf("\n\nList of active vnodes\n"); 4646 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4647 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4648 vn_printf(vp, "vnode "); 4649 if (db_pager_quit) 4650 break; 4651 } 4652 } 4653 db_printf("\n\nList of inactive vnodes\n"); 4654 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4655 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4656 vn_printf(vp, "vnode "); 4657 if (db_pager_quit) 4658 break; 4659 } 4660 } 4661 } 4662 #endif /* DDB */ 4663 4664 /* 4665 * Fill in a struct xvfsconf based on a struct vfsconf. 4666 */ 4667 static int 4668 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4669 { 4670 struct xvfsconf xvfsp; 4671 4672 bzero(&xvfsp, sizeof(xvfsp)); 4673 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4674 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4675 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4676 xvfsp.vfc_flags = vfsp->vfc_flags; 4677 /* 4678 * These are unused in userland, we keep them 4679 * to not break binary compatibility. 
4680 */ 4681 xvfsp.vfc_vfsops = NULL; 4682 xvfsp.vfc_next = NULL; 4683 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4684 } 4685 4686 #ifdef COMPAT_FREEBSD32 4687 struct xvfsconf32 { 4688 uint32_t vfc_vfsops; 4689 char vfc_name[MFSNAMELEN]; 4690 int32_t vfc_typenum; 4691 int32_t vfc_refcount; 4692 int32_t vfc_flags; 4693 uint32_t vfc_next; 4694 }; 4695 4696 static int 4697 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4698 { 4699 struct xvfsconf32 xvfsp; 4700 4701 bzero(&xvfsp, sizeof(xvfsp)); 4702 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4703 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4704 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4705 xvfsp.vfc_flags = vfsp->vfc_flags; 4706 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4707 } 4708 #endif 4709 4710 /* 4711 * Top level filesystem related information gathering. 4712 */ 4713 static int 4714 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4715 { 4716 struct vfsconf *vfsp; 4717 int error; 4718 4719 error = 0; 4720 vfsconf_slock(); 4721 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4722 #ifdef COMPAT_FREEBSD32 4723 if (req->flags & SCTL_MASK32) 4724 error = vfsconf2x32(req, vfsp); 4725 else 4726 #endif 4727 error = vfsconf2x(req, vfsp); 4728 if (error) 4729 break; 4730 } 4731 vfsconf_sunlock(); 4732 return (error); 4733 } 4734 4735 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4736 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4737 "S,xvfsconf", "List of all configured filesystems"); 4738 4739 #ifndef BURN_BRIDGES 4740 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4741 4742 static int 4743 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4744 { 4745 int *name = (int *)arg1 - 1; /* XXX */ 4746 u_int namelen = arg2 + 1; /* XXX */ 4747 struct vfsconf *vfsp; 4748 4749 log(LOG_WARNING, "userland calling deprecated sysctl, " 4750 "please rebuild world\n"); 4751 4752 #if 1 || defined(COMPAT_PRELITE2) 4753 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4754 if (namelen == 1) 4755 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4756 #endif 4757 4758 switch (name[1]) { 4759 case VFS_MAXTYPENUM: 4760 if (namelen != 2) 4761 return (ENOTDIR); 4762 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4763 case VFS_CONF: 4764 if (namelen != 3) 4765 return (ENOTDIR); /* overloaded */ 4766 vfsconf_slock(); 4767 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4768 if (vfsp->vfc_typenum == name[2]) 4769 break; 4770 } 4771 vfsconf_sunlock(); 4772 if (vfsp == NULL) 4773 return (EOPNOTSUPP); 4774 #ifdef COMPAT_FREEBSD32 4775 if (req->flags & SCTL_MASK32) 4776 return (vfsconf2x32(req, vfsp)); 4777 else 4778 #endif 4779 return (vfsconf2x(req, vfsp)); 4780 } 4781 return (EOPNOTSUPP); 4782 } 4783 4784 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4785 CTLFLAG_MPSAFE, vfs_sysctl, 4786 "Generic filesystem"); 4787 4788 #if 1 || defined(COMPAT_PRELITE2) 4789 4790 static int 4791 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4792 { 4793 int error; 4794 struct vfsconf *vfsp; 4795 struct ovfsconf ovfs; 4796 4797 vfsconf_slock(); 4798 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4799 bzero(&ovfs, sizeof(ovfs)); 4800 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4801 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4802 ovfs.vfc_index = vfsp->vfc_typenum; 4803 ovfs.vfc_refcount = vfsp->vfc_refcount; 4804 ovfs.vfc_flags = vfsp->vfc_flags; 4805 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4806 if (error != 0) { 4807 vfsconf_sunlock(); 4808 return (error); 4809 } 4810 } 4811 vfsconf_sunlock(); 4812 return (0); 4813 } 4814 4815 #endif /* 1 || COMPAT_PRELITE2 */ 4816 #endif /* !BURN_BRIDGES */ 4817 4818 static void 4819 unmount_or_warn(struct mount *mp) 4820 { 4821 int error; 4822 4823 error = dounmount(mp, MNT_FORCE, curthread); 4824 if (error != 0) { 4825 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4826 if (error == EBUSY) 4827 printf("BUSY)\n"); 4828 else 4829 printf("%d)\n", error); 4830 } 4831 } 4832 4833 /* 4834 * Unmount all filesystems. The list is traversed in reverse order 4835 * of mounting to avoid dependencies. 4836 */ 4837 void 4838 vfs_unmountall(void) 4839 { 4840 struct mount *mp, *tmp; 4841 4842 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4843 4844 /* 4845 * Since this only runs when rebooting, it is not interlocked. 4846 */ 4847 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4848 vfs_ref(mp); 4849 4850 /* 4851 * Forcibly unmounting "/dev" before "/" would prevent clean 4852 * unmount of the latter. 
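 * rootdevmp is therefore skipped in the loop below and unmounted last, after everything else has been handled.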
4853 */ 4854 if (mp == rootdevmp) 4855 continue; 4856 4857 unmount_or_warn(mp); 4858 } 4859 4860 if (rootdevmp != NULL) 4861 unmount_or_warn(rootdevmp); 4862 } 4863 4864 static void 4865 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4866 { 4867 4868 ASSERT_VI_LOCKED(vp, __func__); 4869 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4870 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4871 vdropl(vp); 4872 return; 4873 } 4874 if (vn_lock(vp, lkflags) == 0) { 4875 VI_LOCK(vp); 4876 vinactive(vp); 4877 VOP_UNLOCK(vp); 4878 vdropl(vp); 4879 return; 4880 } 4881 vdefer_inactive_unlocked(vp); 4882 } 4883 4884 static int 4885 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4886 { 4887 4888 return (vp->v_iflag & VI_DEFINACT); 4889 } 4890 4891 static void __noinline 4892 vfs_periodic_inactive(struct mount *mp, int flags) 4893 { 4894 struct vnode *vp, *mvp; 4895 int lkflags; 4896 4897 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4898 if (flags != MNT_WAIT) 4899 lkflags |= LK_NOWAIT; 4900 4901 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4902 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4903 VI_UNLOCK(vp); 4904 continue; 4905 } 4906 vp->v_iflag &= ~VI_DEFINACT; 4907 vfs_deferred_inactive(vp, lkflags); 4908 } 4909 } 4910 4911 static inline bool 4912 vfs_want_msync(struct vnode *vp) 4913 { 4914 struct vm_object *obj; 4915 4916 /* 4917 * This test may be performed without any locks held. 4918 * We rely on vm_object's type stability. 4919 */ 4920 if (vp->v_vflag & VV_NOSYNC) 4921 return (false); 4922 obj = vp->v_object; 4923 return (obj != NULL && vm_object_mightbedirty(obj)); 4924 } 4925 4926 static int 4927 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4928 { 4929 4930 if (vp->v_vflag & VV_NOSYNC) 4931 return (false); 4932 if (vp->v_iflag & VI_DEFINACT) 4933 return (true); 4934 return (vfs_want_msync(vp)); 4935 } 4936 4937 static void __noinline 4938 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4939 { 4940 struct vnode *vp, *mvp; 4941 struct vm_object *obj; 4942 int lkflags, objflags; 4943 bool seen_defer; 4944 4945 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4946 if (flags != MNT_WAIT) { 4947 lkflags |= LK_NOWAIT; 4948 objflags = OBJPC_NOSYNC; 4949 } else { 4950 objflags = OBJPC_SYNC; 4951 } 4952 4953 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4954 seen_defer = false; 4955 if (vp->v_iflag & VI_DEFINACT) { 4956 vp->v_iflag &= ~VI_DEFINACT; 4957 seen_defer = true; 4958 } 4959 if (!vfs_want_msync(vp)) { 4960 if (seen_defer) 4961 vfs_deferred_inactive(vp, lkflags); 4962 else 4963 VI_UNLOCK(vp); 4964 continue; 4965 } 4966 if (vget(vp, lkflags) == 0) { 4967 obj = vp->v_object; 4968 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4969 VM_OBJECT_WLOCK(obj); 4970 vm_object_page_clean(obj, 0, 0, objflags); 4971 VM_OBJECT_WUNLOCK(obj); 4972 } 4973 vput(vp); 4974 if (seen_defer) 4975 vdrop(vp); 4976 } else { 4977 if (seen_defer) 4978 vdefer_inactive_unlocked(vp); 4979 } 4980 } 4981 } 4982 4983 void 4984 vfs_periodic(struct mount *mp, int flags) 4985 { 4986 4987 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4988 4989 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4990 vfs_periodic_inactive(mp, flags); 4991 else 4992 vfs_periodic_msync_inactive(mp, flags); 4993 } 4994 4995 static void 4996 destroy_vpollinfo_free(struct vpollinfo *vi) 4997 { 4998 4999 knlist_destroy(&vi->vpi_selinfo.si_note); 5000 mtx_destroy(&vi->vpi_lock); 5001 free(vi, M_VNODEPOLL); 5002 } 5003 5004 static void 5005 destroy_vpollinfo(struct vpollinfo 
*vi) 5006 { 5007 5008 knlist_clear(&vi->vpi_selinfo.si_note, 1); 5009 seldrain(&vi->vpi_selinfo); 5010 destroy_vpollinfo_free(vi); 5011 } 5012 5013 /* 5014 * Initialize per-vnode helper structure to hold poll-related state. 5015 */ 5016 void 5017 v_addpollinfo(struct vnode *vp) 5018 { 5019 struct vpollinfo *vi; 5020 5021 if (vp->v_pollinfo != NULL) 5022 return; 5023 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 5024 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 5025 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 5026 vfs_knlunlock, vfs_knl_assert_lock); 5027 VI_LOCK(vp); 5028 if (vp->v_pollinfo != NULL) { 5029 VI_UNLOCK(vp); 5030 destroy_vpollinfo_free(vi); 5031 return; 5032 } 5033 vp->v_pollinfo = vi; 5034 VI_UNLOCK(vp); 5035 } 5036 5037 /* 5038 * Record a process's interest in events which might happen to 5039 * a vnode. Because poll uses the historic select-style interface 5040 * internally, this routine serves as both the ``check for any 5041 * pending events'' and the ``record my interest in future events'' 5042 * functions. (These are done together, while the lock is held, 5043 * to avoid race conditions.) 5044 */ 5045 int 5046 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 5047 { 5048 5049 v_addpollinfo(vp); 5050 mtx_lock(&vp->v_pollinfo->vpi_lock); 5051 if (vp->v_pollinfo->vpi_revents & events) { 5052 /* 5053 * This leaves events we are not interested 5054 * in available for the other process which 5055 * presumably had requested them 5056 * (otherwise they would never have been 5057 * recorded). 5058 */ 5059 events &= vp->v_pollinfo->vpi_revents; 5060 vp->v_pollinfo->vpi_revents &= ~events; 5061 5062 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5063 return (events); 5064 } 5065 vp->v_pollinfo->vpi_events |= events; 5066 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5067 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5068 return (0); 5069 } 5070 5071 /* 5072 * Routine to create and manage a filesystem syncer vnode. 5073 */ 5074 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5075 static int sync_fsync(struct vop_fsync_args *); 5076 static int sync_inactive(struct vop_inactive_args *); 5077 static int sync_reclaim(struct vop_reclaim_args *); 5078 5079 static struct vop_vector sync_vnodeops = { 5080 .vop_bypass = VOP_EOPNOTSUPP, 5081 .vop_close = sync_close, 5082 .vop_fsync = sync_fsync, 5083 .vop_getwritemount = vop_stdgetwritemount, 5084 .vop_inactive = sync_inactive, 5085 .vop_need_inactive = vop_stdneed_inactive, 5086 .vop_reclaim = sync_reclaim, 5087 .vop_lock1 = vop_stdlock, 5088 .vop_unlock = vop_stdunlock, 5089 .vop_islocked = vop_stdislocked, 5090 .vop_fplookup_vexec = VOP_EAGAIN, 5091 .vop_fplookup_symlink = VOP_EAGAIN, 5092 }; 5093 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5094 5095 /* 5096 * Create a new filesystem syncer vnode for the specified mount point.
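 * The vnode is put on the syncer worklist; the periodic VOP_FSYNC(MNT_LAZY) issued against it is turned by sync_fsync() into a lazy VFS_SYNC() of the whole filesystem.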
5097 */ 5098 void 5099 vfs_allocate_syncvnode(struct mount *mp) 5100 { 5101 struct vnode *vp; 5102 struct bufobj *bo; 5103 static long start, incr, next; 5104 int error; 5105 5106 /* Allocate a new vnode */ 5107 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5108 if (error != 0) 5109 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5110 vp->v_type = VNON; 5111 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5112 vp->v_vflag |= VV_FORCEINSMQ; 5113 error = insmntque1(vp, mp); 5114 if (error != 0) 5115 panic("vfs_allocate_syncvnode: insmntque() failed"); 5116 vp->v_vflag &= ~VV_FORCEINSMQ; 5117 vn_set_state(vp, VSTATE_CONSTRUCTED); 5118 VOP_UNLOCK(vp); 5119 /* 5120 * Place the vnode onto the syncer worklist. We attempt to 5121 * scatter them about on the list so that they will go off 5122 * at evenly distributed times even if all the filesystems 5123 * are mounted at once. 5124 */ 5125 next += incr; 5126 if (next == 0 || next > syncer_maxdelay) { 5127 start /= 2; 5128 incr /= 2; 5129 if (start == 0) { 5130 start = syncer_maxdelay / 2; 5131 incr = syncer_maxdelay; 5132 } 5133 next = start; 5134 } 5135 bo = &vp->v_bufobj; 5136 BO_LOCK(bo); 5137 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5138 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5139 mtx_lock(&sync_mtx); 5140 sync_vnode_count++; 5141 if (mp->mnt_syncer == NULL) { 5142 mp->mnt_syncer = vp; 5143 vp = NULL; 5144 } 5145 mtx_unlock(&sync_mtx); 5146 BO_UNLOCK(bo); 5147 if (vp != NULL) { 5148 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5149 vgone(vp); 5150 vput(vp); 5151 } 5152 } 5153 5154 void 5155 vfs_deallocate_syncvnode(struct mount *mp) 5156 { 5157 struct vnode *vp; 5158 5159 mtx_lock(&sync_mtx); 5160 vp = mp->mnt_syncer; 5161 if (vp != NULL) 5162 mp->mnt_syncer = NULL; 5163 mtx_unlock(&sync_mtx); 5164 if (vp != NULL) 5165 vrele(vp); 5166 } 5167 5168 /* 5169 * Do a lazy sync of the filesystem. 5170 */ 5171 static int 5172 sync_fsync(struct vop_fsync_args *ap) 5173 { 5174 struct vnode *syncvp = ap->a_vp; 5175 struct mount *mp = syncvp->v_mount; 5176 int error, save; 5177 struct bufobj *bo; 5178 5179 /* 5180 * We only need to do something if this is a lazy evaluation. 5181 */ 5182 if (ap->a_waitfor != MNT_LAZY) 5183 return (0); 5184 5185 /* 5186 * Move ourselves to the back of the sync list. 5187 */ 5188 bo = &syncvp->v_bufobj; 5189 BO_LOCK(bo); 5190 vn_syncer_add_to_worklist(bo, syncdelay); 5191 BO_UNLOCK(bo); 5192 5193 /* 5194 * Walk the list of vnodes pushing all that are dirty and 5195 * not already on the sync list. 5196 */ 5197 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5198 return (0); 5199 VOP_UNLOCK(syncvp); 5200 save = curthread_pflags_set(TDP_SYNCIO); 5201 /* 5202 * The filesystem at hand may be idle with free vnodes stored in the 5203 * batch. Return them instead of letting them stay there indefinitely. 5204 */ 5205 vfs_periodic(mp, MNT_NOWAIT); 5206 error = VFS_SYNC(mp, MNT_LAZY); 5207 curthread_pflags_restore(save); 5208 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5209 vfs_unbusy(mp); 5210 return (error); 5211 } 5212 5213 /* 5214 * The syncer vnode is no longer referenced. 5215 */ 5216 static int 5217 sync_inactive(struct vop_inactive_args *ap) 5218 { 5219 5220 vgone(ap->a_vp); 5221 return (0); 5222 } 5223 5224 /* 5225 * The syncer vnode is no longer needed and is being decommissioned. 5226 * 5227 * Modifications to the worklist must be protected by sync_mtx.
5228 */ 5229 static int 5230 sync_reclaim(struct vop_reclaim_args *ap) 5231 { 5232 struct vnode *vp = ap->a_vp; 5233 struct bufobj *bo; 5234 5235 bo = &vp->v_bufobj; 5236 BO_LOCK(bo); 5237 mtx_lock(&sync_mtx); 5238 if (vp->v_mount->mnt_syncer == vp) 5239 vp->v_mount->mnt_syncer = NULL; 5240 if (bo->bo_flag & BO_ONWORKLST) { 5241 LIST_REMOVE(bo, bo_synclist); 5242 syncer_worklist_len--; 5243 sync_vnode_count--; 5244 bo->bo_flag &= ~BO_ONWORKLST; 5245 } 5246 mtx_unlock(&sync_mtx); 5247 BO_UNLOCK(bo); 5248 5249 return (0); 5250 } 5251 5252 int 5253 vn_need_pageq_flush(struct vnode *vp) 5254 { 5255 struct vm_object *obj; 5256 5257 obj = vp->v_object; 5258 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5259 vm_object_mightbedirty(obj)); 5260 } 5261 5262 /* 5263 * Check if vnode represents a disk device 5264 */ 5265 bool 5266 vn_isdisk_error(struct vnode *vp, int *errp) 5267 { 5268 int error; 5269 5270 if (vp->v_type != VCHR) { 5271 error = ENOTBLK; 5272 goto out; 5273 } 5274 error = 0; 5275 dev_lock(); 5276 if (vp->v_rdev == NULL) 5277 error = ENXIO; 5278 else if (vp->v_rdev->si_devsw == NULL) 5279 error = ENXIO; 5280 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5281 error = ENOTBLK; 5282 dev_unlock(); 5283 out: 5284 *errp = error; 5285 return (error == 0); 5286 } 5287 5288 bool 5289 vn_isdisk(struct vnode *vp) 5290 { 5291 int error; 5292 5293 return (vn_isdisk_error(vp, &error)); 5294 } 5295 5296 /* 5297 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5298 * the comment above cache_fplookup for details. 5299 */ 5300 int 5301 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5302 { 5303 int error; 5304 5305 VFS_SMR_ASSERT_ENTERED(); 5306 5307 /* Check the owner. */ 5308 if (cred->cr_uid == file_uid) { 5309 if (file_mode & S_IXUSR) 5310 return (0); 5311 goto out_error; 5312 } 5313 5314 /* Otherwise, check the groups (first match) */ 5315 if (groupmember(file_gid, cred)) { 5316 if (file_mode & S_IXGRP) 5317 return (0); 5318 goto out_error; 5319 } 5320 5321 /* Otherwise, check everyone else. */ 5322 if (file_mode & S_IXOTH) 5323 return (0); 5324 out_error: 5325 /* 5326 * Permission check failed, but it is possible the denial will be overridden 5327 * (e.g., when root is traversing through a 700 directory owned by someone 5328 * else). 5329 * 5330 * vaccess() calls priv_check_cred which in turn can descend into MAC 5331 * modules overriding this result. It is quite unclear what semantics 5332 * they are allowed to operate with, thus for safety we don't call them 5333 * from within the SMR section. This also means if any such modules 5334 * are present, we have to let the regular lookup decide. 5335 */ 5336 error = priv_check_cred_vfs_lookup_nomac(cred); 5337 switch (error) { 5338 case 0: 5339 return (0); 5340 case EAGAIN: 5341 /* 5342 * MAC modules present. 5343 */ 5344 return (EAGAIN); 5345 case EPERM: 5346 return (EACCES); 5347 default: 5348 return (error); 5349 } 5350 } 5351 5352 /* 5353 * Common filesystem object access control check routine. Accepts a 5354 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5355 * Returns 0 on success, or an errno on failure.
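 * The discretionary (owner/group/other) bits are consulted first; if they do not grant the request, privileges such as PRIV_VFS_READ, PRIV_VFS_WRITE and PRIV_VFS_ADMIN may still satisfy it, see the privcheck handling below.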
5356 */ 5357 int 5358 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5359 accmode_t accmode, struct ucred *cred) 5360 { 5361 accmode_t dac_granted; 5362 accmode_t priv_granted; 5363 5364 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5365 ("invalid bit in accmode")); 5366 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5367 ("VAPPEND without VWRITE")); 5368 5369 /* 5370 * Look for a normal, non-privileged way to access the file/directory 5371 * as requested. If it exists, go with that. 5372 */ 5373 5374 dac_granted = 0; 5375 5376 /* Check the owner. */ 5377 if (cred->cr_uid == file_uid) { 5378 dac_granted |= VADMIN; 5379 if (file_mode & S_IXUSR) 5380 dac_granted |= VEXEC; 5381 if (file_mode & S_IRUSR) 5382 dac_granted |= VREAD; 5383 if (file_mode & S_IWUSR) 5384 dac_granted |= (VWRITE | VAPPEND); 5385 5386 if ((accmode & dac_granted) == accmode) 5387 return (0); 5388 5389 goto privcheck; 5390 } 5391 5392 /* Otherwise, check the groups (first match) */ 5393 if (groupmember(file_gid, cred)) { 5394 if (file_mode & S_IXGRP) 5395 dac_granted |= VEXEC; 5396 if (file_mode & S_IRGRP) 5397 dac_granted |= VREAD; 5398 if (file_mode & S_IWGRP) 5399 dac_granted |= (VWRITE | VAPPEND); 5400 5401 if ((accmode & dac_granted) == accmode) 5402 return (0); 5403 5404 goto privcheck; 5405 } 5406 5407 /* Otherwise, check everyone else. */ 5408 if (file_mode & S_IXOTH) 5409 dac_granted |= VEXEC; 5410 if (file_mode & S_IROTH) 5411 dac_granted |= VREAD; 5412 if (file_mode & S_IWOTH) 5413 dac_granted |= (VWRITE | VAPPEND); 5414 if ((accmode & dac_granted) == accmode) 5415 return (0); 5416 5417 privcheck: 5418 /* 5419 * Build a privilege mask to determine if the set of privileges 5420 * satisfies the requirements when combined with the granted mask 5421 * from above. For each privilege, if the privilege is required, 5422 * bitwise or the request type onto the priv_granted mask. 5423 */ 5424 priv_granted = 0; 5425 5426 if (type == VDIR) { 5427 /* 5428 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5429 * requests, instead of PRIV_VFS_EXEC. 5430 */ 5431 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5432 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5433 priv_granted |= VEXEC; 5434 } else { 5435 /* 5436 * Ensure that at least one execute bit is on. Otherwise, 5437 * a privileged user will always succeed, and we don't want 5438 * this to happen unless the file really is executable. 5439 */ 5440 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5441 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5442 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5443 priv_granted |= VEXEC; 5444 } 5445 5446 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5447 !priv_check_cred(cred, PRIV_VFS_READ)) 5448 priv_granted |= VREAD; 5449 5450 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5451 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5452 priv_granted |= (VWRITE | VAPPEND); 5453 5454 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5455 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5456 priv_granted |= VADMIN; 5457 5458 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5459 return (0); 5460 } 5461 5462 return ((accmode & VADMIN) ? EPERM : EACCES); 5463 } 5464 5465 /* 5466 * Credential check based on process requesting service, and per-attribute 5467 * permissions. 
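 * System-namespace attributes require PRIV_VFS_EXTATTR_SYSTEM, while user-namespace attributes fall back to the regular VOP_ACCESS() check.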
5468 */ 5469 int 5470 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5471 struct thread *td, accmode_t accmode) 5472 { 5473 5474 /* 5475 * Kernel-invoked always succeeds. 5476 */ 5477 if (cred == NOCRED) 5478 return (0); 5479 5480 /* 5481 * Do not allow privileged processes in jail to directly manipulate 5482 * system attributes. 5483 */ 5484 switch (attrnamespace) { 5485 case EXTATTR_NAMESPACE_SYSTEM: 5486 /* Potentially should be: return (EPERM); */ 5487 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5488 case EXTATTR_NAMESPACE_USER: 5489 return (VOP_ACCESS(vp, accmode, cred, td)); 5490 default: 5491 return (EPERM); 5492 } 5493 } 5494 5495 #ifdef DEBUG_VFS_LOCKS 5496 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5497 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5498 "Drop into debugger on lock violation"); 5499 5500 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5501 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5502 0, "Check for interlock across VOPs"); 5503 5504 int vfs_badlock_print = 1; /* Print lock violations. */ 5505 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5506 0, "Print lock violations"); 5507 5508 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5509 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5510 0, "Print vnode details on lock violations"); 5511 5512 #ifdef KDB 5513 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5514 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5515 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5516 #endif 5517 5518 static void 5519 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5520 { 5521 5522 #ifdef KDB 5523 if (vfs_badlock_backtrace) 5524 kdb_backtrace(); 5525 #endif 5526 if (vfs_badlock_vnode) 5527 vn_printf(vp, "vnode "); 5528 if (vfs_badlock_print) 5529 printf("%s: %p %s\n", str, (void *)vp, msg); 5530 if (vfs_badlock_ddb) 5531 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5532 } 5533 5534 void 5535 assert_vi_locked(struct vnode *vp, const char *str) 5536 { 5537 5538 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5539 vfs_badlock("interlock is not locked but should be", str, vp); 5540 } 5541 5542 void 5543 assert_vi_unlocked(struct vnode *vp, const char *str) 5544 { 5545 5546 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5547 vfs_badlock("interlock is locked but should not be", str, vp); 5548 } 5549 5550 void 5551 assert_vop_locked(struct vnode *vp, const char *str) 5552 { 5553 if (KERNEL_PANICKED() || vp == NULL) 5554 return; 5555 5556 #ifdef WITNESS 5557 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5558 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5559 #else 5560 int locked = VOP_ISLOCKED(vp); 5561 if (locked == 0 || locked == LK_EXCLOTHER) 5562 #endif 5563 vfs_badlock("is not locked but should be", str, vp); 5564 } 5565 5566 void 5567 assert_vop_unlocked(struct vnode *vp, const char *str) 5568 { 5569 if (KERNEL_PANICKED() || vp == NULL) 5570 return; 5571 5572 #ifdef WITNESS 5573 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5574 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5575 #else 5576 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5577 #endif 5578 vfs_badlock("is locked but should not be", str, vp); 5579 } 5580 5581 void 5582 assert_vop_elocked(struct vnode *vp, const char *str) 5583 { 5584 if (KERNEL_PANICKED() || vp == 
NULL) 5585 return; 5586 5587 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5588 vfs_badlock("is not exclusive locked but should be", str, vp); 5589 } 5590 #endif /* DEBUG_VFS_LOCKS */ 5591 5592 void 5593 vop_rename_fail(struct vop_rename_args *ap) 5594 { 5595 5596 if (ap->a_tvp != NULL) 5597 vput(ap->a_tvp); 5598 if (ap->a_tdvp == ap->a_tvp) 5599 vrele(ap->a_tdvp); 5600 else 5601 vput(ap->a_tdvp); 5602 vrele(ap->a_fdvp); 5603 vrele(ap->a_fvp); 5604 } 5605 5606 void 5607 vop_rename_pre(void *ap) 5608 { 5609 struct vop_rename_args *a = ap; 5610 5611 #ifdef DEBUG_VFS_LOCKS 5612 if (a->a_tvp) 5613 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5614 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5615 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5616 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5617 5618 /* Check the source (from). */ 5619 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5620 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5621 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5622 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5623 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5624 5625 /* Check the target. */ 5626 if (a->a_tvp) 5627 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5628 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5629 #endif 5630 /* 5631 * It may be tempting to add vn_seqc_write_begin/end calls here and 5632 * in vop_rename_post but that's not going to work out since some 5633 * filesystems relookup vnodes mid-rename. This is probably a bug. 5634 * 5635 * For now filesystems are expected to do the relevant calls after they 5636 * decide what vnodes to operate on. 5637 */ 5638 if (a->a_tdvp != a->a_fdvp) 5639 vhold(a->a_fdvp); 5640 if (a->a_tvp != a->a_fvp) 5641 vhold(a->a_fvp); 5642 vhold(a->a_tdvp); 5643 if (a->a_tvp) 5644 vhold(a->a_tvp); 5645 } 5646 5647 #ifdef DEBUG_VFS_LOCKS 5648 void 5649 vop_fplookup_vexec_debugpre(void *ap __unused) 5650 { 5651 5652 VFS_SMR_ASSERT_ENTERED(); 5653 } 5654 5655 void 5656 vop_fplookup_vexec_debugpost(void *ap, int rc) 5657 { 5658 struct vop_fplookup_vexec_args *a; 5659 struct vnode *vp; 5660 5661 a = ap; 5662 vp = a->a_vp; 5663 5664 VFS_SMR_ASSERT_ENTERED(); 5665 if (rc == EOPNOTSUPP) 5666 VNPASS(VN_IS_DOOMED(vp), vp); 5667 } 5668 5669 void 5670 vop_fplookup_symlink_debugpre(void *ap __unused) 5671 { 5672 5673 VFS_SMR_ASSERT_ENTERED(); 5674 } 5675 5676 void 5677 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5678 { 5679 5680 VFS_SMR_ASSERT_ENTERED(); 5681 } 5682 5683 static void 5684 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5685 { 5686 if (vp->v_type == VCHR) 5687 ; 5688 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5689 ASSERT_VOP_LOCKED(vp, name); 5690 else 5691 ASSERT_VOP_ELOCKED(vp, name); 5692 } 5693 5694 void 5695 vop_fsync_debugpre(void *a) 5696 { 5697 struct vop_fsync_args *ap; 5698 5699 ap = a; 5700 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5701 } 5702 5703 void 5704 vop_fsync_debugpost(void *a, int rc __unused) 5705 { 5706 struct vop_fsync_args *ap; 5707 5708 ap = a; 5709 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5710 } 5711 5712 void 5713 vop_fdatasync_debugpre(void *a) 5714 { 5715 struct vop_fdatasync_args *ap; 5716 5717 ap = a; 5718 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5719 } 5720 5721 void 5722 vop_fdatasync_debugpost(void *a, int rc __unused) 5723 { 5724 struct vop_fdatasync_args *ap; 5725 5726 ap = a; 5727 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5728 } 5729 5730 void 5731 
vop_strategy_debugpre(void *ap) 5732 { 5733 struct vop_strategy_args *a; 5734 struct buf *bp; 5735 5736 a = ap; 5737 bp = a->a_bp; 5738 5739 /* 5740 * Cluster ops lock their component buffers but not the IO container. 5741 */ 5742 if ((bp->b_flags & B_CLUSTER) != 0) 5743 return; 5744 5745 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5746 if (vfs_badlock_print) 5747 printf( 5748 "VOP_STRATEGY: bp is not locked but should be\n"); 5749 if (vfs_badlock_ddb) 5750 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5751 } 5752 } 5753 5754 void 5755 vop_lock_debugpre(void *ap) 5756 { 5757 struct vop_lock1_args *a = ap; 5758 5759 if ((a->a_flags & LK_INTERLOCK) == 0) 5760 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5761 else 5762 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5763 } 5764 5765 void 5766 vop_lock_debugpost(void *ap, int rc) 5767 { 5768 struct vop_lock1_args *a = ap; 5769 5770 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5771 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5772 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5773 } 5774 5775 void 5776 vop_unlock_debugpre(void *ap) 5777 { 5778 struct vop_unlock_args *a = ap; 5779 struct vnode *vp = a->a_vp; 5780 5781 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5782 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5783 } 5784 5785 void 5786 vop_need_inactive_debugpre(void *ap) 5787 { 5788 struct vop_need_inactive_args *a = ap; 5789 5790 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5791 } 5792 5793 void 5794 vop_need_inactive_debugpost(void *ap, int rc) 5795 { 5796 struct vop_need_inactive_args *a = ap; 5797 5798 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5799 } 5800 #endif 5801 5802 void 5803 vop_create_pre(void *ap) 5804 { 5805 struct vop_create_args *a; 5806 struct vnode *dvp; 5807 5808 a = ap; 5809 dvp = a->a_dvp; 5810 vn_seqc_write_begin(dvp); 5811 } 5812 5813 void 5814 vop_create_post(void *ap, int rc) 5815 { 5816 struct vop_create_args *a; 5817 struct vnode *dvp; 5818 5819 a = ap; 5820 dvp = a->a_dvp; 5821 vn_seqc_write_end(dvp); 5822 if (!rc) 5823 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5824 } 5825 5826 void 5827 vop_whiteout_pre(void *ap) 5828 { 5829 struct vop_whiteout_args *a; 5830 struct vnode *dvp; 5831 5832 a = ap; 5833 dvp = a->a_dvp; 5834 vn_seqc_write_begin(dvp); 5835 } 5836 5837 void 5838 vop_whiteout_post(void *ap, int rc) 5839 { 5840 struct vop_whiteout_args *a; 5841 struct vnode *dvp; 5842 5843 a = ap; 5844 dvp = a->a_dvp; 5845 vn_seqc_write_end(dvp); 5846 } 5847 5848 void 5849 vop_deleteextattr_pre(void *ap) 5850 { 5851 struct vop_deleteextattr_args *a; 5852 struct vnode *vp; 5853 5854 a = ap; 5855 vp = a->a_vp; 5856 vn_seqc_write_begin(vp); 5857 } 5858 5859 void 5860 vop_deleteextattr_post(void *ap, int rc) 5861 { 5862 struct vop_deleteextattr_args *a; 5863 struct vnode *vp; 5864 5865 a = ap; 5866 vp = a->a_vp; 5867 vn_seqc_write_end(vp); 5868 if (!rc) 5869 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5870 } 5871 5872 void 5873 vop_link_pre(void *ap) 5874 { 5875 struct vop_link_args *a; 5876 struct vnode *vp, *tdvp; 5877 5878 a = ap; 5879 vp = a->a_vp; 5880 tdvp = a->a_tdvp; 5881 vn_seqc_write_begin(vp); 5882 vn_seqc_write_begin(tdvp); 5883 } 5884 5885 void 5886 vop_link_post(void *ap, int rc) 5887 { 5888 struct vop_link_args *a; 5889 struct vnode *vp, *tdvp; 5890 5891 a = ap; 5892 vp = a->a_vp; 5893 tdvp = a->a_tdvp; 5894 vn_seqc_write_end(vp); 5895 vn_seqc_write_end(tdvp); 5896 if (!rc) { 5897 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5898 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5899 } 5900 } 5901 5902 void 5903 vop_mkdir_pre(void *ap) 5904 { 5905 struct 
vop_mkdir_args *a; 5906 struct vnode *dvp; 5907 5908 a = ap; 5909 dvp = a->a_dvp; 5910 vn_seqc_write_begin(dvp); 5911 } 5912 5913 void 5914 vop_mkdir_post(void *ap, int rc) 5915 { 5916 struct vop_mkdir_args *a; 5917 struct vnode *dvp; 5918 5919 a = ap; 5920 dvp = a->a_dvp; 5921 vn_seqc_write_end(dvp); 5922 if (!rc) 5923 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5924 } 5925 5926 #ifdef DEBUG_VFS_LOCKS 5927 void 5928 vop_mkdir_debugpost(void *ap, int rc) 5929 { 5930 struct vop_mkdir_args *a; 5931 5932 a = ap; 5933 if (!rc) 5934 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5935 } 5936 #endif 5937 5938 void 5939 vop_mknod_pre(void *ap) 5940 { 5941 struct vop_mknod_args *a; 5942 struct vnode *dvp; 5943 5944 a = ap; 5945 dvp = a->a_dvp; 5946 vn_seqc_write_begin(dvp); 5947 } 5948 5949 void 5950 vop_mknod_post(void *ap, int rc) 5951 { 5952 struct vop_mknod_args *a; 5953 struct vnode *dvp; 5954 5955 a = ap; 5956 dvp = a->a_dvp; 5957 vn_seqc_write_end(dvp); 5958 if (!rc) 5959 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5960 } 5961 5962 void 5963 vop_reclaim_post(void *ap, int rc) 5964 { 5965 struct vop_reclaim_args *a; 5966 struct vnode *vp; 5967 5968 a = ap; 5969 vp = a->a_vp; 5970 ASSERT_VOP_IN_SEQC(vp); 5971 if (!rc) 5972 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5973 } 5974 5975 void 5976 vop_remove_pre(void *ap) 5977 { 5978 struct vop_remove_args *a; 5979 struct vnode *dvp, *vp; 5980 5981 a = ap; 5982 dvp = a->a_dvp; 5983 vp = a->a_vp; 5984 vn_seqc_write_begin(dvp); 5985 vn_seqc_write_begin(vp); 5986 } 5987 5988 void 5989 vop_remove_post(void *ap, int rc) 5990 { 5991 struct vop_remove_args *a; 5992 struct vnode *dvp, *vp; 5993 5994 a = ap; 5995 dvp = a->a_dvp; 5996 vp = a->a_vp; 5997 vn_seqc_write_end(dvp); 5998 vn_seqc_write_end(vp); 5999 if (!rc) { 6000 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6001 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6002 } 6003 } 6004 6005 void 6006 vop_rename_post(void *ap, int rc) 6007 { 6008 struct vop_rename_args *a = ap; 6009 long hint; 6010 6011 if (!rc) { 6012 hint = NOTE_WRITE; 6013 if (a->a_fdvp == a->a_tdvp) { 6014 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6015 hint |= NOTE_LINK; 6016 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6017 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6018 } else { 6019 hint |= NOTE_EXTEND; 6020 if (a->a_fvp->v_type == VDIR) 6021 hint |= NOTE_LINK; 6022 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6023 6024 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6025 a->a_tvp->v_type == VDIR) 6026 hint &= ~NOTE_LINK; 6027 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6028 } 6029 6030 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6031 if (a->a_tvp) 6032 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6033 } 6034 if (a->a_tdvp != a->a_fdvp) 6035 vdrop(a->a_fdvp); 6036 if (a->a_tvp != a->a_fvp) 6037 vdrop(a->a_fvp); 6038 vdrop(a->a_tdvp); 6039 if (a->a_tvp) 6040 vdrop(a->a_tvp); 6041 } 6042 6043 void 6044 vop_rmdir_pre(void *ap) 6045 { 6046 struct vop_rmdir_args *a; 6047 struct vnode *dvp, *vp; 6048 6049 a = ap; 6050 dvp = a->a_dvp; 6051 vp = a->a_vp; 6052 vn_seqc_write_begin(dvp); 6053 vn_seqc_write_begin(vp); 6054 } 6055 6056 void 6057 vop_rmdir_post(void *ap, int rc) 6058 { 6059 struct vop_rmdir_args *a; 6060 struct vnode *dvp, *vp; 6061 6062 a = ap; 6063 dvp = a->a_dvp; 6064 vp = a->a_vp; 6065 vn_seqc_write_end(dvp); 6066 vn_seqc_write_end(vp); 6067 if (!rc) { 6068 vp->v_vflag |= VV_UNLINKED; 6069 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6070 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6071 } 6072 } 6073 6074 void 6075 vop_setattr_pre(void *ap) 6076 { 6077 struct vop_setattr_args *a; 6078 
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_setacl_pre(void *ap)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setacl_post(void *ap, int rc __unused)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
}

void
vop_setextattr_pre(void *ap)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setextattr_post(void *ap, int rc)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_symlink_pre(void *ap)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_open_post(void *ap, int rc)
{
	struct vop_open_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
}

void
vop_close_post(void *ap, int rc)
{
	struct vop_close_args *a = ap;

	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
	    !VN_IS_DOOMED(a->a_vp))) {
		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
	}
}

void
vop_read_post(void *ap, int rc)
{
	struct vop_read_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

void
vop_read_pgcache_post(void *ap, int rc)
{
	struct vop_read_pgcache_args *a = ap;

	if (!rc)
		VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
}

void
vop_readdir_post(void *ap, int rc)
{
	struct vop_readdir_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

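/*
 * Illustrative sketch (not part of this file): the NOTE_* hints posted by
 * the vop_*_post() hooks above are delivered to userland via EVFILT_VNODE
 * kqueue filters.  A monitoring program might register for them roughly as
 * follows; the path "/tmp/watched" is a made-up example.
 *
 *	int kq, fd;
 *	struct kevent ev;
 *
 *	kq = kqueue();
 *	fd = open("/tmp/watched", O_RDONLY);
 *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_WRITE | NOTE_ATTRIB | NOTE_DELETE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	for (;;) {
 *		kevent(kq, NULL, 0, &ev, 1, NULL);
 *		if (ev.fflags & NOTE_DELETE)
 *			break;
 *	}
 */
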
static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");

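/*
 * Illustrative sketch (not part of this file): fs_filtops above backs the
 * EVFILT_FS kqueue filter, which reports mount activity posted through
 * vfs_event_signal() as VQ_* flags.  Note that filt_fsevent() only latches
 * events the consumer asked for in fflags.  A userland consumer might look
 * roughly like this:
 *
 *	int kq;
 *	struct kevent ev;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, 0, EVFILT_FS, EV_ADD | EV_CLEAR,
 *	    VQ_MOUNT | VQ_UNMOUNT, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 *	if (ev.fflags & VQ_UNMOUNT)
 *		handle_unmount();	// made-up handler
 */
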
/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int filt_vfsread(struct knote *kn, long hint);
static int filt_vfswrite(struct knote *kn, long hint);
static int filt_vfsvnode(struct knote *kn, long hint);
static void filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_lock(void *arg, int what)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	if (what == LA_LOCKED)
		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
	else
		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
	    kn->kn_filter != EVFILT_WRITE),
	    ("READ/WRITE filter on a FIFO leaked through"));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}

/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
	vdrop(vp);
}

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	off_t size;
	int res;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
6422 */ 6423 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6424 VI_LOCK(vp); 6425 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6426 VI_UNLOCK(vp); 6427 return (1); 6428 } 6429 6430 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6431 return (0); 6432 6433 VI_LOCK(vp); 6434 kn->kn_data = size - kn->kn_fp->f_offset; 6435 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6436 VI_UNLOCK(vp); 6437 return (res); 6438 } 6439 6440 /*ARGSUSED*/ 6441 static int 6442 filt_vfswrite(struct knote *kn, long hint) 6443 { 6444 struct vnode *vp = (struct vnode *)kn->kn_hook; 6445 6446 VI_LOCK(vp); 6447 6448 /* 6449 * filesystem is gone, so set the EOF flag and schedule 6450 * the knote for deletion. 6451 */ 6452 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6453 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6454 6455 kn->kn_data = 0; 6456 VI_UNLOCK(vp); 6457 return (1); 6458 } 6459 6460 static int 6461 filt_vfsvnode(struct knote *kn, long hint) 6462 { 6463 struct vnode *vp = (struct vnode *)kn->kn_hook; 6464 int res; 6465 6466 VI_LOCK(vp); 6467 if (kn->kn_sfflags & hint) 6468 kn->kn_fflags |= hint; 6469 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6470 kn->kn_flags |= EV_EOF; 6471 VI_UNLOCK(vp); 6472 return (1); 6473 } 6474 res = (kn->kn_fflags != 0); 6475 VI_UNLOCK(vp); 6476 return (res); 6477 } 6478 6479 int 6480 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6481 { 6482 int error; 6483 6484 if (dp->d_reclen > ap->a_uio->uio_resid) 6485 return (ENAMETOOLONG); 6486 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6487 if (error) { 6488 if (ap->a_ncookies != NULL) { 6489 if (ap->a_cookies != NULL) 6490 free(ap->a_cookies, M_TEMP); 6491 ap->a_cookies = NULL; 6492 *ap->a_ncookies = 0; 6493 } 6494 return (error); 6495 } 6496 if (ap->a_ncookies == NULL) 6497 return (0); 6498 6499 KASSERT(ap->a_cookies, 6500 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6501 6502 *ap->a_cookies = realloc(*ap->a_cookies, 6503 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6504 (*ap->a_cookies)[*ap->a_ncookies] = off; 6505 *ap->a_ncookies += 1; 6506 return (0); 6507 } 6508 6509 /* 6510 * The purpose of this routine is to remove granularity from accmode_t, 6511 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6512 * VADMIN and VAPPEND. 6513 * 6514 * If it returns 0, the caller is supposed to continue with the usual 6515 * access checks using 'accmode' as modified by this routine. If it 6516 * returns nonzero value, the caller is supposed to return that value 6517 * as errno. 6518 * 6519 * Note that after this routine runs, accmode may be zero. 6520 */ 6521 int 6522 vfs_unixify_accmode(accmode_t *accmode) 6523 { 6524 /* 6525 * There is no way to specify explicit "deny" rule using 6526 * file mode or POSIX.1e ACLs. 6527 */ 6528 if (*accmode & VEXPLICIT_DENY) { 6529 *accmode = 0; 6530 return (0); 6531 } 6532 6533 /* 6534 * None of these can be translated into usual access bits. 6535 * Also, the common case for NFSv4 ACLs is to not contain 6536 * either of these bits. Caller should check for VWRITE 6537 * on the containing directory instead. 6538 */ 6539 if (*accmode & (VDELETE_CHILD | VDELETE)) 6540 return (EPERM); 6541 6542 if (*accmode & VADMIN_PERMS) { 6543 *accmode &= ~VADMIN_PERMS; 6544 *accmode |= VADMIN; 6545 } 6546 6547 /* 6548 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6549 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted. Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode returned "
					    "by VFS_CACHEDROOT and the one cached "
					    "(%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct mount_pcpu *mpcpu;
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp, mpcpu))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp, mpcpu);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp, mpcpu);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}

struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * ops > 0 guarantees there is nobody who can see this vnode
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

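/*
 * Illustrative sketch (not part of this file): a filesystem walking its
 * vnodes with MNT_VNODE_FOREACH_ALL().  The iterator hands back each vnode
 * with its interlock held; the loop body must release it (directly or via
 * an operation that consumes it), and MNT_VNODE_FOREACH_ALL_ABORT() frees
 * the marker when leaving the loop early.  do_something() is a made-up
 * placeholder.
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (do_something(vp) != 0) {
 *			VI_UNLOCK(vp);
 *			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 *			break;
 *		}
 *		VI_UNLOCK(vp);
 *	}
 */
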
struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	maybe_yield();
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note we may be racing against vdrop which transitioned the hold
	 * count to 0 and now waits for the ->mnt_listmtx lock.  This is fine,
	 * if we are the only user after we get the interlock we will just
	 * vdrop.
6807 */ 6808 vhold(vp); 6809 mtx_unlock(&mp->mnt_listmtx); 6810 VI_LOCK(vp); 6811 if (VN_IS_DOOMED(vp)) { 6812 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6813 goto out_lost; 6814 } 6815 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6816 /* 6817 * There is nothing to do if we are the last user. 6818 */ 6819 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6820 goto out_lost; 6821 mtx_lock(&mp->mnt_listmtx); 6822 return (true); 6823 out_lost: 6824 vdropl(vp); 6825 maybe_yield(); 6826 mtx_lock(&mp->mnt_listmtx); 6827 return (false); 6828 } 6829 6830 static struct vnode * 6831 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6832 void *cbarg) 6833 { 6834 struct vnode *vp; 6835 6836 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6837 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6838 restart: 6839 vp = TAILQ_NEXT(*mvp, v_lazylist); 6840 while (vp != NULL) { 6841 if (vp->v_type == VMARKER) { 6842 vp = TAILQ_NEXT(vp, v_lazylist); 6843 continue; 6844 } 6845 /* 6846 * See if we want to process the vnode. Note we may encounter a 6847 * long string of vnodes we don't care about and hog the list 6848 * as a result. Check for it and requeue the marker. 6849 */ 6850 VNPASS(!VN_IS_DOOMED(vp), vp); 6851 if (!cb(vp, cbarg)) { 6852 if (!should_yield()) { 6853 vp = TAILQ_NEXT(vp, v_lazylist); 6854 continue; 6855 } 6856 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6857 v_lazylist); 6858 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6859 v_lazylist); 6860 mtx_unlock(&mp->mnt_listmtx); 6861 kern_yield(PRI_USER); 6862 mtx_lock(&mp->mnt_listmtx); 6863 goto restart; 6864 } 6865 /* 6866 * Try-lock because this is the wrong lock order. 6867 */ 6868 if (!VI_TRYLOCK(vp) && 6869 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6870 goto restart; 6871 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6872 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6873 ("alien vnode on the lazy list %p %p", vp, mp)); 6874 VNPASS(vp->v_mount == mp, vp); 6875 VNPASS(!VN_IS_DOOMED(vp), vp); 6876 break; 6877 } 6878 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6879 6880 /* Check if we are done */ 6881 if (vp == NULL) { 6882 mtx_unlock(&mp->mnt_listmtx); 6883 mnt_vnode_markerfree_lazy(mvp, mp); 6884 return (NULL); 6885 } 6886 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6887 mtx_unlock(&mp->mnt_listmtx); 6888 ASSERT_VI_LOCKED(vp, "lazy iter"); 6889 return (vp); 6890 } 6891 6892 struct vnode * 6893 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6894 void *cbarg) 6895 { 6896 6897 maybe_yield(); 6898 mtx_lock(&mp->mnt_listmtx); 6899 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6900 } 6901 6902 struct vnode * 6903 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6904 void *cbarg) 6905 { 6906 struct vnode *vp; 6907 6908 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6909 return (NULL); 6910 6911 *mvp = vn_alloc_marker(mp); 6912 MNT_ILOCK(mp); 6913 MNT_REF(mp); 6914 MNT_IUNLOCK(mp); 6915 6916 mtx_lock(&mp->mnt_listmtx); 6917 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6918 if (vp == NULL) { 6919 mtx_unlock(&mp->mnt_listmtx); 6920 mnt_vnode_markerfree_lazy(mvp, mp); 6921 return (NULL); 6922 } 6923 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6924 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6925 } 6926 6927 void 6928 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6929 { 6930 6931 if (*mvp == NULL) 6932 return; 6933 6934 mtx_lock(&mp->mnt_listmtx); 6935 
int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as being in modify for as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

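/*
 * Illustrative sketch (not part of this file): the seqc lets lockless
 * consumers detect that a vnode was modified while they were inspecting it.
 * Writers bracket changes with vn_seqc_write_begin()/vn_seqc_write_end(),
 * as the vop_*_pre()/post() hooks above do; a reader validates roughly as
 * below.  The reader helpers are named as used elsewhere in the VFS; treat
 * the snippet as a sketch only.
 *
 *	seqc_t seqc;
 *
 *	seqc = vn_seqc_read_any(vp);
 *	if (seqc_in_modify(seqc))
 *		return (EAGAIN);
 *	... inspect vnode fields ...
 *	if (!vn_seqc_consistent(vp, seqc))
 *		return (EAGAIN);
 */
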
void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}

int
vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, __func__);
	error = VOP_GETATTR(vp, &vattr, cred);
	if (__predict_true(error == 0)) {
		if (vattr.va_size <= OFF_MAX)
			*size = vattr.va_size;
		else
			error = EFBIG;
	}
	return (error);
}

int
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
{
	int error;

	VOP_LOCK(vp, LK_SHARED);
	error = vn_getsize_locked(vp, size, cred);
	VOP_UNLOCK(vp);
	return (error);
}

#ifdef INVARIANTS
void
vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
{

	switch (vp->v_state) {
	case VSTATE_UNINITIALIZED:
		switch (state) {
		case VSTATE_CONSTRUCTED:
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_CONSTRUCTED:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DESTROYING:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DEAD:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DEAD:
		switch (state) {
		case VSTATE_UNINITIALIZED:
			return;
		default:
			break;
		}
		break;
	}

	vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
	panic("invalid state transition %d -> %d\n", vp->v_state, state);
}
#endif
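
/*
 * For reference, the state transitions accepted by vn_set_state_validate()
 * above (derived directly from the checks in that function) are:
 *
 *	VSTATE_UNINITIALIZED -> VSTATE_CONSTRUCTED or VSTATE_DESTROYING
 *	VSTATE_CONSTRUCTED   -> VSTATE_DESTROYING
 *	VSTATE_DESTROYING    -> VSTATE_DEAD
 *	VSTATE_DEAD          -> VSTATE_UNINITIALIZED
 *
 * Any other transition triggers the panic at the end of the function.
 */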