1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/asan.h> 52 #include <sys/bio.h> 53 #include <sys/buf.h> 54 #include <sys/capsicum.h> 55 #include <sys/condvar.h> 56 #include <sys/conf.h> 57 #include <sys/counter.h> 58 #include <sys/dirent.h> 59 #include <sys/event.h> 60 #include <sys/eventhandler.h> 61 #include <sys/extattr.h> 62 #include <sys/file.h> 63 #include <sys/fcntl.h> 64 #include <sys/jail.h> 65 #include <sys/kdb.h> 66 #include <sys/kernel.h> 67 #include <sys/kthread.h> 68 #include <sys/ktr.h> 69 #include <sys/lockf.h> 70 #include <sys/malloc.h> 71 #include <sys/mount.h> 72 #include <sys/namei.h> 73 #include <sys/pctrie.h> 74 #include <sys/priv.h> 75 #include <sys/reboot.h> 76 #include <sys/refcount.h> 77 #include <sys/rwlock.h> 78 #include <sys/sched.h> 79 #include <sys/sleepqueue.h> 80 #include <sys/smr.h> 81 #include <sys/smp.h> 82 #include <sys/stat.h> 83 #include <sys/sysctl.h> 84 #include <sys/syslog.h> 85 #include <sys/vmmeter.h> 86 #include <sys/vnode.h> 87 #include <sys/watchdog.h> 88 89 #include <machine/stdarg.h> 90 91 #include <security/mac/mac_framework.h> 92 93 #include <vm/vm.h> 94 #include <vm/vm_object.h> 95 #include <vm/vm_extern.h> 96 #include <vm/pmap.h> 97 #include <vm/vm_map.h> 98 #include <vm/vm_page.h> 99 #include <vm/vm_kern.h> 100 #include <vm/uma.h> 101 102 #ifdef DDB 103 #include <ddb/ddb.h> 104 #endif 105 106 static void delmntque(struct vnode *vp); 107 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 108 int slpflag, int slptimeo); 109 static void syncer_shutdown(void *arg, int howto); 110 static int vtryrecycle(struct vnode *vp); 111 static void v_init_counters(struct vnode *); 112 static void vn_seqc_init(struct vnode *); 113 static void vn_seqc_write_end_free(struct vnode *vp); 114 static void vgonel(struct vnode *); 115 static bool vhold_recycle_free(struct vnode *); 116 static void vfs_knllock(void *arg); 117 static void vfs_knlunlock(void *arg); 118 static void vfs_knl_assert_lock(void *arg, int what); 119 static void destroy_vpollinfo(struct vpollinfo *vi); 120 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 121 daddr_t startlbn, daddr_t endlbn); 122 static void vnlru_recalc(void); 123 124 /* 125 * Number of vnodes in existence. Increased whenever getnewvnode() 126 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 127 */ 128 static u_long __exclusive_cache_line numvnodes; 129 130 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 131 "Number of vnodes in existence"); 132 133 static counter_u64_t vnodes_created; 134 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 135 "Number of vnodes created by getnewvnode"); 136 137 /* 138 * Conversion tables for conversion from vnode types to inode formats 139 * and back. 140 */ 141 enum vtype iftovt_tab[16] = { 142 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 143 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 144 }; 145 int vttoif_tab[10] = { 146 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 147 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 148 }; 149 150 /* 151 * List of allocates vnodes in the system. 
152 */ 153 static TAILQ_HEAD(freelst, vnode) vnode_list; 154 static struct vnode *vnode_list_free_marker; 155 static struct vnode *vnode_list_reclaim_marker; 156 157 /* 158 * "Free" vnode target. Free vnodes are rarely completely free, but are 159 * just ones that are cheap to recycle. Usually they are for files which 160 * have been stat'd but not read; these usually have inode and namecache 161 * data attached to them. This target is the preferred minimum size of a 162 * sub-cache consisting mostly of such files. The system balances the size 163 * of this sub-cache with its complement to try to prevent either from 164 * thrashing while the other is relatively inactive. The targets express 165 * a preference for the best balance. 166 * 167 * "Above" this target there are 2 further targets (watermarks) related 168 * to recyling of free vnodes. In the best-operating case, the cache is 169 * exactly full, the free list has size between vlowat and vhiwat above the 170 * free target, and recycling from it and normal use maintains this state. 171 * Sometimes the free list is below vlowat or even empty, but this state 172 * is even better for immediate use provided the cache is not full. 173 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 174 * ones) to reach one of these states. The watermarks are currently hard- 175 * coded as 4% and 9% of the available space higher. These and the default 176 * of 25% for wantfreevnodes are too large if the memory size is large. 177 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 178 * whenever vnlru_proc() becomes active. 179 */ 180 static long wantfreevnodes; 181 static long __exclusive_cache_line freevnodes; 182 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 183 &freevnodes, 0, "Number of \"free\" vnodes"); 184 static long freevnodes_old; 185 186 static counter_u64_t recycles_count; 187 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 188 "Number of vnodes recycled to meet vnode cache targets"); 189 190 static counter_u64_t recycles_free_count; 191 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 192 "Number of free vnodes recycled to meet vnode cache targets"); 193 194 static counter_u64_t deferred_inact; 195 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact, 196 "Number of times inactive processing was deferred"); 197 198 /* To keep more than one thread at a time from running vfs_getnewfsid */ 199 static struct mtx mntid_mtx; 200 201 /* 202 * Lock for any access to the following: 203 * vnode_list 204 * numvnodes 205 * freevnodes 206 */ 207 static struct mtx __exclusive_cache_line vnode_list_mtx; 208 209 /* Publicly exported FS */ 210 struct nfs_public nfs_pub; 211 212 static uma_zone_t buf_trie_zone; 213 static smr_t buf_trie_smr; 214 215 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 216 static uma_zone_t vnode_zone; 217 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll"); 218 219 __read_frequently smr_t vfs_smr; 220 221 /* 222 * The workitem queue. 223 * 224 * It is useful to delay writes of file data and filesystem metadata 225 * for tens of seconds so that quickly created and deleted files need 226 * not waste disk bandwidth being created and removed. To realize this, 227 * we append vnodes to a "workitem" queue. When running with a soft 228 * updates implementation, most pending metadata dependencies should 229 * not wait for more than a few seconds. 
 * Thus, mounted block devices are delayed only about half the time that
 * file data is delayed.  Similarly, directory updates are more critical,
 * so are only delayed about a third the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	long freevnodes;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static u_long vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
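 *
 * The handler below backs the kern.maxvnodes sysctl (registered further
 * down via KERN_MAXVNODES).  A minimal usage sketch, with an arbitrary
 * example value:
 *
 *	# sysctl kern.maxvnodes		(read the current target)
 *	# sysctl kern.maxvnodes=400000	(set a new target)
 *
 * Setting a new value also rescales wantfreevnodes to one quarter of it
 * and resizes the VFS hash and the namecache, as the code below shows.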
312 */ 313 static int 314 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 315 { 316 u_long val; 317 int error; 318 319 val = desiredvnodes; 320 error = sysctl_handle_long(oidp, &val, 0, req); 321 if (error != 0 || req->newptr == NULL) 322 return (error); 323 324 if (val == desiredvnodes) 325 return (0); 326 mtx_lock(&vnode_list_mtx); 327 desiredvnodes = val; 328 wantfreevnodes = desiredvnodes / 4; 329 vnlru_recalc(); 330 mtx_unlock(&vnode_list_mtx); 331 /* 332 * XXX There is no protection against multiple threads changing 333 * desiredvnodes at the same time. Locking above only helps vnlru and 334 * getnewvnode. 335 */ 336 vfs_hash_changesize(desiredvnodes); 337 cache_changesize(desiredvnodes); 338 return (0); 339 } 340 341 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 342 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 343 "LU", "Target for maximum number of vnodes"); 344 345 static int 346 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 347 { 348 u_long val; 349 int error; 350 351 val = wantfreevnodes; 352 error = sysctl_handle_long(oidp, &val, 0, req); 353 if (error != 0 || req->newptr == NULL) 354 return (error); 355 356 if (val == wantfreevnodes) 357 return (0); 358 mtx_lock(&vnode_list_mtx); 359 wantfreevnodes = val; 360 vnlru_recalc(); 361 mtx_unlock(&vnode_list_mtx); 362 return (0); 363 } 364 365 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 366 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 367 "LU", "Target for minimum number of \"free\" vnodes"); 368 369 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 370 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 371 static int vnlru_nowhere; 372 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 373 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 374 375 static int 376 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 377 { 378 struct vnode *vp; 379 struct nameidata nd; 380 char *buf; 381 unsigned long ndflags; 382 int error; 383 384 if (req->newptr == NULL) 385 return (EINVAL); 386 if (req->newlen >= PATH_MAX) 387 return (E2BIG); 388 389 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 390 error = SYSCTL_IN(req, buf, req->newlen); 391 if (error != 0) 392 goto out; 393 394 buf[req->newlen] = '\0'; 395 396 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME; 397 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 398 if ((error = namei(&nd)) != 0) 399 goto out; 400 vp = nd.ni_vp; 401 402 if (VN_IS_DOOMED(vp)) { 403 /* 404 * This vnode is being recycled. Return != 0 to let the caller 405 * know that the sysctl had no effect. 
Return EAGAIN because a 406 * subsequent call will likely succeed (since namei will create 407 * a new vnode if necessary) 408 */ 409 error = EAGAIN; 410 goto putvnode; 411 } 412 413 counter_u64_add(recycles_count, 1); 414 vgone(vp); 415 putvnode: 416 NDFREE(&nd, 0); 417 out: 418 free(buf, M_TEMP); 419 return (error); 420 } 421 422 static int 423 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 424 { 425 struct thread *td = curthread; 426 struct vnode *vp; 427 struct file *fp; 428 int error; 429 int fd; 430 431 if (req->newptr == NULL) 432 return (EBADF); 433 434 error = sysctl_handle_int(oidp, &fd, 0, req); 435 if (error != 0) 436 return (error); 437 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 438 if (error != 0) 439 return (error); 440 vp = fp->f_vnode; 441 442 error = vn_lock(vp, LK_EXCLUSIVE); 443 if (error != 0) 444 goto drop; 445 446 counter_u64_add(recycles_count, 1); 447 vgone(vp); 448 VOP_UNLOCK(vp); 449 drop: 450 fdrop(fp, td); 451 return (error); 452 } 453 454 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 455 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 456 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 457 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 458 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 459 sysctl_ftry_reclaim_vnode, "I", 460 "Try to reclaim a vnode by its file descriptor"); 461 462 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 463 static int vnsz2log; 464 465 /* 466 * Support for the bufobj clean & dirty pctrie. 467 */ 468 static void * 469 buf_trie_alloc(struct pctrie *ptree) 470 { 471 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 472 } 473 474 static void 475 buf_trie_free(struct pctrie *ptree, void *node) 476 { 477 uma_zfree_smr(buf_trie_zone, node); 478 } 479 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 480 buf_trie_smr); 481 482 /* 483 * Initialize the vnode management data structures. 484 * 485 * Reevaluate the following cap on the number of vnodes after the physical 486 * memory size exceeds 512GB. In the limit, as the physical memory size 487 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 
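 *
 * Illustrative arithmetic (not from the original source): with the 64:1
 * KB-to-vnode ratio above, 512GB of physical memory corresponds to
 * 512 * 1024 * 1024 KB / 64 = 8,388,608 vnodes, which is the 8M cap
 * encoded in MAXVNODES_MAX below.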
488 */ 489 #ifndef MAXVNODES_MAX 490 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 491 #endif 492 493 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 494 495 static struct vnode * 496 vn_alloc_marker(struct mount *mp) 497 { 498 struct vnode *vp; 499 500 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 501 vp->v_type = VMARKER; 502 vp->v_mount = mp; 503 504 return (vp); 505 } 506 507 static void 508 vn_free_marker(struct vnode *vp) 509 { 510 511 MPASS(vp->v_type == VMARKER); 512 free(vp, M_VNODE_MARKER); 513 } 514 515 #ifdef KASAN 516 static int 517 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 518 { 519 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 520 return (0); 521 } 522 523 static void 524 vnode_dtor(void *mem, int size, void *arg __unused) 525 { 526 size_t end1, end2, off1, off2; 527 528 _Static_assert(offsetof(struct vnode, v_vnodelist) < 529 offsetof(struct vnode, v_dbatchcpu), 530 "KASAN marks require updating"); 531 532 off1 = offsetof(struct vnode, v_vnodelist); 533 off2 = offsetof(struct vnode, v_dbatchcpu); 534 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 535 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 536 537 /* 538 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 539 * after the vnode has been freed. Try to get some KASAN coverage by 540 * marking everything except those two fields as invalid. Because 541 * KASAN's tracking is not byte-granular, any preceding fields sharing 542 * the same 8-byte aligned word must also be marked valid. 543 */ 544 545 /* Handle the area from the start until v_vnodelist... */ 546 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 547 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 548 549 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 550 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 551 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 552 if (off2 > off1) 553 kasan_mark((void *)((char *)mem + off1), off2 - off1, 554 off2 - off1, KASAN_UMA_FREED); 555 556 /* ... and finally the area from v_dbatchcpu to the end. */ 557 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 558 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 559 KASAN_UMA_FREED); 560 } 561 #endif /* KASAN */ 562 563 /* 564 * Initialize a vnode as it first enters the zone. 565 */ 566 static int 567 vnode_init(void *mem, int size, int flags) 568 { 569 struct vnode *vp; 570 571 vp = mem; 572 bzero(vp, size); 573 /* 574 * Setup locks. 575 */ 576 vp->v_vnlock = &vp->v_lock; 577 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 578 /* 579 * By default, don't allow shared locks unless filesystems opt-in. 580 */ 581 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 582 LK_NOSHARE | LK_IS_VNODE); 583 /* 584 * Initialize bufobj. 585 */ 586 bufobj_init(&vp->v_bufobj, vp); 587 /* 588 * Initialize namecache. 589 */ 590 cache_vnode_init(vp); 591 /* 592 * Initialize rangelocks. 593 */ 594 rangelock_init(&vp->v_rl); 595 596 vp->v_dbatchcpu = NOCPU; 597 598 /* 599 * Check vhold_recycle_free for an explanation. 600 */ 601 vp->v_holdcnt = VHOLD_NO_SMR; 602 vp->v_type = VNON; 603 mtx_lock(&vnode_list_mtx); 604 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 605 mtx_unlock(&vnode_list_mtx); 606 return (0); 607 } 608 609 /* 610 * Free a vnode when it is cleared from the zone. 
611 */ 612 static void 613 vnode_fini(void *mem, int size) 614 { 615 struct vnode *vp; 616 struct bufobj *bo; 617 618 vp = mem; 619 vdbatch_dequeue(vp); 620 mtx_lock(&vnode_list_mtx); 621 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 622 mtx_unlock(&vnode_list_mtx); 623 rangelock_destroy(&vp->v_rl); 624 lockdestroy(vp->v_vnlock); 625 mtx_destroy(&vp->v_interlock); 626 bo = &vp->v_bufobj; 627 rw_destroy(BO_LOCKPTR(bo)); 628 629 kasan_mark(mem, size, size, 0); 630 } 631 632 /* 633 * Provide the size of NFS nclnode and NFS fh for calculation of the 634 * vnode memory consumption. The size is specified directly to 635 * eliminate dependency on NFS-private header. 636 * 637 * Other filesystems may use bigger or smaller (like UFS and ZFS) 638 * private inode data, but the NFS-based estimation is ample enough. 639 * Still, we care about differences in the size between 64- and 32-bit 640 * platforms. 641 * 642 * Namecache structure size is heuristically 643 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 644 */ 645 #ifdef _LP64 646 #define NFS_NCLNODE_SZ (528 + 64) 647 #define NC_SZ 148 648 #else 649 #define NFS_NCLNODE_SZ (360 + 32) 650 #define NC_SZ 92 651 #endif 652 653 static void 654 vntblinit(void *dummy __unused) 655 { 656 struct vdbatch *vd; 657 uma_ctor ctor; 658 uma_dtor dtor; 659 int cpu, physvnodes, virtvnodes; 660 u_int i; 661 662 /* 663 * Desiredvnodes is a function of the physical memory size and the 664 * kernel's heap size. Generally speaking, it scales with the 665 * physical memory size. The ratio of desiredvnodes to the physical 666 * memory size is 1:16 until desiredvnodes exceeds 98,304. 667 * Thereafter, the 668 * marginal ratio of desiredvnodes to the physical memory size is 669 * 1:64. However, desiredvnodes is limited by the kernel's heap 670 * size. The memory required by desiredvnodes vnodes and vm objects 671 * must not exceed 1/10th of the kernel's heap size. 672 */ 673 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 674 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 675 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 676 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 677 desiredvnodes = min(physvnodes, virtvnodes); 678 if (desiredvnodes > MAXVNODES_MAX) { 679 if (bootverbose) 680 printf("Reducing kern.maxvnodes %lu -> %lu\n", 681 desiredvnodes, MAXVNODES_MAX); 682 desiredvnodes = MAXVNODES_MAX; 683 } 684 wantfreevnodes = desiredvnodes / 4; 685 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 686 TAILQ_INIT(&vnode_list); 687 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 688 /* 689 * The lock is taken to appease WITNESS. 690 */ 691 mtx_lock(&vnode_list_mtx); 692 vnlru_recalc(); 693 mtx_unlock(&vnode_list_mtx); 694 vnode_list_free_marker = vn_alloc_marker(NULL); 695 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 696 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 697 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 698 699 #ifdef KASAN 700 ctor = vnode_ctor; 701 dtor = vnode_dtor; 702 #else 703 ctor = NULL; 704 dtor = NULL; 705 #endif 706 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 707 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 708 uma_zone_set_smr(vnode_zone, vfs_smr); 709 710 /* 711 * Preallocate enough nodes to support one-per buf so that 712 * we can not fail an insert. reassignbuf() callers can not 713 * tolerate the insertion failure. 
714 */ 715 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 716 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 717 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 718 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 719 uma_prealloc(buf_trie_zone, nbuf); 720 721 vnodes_created = counter_u64_alloc(M_WAITOK); 722 recycles_count = counter_u64_alloc(M_WAITOK); 723 recycles_free_count = counter_u64_alloc(M_WAITOK); 724 deferred_inact = counter_u64_alloc(M_WAITOK); 725 726 /* 727 * Initialize the filesystem syncer. 728 */ 729 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 730 &syncer_mask); 731 syncer_maxdelay = syncer_mask + 1; 732 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 733 cv_init(&sync_wakeup, "syncer"); 734 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 735 vnsz2log++; 736 vnsz2log--; 737 738 CPU_FOREACH(cpu) { 739 vd = DPCPU_ID_PTR((cpu), vd); 740 bzero(vd, sizeof(*vd)); 741 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 742 } 743 } 744 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 745 746 /* 747 * Mark a mount point as busy. Used to synchronize access and to delay 748 * unmounting. Eventually, mountlist_mtx is not released on failure. 749 * 750 * vfs_busy() is a custom lock, it can block the caller. 751 * vfs_busy() only sleeps if the unmount is active on the mount point. 752 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 753 * vnode belonging to mp. 754 * 755 * Lookup uses vfs_busy() to traverse mount points. 756 * root fs var fs 757 * / vnode lock A / vnode lock (/var) D 758 * /var vnode lock B /log vnode lock(/var/log) E 759 * vfs_busy lock C vfs_busy lock F 760 * 761 * Within each file system, the lock order is C->A->B and F->D->E. 762 * 763 * When traversing across mounts, the system follows that lock order: 764 * 765 * C->A->B 766 * | 767 * +->F->D->E 768 * 769 * The lookup() process for namei("/var") illustrates the process: 770 * VOP_LOOKUP() obtains B while A is held 771 * vfs_busy() obtains a shared lock on F while A and B are held 772 * vput() releases lock on B 773 * vput() releases lock on A 774 * VFS_ROOT() obtains lock on D while shared lock on F is held 775 * vfs_unbusy() releases shared lock on F 776 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 777 * Attempt to lock A (instead of vp_crossmp) while D is held would 778 * violate the global order, causing deadlocks. 779 * 780 * dounmount() locks B while F is drained. 781 */ 782 int 783 vfs_busy(struct mount *mp, int flags) 784 { 785 struct mount_pcpu *mpcpu; 786 787 MPASS((flags & ~MBF_MASK) == 0); 788 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 789 790 if (vfs_op_thread_enter(mp, mpcpu)) { 791 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 792 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 793 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 794 vfs_mp_count_add_pcpu(mpcpu, ref, 1); 795 vfs_mp_count_add_pcpu(mpcpu, lockref, 1); 796 vfs_op_thread_exit(mp, mpcpu); 797 if (flags & MBF_MNTLSTLOCK) 798 mtx_unlock(&mountlist_mtx); 799 return (0); 800 } 801 802 MNT_ILOCK(mp); 803 vfs_assert_mount_counters(mp); 804 MNT_REF(mp); 805 /* 806 * If mount point is currently being unmounted, sleep until the 807 * mount point fate is decided. If thread doing the unmounting fails, 808 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 809 * that this mount point has survived the unmount attempt and vfs_busy 810 * should retry. 
	 * Otherwise the unmounter thread will set the MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that the mount point
	 * is about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount it tried to busy is no longer
	 * valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and go the slow way if so.
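 *
 * Illustrative sketch of the cache indexing used below (the fsid values
 * are made up): for val[0] = 0x12345678 and val[1] = 0x9abc,
 *
 *	hash = 0x12345678 ^ 0x9abc;			(= 0x1234ccc4)
 *	slot = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);	(= 0xf0)
 *
 * i.e. the xor-folded halves of the fsid select one of 256 entries.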
913 */ 914 struct mount * 915 vfs_busyfs(fsid_t *fsid) 916 { 917 #define FSID_CACHE_SIZE 256 918 typedef struct mount * volatile vmp_t; 919 static vmp_t cache[FSID_CACHE_SIZE]; 920 struct mount *mp; 921 int error; 922 uint32_t hash; 923 924 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 925 hash = fsid->val[0] ^ fsid->val[1]; 926 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 927 mp = cache[hash]; 928 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 929 goto slow; 930 if (vfs_busy(mp, 0) != 0) { 931 cache[hash] = NULL; 932 goto slow; 933 } 934 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 935 return (mp); 936 else 937 vfs_unbusy(mp); 938 939 slow: 940 mtx_lock(&mountlist_mtx); 941 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 942 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 943 error = vfs_busy(mp, MBF_MNTLSTLOCK); 944 if (error) { 945 cache[hash] = NULL; 946 mtx_unlock(&mountlist_mtx); 947 return (NULL); 948 } 949 cache[hash] = mp; 950 return (mp); 951 } 952 } 953 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 954 mtx_unlock(&mountlist_mtx); 955 return ((struct mount *) 0); 956 } 957 958 /* 959 * Check if a user can access privileged mount options. 960 */ 961 int 962 vfs_suser(struct mount *mp, struct thread *td) 963 { 964 int error; 965 966 if (jailed(td->td_ucred)) { 967 /* 968 * If the jail of the calling thread lacks permission for 969 * this type of file system, deny immediately. 970 */ 971 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 972 return (EPERM); 973 974 /* 975 * If the file system was mounted outside the jail of the 976 * calling thread, deny immediately. 977 */ 978 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 979 return (EPERM); 980 } 981 982 /* 983 * If file system supports delegated administration, we don't check 984 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 985 * by the file system itself. 986 * If this is not the user that did original mount, we check for 987 * the PRIV_VFS_MOUNT_OWNER privilege. 988 */ 989 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 990 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 991 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 992 return (error); 993 } 994 return (0); 995 } 996 997 /* 998 * Get a new unique fsid. Try to make its val[0] unique, since this value 999 * will be used to create fake device numbers for stat(). Also try (but 1000 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1001 * support 16-bit device numbers. We end up with unique val[0]'s for the 1002 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1003 * 1004 * Keep in mind that several mounts may be running in parallel. Starting 1005 * the search one past where the previous search terminated is both a 1006 * micro-optimization and a defense against returning the same fsid to 1007 * different mounts. 
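 *
 * Illustrative sketch of the fsid constructed below (example values): for
 * a filesystem type with vfc_typenum 5 and mntid_base 0x1234, val[1]
 * becomes 5 and val[0] becomes
 *
 *	makedev(255, (5 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF))
 *	    == makedev(255, 0x05120034)
 *
 * so the type number lands in the top byte of the minor and the 16-bit
 * counter is split across two lower bytes.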
1008 */ 1009 void 1010 vfs_getnewfsid(struct mount *mp) 1011 { 1012 static uint16_t mntid_base; 1013 struct mount *nmp; 1014 fsid_t tfsid; 1015 int mtype; 1016 1017 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1018 mtx_lock(&mntid_mtx); 1019 mtype = mp->mnt_vfc->vfc_typenum; 1020 tfsid.val[1] = mtype; 1021 mtype = (mtype & 0xFF) << 24; 1022 for (;;) { 1023 tfsid.val[0] = makedev(255, 1024 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1025 mntid_base++; 1026 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1027 break; 1028 vfs_rel(nmp); 1029 } 1030 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1031 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1032 mtx_unlock(&mntid_mtx); 1033 } 1034 1035 /* 1036 * Knob to control the precision of file timestamps: 1037 * 1038 * 0 = seconds only; nanoseconds zeroed. 1039 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1040 * 2 = seconds and nanoseconds, truncated to microseconds. 1041 * >=3 = seconds and nanoseconds, maximum precision. 1042 */ 1043 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1044 1045 static int timestamp_precision = TSP_USEC; 1046 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1047 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1048 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1049 "3+: sec + ns (max. precision))"); 1050 1051 /* 1052 * Get a current timestamp. 1053 */ 1054 void 1055 vfs_timestamp(struct timespec *tsp) 1056 { 1057 struct timeval tv; 1058 1059 switch (timestamp_precision) { 1060 case TSP_SEC: 1061 tsp->tv_sec = time_second; 1062 tsp->tv_nsec = 0; 1063 break; 1064 case TSP_HZ: 1065 getnanotime(tsp); 1066 break; 1067 case TSP_USEC: 1068 microtime(&tv); 1069 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1070 break; 1071 case TSP_NSEC: 1072 default: 1073 nanotime(tsp); 1074 break; 1075 } 1076 } 1077 1078 /* 1079 * Set vnode attributes to VNOVAL 1080 */ 1081 void 1082 vattr_null(struct vattr *vap) 1083 { 1084 1085 vap->va_type = VNON; 1086 vap->va_size = VNOVAL; 1087 vap->va_bytes = VNOVAL; 1088 vap->va_mode = VNOVAL; 1089 vap->va_nlink = VNOVAL; 1090 vap->va_uid = VNOVAL; 1091 vap->va_gid = VNOVAL; 1092 vap->va_fsid = VNOVAL; 1093 vap->va_fileid = VNOVAL; 1094 vap->va_blocksize = VNOVAL; 1095 vap->va_rdev = VNOVAL; 1096 vap->va_atime.tv_sec = VNOVAL; 1097 vap->va_atime.tv_nsec = VNOVAL; 1098 vap->va_mtime.tv_sec = VNOVAL; 1099 vap->va_mtime.tv_nsec = VNOVAL; 1100 vap->va_ctime.tv_sec = VNOVAL; 1101 vap->va_ctime.tv_nsec = VNOVAL; 1102 vap->va_birthtime.tv_sec = VNOVAL; 1103 vap->va_birthtime.tv_nsec = VNOVAL; 1104 vap->va_flags = VNOVAL; 1105 vap->va_gen = VNOVAL; 1106 vap->va_vaflags = 0; 1107 } 1108 1109 /* 1110 * Try to reduce the total number of vnodes. 1111 * 1112 * This routine (and its user) are buggy in at least the following ways: 1113 * - all parameters were picked years ago when RAM sizes were significantly 1114 * smaller 1115 * - it can pick vnodes based on pages used by the vm object, but filesystems 1116 * like ZFS don't use it making the pick broken 1117 * - since ZFS has its own aging policy it gets partially combated by this one 1118 * - a dedicated method should be provided for filesystems to let them decide 1119 * whether the vnode should be recycled 1120 * 1121 * This routine is called when we have too many vnodes. It attempts 1122 * to free <count> vnodes and will potentially free vnodes that still 1123 * have VM backing store (VM backing store is typically the cause 1124 * of a vnode blowout so we want to do this). Therefore, this operation 1125 * is not considered cheap. 
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
1191 */ 1192 if (!VI_TRYLOCK(vp)) 1193 goto next_iter; 1194 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1195 VI_UNLOCK(vp); 1196 goto next_iter; 1197 } 1198 if (vp->v_mount == NULL) { 1199 VI_UNLOCK(vp); 1200 goto next_iter; 1201 } 1202 vholdl(vp); 1203 VI_UNLOCK(vp); 1204 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1205 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1206 mtx_unlock(&vnode_list_mtx); 1207 1208 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1209 vdrop(vp); 1210 goto next_iter_unlocked; 1211 } 1212 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1213 vdrop(vp); 1214 vn_finished_write(mp); 1215 goto next_iter_unlocked; 1216 } 1217 1218 VI_LOCK(vp); 1219 if (vp->v_usecount > 0 || 1220 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1221 (vp->v_object != NULL && vp->v_object->handle == vp && 1222 vp->v_object->resident_page_count > trigger)) { 1223 VOP_UNLOCK(vp); 1224 vdropl(vp); 1225 vn_finished_write(mp); 1226 goto next_iter_unlocked; 1227 } 1228 counter_u64_add(recycles_count, 1); 1229 vgonel(vp); 1230 VOP_UNLOCK(vp); 1231 vdropl(vp); 1232 vn_finished_write(mp); 1233 done++; 1234 next_iter_unlocked: 1235 if (should_yield()) 1236 kern_yield(PRI_USER); 1237 mtx_lock(&vnode_list_mtx); 1238 goto restart; 1239 next_iter: 1240 MPASS(vp->v_type != VMARKER); 1241 if (!should_yield()) 1242 continue; 1243 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1244 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1245 mtx_unlock(&vnode_list_mtx); 1246 kern_yield(PRI_USER); 1247 mtx_lock(&vnode_list_mtx); 1248 goto restart; 1249 } 1250 if (done == 0 && !retried) { 1251 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1252 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1253 retried = true; 1254 goto restart; 1255 } 1256 return (done); 1257 } 1258 1259 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1260 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1261 0, 1262 "limit on vnode free requests per call to the vnlru_free routine"); 1263 1264 /* 1265 * Attempt to reduce the free list by the requested amount. 1266 */ 1267 static int 1268 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1269 { 1270 struct vnode *vp; 1271 struct mount *mp; 1272 int ocount; 1273 1274 mtx_assert(&vnode_list_mtx, MA_OWNED); 1275 if (count > max_vnlru_free) 1276 count = max_vnlru_free; 1277 ocount = count; 1278 vp = mvp; 1279 for (;;) { 1280 if (count == 0) { 1281 break; 1282 } 1283 vp = TAILQ_NEXT(vp, v_vnodelist); 1284 if (__predict_false(vp == NULL)) { 1285 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1286 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1287 break; 1288 } 1289 if (__predict_false(vp->v_type == VMARKER)) 1290 continue; 1291 if (vp->v_holdcnt > 0) 1292 continue; 1293 /* 1294 * Don't recycle if our vnode is from different type 1295 * of mount point. Note that mp is type-safe, the 1296 * check does not reach unmapped address even if 1297 * vnode is reclaimed. 
1298 */ 1299 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1300 mp->mnt_op != mnt_op) { 1301 continue; 1302 } 1303 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1304 continue; 1305 } 1306 if (!vhold_recycle_free(vp)) 1307 continue; 1308 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1309 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1310 mtx_unlock(&vnode_list_mtx); 1311 if (vtryrecycle(vp) == 0) 1312 count--; 1313 mtx_lock(&vnode_list_mtx); 1314 vp = mvp; 1315 } 1316 return (ocount - count); 1317 } 1318 1319 static int 1320 vnlru_free_locked(int count) 1321 { 1322 1323 mtx_assert(&vnode_list_mtx, MA_OWNED); 1324 return (vnlru_free_impl(count, NULL, vnode_list_free_marker)); 1325 } 1326 1327 void 1328 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1329 { 1330 1331 MPASS(mnt_op != NULL); 1332 MPASS(mvp != NULL); 1333 VNPASS(mvp->v_type == VMARKER, mvp); 1334 mtx_lock(&vnode_list_mtx); 1335 vnlru_free_impl(count, mnt_op, mvp); 1336 mtx_unlock(&vnode_list_mtx); 1337 } 1338 1339 struct vnode * 1340 vnlru_alloc_marker(void) 1341 { 1342 struct vnode *mvp; 1343 1344 mvp = vn_alloc_marker(NULL); 1345 mtx_lock(&vnode_list_mtx); 1346 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1347 mtx_unlock(&vnode_list_mtx); 1348 return (mvp); 1349 } 1350 1351 void 1352 vnlru_free_marker(struct vnode *mvp) 1353 { 1354 mtx_lock(&vnode_list_mtx); 1355 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1356 mtx_unlock(&vnode_list_mtx); 1357 vn_free_marker(mvp); 1358 } 1359 1360 static void 1361 vnlru_recalc(void) 1362 { 1363 1364 mtx_assert(&vnode_list_mtx, MA_OWNED); 1365 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1366 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1367 vlowat = vhiwat / 2; 1368 } 1369 1370 /* 1371 * Attempt to recycle vnodes in a context that is always safe to block. 1372 * Calling vlrurecycle() from the bowels of filesystem code has some 1373 * interesting deadlock problems. 1374 */ 1375 static struct proc *vnlruproc; 1376 static int vnlruproc_sig; 1377 1378 /* 1379 * The main freevnodes counter is only updated when threads requeue their vnode 1380 * batches. CPUs are conditionally walked to compute a more accurate total. 1381 * 1382 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1383 * at any given moment can still exceed slop, but it should not be by significant 1384 * margin in practice. 1385 */ 1386 #define VNLRU_FREEVNODES_SLOP 128 1387 1388 static __inline void 1389 vfs_freevnodes_inc(void) 1390 { 1391 struct vdbatch *vd; 1392 1393 critical_enter(); 1394 vd = DPCPU_PTR(vd); 1395 vd->freevnodes++; 1396 critical_exit(); 1397 } 1398 1399 static __inline void 1400 vfs_freevnodes_dec(void) 1401 { 1402 struct vdbatch *vd; 1403 1404 critical_enter(); 1405 vd = DPCPU_PTR(vd); 1406 vd->freevnodes--; 1407 critical_exit(); 1408 } 1409 1410 static u_long 1411 vnlru_read_freevnodes(void) 1412 { 1413 struct vdbatch *vd; 1414 long slop; 1415 int cpu; 1416 1417 mtx_assert(&vnode_list_mtx, MA_OWNED); 1418 if (freevnodes > freevnodes_old) 1419 slop = freevnodes - freevnodes_old; 1420 else 1421 slop = freevnodes_old - freevnodes; 1422 if (slop < VNLRU_FREEVNODES_SLOP) 1423 return (freevnodes >= 0 ? freevnodes : 0); 1424 freevnodes_old = freevnodes; 1425 CPU_FOREACH(cpu) { 1426 vd = DPCPU_ID_PTR((cpu), vd); 1427 freevnodes_old += vd->freevnodes; 1428 } 1429 return (freevnodes_old >= 0 ? 
freevnodes_old : 0); 1430 } 1431 1432 static bool 1433 vnlru_under(u_long rnumvnodes, u_long limit) 1434 { 1435 u_long rfreevnodes, space; 1436 1437 if (__predict_false(rnumvnodes > desiredvnodes)) 1438 return (true); 1439 1440 space = desiredvnodes - rnumvnodes; 1441 if (space < limit) { 1442 rfreevnodes = vnlru_read_freevnodes(); 1443 if (rfreevnodes > wantfreevnodes) 1444 space += rfreevnodes - wantfreevnodes; 1445 } 1446 return (space < limit); 1447 } 1448 1449 static bool 1450 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1451 { 1452 long rfreevnodes, space; 1453 1454 if (__predict_false(rnumvnodes > desiredvnodes)) 1455 return (true); 1456 1457 space = desiredvnodes - rnumvnodes; 1458 if (space < limit) { 1459 rfreevnodes = atomic_load_long(&freevnodes); 1460 if (rfreevnodes > wantfreevnodes) 1461 space += rfreevnodes - wantfreevnodes; 1462 } 1463 return (space < limit); 1464 } 1465 1466 static void 1467 vnlru_kick(void) 1468 { 1469 1470 mtx_assert(&vnode_list_mtx, MA_OWNED); 1471 if (vnlruproc_sig == 0) { 1472 vnlruproc_sig = 1; 1473 wakeup(vnlruproc); 1474 } 1475 } 1476 1477 static void 1478 vnlru_proc(void) 1479 { 1480 u_long rnumvnodes, rfreevnodes, target; 1481 unsigned long onumvnodes; 1482 int done, force, trigger, usevnodes; 1483 bool reclaim_nc_src, want_reread; 1484 1485 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1486 SHUTDOWN_PRI_FIRST); 1487 1488 force = 0; 1489 want_reread = false; 1490 for (;;) { 1491 kproc_suspend_check(vnlruproc); 1492 mtx_lock(&vnode_list_mtx); 1493 rnumvnodes = atomic_load_long(&numvnodes); 1494 1495 if (want_reread) { 1496 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1497 want_reread = false; 1498 } 1499 1500 /* 1501 * If numvnodes is too large (due to desiredvnodes being 1502 * adjusted using its sysctl, or emergency growth), first 1503 * try to reduce it by discarding from the free list. 1504 */ 1505 if (rnumvnodes > desiredvnodes) { 1506 vnlru_free_locked(rnumvnodes - desiredvnodes); 1507 rnumvnodes = atomic_load_long(&numvnodes); 1508 } 1509 /* 1510 * Sleep if the vnode cache is in a good state. This is 1511 * when it is not over-full and has space for about a 4% 1512 * or 9% expansion (by growing its size or inexcessively 1513 * reducing its free list). Otherwise, try to reclaim 1514 * space for a 10% expansion. 1515 */ 1516 if (vstir && force == 0) { 1517 force = 1; 1518 vstir = 0; 1519 } 1520 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1521 vnlruproc_sig = 0; 1522 wakeup(&vnlruproc_sig); 1523 msleep(vnlruproc, &vnode_list_mtx, 1524 PVFS|PDROP, "vlruwt", hz); 1525 continue; 1526 } 1527 rfreevnodes = vnlru_read_freevnodes(); 1528 1529 onumvnodes = rnumvnodes; 1530 /* 1531 * Calculate parameters for recycling. These are the same 1532 * throughout the loop to give some semblance of fairness. 1533 * The trigger point is to avoid recycling vnodes with lots 1534 * of resident pages. We aren't trying to free memory; we 1535 * are trying to recycle or at least free vnodes. 1536 */ 1537 if (rnumvnodes <= desiredvnodes) 1538 usevnodes = rnumvnodes - rfreevnodes; 1539 else 1540 usevnodes = rnumvnodes; 1541 if (usevnodes <= 0) 1542 usevnodes = 1; 1543 /* 1544 * The trigger value is is chosen to give a conservatively 1545 * large value to ensure that it alone doesn't prevent 1546 * making progress. The value can easily be so large that 1547 * it is effectively infinite in some congested and 1548 * misconfigured cases, and this is necessary. Normally 1549 * it is about 8 to 100 (pages), which is quite large. 
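		 *
		 * Illustrative arithmetic (example numbers, not from the
		 * source): with vm_cnt.v_page_count = 1,048,576 (4GB of 4K
		 * pages) and usevnodes = 25,000, the computation below gives
		 *
		 *	trigger = 1048576 * 2 / 25000 = 83 pages
		 *
		 * which falls in the "about 8 to 100" range mentioned above.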
1550 */ 1551 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1552 if (force < 2) 1553 trigger = vsmalltrigger; 1554 reclaim_nc_src = force >= 3; 1555 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1556 target = target / 10 + 1; 1557 done = vlrureclaim(reclaim_nc_src, trigger, target); 1558 mtx_unlock(&vnode_list_mtx); 1559 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1560 uma_reclaim(UMA_RECLAIM_DRAIN); 1561 if (done == 0) { 1562 if (force == 0 || force == 1) { 1563 force = 2; 1564 continue; 1565 } 1566 if (force == 2) { 1567 force = 3; 1568 continue; 1569 } 1570 want_reread = true; 1571 force = 0; 1572 vnlru_nowhere++; 1573 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1574 } else { 1575 want_reread = true; 1576 kern_yield(PRI_USER); 1577 } 1578 } 1579 } 1580 1581 static struct kproc_desc vnlru_kp = { 1582 "vnlru", 1583 vnlru_proc, 1584 &vnlruproc 1585 }; 1586 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1587 &vnlru_kp); 1588 1589 /* 1590 * Routines having to do with the management of the vnode table. 1591 */ 1592 1593 /* 1594 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1595 * before we actually vgone(). This function must be called with the vnode 1596 * held to prevent the vnode from being returned to the free list midway 1597 * through vgone(). 1598 */ 1599 static int 1600 vtryrecycle(struct vnode *vp) 1601 { 1602 struct mount *vnmp; 1603 1604 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1605 VNASSERT(vp->v_holdcnt, vp, 1606 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1607 /* 1608 * This vnode may found and locked via some other list, if so we 1609 * can't recycle it yet. 1610 */ 1611 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1612 CTR2(KTR_VFS, 1613 "%s: impossible to recycle, vp %p lock is already held", 1614 __func__, vp); 1615 vdrop(vp); 1616 return (EWOULDBLOCK); 1617 } 1618 /* 1619 * Don't recycle if its filesystem is being suspended. 1620 */ 1621 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1622 VOP_UNLOCK(vp); 1623 CTR2(KTR_VFS, 1624 "%s: impossible to recycle, cannot start the write for %p", 1625 __func__, vp); 1626 vdrop(vp); 1627 return (EBUSY); 1628 } 1629 /* 1630 * If we got this far, we need to acquire the interlock and see if 1631 * anyone picked up this vnode from another list. If not, we will 1632 * mark it with DOOMED via vgonel() so that anyone who does find it 1633 * will skip over it. 1634 */ 1635 VI_LOCK(vp); 1636 if (vp->v_usecount) { 1637 VOP_UNLOCK(vp); 1638 vdropl(vp); 1639 vn_finished_write(vnmp); 1640 CTR2(KTR_VFS, 1641 "%s: impossible to recycle, %p is already referenced", 1642 __func__, vp); 1643 return (EBUSY); 1644 } 1645 if (!VN_IS_DOOMED(vp)) { 1646 counter_u64_add(recycles_free_count, 1); 1647 vgonel(vp); 1648 } 1649 VOP_UNLOCK(vp); 1650 vdropl(vp); 1651 vn_finished_write(vnmp); 1652 return (0); 1653 } 1654 1655 /* 1656 * Allocate a new vnode. 1657 * 1658 * The operation never returns an error. Returning an error was disabled 1659 * in r145385 (dated 2005) with the following comment: 1660 * 1661 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1662 * 1663 * Given the age of this commit (almost 15 years at the time of writing this 1664 * comment) restoring the ability to fail requires a significant audit of 1665 * all codepaths. 1666 * 1667 * The routine can try to free a vnode or stall for up to 1 second waiting for 1668 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 
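 *
 * A rough sketch of the allocation flow implemented below (commentary
 * only; the authoritative logic is the code itself):
 *
 *	vn_alloc():
 *		bump numvnodes optimistically;
 *		if the cache is not running low (vnlru_under_unlocked()
 *		is false), allocate from vnode_zone with M_WAITOK;
 *		otherwise undo the bump and fall back to vn_alloc_hard(),
 *		which may recycle a free vnode, kick vnlru and/or sleep
 *		for up to a second before allocating anyway.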
 */
static u_long vn_alloc_cyclecount;

static struct vnode * __noinline
vn_alloc_hard(struct mount *mp)
{
	u_long rnumvnodes, rfreevnodes;

	mtx_lock(&vnode_list_mtx);
	rnumvnodes = atomic_load_long(&numvnodes);
	if (rnumvnodes + 1 < desiredvnodes) {
		vn_alloc_cyclecount = 0;
		goto alloc;
	}
	rfreevnodes = vnlru_read_freevnodes();
	if (vn_alloc_cyclecount++ >= rfreevnodes) {
		vn_alloc_cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (vnlru_free_locked(1) > 0)
		goto alloc;
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		/*
		 * Wait for space for a new vnode.
		 */
		vnlru_kick();
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1);
	}
alloc:
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick();
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	u_long rnumvnodes;

	if (__predict_false(vn_alloc_cyclecount != 0))
		return (vn_alloc_hard(mp));
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
		atomic_subtract_long(&numvnodes, 1);
		return (vn_alloc_hard(mp));
	}

	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree_smr(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name. Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.
We ensure that each 1777 * filesystem use a single static name pointer for its tag so 1778 * that we can compare pointers rather than doing a strcmp(). 1779 */ 1780 lo = &vp->v_vnlock->lock_object; 1781 #ifdef WITNESS 1782 if (lo->lo_name != tag) { 1783 #endif 1784 lo->lo_name = tag; 1785 #ifdef WITNESS 1786 WITNESS_DESTROY(lo); 1787 WITNESS_INIT(lo, tag); 1788 } 1789 #endif 1790 /* 1791 * By default, don't allow shared locks unless filesystems opt-in. 1792 */ 1793 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1794 /* 1795 * Finalize various vnode identity bits. 1796 */ 1797 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1798 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1799 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1800 vp->v_type = VNON; 1801 vp->v_op = vops; 1802 vp->v_irflag = 0; 1803 v_init_counters(vp); 1804 vn_seqc_init(vp); 1805 vp->v_bufobj.bo_ops = &buf_ops_bio; 1806 #ifdef DIAGNOSTIC 1807 if (mp == NULL && vops != &dead_vnodeops) 1808 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1809 #endif 1810 #ifdef MAC 1811 mac_vnode_init(vp); 1812 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1813 mac_vnode_associate_singlelabel(mp, vp); 1814 #endif 1815 if (mp != NULL) { 1816 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1817 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1818 vp->v_vflag |= VV_NOKNOTE; 1819 } 1820 1821 /* 1822 * For the filesystems which do not use vfs_hash_insert(), 1823 * still initialize v_hash to have vfs_hash_index() useful. 1824 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1825 * its own hashing. 1826 */ 1827 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1828 1829 *vpp = vp; 1830 return (0); 1831 } 1832 1833 void 1834 getnewvnode_reserve(void) 1835 { 1836 struct thread *td; 1837 1838 td = curthread; 1839 MPASS(td->td_vp_reserved == NULL); 1840 td->td_vp_reserved = vn_alloc(NULL); 1841 } 1842 1843 void 1844 getnewvnode_drop_reserve(void) 1845 { 1846 struct thread *td; 1847 1848 td = curthread; 1849 if (td->td_vp_reserved != NULL) { 1850 vn_free(td->td_vp_reserved); 1851 td->td_vp_reserved = NULL; 1852 } 1853 } 1854 1855 static void __noinline 1856 freevnode(struct vnode *vp) 1857 { 1858 struct bufobj *bo; 1859 1860 /* 1861 * The vnode has been marked for destruction, so free it. 1862 * 1863 * The vnode will be returned to the zone where it will 1864 * normally remain until it is needed for another vnode. We 1865 * need to cleanup (or verify that the cleanup has already 1866 * been done) any residual data left from its current use 1867 * so as not to contaminate the freshly allocated vnode. 1868 */ 1869 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1870 /* 1871 * Paired with vgone. 
1872 */ 1873 vn_seqc_write_end_free(vp); 1874 1875 bo = &vp->v_bufobj; 1876 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1877 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1878 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1879 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1880 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1881 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1882 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1883 ("clean blk trie not empty")); 1884 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1885 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1886 ("dirty blk trie not empty")); 1887 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1888 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1889 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1890 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1891 ("Dangling rangelock waiters")); 1892 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1893 ("Leaked inactivation")); 1894 VI_UNLOCK(vp); 1895 #ifdef MAC 1896 mac_vnode_destroy(vp); 1897 #endif 1898 if (vp->v_pollinfo != NULL) { 1899 destroy_vpollinfo(vp->v_pollinfo); 1900 vp->v_pollinfo = NULL; 1901 } 1902 vp->v_mountedhere = NULL; 1903 vp->v_unpcb = NULL; 1904 vp->v_rdev = NULL; 1905 vp->v_fifoinfo = NULL; 1906 vp->v_iflag = 0; 1907 vp->v_vflag = 0; 1908 bo->bo_flag = 0; 1909 vn_free(vp); 1910 } 1911 1912 /* 1913 * Delete from old mount point vnode list, if on one. 1914 */ 1915 static void 1916 delmntque(struct vnode *vp) 1917 { 1918 struct mount *mp; 1919 1920 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1921 1922 mp = vp->v_mount; 1923 if (mp == NULL) 1924 return; 1925 MNT_ILOCK(mp); 1926 VI_LOCK(vp); 1927 vp->v_mount = NULL; 1928 VI_UNLOCK(vp); 1929 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1930 ("bad mount point vnode list size")); 1931 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1932 mp->mnt_nvnodelistsize--; 1933 MNT_REL(mp); 1934 MNT_IUNLOCK(mp); 1935 } 1936 1937 static void 1938 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1939 { 1940 1941 vp->v_data = NULL; 1942 vp->v_op = &dead_vnodeops; 1943 vgone(vp); 1944 vput(vp); 1945 } 1946 1947 /* 1948 * Insert into list of vnodes for the new mount point, if available. 1949 */ 1950 int 1951 insmntque1(struct vnode *vp, struct mount *mp, 1952 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1953 { 1954 1955 KASSERT(vp->v_mount == NULL, 1956 ("insmntque: vnode already on per mount vnode list")); 1957 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1958 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1959 1960 /* 1961 * We acquire the vnode interlock early to ensure that the 1962 * vnode cannot be recycled by another process releasing a 1963 * holdcnt on it before we get it on both the vnode list 1964 * and the active vnode list. The mount mutex protects only 1965 * manipulation of the vnode list and the vnode freelist 1966 * mutex protects only manipulation of the active vnode list. 1967 * Hence the need to hold the vnode interlock throughout. 
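 *
 * For context, a sketch of the usual caller sequence (the "foofs" names are
 * hypothetical, not taken from any particular filesystem):
 *
 *	error = getnewvnode("foofs", mp, &foofs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *
 * On the error path above the vnode has already been vgone'd and vput'd by
 * insmntque_stddtr(), so the caller must not touch it again.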
1968 */ 1969 MNT_ILOCK(mp); 1970 VI_LOCK(vp); 1971 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1972 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1973 mp->mnt_nvnodelistsize == 0)) && 1974 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1975 VI_UNLOCK(vp); 1976 MNT_IUNLOCK(mp); 1977 if (dtr != NULL) 1978 dtr(vp, dtr_arg); 1979 return (EBUSY); 1980 } 1981 vp->v_mount = mp; 1982 MNT_REF(mp); 1983 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1984 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1985 ("neg mount point vnode list size")); 1986 mp->mnt_nvnodelistsize++; 1987 VI_UNLOCK(vp); 1988 MNT_IUNLOCK(mp); 1989 return (0); 1990 } 1991 1992 int 1993 insmntque(struct vnode *vp, struct mount *mp) 1994 { 1995 1996 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1997 } 1998 1999 /* 2000 * Flush out and invalidate all buffers associated with a bufobj 2001 * Called with the underlying object locked. 2002 */ 2003 int 2004 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2005 { 2006 int error; 2007 2008 BO_LOCK(bo); 2009 if (flags & V_SAVE) { 2010 error = bufobj_wwait(bo, slpflag, slptimeo); 2011 if (error) { 2012 BO_UNLOCK(bo); 2013 return (error); 2014 } 2015 if (bo->bo_dirty.bv_cnt > 0) { 2016 BO_UNLOCK(bo); 2017 do { 2018 error = BO_SYNC(bo, MNT_WAIT); 2019 } while (error == ERELOOKUP); 2020 if (error != 0) 2021 return (error); 2022 BO_LOCK(bo); 2023 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2024 BO_UNLOCK(bo); 2025 return (EBUSY); 2026 } 2027 } 2028 } 2029 /* 2030 * If you alter this loop please notice that interlock is dropped and 2031 * reacquired in flushbuflist. Special care is needed to ensure that 2032 * no race conditions occur from this. 2033 */ 2034 do { 2035 error = flushbuflist(&bo->bo_clean, 2036 flags, bo, slpflag, slptimeo); 2037 if (error == 0 && !(flags & V_CLEANONLY)) 2038 error = flushbuflist(&bo->bo_dirty, 2039 flags, bo, slpflag, slptimeo); 2040 if (error != 0 && error != EAGAIN) { 2041 BO_UNLOCK(bo); 2042 return (error); 2043 } 2044 } while (error != 0); 2045 2046 /* 2047 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2048 * have write I/O in-progress but if there is a VM object then the 2049 * VM object can also have read-I/O in-progress. 2050 */ 2051 do { 2052 bufobj_wwait(bo, 0, 0); 2053 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2054 BO_UNLOCK(bo); 2055 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2056 BO_LOCK(bo); 2057 } 2058 } while (bo->bo_numoutput > 0); 2059 BO_UNLOCK(bo); 2060 2061 /* 2062 * Destroy the copy in the VM cache, too. 2063 */ 2064 if (bo->bo_object != NULL && 2065 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2066 VM_OBJECT_WLOCK(bo->bo_object); 2067 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2068 OBJPR_CLEANONLY : 0); 2069 VM_OBJECT_WUNLOCK(bo->bo_object); 2070 } 2071 2072 #ifdef INVARIANTS 2073 BO_LOCK(bo); 2074 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2075 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2076 bo->bo_clean.bv_cnt > 0)) 2077 panic("vinvalbuf: flush failed"); 2078 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2079 bo->bo_dirty.bv_cnt > 0) 2080 panic("vinvalbuf: flush dirty failed"); 2081 BO_UNLOCK(bo); 2082 #endif 2083 return (0); 2084 } 2085 2086 /* 2087 * Flush out and invalidate all buffers associated with a vnode. 2088 * Called with the underlying object locked. 
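 *
 * For example, vgonel() below first calls vinvalbuf(vp, V_SAVE, 0, 0) to try
 * to write dirty buffers out and, if that fails, retries with flags == 0 to
 * simply discard them.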
2089 */ 2090 int 2091 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2092 { 2093 2094 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2095 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2096 if (vp->v_object != NULL && vp->v_object->handle != vp) 2097 return (0); 2098 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2099 } 2100 2101 /* 2102 * Flush out buffers on the specified list. 2103 * 2104 */ 2105 static int 2106 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2107 int slptimeo) 2108 { 2109 struct buf *bp, *nbp; 2110 int retval, error; 2111 daddr_t lblkno; 2112 b_xflags_t xflags; 2113 2114 ASSERT_BO_WLOCKED(bo); 2115 2116 retval = 0; 2117 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2118 /* 2119 * If we are flushing both V_NORMAL and V_ALT buffers then 2120 * do not skip any buffers. If we are flushing only V_NORMAL 2121 * buffers then skip buffers marked as BX_ALTDATA. If we are 2122 * flushing only V_ALT buffers then skip buffers not marked 2123 * as BX_ALTDATA. 2124 */ 2125 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2126 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2127 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2128 continue; 2129 } 2130 if (nbp != NULL) { 2131 lblkno = nbp->b_lblkno; 2132 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2133 } 2134 retval = EAGAIN; 2135 error = BUF_TIMELOCK(bp, 2136 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2137 "flushbuf", slpflag, slptimeo); 2138 if (error) { 2139 BO_LOCK(bo); 2140 return (error != ENOLCK ? error : EAGAIN); 2141 } 2142 KASSERT(bp->b_bufobj == bo, 2143 ("bp %p wrong b_bufobj %p should be %p", 2144 bp, bp->b_bufobj, bo)); 2145 /* 2146 * XXX Since there are no node locks for NFS, I 2147 * believe there is a slight chance that a delayed 2148 * write will occur while sleeping just above, so 2149 * check for it. 2150 */ 2151 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2152 (flags & V_SAVE)) { 2153 bremfree(bp); 2154 bp->b_flags |= B_ASYNC; 2155 bwrite(bp); 2156 BO_LOCK(bo); 2157 return (EAGAIN); /* XXX: why not loop ? */ 2158 } 2159 bremfree(bp); 2160 bp->b_flags |= (B_INVAL | B_RELBUF); 2161 bp->b_flags &= ~B_ASYNC; 2162 brelse(bp); 2163 BO_LOCK(bo); 2164 if (nbp == NULL) 2165 break; 2166 nbp = gbincore(bo, lblkno); 2167 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2168 != xflags) 2169 break; /* nbp invalid */ 2170 } 2171 return (retval); 2172 } 2173 2174 int 2175 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2176 { 2177 struct buf *bp; 2178 int error; 2179 daddr_t lblkno; 2180 2181 ASSERT_BO_LOCKED(bo); 2182 2183 for (lblkno = startn;;) { 2184 again: 2185 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2186 if (bp == NULL || bp->b_lblkno >= endn || 2187 bp->b_lblkno < startn) 2188 break; 2189 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2190 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2191 if (error != 0) { 2192 BO_RLOCK(bo); 2193 if (error == ENOLCK) 2194 goto again; 2195 return (error); 2196 } 2197 KASSERT(bp->b_bufobj == bo, 2198 ("bp %p wrong b_bufobj %p should be %p", 2199 bp, bp->b_bufobj, bo)); 2200 lblkno = bp->b_lblkno + 1; 2201 if ((bp->b_flags & B_MANAGED) == 0) 2202 bremfree(bp); 2203 bp->b_flags |= B_RELBUF; 2204 /* 2205 * In the VMIO case, use the B_NOREUSE flag to hint that the 2206 * pages backing each buffer in the range are unlikely to be 2207 * reused. 
Dirty buffers will have the hint applied once 2208 * they've been written. 2209 */ 2210 if ((bp->b_flags & B_VMIO) != 0) 2211 bp->b_flags |= B_NOREUSE; 2212 brelse(bp); 2213 BO_RLOCK(bo); 2214 } 2215 return (0); 2216 } 2217 2218 /* 2219 * Truncate a file's buffer and pages to a specified length. This 2220 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2221 * sync activity. 2222 */ 2223 int 2224 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2225 { 2226 struct buf *bp, *nbp; 2227 struct bufobj *bo; 2228 daddr_t startlbn; 2229 2230 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2231 vp, blksize, (uintmax_t)length); 2232 2233 /* 2234 * Round up to the *next* lbn. 2235 */ 2236 startlbn = howmany(length, blksize); 2237 2238 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2239 2240 bo = &vp->v_bufobj; 2241 restart_unlocked: 2242 BO_LOCK(bo); 2243 2244 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2245 ; 2246 2247 if (length > 0) { 2248 restartsync: 2249 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2250 if (bp->b_lblkno > 0) 2251 continue; 2252 /* 2253 * Since we hold the vnode lock this should only 2254 * fail if we're racing with the buf daemon. 2255 */ 2256 if (BUF_LOCK(bp, 2257 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2258 BO_LOCKPTR(bo)) == ENOLCK) 2259 goto restart_unlocked; 2260 2261 VNASSERT((bp->b_flags & B_DELWRI), vp, 2262 ("buf(%p) on dirty queue without DELWRI", bp)); 2263 2264 bremfree(bp); 2265 bawrite(bp); 2266 BO_LOCK(bo); 2267 goto restartsync; 2268 } 2269 } 2270 2271 bufobj_wwait(bo, 0, 0); 2272 BO_UNLOCK(bo); 2273 vnode_pager_setsize(vp, length); 2274 2275 return (0); 2276 } 2277 2278 /* 2279 * Invalidate the cached pages of a file's buffer within the range of block 2280 * numbers [startlbn, endlbn). 
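 *
 * Both the buffers overlapping the range and the pages backing them are
 * dropped (the latter via vn_pages_remove()); blksize must match
 * bo->bo_bsize, which the function asserts.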
2281 */ 2282 void 2283 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2284 int blksize) 2285 { 2286 struct bufobj *bo; 2287 off_t start, end; 2288 2289 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2290 2291 start = blksize * startlbn; 2292 end = blksize * endlbn; 2293 2294 bo = &vp->v_bufobj; 2295 BO_LOCK(bo); 2296 MPASS(blksize == bo->bo_bsize); 2297 2298 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2299 ; 2300 2301 BO_UNLOCK(bo); 2302 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2303 } 2304 2305 static int 2306 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2307 daddr_t startlbn, daddr_t endlbn) 2308 { 2309 struct buf *bp, *nbp; 2310 bool anyfreed; 2311 2312 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2313 ASSERT_BO_LOCKED(bo); 2314 2315 do { 2316 anyfreed = false; 2317 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2318 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2319 continue; 2320 if (BUF_LOCK(bp, 2321 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2322 BO_LOCKPTR(bo)) == ENOLCK) { 2323 BO_LOCK(bo); 2324 return (EAGAIN); 2325 } 2326 2327 bremfree(bp); 2328 bp->b_flags |= B_INVAL | B_RELBUF; 2329 bp->b_flags &= ~B_ASYNC; 2330 brelse(bp); 2331 anyfreed = true; 2332 2333 BO_LOCK(bo); 2334 if (nbp != NULL && 2335 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2336 nbp->b_vp != vp || 2337 (nbp->b_flags & B_DELWRI) != 0)) 2338 return (EAGAIN); 2339 } 2340 2341 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2342 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2343 continue; 2344 if (BUF_LOCK(bp, 2345 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2346 BO_LOCKPTR(bo)) == ENOLCK) { 2347 BO_LOCK(bo); 2348 return (EAGAIN); 2349 } 2350 bremfree(bp); 2351 bp->b_flags |= B_INVAL | B_RELBUF; 2352 bp->b_flags &= ~B_ASYNC; 2353 brelse(bp); 2354 anyfreed = true; 2355 2356 BO_LOCK(bo); 2357 if (nbp != NULL && 2358 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2359 (nbp->b_vp != vp) || 2360 (nbp->b_flags & B_DELWRI) == 0)) 2361 return (EAGAIN); 2362 } 2363 } while (anyfreed); 2364 return (0); 2365 } 2366 2367 static void 2368 buf_vlist_remove(struct buf *bp) 2369 { 2370 struct bufv *bv; 2371 b_xflags_t flags; 2372 2373 flags = bp->b_xflags; 2374 2375 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2376 ASSERT_BO_WLOCKED(bp->b_bufobj); 2377 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 2378 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2379 ("%s: buffer %p has invalid queue state", __func__, bp)); 2380 2381 if ((flags & BX_VNDIRTY) != 0) 2382 bv = &bp->b_bufobj->bo_dirty; 2383 else 2384 bv = &bp->b_bufobj->bo_clean; 2385 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2386 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2387 bv->bv_cnt--; 2388 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2389 } 2390 2391 /* 2392 * Add the buffer to the sorted clean or dirty block list. 2393 * 2394 * NOTE: xflags is passed as a constant, optimizing this inline function! 
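 *
 * Each buffer lives in two structures at once: the per-list pctrie, used for
 * lookups by logical block number, and the TAILQ, used for ordered
 * traversal. Both are updated here and in buf_vlist_remove() above.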
2395 */ 2396 static void 2397 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2398 { 2399 struct bufv *bv; 2400 struct buf *n; 2401 int error; 2402 2403 ASSERT_BO_WLOCKED(bo); 2404 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2405 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2406 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2407 ("dead bo %p", bo)); 2408 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2409 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2410 bp->b_xflags |= xflags; 2411 if (xflags & BX_VNDIRTY) 2412 bv = &bo->bo_dirty; 2413 else 2414 bv = &bo->bo_clean; 2415 2416 /* 2417 * Keep the list ordered. Optimize empty list insertion. Assume 2418 * we tend to grow at the tail so lookup_le should usually be cheaper 2419 * than _ge. 2420 */ 2421 if (bv->bv_cnt == 0 || 2422 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2423 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2424 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2425 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2426 else 2427 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2428 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2429 if (error) 2430 panic("buf_vlist_add: Preallocated nodes insufficient."); 2431 bv->bv_cnt++; 2432 } 2433 2434 /* 2435 * Look up a buffer using the buffer tries. 2436 */ 2437 struct buf * 2438 gbincore(struct bufobj *bo, daddr_t lblkno) 2439 { 2440 struct buf *bp; 2441 2442 ASSERT_BO_LOCKED(bo); 2443 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2444 if (bp != NULL) 2445 return (bp); 2446 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2447 } 2448 2449 /* 2450 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2451 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2452 * stability of the result. Like other lockless lookups, the found buf may 2453 * already be invalid by the time this function returns. 2454 */ 2455 struct buf * 2456 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2457 { 2458 struct buf *bp; 2459 2460 ASSERT_BO_UNLOCKED(bo); 2461 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2462 if (bp != NULL) 2463 return (bp); 2464 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2465 } 2466 2467 /* 2468 * Associate a buffer with a vnode. 2469 */ 2470 void 2471 bgetvp(struct vnode *vp, struct buf *bp) 2472 { 2473 struct bufobj *bo; 2474 2475 bo = &vp->v_bufobj; 2476 ASSERT_BO_WLOCKED(bo); 2477 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2478 2479 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2480 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2481 ("bgetvp: bp already attached! %p", bp)); 2482 2483 vhold(vp); 2484 bp->b_vp = vp; 2485 bp->b_bufobj = bo; 2486 /* 2487 * Insert onto list for new vnode. 2488 */ 2489 buf_vlist_add(bp, bo, BX_VNCLEAN); 2490 } 2491 2492 /* 2493 * Disassociate a buffer from a vnode. 2494 */ 2495 void 2496 brelvp(struct buf *bp) 2497 { 2498 struct bufobj *bo; 2499 struct vnode *vp; 2500 2501 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2502 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2503 2504 /* 2505 * Delete from old vnode list, if on one. 
2506 */ 2507 vp = bp->b_vp; /* XXX */ 2508 bo = bp->b_bufobj; 2509 BO_LOCK(bo); 2510 buf_vlist_remove(bp); 2511 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2512 bo->bo_flag &= ~BO_ONWORKLST; 2513 mtx_lock(&sync_mtx); 2514 LIST_REMOVE(bo, bo_synclist); 2515 syncer_worklist_len--; 2516 mtx_unlock(&sync_mtx); 2517 } 2518 bp->b_vp = NULL; 2519 bp->b_bufobj = NULL; 2520 BO_UNLOCK(bo); 2521 vdrop(vp); 2522 } 2523 2524 /* 2525 * Add an item to the syncer work queue. 2526 */ 2527 static void 2528 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2529 { 2530 int slot; 2531 2532 ASSERT_BO_WLOCKED(bo); 2533 2534 mtx_lock(&sync_mtx); 2535 if (bo->bo_flag & BO_ONWORKLST) 2536 LIST_REMOVE(bo, bo_synclist); 2537 else { 2538 bo->bo_flag |= BO_ONWORKLST; 2539 syncer_worklist_len++; 2540 } 2541 2542 if (delay > syncer_maxdelay - 2) 2543 delay = syncer_maxdelay - 2; 2544 slot = (syncer_delayno + delay) & syncer_mask; 2545 2546 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2547 mtx_unlock(&sync_mtx); 2548 } 2549 2550 static int 2551 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2552 { 2553 int error, len; 2554 2555 mtx_lock(&sync_mtx); 2556 len = syncer_worklist_len - sync_vnode_count; 2557 mtx_unlock(&sync_mtx); 2558 error = SYSCTL_OUT(req, &len, sizeof(len)); 2559 return (error); 2560 } 2561 2562 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2563 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2564 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2565 2566 static struct proc *updateproc; 2567 static void sched_sync(void); 2568 static struct kproc_desc up_kp = { 2569 "syncer", 2570 sched_sync, 2571 &updateproc 2572 }; 2573 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2574 2575 static int 2576 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2577 { 2578 struct vnode *vp; 2579 struct mount *mp; 2580 2581 *bo = LIST_FIRST(slp); 2582 if (*bo == NULL) 2583 return (0); 2584 vp = bo2vnode(*bo); 2585 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2586 return (1); 2587 /* 2588 * We use vhold in case the vnode does not 2589 * successfully sync. vhold prevents the vnode from 2590 * going away when we unlock the sync_mtx so that 2591 * we can acquire the vnode interlock. 2592 */ 2593 vholdl(vp); 2594 mtx_unlock(&sync_mtx); 2595 VI_UNLOCK(vp); 2596 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2597 vdrop(vp); 2598 mtx_lock(&sync_mtx); 2599 return (*bo == LIST_FIRST(slp)); 2600 } 2601 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2602 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2603 VOP_UNLOCK(vp); 2604 vn_finished_write(mp); 2605 BO_LOCK(*bo); 2606 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2607 /* 2608 * Put us back on the worklist. The worklist 2609 * routine will remove us from our current 2610 * position and then add us back in at a later 2611 * position. 2612 */ 2613 vn_syncer_add_to_worklist(*bo, syncdelay); 2614 } 2615 BO_UNLOCK(*bo); 2616 vdrop(vp); 2617 mtx_lock(&sync_mtx); 2618 return (0); 2619 } 2620 2621 static int first_printf = 1; 2622 2623 /* 2624 * System filesystem synchronizer daemon. 
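 *
 * The syncer keeps dirty bufobjs on a wheel of syncer_maxdelay buckets.
 * Roughly once a second the loop below advances syncer_delayno and flushes
 * every vnode in that bucket, while vn_syncer_add_to_worklist() above files
 * a bufobj "delay" seconds ahead of the current position:
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *
 * so a newly dirtied vnode is normally written back about "delay" seconds
 * later.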
2625 */ 2626 static void 2627 sched_sync(void) 2628 { 2629 struct synclist *next, *slp; 2630 struct bufobj *bo; 2631 long starttime; 2632 struct thread *td = curthread; 2633 int last_work_seen; 2634 int net_worklist_len; 2635 int syncer_final_iter; 2636 int error; 2637 2638 last_work_seen = 0; 2639 syncer_final_iter = 0; 2640 syncer_state = SYNCER_RUNNING; 2641 starttime = time_uptime; 2642 td->td_pflags |= TDP_NORUNNINGBUF; 2643 2644 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2645 SHUTDOWN_PRI_LAST); 2646 2647 mtx_lock(&sync_mtx); 2648 for (;;) { 2649 if (syncer_state == SYNCER_FINAL_DELAY && 2650 syncer_final_iter == 0) { 2651 mtx_unlock(&sync_mtx); 2652 kproc_suspend_check(td->td_proc); 2653 mtx_lock(&sync_mtx); 2654 } 2655 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2656 if (syncer_state != SYNCER_RUNNING && 2657 starttime != time_uptime) { 2658 if (first_printf) { 2659 printf("\nSyncing disks, vnodes remaining... "); 2660 first_printf = 0; 2661 } 2662 printf("%d ", net_worklist_len); 2663 } 2664 starttime = time_uptime; 2665 2666 /* 2667 * Push files whose dirty time has expired. Be careful 2668 * of interrupt race on slp queue. 2669 * 2670 * Skip over empty worklist slots when shutting down. 2671 */ 2672 do { 2673 slp = &syncer_workitem_pending[syncer_delayno]; 2674 syncer_delayno += 1; 2675 if (syncer_delayno == syncer_maxdelay) 2676 syncer_delayno = 0; 2677 next = &syncer_workitem_pending[syncer_delayno]; 2678 /* 2679 * If the worklist has wrapped since the 2680 * it was emptied of all but syncer vnodes, 2681 * switch to the FINAL_DELAY state and run 2682 * for one more second. 2683 */ 2684 if (syncer_state == SYNCER_SHUTTING_DOWN && 2685 net_worklist_len == 0 && 2686 last_work_seen == syncer_delayno) { 2687 syncer_state = SYNCER_FINAL_DELAY; 2688 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2689 } 2690 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2691 syncer_worklist_len > 0); 2692 2693 /* 2694 * Keep track of the last time there was anything 2695 * on the worklist other than syncer vnodes. 2696 * Return to the SHUTTING_DOWN state if any 2697 * new work appears. 2698 */ 2699 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2700 last_work_seen = syncer_delayno; 2701 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2702 syncer_state = SYNCER_SHUTTING_DOWN; 2703 while (!LIST_EMPTY(slp)) { 2704 error = sync_vnode(slp, &bo, td); 2705 if (error == 1) { 2706 LIST_REMOVE(bo, bo_synclist); 2707 LIST_INSERT_HEAD(next, bo, bo_synclist); 2708 continue; 2709 } 2710 2711 if (first_printf == 0) { 2712 /* 2713 * Drop the sync mutex, because some watchdog 2714 * drivers need to sleep while patting 2715 */ 2716 mtx_unlock(&sync_mtx); 2717 wdog_kern_pat(WD_LASTVAL); 2718 mtx_lock(&sync_mtx); 2719 } 2720 } 2721 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2722 syncer_final_iter--; 2723 /* 2724 * The variable rushjob allows the kernel to speed up the 2725 * processing of the filesystem syncer process. A rushjob 2726 * value of N tells the filesystem syncer to process the next 2727 * N seconds worth of work on its queue ASAP. Currently rushjob 2728 * is used by the soft update code to speed up the filesystem 2729 * syncer process when the incore state is getting so far 2730 * ahead of the disk that the kernel memory pool is being 2731 * threatened with exhaustion. 
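 *
 * For example, speedup_syncer() below bumps rushjob by one; each unit
 * consumed here makes the loop skip its one-second sleep and immediately
 * process the next bucket of the worklist.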
2732 */ 2733 if (rushjob > 0) { 2734 rushjob -= 1; 2735 continue; 2736 } 2737 /* 2738 * Just sleep for a short period of time between 2739 * iterations when shutting down to allow some I/O 2740 * to happen. 2741 * 2742 * If it has taken us less than a second to process the 2743 * current work, then wait. Otherwise start right over 2744 * again. We can still lose time if any single round 2745 * takes more than two seconds, but it does not really 2746 * matter as we are just trying to generally pace the 2747 * filesystem activity. 2748 */ 2749 if (syncer_state != SYNCER_RUNNING || 2750 time_uptime == starttime) { 2751 thread_lock(td); 2752 sched_prio(td, PPAUSE); 2753 thread_unlock(td); 2754 } 2755 if (syncer_state != SYNCER_RUNNING) 2756 cv_timedwait(&sync_wakeup, &sync_mtx, 2757 hz / SYNCER_SHUTDOWN_SPEEDUP); 2758 else if (time_uptime == starttime) 2759 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2760 } 2761 } 2762 2763 /* 2764 * Request the syncer daemon to speed up its work. 2765 * We never push it to speed up more than half of its 2766 * normal turn time, otherwise it could take over the cpu. 2767 */ 2768 int 2769 speedup_syncer(void) 2770 { 2771 int ret = 0; 2772 2773 mtx_lock(&sync_mtx); 2774 if (rushjob < syncdelay / 2) { 2775 rushjob += 1; 2776 stat_rush_requests += 1; 2777 ret = 1; 2778 } 2779 mtx_unlock(&sync_mtx); 2780 cv_broadcast(&sync_wakeup); 2781 return (ret); 2782 } 2783 2784 /* 2785 * Tell the syncer to speed up its work and run though its work 2786 * list several times, then tell it to shut down. 2787 */ 2788 static void 2789 syncer_shutdown(void *arg, int howto) 2790 { 2791 2792 if (howto & RB_NOSYNC) 2793 return; 2794 mtx_lock(&sync_mtx); 2795 syncer_state = SYNCER_SHUTTING_DOWN; 2796 rushjob = 0; 2797 mtx_unlock(&sync_mtx); 2798 cv_broadcast(&sync_wakeup); 2799 kproc_shutdown(arg, howto); 2800 } 2801 2802 void 2803 syncer_suspend(void) 2804 { 2805 2806 syncer_shutdown(updateproc, 0); 2807 } 2808 2809 void 2810 syncer_resume(void) 2811 { 2812 2813 mtx_lock(&sync_mtx); 2814 first_printf = 1; 2815 syncer_state = SYNCER_RUNNING; 2816 mtx_unlock(&sync_mtx); 2817 cv_broadcast(&sync_wakeup); 2818 kproc_resume(updateproc); 2819 } 2820 2821 /* 2822 * Move the buffer between the clean and dirty lists of its vnode. 2823 */ 2824 void 2825 reassignbuf(struct buf *bp) 2826 { 2827 struct vnode *vp; 2828 struct bufobj *bo; 2829 int delay; 2830 #ifdef INVARIANTS 2831 struct bufv *bv; 2832 #endif 2833 2834 vp = bp->b_vp; 2835 bo = bp->b_bufobj; 2836 2837 KASSERT((bp->b_flags & B_PAGING) == 0, 2838 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2839 2840 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2841 bp, bp->b_vp, bp->b_flags); 2842 2843 BO_LOCK(bo); 2844 buf_vlist_remove(bp); 2845 2846 /* 2847 * If dirty, put on list of dirty buffers; otherwise insert onto list 2848 * of clean buffers. 
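 *
 * A buffer turning dirty also puts its bufobj on the syncer worklist, with a
 * delay keyed to the vnode type (dirdelay for directories, metadelay for
 * devices, filedelay for everything else); once the last dirty buffer moves
 * back to the clean list the bufobj is taken off the worklist again.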
2849 */ 2850 if (bp->b_flags & B_DELWRI) { 2851 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2852 switch (vp->v_type) { 2853 case VDIR: 2854 delay = dirdelay; 2855 break; 2856 case VCHR: 2857 delay = metadelay; 2858 break; 2859 default: 2860 delay = filedelay; 2861 } 2862 vn_syncer_add_to_worklist(bo, delay); 2863 } 2864 buf_vlist_add(bp, bo, BX_VNDIRTY); 2865 } else { 2866 buf_vlist_add(bp, bo, BX_VNCLEAN); 2867 2868 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2869 mtx_lock(&sync_mtx); 2870 LIST_REMOVE(bo, bo_synclist); 2871 syncer_worklist_len--; 2872 mtx_unlock(&sync_mtx); 2873 bo->bo_flag &= ~BO_ONWORKLST; 2874 } 2875 } 2876 #ifdef INVARIANTS 2877 bv = &bo->bo_clean; 2878 bp = TAILQ_FIRST(&bv->bv_hd); 2879 KASSERT(bp == NULL || bp->b_bufobj == bo, 2880 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2881 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2882 KASSERT(bp == NULL || bp->b_bufobj == bo, 2883 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2884 bv = &bo->bo_dirty; 2885 bp = TAILQ_FIRST(&bv->bv_hd); 2886 KASSERT(bp == NULL || bp->b_bufobj == bo, 2887 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2888 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2889 KASSERT(bp == NULL || bp->b_bufobj == bo, 2890 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2891 #endif 2892 BO_UNLOCK(bo); 2893 } 2894 2895 static void 2896 v_init_counters(struct vnode *vp) 2897 { 2898 2899 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2900 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2901 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2902 2903 refcount_init(&vp->v_holdcnt, 1); 2904 refcount_init(&vp->v_usecount, 1); 2905 } 2906 2907 /* 2908 * Grab a particular vnode from the free list, increment its 2909 * reference count and lock it. VIRF_DOOMED is set if the vnode 2910 * is being destroyed. Only callers who specify LK_RETRY will 2911 * see doomed vnodes. If inactive processing was delayed in 2912 * vput try to do it here. 2913 * 2914 * usecount is manipulated using atomics without holding any locks. 2915 * 2916 * holdcnt can be manipulated using atomics without holding any locks, 2917 * except when transitioning 1<->0, in which case the interlock is held. 2918 * 2919 * Consumers which don't guarantee liveness of the vnode can use SMR to 2920 * try to get a reference. Note this operation can fail since the vnode 2921 * may be awaiting getting freed by the time they get to it. 
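 *
 * A minimal sketch of the SMR variant (the lookup placeholder stands for
 * whatever lockless, SMR-protected structure the caller searches, e.g. the
 * name cache):
 *
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		return (EAGAIN);
 *	error = vget_finish(vp, LK_SHARED, vs);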
2922 */ 2923 enum vgetstate 2924 vget_prep_smr(struct vnode *vp) 2925 { 2926 enum vgetstate vs; 2927 2928 VFS_SMR_ASSERT_ENTERED(); 2929 2930 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2931 vs = VGET_USECOUNT; 2932 } else { 2933 if (vhold_smr(vp)) 2934 vs = VGET_HOLDCNT; 2935 else 2936 vs = VGET_NONE; 2937 } 2938 return (vs); 2939 } 2940 2941 enum vgetstate 2942 vget_prep(struct vnode *vp) 2943 { 2944 enum vgetstate vs; 2945 2946 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2947 vs = VGET_USECOUNT; 2948 } else { 2949 vhold(vp); 2950 vs = VGET_HOLDCNT; 2951 } 2952 return (vs); 2953 } 2954 2955 void 2956 vget_abort(struct vnode *vp, enum vgetstate vs) 2957 { 2958 2959 switch (vs) { 2960 case VGET_USECOUNT: 2961 vrele(vp); 2962 break; 2963 case VGET_HOLDCNT: 2964 vdrop(vp); 2965 break; 2966 default: 2967 __assert_unreachable(); 2968 } 2969 } 2970 2971 int 2972 vget(struct vnode *vp, int flags) 2973 { 2974 enum vgetstate vs; 2975 2976 vs = vget_prep(vp); 2977 return (vget_finish(vp, flags, vs)); 2978 } 2979 2980 int 2981 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2982 { 2983 int error; 2984 2985 if ((flags & LK_INTERLOCK) != 0) 2986 ASSERT_VI_LOCKED(vp, __func__); 2987 else 2988 ASSERT_VI_UNLOCKED(vp, __func__); 2989 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2990 VNPASS(vp->v_holdcnt > 0, vp); 2991 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2992 2993 error = vn_lock(vp, flags); 2994 if (__predict_false(error != 0)) { 2995 vget_abort(vp, vs); 2996 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2997 vp); 2998 return (error); 2999 } 3000 3001 vget_finish_ref(vp, vs); 3002 return (0); 3003 } 3004 3005 void 3006 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3007 { 3008 int old; 3009 3010 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3011 VNPASS(vp->v_holdcnt > 0, vp); 3012 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3013 3014 if (vs == VGET_USECOUNT) 3015 return; 3016 3017 /* 3018 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3019 * the vnode around. Otherwise someone else lended their hold count and 3020 * we have to drop ours. 3021 */ 3022 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3023 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3024 if (old != 0) { 3025 #ifdef INVARIANTS 3026 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3027 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3028 #else 3029 refcount_release(&vp->v_holdcnt); 3030 #endif 3031 } 3032 } 3033 3034 void 3035 vref(struct vnode *vp) 3036 { 3037 enum vgetstate vs; 3038 3039 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3040 vs = vget_prep(vp); 3041 vget_finish_ref(vp, vs); 3042 } 3043 3044 void 3045 vrefact(struct vnode *vp) 3046 { 3047 3048 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3049 #ifdef INVARIANTS 3050 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3051 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3052 #else 3053 refcount_acquire(&vp->v_usecount); 3054 #endif 3055 } 3056 3057 void 3058 vlazy(struct vnode *vp) 3059 { 3060 struct mount *mp; 3061 3062 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3063 3064 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3065 return; 3066 /* 3067 * We may get here for inactive routines after the vnode got doomed. 
3068 */ 3069 if (VN_IS_DOOMED(vp)) 3070 return; 3071 mp = vp->v_mount; 3072 mtx_lock(&mp->mnt_listmtx); 3073 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3074 vp->v_mflag |= VMP_LAZYLIST; 3075 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3076 mp->mnt_lazyvnodelistsize++; 3077 } 3078 mtx_unlock(&mp->mnt_listmtx); 3079 } 3080 3081 static void 3082 vunlazy(struct vnode *vp) 3083 { 3084 struct mount *mp; 3085 3086 ASSERT_VI_LOCKED(vp, __func__); 3087 VNPASS(!VN_IS_DOOMED(vp), vp); 3088 3089 mp = vp->v_mount; 3090 mtx_lock(&mp->mnt_listmtx); 3091 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3092 /* 3093 * Don't remove the vnode from the lazy list if another thread 3094 * has increased the hold count. It may have re-enqueued the 3095 * vnode to the lazy list and is now responsible for its 3096 * removal. 3097 */ 3098 if (vp->v_holdcnt == 0) { 3099 vp->v_mflag &= ~VMP_LAZYLIST; 3100 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3101 mp->mnt_lazyvnodelistsize--; 3102 } 3103 mtx_unlock(&mp->mnt_listmtx); 3104 } 3105 3106 /* 3107 * This routine is only meant to be called from vgonel prior to dooming 3108 * the vnode. 3109 */ 3110 static void 3111 vunlazy_gone(struct vnode *vp) 3112 { 3113 struct mount *mp; 3114 3115 ASSERT_VOP_ELOCKED(vp, __func__); 3116 ASSERT_VI_LOCKED(vp, __func__); 3117 VNPASS(!VN_IS_DOOMED(vp), vp); 3118 3119 if (vp->v_mflag & VMP_LAZYLIST) { 3120 mp = vp->v_mount; 3121 mtx_lock(&mp->mnt_listmtx); 3122 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3123 vp->v_mflag &= ~VMP_LAZYLIST; 3124 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3125 mp->mnt_lazyvnodelistsize--; 3126 mtx_unlock(&mp->mnt_listmtx); 3127 } 3128 } 3129 3130 static void 3131 vdefer_inactive(struct vnode *vp) 3132 { 3133 3134 ASSERT_VI_LOCKED(vp, __func__); 3135 VNASSERT(vp->v_holdcnt > 0, vp, 3136 ("%s: vnode without hold count", __func__)); 3137 if (VN_IS_DOOMED(vp)) { 3138 vdropl(vp); 3139 return; 3140 } 3141 if (vp->v_iflag & VI_DEFINACT) { 3142 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3143 vdropl(vp); 3144 return; 3145 } 3146 if (vp->v_usecount > 0) { 3147 vp->v_iflag &= ~VI_OWEINACT; 3148 vdropl(vp); 3149 return; 3150 } 3151 vlazy(vp); 3152 vp->v_iflag |= VI_DEFINACT; 3153 VI_UNLOCK(vp); 3154 counter_u64_add(deferred_inact, 1); 3155 } 3156 3157 static void 3158 vdefer_inactive_unlocked(struct vnode *vp) 3159 { 3160 3161 VI_LOCK(vp); 3162 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3163 vdropl(vp); 3164 return; 3165 } 3166 vdefer_inactive(vp); 3167 } 3168 3169 enum vput_op { VRELE, VPUT, VUNREF }; 3170 3171 /* 3172 * Handle ->v_usecount transitioning to 0. 3173 * 3174 * By releasing the last usecount we take ownership of the hold count which 3175 * provides liveness of the vnode, meaning we have to vdrop. 3176 * 3177 * For all vnodes we may need to perform inactive processing. It requires an 3178 * exclusive lock on the vnode, while it is legal to call here with only a 3179 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3180 * inactive processing gets deferred to the syncer. 3181 * 3182 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3183 * on the lock being held all the way until VOP_INACTIVE. This in particular 3184 * happens with UFS which adds half-constructed vnodes to the hash, where they 3185 * can be found by other code. 
3186 */ 3187 static void 3188 vput_final(struct vnode *vp, enum vput_op func) 3189 { 3190 int error; 3191 bool want_unlock; 3192 3193 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3194 VNPASS(vp->v_holdcnt > 0, vp); 3195 3196 VI_LOCK(vp); 3197 3198 /* 3199 * By the time we got here someone else might have transitioned 3200 * the count back to > 0. 3201 */ 3202 if (vp->v_usecount > 0) 3203 goto out; 3204 3205 /* 3206 * If the vnode is doomed vgone already performed inactive processing 3207 * (if needed). 3208 */ 3209 if (VN_IS_DOOMED(vp)) 3210 goto out; 3211 3212 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3213 goto out; 3214 3215 if (vp->v_iflag & VI_DOINGINACT) 3216 goto out; 3217 3218 /* 3219 * Locking operations here will drop the interlock and possibly the 3220 * vnode lock, opening a window where the vnode can get doomed all the 3221 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3222 * perform inactive. 3223 */ 3224 vp->v_iflag |= VI_OWEINACT; 3225 want_unlock = false; 3226 error = 0; 3227 switch (func) { 3228 case VRELE: 3229 switch (VOP_ISLOCKED(vp)) { 3230 case LK_EXCLUSIVE: 3231 break; 3232 case LK_EXCLOTHER: 3233 case 0: 3234 want_unlock = true; 3235 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3236 VI_LOCK(vp); 3237 break; 3238 default: 3239 /* 3240 * The lock has at least one sharer, but we have no way 3241 * to conclude whether this is us. Play it safe and 3242 * defer processing. 3243 */ 3244 error = EAGAIN; 3245 break; 3246 } 3247 break; 3248 case VPUT: 3249 want_unlock = true; 3250 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3251 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3252 LK_NOWAIT); 3253 VI_LOCK(vp); 3254 } 3255 break; 3256 case VUNREF: 3257 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3258 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3259 VI_LOCK(vp); 3260 } 3261 break; 3262 } 3263 if (error == 0) { 3264 if (func == VUNREF) { 3265 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3266 ("recursive vunref")); 3267 vp->v_vflag |= VV_UNREF; 3268 } 3269 for (;;) { 3270 error = vinactive(vp); 3271 if (want_unlock) 3272 VOP_UNLOCK(vp); 3273 if (error != ERELOOKUP || !want_unlock) 3274 break; 3275 VOP_LOCK(vp, LK_EXCLUSIVE); 3276 } 3277 if (func == VUNREF) 3278 vp->v_vflag &= ~VV_UNREF; 3279 vdropl(vp); 3280 } else { 3281 vdefer_inactive(vp); 3282 } 3283 return; 3284 out: 3285 if (func == VPUT) 3286 VOP_UNLOCK(vp); 3287 vdropl(vp); 3288 } 3289 3290 /* 3291 * Decrement ->v_usecount for a vnode. 3292 * 3293 * Releasing the last use count requires additional processing, see vput_final 3294 * above for details. 3295 * 3296 * Comment above each variant denotes lock state on entry and exit. 
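 *
 * A sketch of the most common pairing:
 *
 *	error = vget(vp, LK_EXCLUSIVE);
 *	if (error != 0)
 *		return (error);
 *	... operate on the locked vnode ...
 *	vput(vp);
 *
 * where vput() both drops the use count and unlocks the vnode.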
3297 */ 3298 3299 /* 3300 * in: any 3301 * out: same as passed in 3302 */ 3303 void 3304 vrele(struct vnode *vp) 3305 { 3306 3307 ASSERT_VI_UNLOCKED(vp, __func__); 3308 if (!refcount_release(&vp->v_usecount)) 3309 return; 3310 vput_final(vp, VRELE); 3311 } 3312 3313 /* 3314 * in: locked 3315 * out: unlocked 3316 */ 3317 void 3318 vput(struct vnode *vp) 3319 { 3320 3321 ASSERT_VOP_LOCKED(vp, __func__); 3322 ASSERT_VI_UNLOCKED(vp, __func__); 3323 if (!refcount_release(&vp->v_usecount)) { 3324 VOP_UNLOCK(vp); 3325 return; 3326 } 3327 vput_final(vp, VPUT); 3328 } 3329 3330 /* 3331 * in: locked 3332 * out: locked 3333 */ 3334 void 3335 vunref(struct vnode *vp) 3336 { 3337 3338 ASSERT_VOP_LOCKED(vp, __func__); 3339 ASSERT_VI_UNLOCKED(vp, __func__); 3340 if (!refcount_release(&vp->v_usecount)) 3341 return; 3342 vput_final(vp, VUNREF); 3343 } 3344 3345 void 3346 vhold(struct vnode *vp) 3347 { 3348 int old; 3349 3350 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3351 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3352 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3353 ("%s: wrong hold count %d", __func__, old)); 3354 if (old == 0) 3355 vfs_freevnodes_dec(); 3356 } 3357 3358 void 3359 vholdnz(struct vnode *vp) 3360 { 3361 3362 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3363 #ifdef INVARIANTS 3364 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3365 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3366 ("%s: wrong hold count %d", __func__, old)); 3367 #else 3368 atomic_add_int(&vp->v_holdcnt, 1); 3369 #endif 3370 } 3371 3372 /* 3373 * Grab a hold count unless the vnode is freed. 3374 * 3375 * Only use this routine if vfs smr is the only protection you have against 3376 * freeing the vnode. 3377 * 3378 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3379 * is not set. After the flag is set the vnode becomes immutable to anyone but 3380 * the thread which managed to set the flag. 3381 * 3382 * It may be tempting to replace the loop with: 3383 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3384 * if (count & VHOLD_NO_SMR) { 3385 * backpedal and error out; 3386 * } 3387 * 3388 * However, while this is more performant, it hinders debugging by eliminating 3389 * the previously mentioned invariant. 3390 */ 3391 bool 3392 vhold_smr(struct vnode *vp) 3393 { 3394 int count; 3395 3396 VFS_SMR_ASSERT_ENTERED(); 3397 3398 count = atomic_load_int(&vp->v_holdcnt); 3399 for (;;) { 3400 if (count & VHOLD_NO_SMR) { 3401 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3402 ("non-zero hold count with flags %d\n", count)); 3403 return (false); 3404 } 3405 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3406 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3407 if (count == 0) 3408 vfs_freevnodes_dec(); 3409 return (true); 3410 } 3411 } 3412 } 3413 3414 /* 3415 * Hold a free vnode for recycling. 3416 * 3417 * Note: vnode_init references this comment. 3418 * 3419 * Attempts to recycle only need the global vnode list lock and have no use for 3420 * SMR. 3421 * 3422 * However, vnodes get inserted into the global list before they get fully 3423 * initialized and stay there until UMA decides to free the memory. This in 3424 * particular means the target can be found before it becomes usable and after 3425 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3426 * VHOLD_NO_SMR. 3427 * 3428 * Note: the vnode may gain more references after we transition the count 0->1. 
3429 */ 3430 static bool 3431 vhold_recycle_free(struct vnode *vp) 3432 { 3433 int count; 3434 3435 mtx_assert(&vnode_list_mtx, MA_OWNED); 3436 3437 count = atomic_load_int(&vp->v_holdcnt); 3438 for (;;) { 3439 if (count & VHOLD_NO_SMR) { 3440 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3441 ("non-zero hold count with flags %d\n", count)); 3442 return (false); 3443 } 3444 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3445 if (count > 0) { 3446 return (false); 3447 } 3448 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3449 vfs_freevnodes_dec(); 3450 return (true); 3451 } 3452 } 3453 } 3454 3455 static void __noinline 3456 vdbatch_process(struct vdbatch *vd) 3457 { 3458 struct vnode *vp; 3459 int i; 3460 3461 mtx_assert(&vd->lock, MA_OWNED); 3462 MPASS(curthread->td_pinned > 0); 3463 MPASS(vd->index == VDBATCH_SIZE); 3464 3465 mtx_lock(&vnode_list_mtx); 3466 critical_enter(); 3467 freevnodes += vd->freevnodes; 3468 for (i = 0; i < VDBATCH_SIZE; i++) { 3469 vp = vd->tab[i]; 3470 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3471 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3472 MPASS(vp->v_dbatchcpu != NOCPU); 3473 vp->v_dbatchcpu = NOCPU; 3474 } 3475 mtx_unlock(&vnode_list_mtx); 3476 vd->freevnodes = 0; 3477 bzero(vd->tab, sizeof(vd->tab)); 3478 vd->index = 0; 3479 critical_exit(); 3480 } 3481 3482 static void 3483 vdbatch_enqueue(struct vnode *vp) 3484 { 3485 struct vdbatch *vd; 3486 3487 ASSERT_VI_LOCKED(vp, __func__); 3488 VNASSERT(!VN_IS_DOOMED(vp), vp, 3489 ("%s: deferring requeue of a doomed vnode", __func__)); 3490 3491 if (vp->v_dbatchcpu != NOCPU) { 3492 VI_UNLOCK(vp); 3493 return; 3494 } 3495 3496 sched_pin(); 3497 vd = DPCPU_PTR(vd); 3498 mtx_lock(&vd->lock); 3499 MPASS(vd->index < VDBATCH_SIZE); 3500 MPASS(vd->tab[vd->index] == NULL); 3501 /* 3502 * A hack: we depend on being pinned so that we know what to put in 3503 * ->v_dbatchcpu. 3504 */ 3505 vp->v_dbatchcpu = curcpu; 3506 vd->tab[vd->index] = vp; 3507 vd->index++; 3508 VI_UNLOCK(vp); 3509 if (vd->index == VDBATCH_SIZE) 3510 vdbatch_process(vd); 3511 mtx_unlock(&vd->lock); 3512 sched_unpin(); 3513 } 3514 3515 /* 3516 * This routine must only be called for vnodes which are about to be 3517 * deallocated. Supporting dequeue for arbitrary vndoes would require 3518 * validating that the locked batch matches. 3519 */ 3520 static void 3521 vdbatch_dequeue(struct vnode *vp) 3522 { 3523 struct vdbatch *vd; 3524 int i; 3525 short cpu; 3526 3527 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3528 ("%s: called for a used vnode\n", __func__)); 3529 3530 cpu = vp->v_dbatchcpu; 3531 if (cpu == NOCPU) 3532 return; 3533 3534 vd = DPCPU_ID_PTR(cpu, vd); 3535 mtx_lock(&vd->lock); 3536 for (i = 0; i < vd->index; i++) { 3537 if (vd->tab[i] != vp) 3538 continue; 3539 vp->v_dbatchcpu = NOCPU; 3540 vd->index--; 3541 vd->tab[i] = vd->tab[vd->index]; 3542 vd->tab[vd->index] = NULL; 3543 break; 3544 } 3545 mtx_unlock(&vd->lock); 3546 /* 3547 * Either we dequeued the vnode above or the target CPU beat us to it. 3548 */ 3549 MPASS(vp->v_dbatchcpu == NOCPU); 3550 } 3551 3552 /* 3553 * Drop the hold count of the vnode. If this is the last reference to 3554 * the vnode we place it on the free list unless it has been vgone'd 3555 * (marked VIRF_DOOMED) in which case we will free it. 3556 * 3557 * Because the vnode vm object keeps a hold reference on the vnode if 3558 * there is at least one resident non-cached page, the vnode cannot 3559 * leave the active list without the page cleanup done. 
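 *
 * vdrop() below is the unlocked-entry wrapper; vdropl() expects the
 * interlock to be held and always releases it, either directly or via
 * vdbatch_enqueue().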
3560 */ 3561 static void __noinline 3562 vdropl_final(struct vnode *vp) 3563 { 3564 3565 ASSERT_VI_LOCKED(vp, __func__); 3566 VNPASS(VN_IS_DOOMED(vp), vp); 3567 /* 3568 * Set the VHOLD_NO_SMR flag. 3569 * 3570 * We may be racing against vhold_smr. If they win we can just pretend 3571 * we never got this far, they will vdrop later. 3572 */ 3573 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3574 vfs_freevnodes_inc(); 3575 VI_UNLOCK(vp); 3576 /* 3577 * We lost the aforementioned race. Any subsequent access is 3578 * invalid as they might have managed to vdropl on their own. 3579 */ 3580 return; 3581 } 3582 /* 3583 * Don't bump freevnodes as this one is going away. 3584 */ 3585 freevnode(vp); 3586 } 3587 3588 void 3589 vdrop(struct vnode *vp) 3590 { 3591 3592 ASSERT_VI_UNLOCKED(vp, __func__); 3593 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3594 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3595 return; 3596 VI_LOCK(vp); 3597 vdropl(vp); 3598 } 3599 3600 void 3601 vdropl(struct vnode *vp) 3602 { 3603 3604 ASSERT_VI_LOCKED(vp, __func__); 3605 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3606 if (!refcount_release(&vp->v_holdcnt)) { 3607 VI_UNLOCK(vp); 3608 return; 3609 } 3610 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3611 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3612 if (VN_IS_DOOMED(vp)) { 3613 vdropl_final(vp); 3614 return; 3615 } 3616 3617 vfs_freevnodes_inc(); 3618 if (vp->v_mflag & VMP_LAZYLIST) { 3619 vunlazy(vp); 3620 } 3621 /* 3622 * Also unlocks the interlock. We can't assert on it as we 3623 * released our hold and by now the vnode might have been 3624 * freed. 3625 */ 3626 vdbatch_enqueue(vp); 3627 } 3628 3629 /* 3630 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3631 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3632 */ 3633 static int 3634 vinactivef(struct vnode *vp) 3635 { 3636 struct vm_object *obj; 3637 int error; 3638 3639 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3640 ASSERT_VI_LOCKED(vp, "vinactive"); 3641 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3642 ("vinactive: recursed on VI_DOINGINACT")); 3643 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3644 vp->v_iflag |= VI_DOINGINACT; 3645 vp->v_iflag &= ~VI_OWEINACT; 3646 VI_UNLOCK(vp); 3647 /* 3648 * Before moving off the active list, we must be sure that any 3649 * modified pages are converted into the vnode's dirty 3650 * buffers, since these will no longer be checked once the 3651 * vnode is on the inactive list. 3652 * 3653 * The write-out of the dirty pages is asynchronous. At the 3654 * point that VOP_INACTIVE() is called, there could still be 3655 * pending I/O and dirty pages in the object. 
3656 */ 3657 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3658 vm_object_mightbedirty(obj)) { 3659 VM_OBJECT_WLOCK(obj); 3660 vm_object_page_clean(obj, 0, 0, 0); 3661 VM_OBJECT_WUNLOCK(obj); 3662 } 3663 error = VOP_INACTIVE(vp); 3664 VI_LOCK(vp); 3665 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3666 ("vinactive: lost VI_DOINGINACT")); 3667 vp->v_iflag &= ~VI_DOINGINACT; 3668 return (error); 3669 } 3670 3671 int 3672 vinactive(struct vnode *vp) 3673 { 3674 3675 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3676 ASSERT_VI_LOCKED(vp, "vinactive"); 3677 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3678 3679 if ((vp->v_iflag & VI_OWEINACT) == 0) 3680 return (0); 3681 if (vp->v_iflag & VI_DOINGINACT) 3682 return (0); 3683 if (vp->v_usecount > 0) { 3684 vp->v_iflag &= ~VI_OWEINACT; 3685 return (0); 3686 } 3687 return (vinactivef(vp)); 3688 } 3689 3690 /* 3691 * Remove any vnodes in the vnode table belonging to mount point mp. 3692 * 3693 * If FORCECLOSE is not specified, there should not be any active ones, 3694 * return error if any are found (nb: this is a user error, not a 3695 * system error). If FORCECLOSE is specified, detach any active vnodes 3696 * that are found. 3697 * 3698 * If WRITECLOSE is set, only flush out regular file vnodes open for 3699 * writing. 3700 * 3701 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3702 * 3703 * `rootrefs' specifies the base reference count for the root vnode 3704 * of this filesystem. The root vnode is considered busy if its 3705 * v_usecount exceeds this value. On a successful return, vflush(, td) 3706 * will call vrele() on the root vnode exactly rootrefs times. 3707 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3708 * be zero. 3709 */ 3710 #ifdef DIAGNOSTIC 3711 static int busyprt = 0; /* print out busy vnodes */ 3712 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3713 #endif 3714 3715 int 3716 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3717 { 3718 struct vnode *vp, *mvp, *rootvp = NULL; 3719 struct vattr vattr; 3720 int busy = 0, error; 3721 3722 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3723 rootrefs, flags); 3724 if (rootrefs > 0) { 3725 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3726 ("vflush: bad args")); 3727 /* 3728 * Get the filesystem root vnode. We can vput() it 3729 * immediately, since with rootrefs > 0, it won't go away. 3730 */ 3731 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3732 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3733 __func__, error); 3734 return (error); 3735 } 3736 vput(rootvp); 3737 } 3738 loop: 3739 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3740 vholdl(vp); 3741 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3742 if (error) { 3743 vdrop(vp); 3744 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3745 goto loop; 3746 } 3747 /* 3748 * Skip over a vnodes marked VV_SYSTEM. 3749 */ 3750 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3751 VOP_UNLOCK(vp); 3752 vdrop(vp); 3753 continue; 3754 } 3755 /* 3756 * If WRITECLOSE is set, flush out unlinked but still open 3757 * files (even if open only for reading) and regular file 3758 * vnodes open for writing. 
3759 */ 3760 if (flags & WRITECLOSE) { 3761 if (vp->v_object != NULL) { 3762 VM_OBJECT_WLOCK(vp->v_object); 3763 vm_object_page_clean(vp->v_object, 0, 0, 0); 3764 VM_OBJECT_WUNLOCK(vp->v_object); 3765 } 3766 do { 3767 error = VOP_FSYNC(vp, MNT_WAIT, td); 3768 } while (error == ERELOOKUP); 3769 if (error != 0) { 3770 VOP_UNLOCK(vp); 3771 vdrop(vp); 3772 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3773 return (error); 3774 } 3775 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3776 VI_LOCK(vp); 3777 3778 if ((vp->v_type == VNON || 3779 (error == 0 && vattr.va_nlink > 0)) && 3780 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3781 VOP_UNLOCK(vp); 3782 vdropl(vp); 3783 continue; 3784 } 3785 } else 3786 VI_LOCK(vp); 3787 /* 3788 * With v_usecount == 0, all we need to do is clear out the 3789 * vnode data structures and we are done. 3790 * 3791 * If FORCECLOSE is set, forcibly close the vnode. 3792 */ 3793 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3794 vgonel(vp); 3795 } else { 3796 busy++; 3797 #ifdef DIAGNOSTIC 3798 if (busyprt) 3799 vn_printf(vp, "vflush: busy vnode "); 3800 #endif 3801 } 3802 VOP_UNLOCK(vp); 3803 vdropl(vp); 3804 } 3805 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3806 /* 3807 * If just the root vnode is busy, and if its refcount 3808 * is equal to `rootrefs', then go ahead and kill it. 3809 */ 3810 VI_LOCK(rootvp); 3811 KASSERT(busy > 0, ("vflush: not busy")); 3812 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3813 ("vflush: usecount %d < rootrefs %d", 3814 rootvp->v_usecount, rootrefs)); 3815 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3816 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3817 vgone(rootvp); 3818 VOP_UNLOCK(rootvp); 3819 busy = 0; 3820 } else 3821 VI_UNLOCK(rootvp); 3822 } 3823 if (busy) { 3824 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3825 busy); 3826 return (EBUSY); 3827 } 3828 for (; rootrefs > 0; rootrefs--) 3829 vrele(rootvp); 3830 return (0); 3831 } 3832 3833 /* 3834 * Recycle an unused vnode to the front of the free list. 3835 */ 3836 int 3837 vrecycle(struct vnode *vp) 3838 { 3839 int recycled; 3840 3841 VI_LOCK(vp); 3842 recycled = vrecyclel(vp); 3843 VI_UNLOCK(vp); 3844 return (recycled); 3845 } 3846 3847 /* 3848 * vrecycle, with the vp interlock held. 3849 */ 3850 int 3851 vrecyclel(struct vnode *vp) 3852 { 3853 int recycled; 3854 3855 ASSERT_VOP_ELOCKED(vp, __func__); 3856 ASSERT_VI_LOCKED(vp, __func__); 3857 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3858 recycled = 0; 3859 if (vp->v_usecount == 0) { 3860 recycled = 1; 3861 vgonel(vp); 3862 } 3863 return (recycled); 3864 } 3865 3866 /* 3867 * Eliminate all activity associated with a vnode 3868 * in preparation for reuse. 3869 */ 3870 void 3871 vgone(struct vnode *vp) 3872 { 3873 VI_LOCK(vp); 3874 vgonel(vp); 3875 VI_UNLOCK(vp); 3876 } 3877 3878 /* 3879 * Notify upper mounts about reclaimed or unlinked vnode. 
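 *
 * "event" is either VFS_NOTIFY_UPPER_RECLAIM or VFS_NOTIFY_UPPER_UNLINK.
 * Every mount registered on this mount's mnt_notify list (typically a
 * stacked filesystem such as nullfs) receives the corresponding
 * VFS_RECLAIM_LOWERVP() or VFS_UNLINK_LOWERVP() callback.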
3880 */ 3881 void 3882 vfs_notify_upper(struct vnode *vp, int event) 3883 { 3884 struct mount *mp; 3885 struct mount_upper_node *ump; 3886 3887 mp = atomic_load_ptr(&vp->v_mount); 3888 if (mp == NULL) 3889 return; 3890 if (TAILQ_EMPTY(&mp->mnt_notify)) 3891 return; 3892 3893 MNT_ILOCK(mp); 3894 mp->mnt_upper_pending++; 3895 KASSERT(mp->mnt_upper_pending > 0, 3896 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 3897 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 3898 MNT_IUNLOCK(mp); 3899 switch (event) { 3900 case VFS_NOTIFY_UPPER_RECLAIM: 3901 VFS_RECLAIM_LOWERVP(ump->mp, vp); 3902 break; 3903 case VFS_NOTIFY_UPPER_UNLINK: 3904 VFS_UNLINK_LOWERVP(ump->mp, vp); 3905 break; 3906 default: 3907 KASSERT(0, ("invalid event %d", event)); 3908 break; 3909 } 3910 MNT_ILOCK(mp); 3911 } 3912 mp->mnt_upper_pending--; 3913 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 3914 mp->mnt_upper_pending == 0) { 3915 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 3916 wakeup(&mp->mnt_uppers); 3917 } 3918 MNT_IUNLOCK(mp); 3919 } 3920 3921 /* 3922 * vgone, with the vp interlock held. 3923 */ 3924 static void 3925 vgonel(struct vnode *vp) 3926 { 3927 struct thread *td; 3928 struct mount *mp; 3929 vm_object_t object; 3930 bool active, doinginact, oweinact; 3931 3932 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3933 ASSERT_VI_LOCKED(vp, "vgonel"); 3934 VNASSERT(vp->v_holdcnt, vp, 3935 ("vgonel: vp %p has no reference.", vp)); 3936 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3937 td = curthread; 3938 3939 /* 3940 * Don't vgonel if we're already doomed. 3941 */ 3942 if (VN_IS_DOOMED(vp)) 3943 return; 3944 /* 3945 * Paired with freevnode. 3946 */ 3947 vn_seqc_write_begin_locked(vp); 3948 vunlazy_gone(vp); 3949 vn_irflag_set_locked(vp, VIRF_DOOMED); 3950 3951 /* 3952 * Check to see if the vnode is in use. If so, we have to 3953 * call VOP_CLOSE() and VOP_INACTIVE(). 3954 * 3955 * It could be that VOP_INACTIVE() requested reclamation, in 3956 * which case we should avoid recursion, so check 3957 * VI_DOINGINACT. This is not precise but good enough. 3958 */ 3959 active = vp->v_usecount > 0; 3960 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3961 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 3962 3963 /* 3964 * If we need to do inactive VI_OWEINACT will be set. 3965 */ 3966 if (vp->v_iflag & VI_DEFINACT) { 3967 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3968 vp->v_iflag &= ~VI_DEFINACT; 3969 vdropl(vp); 3970 } else { 3971 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3972 VI_UNLOCK(vp); 3973 } 3974 cache_purge_vgone(vp); 3975 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3976 3977 /* 3978 * If purging an active vnode, it must be closed and 3979 * deactivated before being reclaimed. 3980 */ 3981 if (active) 3982 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3983 if (!doinginact) { 3984 do { 3985 if (oweinact || active) { 3986 VI_LOCK(vp); 3987 vinactivef(vp); 3988 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3989 VI_UNLOCK(vp); 3990 } 3991 } while (oweinact); 3992 } 3993 if (vp->v_type == VSOCK) 3994 vfs_unp_reclaim(vp); 3995 3996 /* 3997 * Clean out any buffers associated with the vnode. 3998 * If the flush fails, just toss the buffers. 
3999 */ 4000 mp = NULL; 4001 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4002 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4003 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4004 while (vinvalbuf(vp, 0, 0, 0) != 0) 4005 ; 4006 } 4007 4008 BO_LOCK(&vp->v_bufobj); 4009 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4010 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4011 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4012 vp->v_bufobj.bo_clean.bv_cnt == 0, 4013 ("vp %p bufobj not invalidated", vp)); 4014 4015 /* 4016 * For VMIO bufobj, BO_DEAD is set later, or in 4017 * vm_object_terminate() after the object's page queue is 4018 * flushed. 4019 */ 4020 object = vp->v_bufobj.bo_object; 4021 if (object == NULL) 4022 vp->v_bufobj.bo_flag |= BO_DEAD; 4023 BO_UNLOCK(&vp->v_bufobj); 4024 4025 /* 4026 * Handle the VM part. Tmpfs handles v_object on its own (the 4027 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4028 * should not touch the object borrowed from the lower vnode 4029 * (the handle check). 4030 */ 4031 if (object != NULL && object->type == OBJT_VNODE && 4032 object->handle == vp) 4033 vnode_destroy_vobject(vp); 4034 4035 /* 4036 * Reclaim the vnode. 4037 */ 4038 if (VOP_RECLAIM(vp)) 4039 panic("vgone: cannot reclaim"); 4040 if (mp != NULL) 4041 vn_finished_secondary_write(mp); 4042 VNASSERT(vp->v_object == NULL, vp, 4043 ("vop_reclaim left v_object vp=%p", vp)); 4044 /* 4045 * Clear the advisory locks and wake up waiting threads. 4046 */ 4047 (void)VOP_ADVLOCKPURGE(vp); 4048 vp->v_lockf = NULL; 4049 /* 4050 * Delete from old mount point vnode list. 4051 */ 4052 delmntque(vp); 4053 /* 4054 * Done with purge, reset to the standard lock and invalidate 4055 * the vnode. 4056 */ 4057 VI_LOCK(vp); 4058 vp->v_vnlock = &vp->v_lock; 4059 vp->v_op = &dead_vnodeops; 4060 vp->v_type = VBAD; 4061 } 4062 4063 /* 4064 * Print out a description of a vnode. 4065 */ 4066 static const char * const typename[] = 4067 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4068 "VMARKER"}; 4069 4070 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4071 "new hold count flag not added to vn_printf"); 4072 4073 void 4074 vn_printf(struct vnode *vp, const char *fmt, ...) 
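/*
 * Prints the printf(9)-style prefix given in "fmt", then a multi-line dump
 * of the vnode: type, reference counters, hold/irflag/vflag/iflag/mflag
 * bits, the backing VM object, the lock state and finally the filesystem's
 * own VOP_PRINT() output.  A typical call (sketch) is:
 *
 *	vn_printf(vp, "%s: suspect vnode ", __func__);
 */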
4075 { 4076 va_list ap; 4077 char buf[256], buf2[16]; 4078 u_long flags; 4079 u_int holdcnt; 4080 short irflag; 4081 4082 va_start(ap, fmt); 4083 vprintf(fmt, ap); 4084 va_end(ap); 4085 printf("%p: ", (void *)vp); 4086 printf("type %s\n", typename[vp->v_type]); 4087 holdcnt = atomic_load_int(&vp->v_holdcnt); 4088 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4089 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4090 vp->v_seqc_users); 4091 switch (vp->v_type) { 4092 case VDIR: 4093 printf(" mountedhere %p\n", vp->v_mountedhere); 4094 break; 4095 case VCHR: 4096 printf(" rdev %p\n", vp->v_rdev); 4097 break; 4098 case VSOCK: 4099 printf(" socket %p\n", vp->v_unpcb); 4100 break; 4101 case VFIFO: 4102 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4103 break; 4104 default: 4105 printf("\n"); 4106 break; 4107 } 4108 buf[0] = '\0'; 4109 buf[1] = '\0'; 4110 if (holdcnt & VHOLD_NO_SMR) 4111 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4112 printf(" hold count flags (%s)\n", buf + 1); 4113 4114 buf[0] = '\0'; 4115 buf[1] = '\0'; 4116 irflag = vn_irflag_read(vp); 4117 if (irflag & VIRF_DOOMED) 4118 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4119 if (irflag & VIRF_PGREAD) 4120 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4121 if (irflag & VIRF_MOUNTPOINT) 4122 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4123 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT); 4124 if (flags != 0) { 4125 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4126 strlcat(buf, buf2, sizeof(buf)); 4127 } 4128 if (vp->v_vflag & VV_ROOT) 4129 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4130 if (vp->v_vflag & VV_ISTTY) 4131 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4132 if (vp->v_vflag & VV_NOSYNC) 4133 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4134 if (vp->v_vflag & VV_ETERNALDEV) 4135 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4136 if (vp->v_vflag & VV_CACHEDLABEL) 4137 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4138 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4139 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4140 if (vp->v_vflag & VV_COPYONWRITE) 4141 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4142 if (vp->v_vflag & VV_SYSTEM) 4143 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4144 if (vp->v_vflag & VV_PROCDEP) 4145 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4146 if (vp->v_vflag & VV_NOKNOTE) 4147 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4148 if (vp->v_vflag & VV_DELETED) 4149 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4150 if (vp->v_vflag & VV_MD) 4151 strlcat(buf, "|VV_MD", sizeof(buf)); 4152 if (vp->v_vflag & VV_FORCEINSMQ) 4153 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4154 if (vp->v_vflag & VV_READLINK) 4155 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4156 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4157 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4158 VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | 4159 VV_READLINK); 4160 if (flags != 0) { 4161 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4162 strlcat(buf, buf2, sizeof(buf)); 4163 } 4164 if (vp->v_iflag & VI_TEXT_REF) 4165 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4166 if (vp->v_iflag & VI_MOUNT) 4167 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4168 if (vp->v_iflag & VI_DOINGINACT) 4169 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4170 if (vp->v_iflag & VI_OWEINACT) 4171 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4172 if (vp->v_iflag & VI_DEFINACT) 4173 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4174 if (vp->v_iflag & VI_FOPENING) 4175 strlcat(buf, "|VI_FOPENING", 
sizeof(buf)); 4176 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4177 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4178 if (flags != 0) { 4179 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4180 strlcat(buf, buf2, sizeof(buf)); 4181 } 4182 if (vp->v_mflag & VMP_LAZYLIST) 4183 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4184 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4185 if (flags != 0) { 4186 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4187 strlcat(buf, buf2, sizeof(buf)); 4188 } 4189 printf(" flags (%s)", buf + 1); 4190 if (mtx_owned(VI_MTX(vp))) 4191 printf(" VI_LOCKed"); 4192 printf("\n"); 4193 if (vp->v_object != NULL) 4194 printf(" v_object %p ref %d pages %d " 4195 "cleanbuf %d dirtybuf %d\n", 4196 vp->v_object, vp->v_object->ref_count, 4197 vp->v_object->resident_page_count, 4198 vp->v_bufobj.bo_clean.bv_cnt, 4199 vp->v_bufobj.bo_dirty.bv_cnt); 4200 printf(" "); 4201 lockmgr_printinfo(vp->v_vnlock); 4202 if (vp->v_data != NULL) 4203 VOP_PRINT(vp); 4204 } 4205 4206 #ifdef DDB 4207 /* 4208 * List all of the locked vnodes in the system. 4209 * Called when debugging the kernel. 4210 */ 4211 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4212 { 4213 struct mount *mp; 4214 struct vnode *vp; 4215 4216 /* 4217 * Note: because this is DDB, we can't obey the locking semantics 4218 * for these structures, which means we could catch an inconsistent 4219 * state and dereference a nasty pointer. Not much to be done 4220 * about that. 4221 */ 4222 db_printf("Locked vnodes\n"); 4223 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4224 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4225 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4226 vn_printf(vp, "vnode "); 4227 } 4228 } 4229 } 4230 4231 /* 4232 * Show details about the given vnode. 4233 */ 4234 DB_SHOW_COMMAND(vnode, db_show_vnode) 4235 { 4236 struct vnode *vp; 4237 4238 if (!have_addr) 4239 return; 4240 vp = (struct vnode *)addr; 4241 vn_printf(vp, "vnode "); 4242 } 4243 4244 /* 4245 * Show details about the given mount point. 4246 */ 4247 DB_SHOW_COMMAND(mount, db_show_mount) 4248 { 4249 struct mount *mp; 4250 struct vfsopt *opt; 4251 struct statfs *sp; 4252 struct vnode *vp; 4253 char buf[512]; 4254 uint64_t mflags; 4255 u_int flags; 4256 4257 if (!have_addr) { 4258 /* No address given, print short info about all mount points. 
*/ 4259 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4260 db_printf("%p %s on %s (%s)\n", mp, 4261 mp->mnt_stat.f_mntfromname, 4262 mp->mnt_stat.f_mntonname, 4263 mp->mnt_stat.f_fstypename); 4264 if (db_pager_quit) 4265 break; 4266 } 4267 db_printf("\nMore info: show mount <addr>\n"); 4268 return; 4269 } 4270 4271 mp = (struct mount *)addr; 4272 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4273 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4274 4275 buf[0] = '\0'; 4276 mflags = mp->mnt_flag; 4277 #define MNT_FLAG(flag) do { \ 4278 if (mflags & (flag)) { \ 4279 if (buf[0] != '\0') \ 4280 strlcat(buf, ", ", sizeof(buf)); \ 4281 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4282 mflags &= ~(flag); \ 4283 } \ 4284 } while (0) 4285 MNT_FLAG(MNT_RDONLY); 4286 MNT_FLAG(MNT_SYNCHRONOUS); 4287 MNT_FLAG(MNT_NOEXEC); 4288 MNT_FLAG(MNT_NOSUID); 4289 MNT_FLAG(MNT_NFS4ACLS); 4290 MNT_FLAG(MNT_UNION); 4291 MNT_FLAG(MNT_ASYNC); 4292 MNT_FLAG(MNT_SUIDDIR); 4293 MNT_FLAG(MNT_SOFTDEP); 4294 MNT_FLAG(MNT_NOSYMFOLLOW); 4295 MNT_FLAG(MNT_GJOURNAL); 4296 MNT_FLAG(MNT_MULTILABEL); 4297 MNT_FLAG(MNT_ACLS); 4298 MNT_FLAG(MNT_NOATIME); 4299 MNT_FLAG(MNT_NOCLUSTERR); 4300 MNT_FLAG(MNT_NOCLUSTERW); 4301 MNT_FLAG(MNT_SUJ); 4302 MNT_FLAG(MNT_EXRDONLY); 4303 MNT_FLAG(MNT_EXPORTED); 4304 MNT_FLAG(MNT_DEFEXPORTED); 4305 MNT_FLAG(MNT_EXPORTANON); 4306 MNT_FLAG(MNT_EXKERB); 4307 MNT_FLAG(MNT_EXPUBLIC); 4308 MNT_FLAG(MNT_LOCAL); 4309 MNT_FLAG(MNT_QUOTA); 4310 MNT_FLAG(MNT_ROOTFS); 4311 MNT_FLAG(MNT_USER); 4312 MNT_FLAG(MNT_IGNORE); 4313 MNT_FLAG(MNT_UPDATE); 4314 MNT_FLAG(MNT_DELEXPORT); 4315 MNT_FLAG(MNT_RELOAD); 4316 MNT_FLAG(MNT_FORCE); 4317 MNT_FLAG(MNT_SNAPSHOT); 4318 MNT_FLAG(MNT_BYFSID); 4319 #undef MNT_FLAG 4320 if (mflags != 0) { 4321 if (buf[0] != '\0') 4322 strlcat(buf, ", ", sizeof(buf)); 4323 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4324 "0x%016jx", mflags); 4325 } 4326 db_printf(" mnt_flag = %s\n", buf); 4327 4328 buf[0] = '\0'; 4329 flags = mp->mnt_kern_flag; 4330 #define MNT_KERN_FLAG(flag) do { \ 4331 if (flags & (flag)) { \ 4332 if (buf[0] != '\0') \ 4333 strlcat(buf, ", ", sizeof(buf)); \ 4334 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4335 flags &= ~(flag); \ 4336 } \ 4337 } while (0) 4338 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4339 MNT_KERN_FLAG(MNTK_ASYNC); 4340 MNT_KERN_FLAG(MNTK_SOFTDEP); 4341 MNT_KERN_FLAG(MNTK_DRAINING); 4342 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4343 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4344 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4345 MNT_KERN_FLAG(MNTK_NO_IOPF); 4346 MNT_KERN_FLAG(MNTK_RECURSE); 4347 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4348 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4349 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4350 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4351 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4352 MNT_KERN_FLAG(MNTK_NOASYNC); 4353 MNT_KERN_FLAG(MNTK_UNMOUNT); 4354 MNT_KERN_FLAG(MNTK_MWAIT); 4355 MNT_KERN_FLAG(MNTK_SUSPEND); 4356 MNT_KERN_FLAG(MNTK_SUSPEND2); 4357 MNT_KERN_FLAG(MNTK_SUSPENDED); 4358 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4359 MNT_KERN_FLAG(MNTK_NOKNOTE); 4360 #undef MNT_KERN_FLAG 4361 if (flags != 0) { 4362 if (buf[0] != '\0') 4363 strlcat(buf, ", ", sizeof(buf)); 4364 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4365 "0x%08x", flags); 4366 } 4367 db_printf(" mnt_kern_flag = %s\n", buf); 4368 4369 db_printf(" mnt_opt = "); 4370 opt = TAILQ_FIRST(mp->mnt_opt); 4371 if (opt != NULL) { 4372 db_printf("%s", opt->name); 4373 opt = TAILQ_NEXT(opt, link); 4374 while (opt != NULL) { 4375 db_printf(", %s", opt->name); 4376 opt = TAILQ_NEXT(opt, link); 4377 
} 4378 } 4379 db_printf("\n"); 4380 4381 sp = &mp->mnt_stat; 4382 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4383 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4384 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4385 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4386 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4387 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4388 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4389 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4390 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4391 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4392 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4393 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4394 4395 db_printf(" mnt_cred = { uid=%u ruid=%u", 4396 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4397 if (jailed(mp->mnt_cred)) 4398 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4399 db_printf(" }\n"); 4400 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4401 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4402 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4403 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4404 db_printf(" mnt_lazyvnodelistsize = %d\n", 4405 mp->mnt_lazyvnodelistsize); 4406 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4407 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4408 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4409 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4410 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4411 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4412 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4413 db_printf(" mnt_secondary_accwrites = %d\n", 4414 mp->mnt_secondary_accwrites); 4415 db_printf(" mnt_gjprovider = %s\n", 4416 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4417 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4418 4419 db_printf("\n\nList of active vnodes\n"); 4420 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4421 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4422 vn_printf(vp, "vnode "); 4423 if (db_pager_quit) 4424 break; 4425 } 4426 } 4427 db_printf("\n\nList of inactive vnodes\n"); 4428 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4429 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4430 vn_printf(vp, "vnode "); 4431 if (db_pager_quit) 4432 break; 4433 } 4434 } 4435 } 4436 #endif /* DDB */ 4437 4438 /* 4439 * Fill in a struct xvfsconf based on a struct vfsconf. 4440 */ 4441 static int 4442 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4443 { 4444 struct xvfsconf xvfsp; 4445 4446 bzero(&xvfsp, sizeof(xvfsp)); 4447 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4448 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4449 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4450 xvfsp.vfc_flags = vfsp->vfc_flags; 4451 /* 4452 * These are unused in userland, we keep them 4453 * to not break binary compatibility. 
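 *
 * Userland reads these records through the vfs.conflist sysctl served
 * below; getvfsbyname(3) is the libc wrapper for single lookups.  A
 * minimal consumer (sketch, error handling omitted) looks like:
 *
 *	size_t len;
 *	sysctlbyname("vfs.conflist", NULL, &len, NULL, 0);
 *	struct xvfsconf *xvp = malloc(len);
 *	sysctlbyname("vfs.conflist", xvp, &len, NULL, 0);
 *	for (size_t i = 0; i < len / sizeof(*xvp); i++)
 *		printf("%s (type %d)\n", xvp[i].vfc_name, xvp[i].vfc_typenum);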
4454 */ 4455 xvfsp.vfc_vfsops = NULL; 4456 xvfsp.vfc_next = NULL; 4457 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4458 } 4459 4460 #ifdef COMPAT_FREEBSD32 4461 struct xvfsconf32 { 4462 uint32_t vfc_vfsops; 4463 char vfc_name[MFSNAMELEN]; 4464 int32_t vfc_typenum; 4465 int32_t vfc_refcount; 4466 int32_t vfc_flags; 4467 uint32_t vfc_next; 4468 }; 4469 4470 static int 4471 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4472 { 4473 struct xvfsconf32 xvfsp; 4474 4475 bzero(&xvfsp, sizeof(xvfsp)); 4476 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4477 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4478 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4479 xvfsp.vfc_flags = vfsp->vfc_flags; 4480 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4481 } 4482 #endif 4483 4484 /* 4485 * Top level filesystem related information gathering. 4486 */ 4487 static int 4488 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4489 { 4490 struct vfsconf *vfsp; 4491 int error; 4492 4493 error = 0; 4494 vfsconf_slock(); 4495 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4496 #ifdef COMPAT_FREEBSD32 4497 if (req->flags & SCTL_MASK32) 4498 error = vfsconf2x32(req, vfsp); 4499 else 4500 #endif 4501 error = vfsconf2x(req, vfsp); 4502 if (error) 4503 break; 4504 } 4505 vfsconf_sunlock(); 4506 return (error); 4507 } 4508 4509 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4510 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4511 "S,xvfsconf", "List of all configured filesystems"); 4512 4513 #ifndef BURN_BRIDGES 4514 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4515 4516 static int 4517 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4518 { 4519 int *name = (int *)arg1 - 1; /* XXX */ 4520 u_int namelen = arg2 + 1; /* XXX */ 4521 struct vfsconf *vfsp; 4522 4523 log(LOG_WARNING, "userland calling deprecated sysctl, " 4524 "please rebuild world\n"); 4525 4526 #if 1 || defined(COMPAT_PRELITE2) 4527 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4528 if (namelen == 1) 4529 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4530 #endif 4531 4532 switch (name[1]) { 4533 case VFS_MAXTYPENUM: 4534 if (namelen != 2) 4535 return (ENOTDIR); 4536 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4537 case VFS_CONF: 4538 if (namelen != 3) 4539 return (ENOTDIR); /* overloaded */ 4540 vfsconf_slock(); 4541 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4542 if (vfsp->vfc_typenum == name[2]) 4543 break; 4544 } 4545 vfsconf_sunlock(); 4546 if (vfsp == NULL) 4547 return (EOPNOTSUPP); 4548 #ifdef COMPAT_FREEBSD32 4549 if (req->flags & SCTL_MASK32) 4550 return (vfsconf2x32(req, vfsp)); 4551 else 4552 #endif 4553 return (vfsconf2x(req, vfsp)); 4554 } 4555 return (EOPNOTSUPP); 4556 } 4557 4558 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4559 CTLFLAG_MPSAFE, vfs_sysctl, 4560 "Generic filesystem"); 4561 4562 #if 1 || defined(COMPAT_PRELITE2) 4563 4564 static int 4565 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4566 { 4567 int error; 4568 struct vfsconf *vfsp; 4569 struct ovfsconf ovfs; 4570 4571 vfsconf_slock(); 4572 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4573 bzero(&ovfs, sizeof(ovfs)); 4574 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4575 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4576 ovfs.vfc_index = vfsp->vfc_typenum; 4577 ovfs.vfc_refcount = vfsp->vfc_refcount; 4578 ovfs.vfc_flags = vfsp->vfc_flags; 4579 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4580 if (error != 0) { 4581 vfsconf_sunlock(); 4582 return (error); 4583 } 4584 } 4585 vfsconf_sunlock(); 4586 return (0); 4587 } 4588 4589 #endif /* 1 || COMPAT_PRELITE2 */ 4590 #endif /* !BURN_BRIDGES */ 4591 4592 #define KINFO_VNODESLOP 10 4593 #ifdef notyet 4594 /* 4595 * Dump vnode list (via sysctl). 4596 */ 4597 /* ARGSUSED */ 4598 static int 4599 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4600 { 4601 struct xvnode *xvn; 4602 struct mount *mp; 4603 struct vnode *vp; 4604 int error, len, n; 4605 4606 /* 4607 * Stale numvnodes access is not fatal here. 4608 */ 4609 req->lock = 0; 4610 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4611 if (!req->oldptr) 4612 /* Make an estimate */ 4613 return (SYSCTL_OUT(req, 0, len)); 4614 4615 error = sysctl_wire_old_buffer(req, 0); 4616 if (error != 0) 4617 return (error); 4618 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4619 n = 0; 4620 mtx_lock(&mountlist_mtx); 4621 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4622 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4623 continue; 4624 MNT_ILOCK(mp); 4625 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4626 if (n == len) 4627 break; 4628 vref(vp); 4629 xvn[n].xv_size = sizeof *xvn; 4630 xvn[n].xv_vnode = vp; 4631 xvn[n].xv_id = 0; /* XXX compat */ 4632 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4633 XV_COPY(usecount); 4634 XV_COPY(writecount); 4635 XV_COPY(holdcnt); 4636 XV_COPY(mount); 4637 XV_COPY(numoutput); 4638 XV_COPY(type); 4639 #undef XV_COPY 4640 xvn[n].xv_flag = vp->v_vflag; 4641 4642 switch (vp->v_type) { 4643 case VREG: 4644 case VDIR: 4645 case VLNK: 4646 break; 4647 case VBLK: 4648 case VCHR: 4649 if (vp->v_rdev == NULL) { 4650 vrele(vp); 4651 continue; 4652 } 4653 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4654 break; 4655 case VSOCK: 4656 xvn[n].xv_socket = vp->v_socket; 4657 break; 4658 case VFIFO: 4659 xvn[n].xv_fifo = vp->v_fifoinfo; 4660 break; 4661 case VNON: 4662 case VBAD: 4663 default: 4664 /* shouldn't happen? 
*/ 4665 vrele(vp); 4666 continue; 4667 } 4668 vrele(vp); 4669 ++n; 4670 } 4671 MNT_IUNLOCK(mp); 4672 mtx_lock(&mountlist_mtx); 4673 vfs_unbusy(mp); 4674 if (n == len) 4675 break; 4676 } 4677 mtx_unlock(&mountlist_mtx); 4678 4679 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4680 free(xvn, M_TEMP); 4681 return (error); 4682 } 4683 4684 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4685 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4686 ""); 4687 #endif 4688 4689 static void 4690 unmount_or_warn(struct mount *mp) 4691 { 4692 int error; 4693 4694 error = dounmount(mp, MNT_FORCE, curthread); 4695 if (error != 0) { 4696 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4697 if (error == EBUSY) 4698 printf("BUSY)\n"); 4699 else 4700 printf("%d)\n", error); 4701 } 4702 } 4703 4704 /* 4705 * Unmount all filesystems. The list is traversed in reverse order 4706 * of mounting to avoid dependencies. 4707 */ 4708 void 4709 vfs_unmountall(void) 4710 { 4711 struct mount *mp, *tmp; 4712 4713 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4714 4715 /* 4716 * Since this only runs when rebooting, it is not interlocked. 4717 */ 4718 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4719 vfs_ref(mp); 4720 4721 /* 4722 * Forcibly unmounting "/dev" before "/" would prevent clean 4723 * unmount of the latter. 4724 */ 4725 if (mp == rootdevmp) 4726 continue; 4727 4728 unmount_or_warn(mp); 4729 } 4730 4731 if (rootdevmp != NULL) 4732 unmount_or_warn(rootdevmp); 4733 } 4734 4735 static void 4736 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4737 { 4738 4739 ASSERT_VI_LOCKED(vp, __func__); 4740 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4741 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4742 vdropl(vp); 4743 return; 4744 } 4745 if (vn_lock(vp, lkflags) == 0) { 4746 VI_LOCK(vp); 4747 vinactive(vp); 4748 VOP_UNLOCK(vp); 4749 vdropl(vp); 4750 return; 4751 } 4752 vdefer_inactive_unlocked(vp); 4753 } 4754 4755 static int 4756 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4757 { 4758 4759 return (vp->v_iflag & VI_DEFINACT); 4760 } 4761 4762 static void __noinline 4763 vfs_periodic_inactive(struct mount *mp, int flags) 4764 { 4765 struct vnode *vp, *mvp; 4766 int lkflags; 4767 4768 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4769 if (flags != MNT_WAIT) 4770 lkflags |= LK_NOWAIT; 4771 4772 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4773 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4774 VI_UNLOCK(vp); 4775 continue; 4776 } 4777 vp->v_iflag &= ~VI_DEFINACT; 4778 vfs_deferred_inactive(vp, lkflags); 4779 } 4780 } 4781 4782 static inline bool 4783 vfs_want_msync(struct vnode *vp) 4784 { 4785 struct vm_object *obj; 4786 4787 /* 4788 * This test may be performed without any locks held. 4789 * We rely on vm_object's type stability. 
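 *
 * A stale answer is harmless: the vnode is re-examined with the vnode and
 * object locks held in vfs_periodic_msync_inactive() before any pages are
 * actually cleaned.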
4790 */ 4791 if (vp->v_vflag & VV_NOSYNC) 4792 return (false); 4793 obj = vp->v_object; 4794 return (obj != NULL && vm_object_mightbedirty(obj)); 4795 } 4796 4797 static int 4798 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4799 { 4800 4801 if (vp->v_vflag & VV_NOSYNC) 4802 return (false); 4803 if (vp->v_iflag & VI_DEFINACT) 4804 return (true); 4805 return (vfs_want_msync(vp)); 4806 } 4807 4808 static void __noinline 4809 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4810 { 4811 struct vnode *vp, *mvp; 4812 struct vm_object *obj; 4813 int lkflags, objflags; 4814 bool seen_defer; 4815 4816 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4817 if (flags != MNT_WAIT) { 4818 lkflags |= LK_NOWAIT; 4819 objflags = OBJPC_NOSYNC; 4820 } else { 4821 objflags = OBJPC_SYNC; 4822 } 4823 4824 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4825 seen_defer = false; 4826 if (vp->v_iflag & VI_DEFINACT) { 4827 vp->v_iflag &= ~VI_DEFINACT; 4828 seen_defer = true; 4829 } 4830 if (!vfs_want_msync(vp)) { 4831 if (seen_defer) 4832 vfs_deferred_inactive(vp, lkflags); 4833 else 4834 VI_UNLOCK(vp); 4835 continue; 4836 } 4837 if (vget(vp, lkflags) == 0) { 4838 obj = vp->v_object; 4839 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4840 VM_OBJECT_WLOCK(obj); 4841 vm_object_page_clean(obj, 0, 0, objflags); 4842 VM_OBJECT_WUNLOCK(obj); 4843 } 4844 vput(vp); 4845 if (seen_defer) 4846 vdrop(vp); 4847 } else { 4848 if (seen_defer) 4849 vdefer_inactive_unlocked(vp); 4850 } 4851 } 4852 } 4853 4854 void 4855 vfs_periodic(struct mount *mp, int flags) 4856 { 4857 4858 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4859 4860 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4861 vfs_periodic_inactive(mp, flags); 4862 else 4863 vfs_periodic_msync_inactive(mp, flags); 4864 } 4865 4866 static void 4867 destroy_vpollinfo_free(struct vpollinfo *vi) 4868 { 4869 4870 knlist_destroy(&vi->vpi_selinfo.si_note); 4871 mtx_destroy(&vi->vpi_lock); 4872 free(vi, M_VNODEPOLL); 4873 } 4874 4875 static void 4876 destroy_vpollinfo(struct vpollinfo *vi) 4877 { 4878 4879 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4880 seldrain(&vi->vpi_selinfo); 4881 destroy_vpollinfo_free(vi); 4882 } 4883 4884 /* 4885 * Initialize per-vnode helper structure to hold poll-related state. 4886 */ 4887 void 4888 v_addpollinfo(struct vnode *vp) 4889 { 4890 struct vpollinfo *vi; 4891 4892 if (vp->v_pollinfo != NULL) 4893 return; 4894 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 4895 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4896 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4897 vfs_knlunlock, vfs_knl_assert_lock); 4898 VI_LOCK(vp); 4899 if (vp->v_pollinfo != NULL) { 4900 VI_UNLOCK(vp); 4901 destroy_vpollinfo_free(vi); 4902 return; 4903 } 4904 vp->v_pollinfo = vi; 4905 VI_UNLOCK(vp); 4906 } 4907 4908 /* 4909 * Record a process's interest in events which might happen to 4910 * a vnode. Because poll uses the historic select-style interface 4911 * internally, this routine serves as both the ``check for any 4912 * pending events'' and the ``record my interest in future events'' 4913 * functions. (These are done together, while the lock is held, 4914 * to avoid race conditions.) 
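 *
 * A VOP_POLL() implementation that has nothing ready to report typically
 * just forwards the request here (sketch, with any filesystem-specific
 * readiness checks left out):
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));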
4915 */ 4916 int 4917 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4918 { 4919 4920 v_addpollinfo(vp); 4921 mtx_lock(&vp->v_pollinfo->vpi_lock); 4922 if (vp->v_pollinfo->vpi_revents & events) { 4923 /* 4924 * This leaves events we are not interested 4925 * in available for the other process which 4926 * which presumably had requested them 4927 * (otherwise they would never have been 4928 * recorded). 4929 */ 4930 events &= vp->v_pollinfo->vpi_revents; 4931 vp->v_pollinfo->vpi_revents &= ~events; 4932 4933 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4934 return (events); 4935 } 4936 vp->v_pollinfo->vpi_events |= events; 4937 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4938 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4939 return (0); 4940 } 4941 4942 /* 4943 * Routine to create and manage a filesystem syncer vnode. 4944 */ 4945 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4946 static int sync_fsync(struct vop_fsync_args *); 4947 static int sync_inactive(struct vop_inactive_args *); 4948 static int sync_reclaim(struct vop_reclaim_args *); 4949 4950 static struct vop_vector sync_vnodeops = { 4951 .vop_bypass = VOP_EOPNOTSUPP, 4952 .vop_close = sync_close, /* close */ 4953 .vop_fsync = sync_fsync, /* fsync */ 4954 .vop_inactive = sync_inactive, /* inactive */ 4955 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4956 .vop_reclaim = sync_reclaim, /* reclaim */ 4957 .vop_lock1 = vop_stdlock, /* lock */ 4958 .vop_unlock = vop_stdunlock, /* unlock */ 4959 .vop_islocked = vop_stdislocked, /* islocked */ 4960 }; 4961 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4962 4963 /* 4964 * Create a new filesystem syncer vnode for the specified mount point. 4965 */ 4966 void 4967 vfs_allocate_syncvnode(struct mount *mp) 4968 { 4969 struct vnode *vp; 4970 struct bufobj *bo; 4971 static long start, incr, next; 4972 int error; 4973 4974 /* Allocate a new vnode */ 4975 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4976 if (error != 0) 4977 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4978 vp->v_type = VNON; 4979 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4980 vp->v_vflag |= VV_FORCEINSMQ; 4981 error = insmntque(vp, mp); 4982 if (error != 0) 4983 panic("vfs_allocate_syncvnode: insmntque() failed"); 4984 vp->v_vflag &= ~VV_FORCEINSMQ; 4985 VOP_UNLOCK(vp); 4986 /* 4987 * Place the vnode onto the syncer worklist. We attempt to 4988 * scatter them about on the list so that they will go off 4989 * at evenly distributed times even if all the filesystems 4990 * are mounted at once. 4991 */ 4992 next += incr; 4993 if (next == 0 || next > syncer_maxdelay) { 4994 start /= 2; 4995 incr /= 2; 4996 if (start == 0) { 4997 start = syncer_maxdelay / 2; 4998 incr = syncer_maxdelay; 4999 } 5000 next = start; 5001 } 5002 bo = &vp->v_bufobj; 5003 BO_LOCK(bo); 5004 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5005 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. 
*/ 5006 mtx_lock(&sync_mtx); 5007 sync_vnode_count++; 5008 if (mp->mnt_syncer == NULL) { 5009 mp->mnt_syncer = vp; 5010 vp = NULL; 5011 } 5012 mtx_unlock(&sync_mtx); 5013 BO_UNLOCK(bo); 5014 if (vp != NULL) { 5015 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5016 vgone(vp); 5017 vput(vp); 5018 } 5019 } 5020 5021 void 5022 vfs_deallocate_syncvnode(struct mount *mp) 5023 { 5024 struct vnode *vp; 5025 5026 mtx_lock(&sync_mtx); 5027 vp = mp->mnt_syncer; 5028 if (vp != NULL) 5029 mp->mnt_syncer = NULL; 5030 mtx_unlock(&sync_mtx); 5031 if (vp != NULL) 5032 vrele(vp); 5033 } 5034 5035 /* 5036 * Do a lazy sync of the filesystem. 5037 */ 5038 static int 5039 sync_fsync(struct vop_fsync_args *ap) 5040 { 5041 struct vnode *syncvp = ap->a_vp; 5042 struct mount *mp = syncvp->v_mount; 5043 int error, save; 5044 struct bufobj *bo; 5045 5046 /* 5047 * We only need to do something if this is a lazy evaluation. 5048 */ 5049 if (ap->a_waitfor != MNT_LAZY) 5050 return (0); 5051 5052 /* 5053 * Move ourselves to the back of the sync list. 5054 */ 5055 bo = &syncvp->v_bufobj; 5056 BO_LOCK(bo); 5057 vn_syncer_add_to_worklist(bo, syncdelay); 5058 BO_UNLOCK(bo); 5059 5060 /* 5061 * Walk the list of vnodes pushing all that are dirty and 5062 * not already on the sync list. 5063 */ 5064 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5065 return (0); 5066 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 5067 vfs_unbusy(mp); 5068 return (0); 5069 } 5070 save = curthread_pflags_set(TDP_SYNCIO); 5071 /* 5072 * The filesystem at hand may be idle with free vnodes stored in the 5073 * batch. Return them instead of letting them stay there indefinitely. 5074 */ 5075 vfs_periodic(mp, MNT_NOWAIT); 5076 error = VFS_SYNC(mp, MNT_LAZY); 5077 curthread_pflags_restore(save); 5078 vn_finished_write(mp); 5079 vfs_unbusy(mp); 5080 return (error); 5081 } 5082 5083 /* 5084 * The syncer vnode is no referenced. 5085 */ 5086 static int 5087 sync_inactive(struct vop_inactive_args *ap) 5088 { 5089 5090 vgone(ap->a_vp); 5091 return (0); 5092 } 5093 5094 /* 5095 * The syncer vnode is no longer needed and is being decommissioned. 5096 * 5097 * Modifications to the worklist must be protected by sync_mtx. 
5098 */ 5099 static int 5100 sync_reclaim(struct vop_reclaim_args *ap) 5101 { 5102 struct vnode *vp = ap->a_vp; 5103 struct bufobj *bo; 5104 5105 bo = &vp->v_bufobj; 5106 BO_LOCK(bo); 5107 mtx_lock(&sync_mtx); 5108 if (vp->v_mount->mnt_syncer == vp) 5109 vp->v_mount->mnt_syncer = NULL; 5110 if (bo->bo_flag & BO_ONWORKLST) { 5111 LIST_REMOVE(bo, bo_synclist); 5112 syncer_worklist_len--; 5113 sync_vnode_count--; 5114 bo->bo_flag &= ~BO_ONWORKLST; 5115 } 5116 mtx_unlock(&sync_mtx); 5117 BO_UNLOCK(bo); 5118 5119 return (0); 5120 } 5121 5122 int 5123 vn_need_pageq_flush(struct vnode *vp) 5124 { 5125 struct vm_object *obj; 5126 5127 obj = vp->v_object; 5128 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5129 vm_object_mightbedirty(obj)); 5130 } 5131 5132 /* 5133 * Check if vnode represents a disk device 5134 */ 5135 bool 5136 vn_isdisk_error(struct vnode *vp, int *errp) 5137 { 5138 int error; 5139 5140 if (vp->v_type != VCHR) { 5141 error = ENOTBLK; 5142 goto out; 5143 } 5144 error = 0; 5145 dev_lock(); 5146 if (vp->v_rdev == NULL) 5147 error = ENXIO; 5148 else if (vp->v_rdev->si_devsw == NULL) 5149 error = ENXIO; 5150 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5151 error = ENOTBLK; 5152 dev_unlock(); 5153 out: 5154 *errp = error; 5155 return (error == 0); 5156 } 5157 5158 bool 5159 vn_isdisk(struct vnode *vp) 5160 { 5161 int error; 5162 5163 return (vn_isdisk_error(vp, &error)); 5164 } 5165 5166 /* 5167 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5168 * the comment above cache_fplookup for details. 5169 */ 5170 int 5171 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5172 { 5173 int error; 5174 5175 VFS_SMR_ASSERT_ENTERED(); 5176 5177 /* Check the owner. */ 5178 if (cred->cr_uid == file_uid) { 5179 if (file_mode & S_IXUSR) 5180 return (0); 5181 goto out_error; 5182 } 5183 5184 /* Otherwise, check the groups (first match) */ 5185 if (groupmember(file_gid, cred)) { 5186 if (file_mode & S_IXGRP) 5187 return (0); 5188 goto out_error; 5189 } 5190 5191 /* Otherwise, check everyone else. */ 5192 if (file_mode & S_IXOTH) 5193 return (0); 5194 out_error: 5195 /* 5196 * Permission check failed, but it is possible denial will get overwritten 5197 * (e.g., when root is traversing through a 700 directory owned by someone 5198 * else). 5199 * 5200 * vaccess() calls priv_check_cred which in turn can descent into MAC 5201 * modules overriding this result. It's quite unclear what semantics 5202 * are allowed for them to operate, thus for safety we don't call them 5203 * from within the SMR section. This also means if any such modules 5204 * are present, we have to let the regular lookup decide. 5205 */ 5206 error = priv_check_cred_vfs_lookup_nomac(cred); 5207 switch (error) { 5208 case 0: 5209 return (0); 5210 case EAGAIN: 5211 /* 5212 * MAC modules present. 5213 */ 5214 return (EAGAIN); 5215 case EPERM: 5216 return (EACCES); 5217 default: 5218 return (error); 5219 } 5220 } 5221 5222 /* 5223 * Common filesystem object access control check routine. Accepts a 5224 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5225 * Returns 0 on success, or an errno on failure. 
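 *
 * A typical VOP_ACCESS() implementation ends up here once it has fetched
 * the owner and mode from its inode (sketch; "node" stands in for the
 * filesystem's private per-vnode data):
 *
 *	return (vaccess(vp->v_type, node->mode, node->uid, node->gid,
 *	    ap->a_accmode, ap->a_cred));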
5226 */ 5227 int 5228 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5229 accmode_t accmode, struct ucred *cred) 5230 { 5231 accmode_t dac_granted; 5232 accmode_t priv_granted; 5233 5234 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5235 ("invalid bit in accmode")); 5236 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5237 ("VAPPEND without VWRITE")); 5238 5239 /* 5240 * Look for a normal, non-privileged way to access the file/directory 5241 * as requested. If it exists, go with that. 5242 */ 5243 5244 dac_granted = 0; 5245 5246 /* Check the owner. */ 5247 if (cred->cr_uid == file_uid) { 5248 dac_granted |= VADMIN; 5249 if (file_mode & S_IXUSR) 5250 dac_granted |= VEXEC; 5251 if (file_mode & S_IRUSR) 5252 dac_granted |= VREAD; 5253 if (file_mode & S_IWUSR) 5254 dac_granted |= (VWRITE | VAPPEND); 5255 5256 if ((accmode & dac_granted) == accmode) 5257 return (0); 5258 5259 goto privcheck; 5260 } 5261 5262 /* Otherwise, check the groups (first match) */ 5263 if (groupmember(file_gid, cred)) { 5264 if (file_mode & S_IXGRP) 5265 dac_granted |= VEXEC; 5266 if (file_mode & S_IRGRP) 5267 dac_granted |= VREAD; 5268 if (file_mode & S_IWGRP) 5269 dac_granted |= (VWRITE | VAPPEND); 5270 5271 if ((accmode & dac_granted) == accmode) 5272 return (0); 5273 5274 goto privcheck; 5275 } 5276 5277 /* Otherwise, check everyone else. */ 5278 if (file_mode & S_IXOTH) 5279 dac_granted |= VEXEC; 5280 if (file_mode & S_IROTH) 5281 dac_granted |= VREAD; 5282 if (file_mode & S_IWOTH) 5283 dac_granted |= (VWRITE | VAPPEND); 5284 if ((accmode & dac_granted) == accmode) 5285 return (0); 5286 5287 privcheck: 5288 /* 5289 * Build a privilege mask to determine if the set of privileges 5290 * satisfies the requirements when combined with the granted mask 5291 * from above. For each privilege, if the privilege is required, 5292 * bitwise or the request type onto the priv_granted mask. 5293 */ 5294 priv_granted = 0; 5295 5296 if (type == VDIR) { 5297 /* 5298 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5299 * requests, instead of PRIV_VFS_EXEC. 5300 */ 5301 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5302 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5303 priv_granted |= VEXEC; 5304 } else { 5305 /* 5306 * Ensure that at least one execute bit is on. Otherwise, 5307 * a privileged user will always succeed, and we don't want 5308 * this to happen unless the file really is executable. 5309 */ 5310 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5311 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5312 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5313 priv_granted |= VEXEC; 5314 } 5315 5316 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5317 !priv_check_cred(cred, PRIV_VFS_READ)) 5318 priv_granted |= VREAD; 5319 5320 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5321 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5322 priv_granted |= (VWRITE | VAPPEND); 5323 5324 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5325 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5326 priv_granted |= VADMIN; 5327 5328 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5329 return (0); 5330 } 5331 5332 return ((accmode & VADMIN) ? EPERM : EACCES); 5333 } 5334 5335 /* 5336 * Credential check based on process requesting service, and per-attribute 5337 * permissions. 
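 *
 * Filesystems typically call this from their extended attribute VOPs before
 * touching any attribute data, passing VREAD for retrieval and listing
 * requests and VWRITE for set and delete requests.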
5338 */ 5339 int 5340 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5341 struct thread *td, accmode_t accmode) 5342 { 5343 5344 /* 5345 * Kernel-invoked always succeeds. 5346 */ 5347 if (cred == NOCRED) 5348 return (0); 5349 5350 /* 5351 * Do not allow privileged processes in jail to directly manipulate 5352 * system attributes. 5353 */ 5354 switch (attrnamespace) { 5355 case EXTATTR_NAMESPACE_SYSTEM: 5356 /* Potentially should be: return (EPERM); */ 5357 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5358 case EXTATTR_NAMESPACE_USER: 5359 return (VOP_ACCESS(vp, accmode, cred, td)); 5360 default: 5361 return (EPERM); 5362 } 5363 } 5364 5365 #ifdef DEBUG_VFS_LOCKS 5366 /* 5367 * This only exists to suppress warnings from unlocked specfs accesses. It is 5368 * no longer ok to have an unlocked VFS. 5369 */ 5370 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5371 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5372 5373 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5374 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5375 "Drop into debugger on lock violation"); 5376 5377 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5378 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5379 0, "Check for interlock across VOPs"); 5380 5381 int vfs_badlock_print = 1; /* Print lock violations. */ 5382 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5383 0, "Print lock violations"); 5384 5385 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5386 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5387 0, "Print vnode details on lock violations"); 5388 5389 #ifdef KDB 5390 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 5391 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5392 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5393 #endif 5394 5395 static void 5396 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5397 { 5398 5399 #ifdef KDB 5400 if (vfs_badlock_backtrace) 5401 kdb_backtrace(); 5402 #endif 5403 if (vfs_badlock_vnode) 5404 vn_printf(vp, "vnode "); 5405 if (vfs_badlock_print) 5406 printf("%s: %p %s\n", str, (void *)vp, msg); 5407 if (vfs_badlock_ddb) 5408 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5409 } 5410 5411 void 5412 assert_vi_locked(struct vnode *vp, const char *str) 5413 { 5414 5415 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5416 vfs_badlock("interlock is not locked but should be", str, vp); 5417 } 5418 5419 void 5420 assert_vi_unlocked(struct vnode *vp, const char *str) 5421 { 5422 5423 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5424 vfs_badlock("interlock is locked but should not be", str, vp); 5425 } 5426 5427 void 5428 assert_vop_locked(struct vnode *vp, const char *str) 5429 { 5430 int locked; 5431 5432 if (!IGNORE_LOCK(vp)) { 5433 locked = VOP_ISLOCKED(vp); 5434 if (locked == 0 || locked == LK_EXCLOTHER) 5435 vfs_badlock("is not locked but should be", str, vp); 5436 } 5437 } 5438 5439 void 5440 assert_vop_unlocked(struct vnode *vp, const char *str) 5441 { 5442 5443 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5444 vfs_badlock("is locked but should not be", str, vp); 5445 } 5446 5447 void 5448 assert_vop_elocked(struct vnode *vp, const char *str) 5449 { 5450 5451 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5452 vfs_badlock("is not exclusive locked but should be", str, vp); 5453 } 5454 #endif /* DEBUG_VFS_LOCKS */ 5455 5456 void 5457 vop_rename_fail(struct vop_rename_args *ap) 5458 { 5459 5460 if (ap->a_tvp != NULL) 5461 vput(ap->a_tvp); 5462 if (ap->a_tdvp == ap->a_tvp) 5463 vrele(ap->a_tdvp); 5464 else 5465 vput(ap->a_tdvp); 5466 vrele(ap->a_fdvp); 5467 vrele(ap->a_fvp); 5468 } 5469 5470 void 5471 vop_rename_pre(void *ap) 5472 { 5473 struct vop_rename_args *a = ap; 5474 5475 #ifdef DEBUG_VFS_LOCKS 5476 if (a->a_tvp) 5477 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5478 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5479 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5480 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5481 5482 /* Check the source (from). */ 5483 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5484 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5485 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5486 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5487 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5488 5489 /* Check the target. */ 5490 if (a->a_tvp) 5491 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5492 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5493 #endif 5494 /* 5495 * It may be tempting to add vn_seqc_write_begin/end calls here and 5496 * in vop_rename_post but that's not going to work out since some 5497 * filesystems relookup vnodes mid-rename. This is probably a bug. 5498 * 5499 * For now filesystems are expected to do the relevant calls after they 5500 * decide what vnodes to operate on. 
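 *
 * The holds taken below are paired with the vdrop calls in vop_rename_post:
 * they keep all four vnodes from being freed so the post hook can still
 * deliver knotes for them, regardless of what the filesystem did with its
 * own references during the rename.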
5501 */ 5502 if (a->a_tdvp != a->a_fdvp) 5503 vhold(a->a_fdvp); 5504 if (a->a_tvp != a->a_fvp) 5505 vhold(a->a_fvp); 5506 vhold(a->a_tdvp); 5507 if (a->a_tvp) 5508 vhold(a->a_tvp); 5509 } 5510 5511 #ifdef DEBUG_VFS_LOCKS 5512 void 5513 vop_fplookup_vexec_debugpre(void *ap __unused) 5514 { 5515 5516 VFS_SMR_ASSERT_ENTERED(); 5517 } 5518 5519 void 5520 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5521 { 5522 5523 VFS_SMR_ASSERT_ENTERED(); 5524 } 5525 5526 void 5527 vop_fplookup_symlink_debugpre(void *ap __unused) 5528 { 5529 5530 VFS_SMR_ASSERT_ENTERED(); 5531 } 5532 5533 void 5534 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5535 { 5536 5537 VFS_SMR_ASSERT_ENTERED(); 5538 } 5539 void 5540 vop_strategy_debugpre(void *ap) 5541 { 5542 struct vop_strategy_args *a; 5543 struct buf *bp; 5544 5545 a = ap; 5546 bp = a->a_bp; 5547 5548 /* 5549 * Cluster ops lock their component buffers but not the IO container. 5550 */ 5551 if ((bp->b_flags & B_CLUSTER) != 0) 5552 return; 5553 5554 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5555 if (vfs_badlock_print) 5556 printf( 5557 "VOP_STRATEGY: bp is not locked but should be\n"); 5558 if (vfs_badlock_ddb) 5559 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5560 } 5561 } 5562 5563 void 5564 vop_lock_debugpre(void *ap) 5565 { 5566 struct vop_lock1_args *a = ap; 5567 5568 if ((a->a_flags & LK_INTERLOCK) == 0) 5569 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5570 else 5571 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5572 } 5573 5574 void 5575 vop_lock_debugpost(void *ap, int rc) 5576 { 5577 struct vop_lock1_args *a = ap; 5578 5579 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5580 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5581 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5582 } 5583 5584 void 5585 vop_unlock_debugpre(void *ap) 5586 { 5587 struct vop_unlock_args *a = ap; 5588 5589 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5590 } 5591 5592 void 5593 vop_need_inactive_debugpre(void *ap) 5594 { 5595 struct vop_need_inactive_args *a = ap; 5596 5597 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5598 } 5599 5600 void 5601 vop_need_inactive_debugpost(void *ap, int rc) 5602 { 5603 struct vop_need_inactive_args *a = ap; 5604 5605 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5606 } 5607 #endif 5608 5609 void 5610 vop_create_pre(void *ap) 5611 { 5612 struct vop_create_args *a; 5613 struct vnode *dvp; 5614 5615 a = ap; 5616 dvp = a->a_dvp; 5617 vn_seqc_write_begin(dvp); 5618 } 5619 5620 void 5621 vop_create_post(void *ap, int rc) 5622 { 5623 struct vop_create_args *a; 5624 struct vnode *dvp; 5625 5626 a = ap; 5627 dvp = a->a_dvp; 5628 vn_seqc_write_end(dvp); 5629 if (!rc) 5630 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5631 } 5632 5633 void 5634 vop_whiteout_pre(void *ap) 5635 { 5636 struct vop_whiteout_args *a; 5637 struct vnode *dvp; 5638 5639 a = ap; 5640 dvp = a->a_dvp; 5641 vn_seqc_write_begin(dvp); 5642 } 5643 5644 void 5645 vop_whiteout_post(void *ap, int rc) 5646 { 5647 struct vop_whiteout_args *a; 5648 struct vnode *dvp; 5649 5650 a = ap; 5651 dvp = a->a_dvp; 5652 vn_seqc_write_end(dvp); 5653 } 5654 5655 void 5656 vop_deleteextattr_pre(void *ap) 5657 { 5658 struct vop_deleteextattr_args *a; 5659 struct vnode *vp; 5660 5661 a = ap; 5662 vp = a->a_vp; 5663 vn_seqc_write_begin(vp); 5664 } 5665 5666 void 5667 vop_deleteextattr_post(void *ap, int rc) 5668 { 5669 struct vop_deleteextattr_args *a; 5670 struct vnode *vp; 5671 5672 a = ap; 5673 vp = a->a_vp; 5674 vn_seqc_write_end(vp); 5675 if (!rc) 5676 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 
5677 } 5678 5679 void 5680 vop_link_pre(void *ap) 5681 { 5682 struct vop_link_args *a; 5683 struct vnode *vp, *tdvp; 5684 5685 a = ap; 5686 vp = a->a_vp; 5687 tdvp = a->a_tdvp; 5688 vn_seqc_write_begin(vp); 5689 vn_seqc_write_begin(tdvp); 5690 } 5691 5692 void 5693 vop_link_post(void *ap, int rc) 5694 { 5695 struct vop_link_args *a; 5696 struct vnode *vp, *tdvp; 5697 5698 a = ap; 5699 vp = a->a_vp; 5700 tdvp = a->a_tdvp; 5701 vn_seqc_write_end(vp); 5702 vn_seqc_write_end(tdvp); 5703 if (!rc) { 5704 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5705 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5706 } 5707 } 5708 5709 void 5710 vop_mkdir_pre(void *ap) 5711 { 5712 struct vop_mkdir_args *a; 5713 struct vnode *dvp; 5714 5715 a = ap; 5716 dvp = a->a_dvp; 5717 vn_seqc_write_begin(dvp); 5718 } 5719 5720 void 5721 vop_mkdir_post(void *ap, int rc) 5722 { 5723 struct vop_mkdir_args *a; 5724 struct vnode *dvp; 5725 5726 a = ap; 5727 dvp = a->a_dvp; 5728 vn_seqc_write_end(dvp); 5729 if (!rc) 5730 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5731 } 5732 5733 #ifdef DEBUG_VFS_LOCKS 5734 void 5735 vop_mkdir_debugpost(void *ap, int rc) 5736 { 5737 struct vop_mkdir_args *a; 5738 5739 a = ap; 5740 if (!rc) 5741 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5742 } 5743 #endif 5744 5745 void 5746 vop_mknod_pre(void *ap) 5747 { 5748 struct vop_mknod_args *a; 5749 struct vnode *dvp; 5750 5751 a = ap; 5752 dvp = a->a_dvp; 5753 vn_seqc_write_begin(dvp); 5754 } 5755 5756 void 5757 vop_mknod_post(void *ap, int rc) 5758 { 5759 struct vop_mknod_args *a; 5760 struct vnode *dvp; 5761 5762 a = ap; 5763 dvp = a->a_dvp; 5764 vn_seqc_write_end(dvp); 5765 if (!rc) 5766 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5767 } 5768 5769 void 5770 vop_reclaim_post(void *ap, int rc) 5771 { 5772 struct vop_reclaim_args *a; 5773 struct vnode *vp; 5774 5775 a = ap; 5776 vp = a->a_vp; 5777 ASSERT_VOP_IN_SEQC(vp); 5778 if (!rc) 5779 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5780 } 5781 5782 void 5783 vop_remove_pre(void *ap) 5784 { 5785 struct vop_remove_args *a; 5786 struct vnode *dvp, *vp; 5787 5788 a = ap; 5789 dvp = a->a_dvp; 5790 vp = a->a_vp; 5791 vn_seqc_write_begin(dvp); 5792 vn_seqc_write_begin(vp); 5793 } 5794 5795 void 5796 vop_remove_post(void *ap, int rc) 5797 { 5798 struct vop_remove_args *a; 5799 struct vnode *dvp, *vp; 5800 5801 a = ap; 5802 dvp = a->a_dvp; 5803 vp = a->a_vp; 5804 vn_seqc_write_end(dvp); 5805 vn_seqc_write_end(vp); 5806 if (!rc) { 5807 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5808 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5809 } 5810 } 5811 5812 void 5813 vop_rename_post(void *ap, int rc) 5814 { 5815 struct vop_rename_args *a = ap; 5816 long hint; 5817 5818 if (!rc) { 5819 hint = NOTE_WRITE; 5820 if (a->a_fdvp == a->a_tdvp) { 5821 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5822 hint |= NOTE_LINK; 5823 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5824 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5825 } else { 5826 hint |= NOTE_EXTEND; 5827 if (a->a_fvp->v_type == VDIR) 5828 hint |= NOTE_LINK; 5829 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5830 5831 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5832 a->a_tvp->v_type == VDIR) 5833 hint &= ~NOTE_LINK; 5834 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5835 } 5836 5837 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5838 if (a->a_tvp) 5839 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5840 } 5841 if (a->a_tdvp != a->a_fdvp) 5842 vdrop(a->a_fdvp); 5843 if (a->a_tvp != a->a_fvp) 5844 vdrop(a->a_fvp); 5845 vdrop(a->a_tdvp); 5846 if (a->a_tvp) 5847 vdrop(a->a_tvp); 5848 } 5849 5850 void 5851 vop_rmdir_pre(void *ap) 5852 { 5853 struct 
vop_rmdir_args *a; 5854 struct vnode *dvp, *vp; 5855 5856 a = ap; 5857 dvp = a->a_dvp; 5858 vp = a->a_vp; 5859 vn_seqc_write_begin(dvp); 5860 vn_seqc_write_begin(vp); 5861 } 5862 5863 void 5864 vop_rmdir_post(void *ap, int rc) 5865 { 5866 struct vop_rmdir_args *a; 5867 struct vnode *dvp, *vp; 5868 5869 a = ap; 5870 dvp = a->a_dvp; 5871 vp = a->a_vp; 5872 vn_seqc_write_end(dvp); 5873 vn_seqc_write_end(vp); 5874 if (!rc) { 5875 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5876 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5877 } 5878 } 5879 5880 void 5881 vop_setattr_pre(void *ap) 5882 { 5883 struct vop_setattr_args *a; 5884 struct vnode *vp; 5885 5886 a = ap; 5887 vp = a->a_vp; 5888 vn_seqc_write_begin(vp); 5889 } 5890 5891 void 5892 vop_setattr_post(void *ap, int rc) 5893 { 5894 struct vop_setattr_args *a; 5895 struct vnode *vp; 5896 5897 a = ap; 5898 vp = a->a_vp; 5899 vn_seqc_write_end(vp); 5900 if (!rc) 5901 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5902 } 5903 5904 void 5905 vop_setacl_pre(void *ap) 5906 { 5907 struct vop_setacl_args *a; 5908 struct vnode *vp; 5909 5910 a = ap; 5911 vp = a->a_vp; 5912 vn_seqc_write_begin(vp); 5913 } 5914 5915 void 5916 vop_setacl_post(void *ap, int rc __unused) 5917 { 5918 struct vop_setacl_args *a; 5919 struct vnode *vp; 5920 5921 a = ap; 5922 vp = a->a_vp; 5923 vn_seqc_write_end(vp); 5924 } 5925 5926 void 5927 vop_setextattr_pre(void *ap) 5928 { 5929 struct vop_setextattr_args *a; 5930 struct vnode *vp; 5931 5932 a = ap; 5933 vp = a->a_vp; 5934 vn_seqc_write_begin(vp); 5935 } 5936 5937 void 5938 vop_setextattr_post(void *ap, int rc) 5939 { 5940 struct vop_setextattr_args *a; 5941 struct vnode *vp; 5942 5943 a = ap; 5944 vp = a->a_vp; 5945 vn_seqc_write_end(vp); 5946 if (!rc) 5947 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5948 } 5949 5950 void 5951 vop_symlink_pre(void *ap) 5952 { 5953 struct vop_symlink_args *a; 5954 struct vnode *dvp; 5955 5956 a = ap; 5957 dvp = a->a_dvp; 5958 vn_seqc_write_begin(dvp); 5959 } 5960 5961 void 5962 vop_symlink_post(void *ap, int rc) 5963 { 5964 struct vop_symlink_args *a; 5965 struct vnode *dvp; 5966 5967 a = ap; 5968 dvp = a->a_dvp; 5969 vn_seqc_write_end(dvp); 5970 if (!rc) 5971 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5972 } 5973 5974 void 5975 vop_open_post(void *ap, int rc) 5976 { 5977 struct vop_open_args *a = ap; 5978 5979 if (!rc) 5980 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5981 } 5982 5983 void 5984 vop_close_post(void *ap, int rc) 5985 { 5986 struct vop_close_args *a = ap; 5987 5988 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5989 !VN_IS_DOOMED(a->a_vp))) { 5990 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5991 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5992 } 5993 } 5994 5995 void 5996 vop_read_post(void *ap, int rc) 5997 { 5998 struct vop_read_args *a = ap; 5999 6000 if (!rc) 6001 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6002 } 6003 6004 void 6005 vop_read_pgcache_post(void *ap, int rc) 6006 { 6007 struct vop_read_pgcache_args *a = ap; 6008 6009 if (!rc) 6010 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6011 } 6012 6013 void 6014 vop_readdir_post(void *ap, int rc) 6015 { 6016 struct vop_readdir_args *a = ap; 6017 6018 if (!rc) 6019 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6020 } 6021 6022 static struct knlist fs_knlist; 6023 6024 static void 6025 vfs_event_init(void *arg) 6026 { 6027 knlist_init_mtx(&fs_knlist, NULL); 6028 } 6029 /* XXX - correct order? 
*/ 6030 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6031 6032 void 6033 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6034 { 6035 6036 KNOTE_UNLOCKED(&fs_knlist, event); 6037 } 6038 6039 static int filt_fsattach(struct knote *kn); 6040 static void filt_fsdetach(struct knote *kn); 6041 static int filt_fsevent(struct knote *kn, long hint); 6042 6043 struct filterops fs_filtops = { 6044 .f_isfd = 0, 6045 .f_attach = filt_fsattach, 6046 .f_detach = filt_fsdetach, 6047 .f_event = filt_fsevent 6048 }; 6049 6050 static int 6051 filt_fsattach(struct knote *kn) 6052 { 6053 6054 kn->kn_flags |= EV_CLEAR; 6055 knlist_add(&fs_knlist, kn, 0); 6056 return (0); 6057 } 6058 6059 static void 6060 filt_fsdetach(struct knote *kn) 6061 { 6062 6063 knlist_remove(&fs_knlist, kn, 0); 6064 } 6065 6066 static int 6067 filt_fsevent(struct knote *kn, long hint) 6068 { 6069 6070 kn->kn_fflags |= kn->kn_sfflags & hint; 6071 6072 return (kn->kn_fflags != 0); 6073 } 6074 6075 static int 6076 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6077 { 6078 struct vfsidctl vc; 6079 int error; 6080 struct mount *mp; 6081 6082 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6083 if (error) 6084 return (error); 6085 if (vc.vc_vers != VFS_CTL_VERS1) 6086 return (EINVAL); 6087 mp = vfs_getvfs(&vc.vc_fsid); 6088 if (mp == NULL) 6089 return (ENOENT); 6090 /* ensure that a specific sysctl goes to the right filesystem. */ 6091 if (strcmp(vc.vc_fstypename, "*") != 0 && 6092 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6093 vfs_rel(mp); 6094 return (EINVAL); 6095 } 6096 VCTLTOREQ(&vc, req); 6097 error = VFS_SYSCTL(mp, vc.vc_op, req); 6098 vfs_rel(mp); 6099 return (error); 6100 } 6101 6102 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6103 NULL, 0, sysctl_vfs_ctl, "", 6104 "Sysctl by fsid"); 6105 6106 /* 6107 * Function to initialize a va_filerev field sensibly. 6108 * XXX: Wouldn't a random number make a lot more sense ?? 
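 *
 * The returned value packs the system uptime, seconds in the upper 32 bits
 * and the high half of the binary fraction in the lower 32 bits, so it keeps
 * growing for as long as the system stays up.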
6109 */ 6110 u_quad_t 6111 init_va_filerev(void) 6112 { 6113 struct bintime bt; 6114 6115 getbinuptime(&bt); 6116 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6117 } 6118 6119 static int filt_vfsread(struct knote *kn, long hint); 6120 static int filt_vfswrite(struct knote *kn, long hint); 6121 static int filt_vfsvnode(struct knote *kn, long hint); 6122 static void filt_vfsdetach(struct knote *kn); 6123 static struct filterops vfsread_filtops = { 6124 .f_isfd = 1, 6125 .f_detach = filt_vfsdetach, 6126 .f_event = filt_vfsread 6127 }; 6128 static struct filterops vfswrite_filtops = { 6129 .f_isfd = 1, 6130 .f_detach = filt_vfsdetach, 6131 .f_event = filt_vfswrite 6132 }; 6133 static struct filterops vfsvnode_filtops = { 6134 .f_isfd = 1, 6135 .f_detach = filt_vfsdetach, 6136 .f_event = filt_vfsvnode 6137 }; 6138 6139 static void 6140 vfs_knllock(void *arg) 6141 { 6142 struct vnode *vp = arg; 6143 6144 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6145 } 6146 6147 static void 6148 vfs_knlunlock(void *arg) 6149 { 6150 struct vnode *vp = arg; 6151 6152 VOP_UNLOCK(vp); 6153 } 6154 6155 static void 6156 vfs_knl_assert_lock(void *arg, int what) 6157 { 6158 #ifdef DEBUG_VFS_LOCKS 6159 struct vnode *vp = arg; 6160 6161 if (what == LA_LOCKED) 6162 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6163 else 6164 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6165 #endif 6166 } 6167 6168 int 6169 vfs_kqfilter(struct vop_kqfilter_args *ap) 6170 { 6171 struct vnode *vp = ap->a_vp; 6172 struct knote *kn = ap->a_kn; 6173 struct knlist *knl; 6174 6175 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6176 kn->kn_filter != EVFILT_WRITE), 6177 ("READ/WRITE filter on a FIFO leaked through")); 6178 switch (kn->kn_filter) { 6179 case EVFILT_READ: 6180 kn->kn_fop = &vfsread_filtops; 6181 break; 6182 case EVFILT_WRITE: 6183 kn->kn_fop = &vfswrite_filtops; 6184 break; 6185 case EVFILT_VNODE: 6186 kn->kn_fop = &vfsvnode_filtops; 6187 break; 6188 default: 6189 return (EINVAL); 6190 } 6191 6192 kn->kn_hook = (caddr_t)vp; 6193 6194 v_addpollinfo(vp); 6195 if (vp->v_pollinfo == NULL) 6196 return (ENOMEM); 6197 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6198 vhold(vp); 6199 knlist_add(knl, kn, 0); 6200 6201 return (0); 6202 } 6203 6204 /* 6205 * Detach knote from vnode 6206 */ 6207 static void 6208 filt_vfsdetach(struct knote *kn) 6209 { 6210 struct vnode *vp = (struct vnode *)kn->kn_hook; 6211 6212 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6213 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6214 vdrop(vp); 6215 } 6216 6217 /*ARGSUSED*/ 6218 static int 6219 filt_vfsread(struct knote *kn, long hint) 6220 { 6221 struct vnode *vp = (struct vnode *)kn->kn_hook; 6222 struct vattr va; 6223 int res; 6224 6225 /* 6226 * filesystem is gone, so set the EOF flag and schedule 6227 * the knote for deletion. 

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct vattr va;
	int res;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		VI_LOCK(vp);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		VI_UNLOCK(vp);
		return (1);
	}

	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
		return (0);

	VI_LOCK(vp);
	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
	VI_UNLOCK(vp);
	return (res);
}

/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

/*
 * Returns whether the directory is empty or not.
 * If it is empty, the return value is 0; otherwise
 * the return value is an error value (which may
 * be ENOTEMPTY).
 */
int
vfs_emptydir(struct vnode *vp)
{
	struct uio uio;
	struct iovec iov;
	struct dirent *dirent, *dp, *endp;
	int error, eof;

	error = 0;
	eof = 0;

	ASSERT_VOP_LOCKED(vp, "vfs_emptydir");
	VNASSERT(vp->v_type == VDIR, vp, ("vp is not a directory"));

	dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK);
	iov.iov_base = dirent;
	iov.iov_len = sizeof(struct dirent);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(struct dirent);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	while (eof == 0 && error == 0) {
		error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof,
		    NULL, NULL);
		if (error != 0)
			break;
		endp = (void *)((uint8_t *)dirent +
		    sizeof(struct dirent) - uio.uio_resid);
		for (dp = dirent; dp < endp;
		     dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) {
			if (dp->d_type == DT_WHT)
				continue;
			if (dp->d_namlen == 0)
				continue;
			if (dp->d_type != DT_DIR &&
			    dp->d_type != DT_UNKNOWN) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen > 2) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] != '.') {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 2 &&
			    dp->d_name[1] != '.') {
				error = ENOTEMPTY;
				break;
			}
			uio.uio_resid = sizeof(struct dirent);
		}
	}
	free(dirent, M_TEMP);
	return (error);
}
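
/*
 * Hypothetical caller sketch (illustrative only): with the directory
 * vnode locked, a filesystem can reject an operation on a non-empty
 * directory like this:
 *
 *	error = vfs_emptydir(dvp);
 *	if (error != 0)
 *		return (error);		// typically ENOTEMPTY
 */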

int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL)
				free(ap->a_cookies, M_TEMP);
			ap->a_cookies = NULL;
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	*ap->a_ncookies += 1;
	return (0);
}

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine. If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits. Caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}
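
/*
 * Hypothetical caller sketch (illustrative only): a VOP_ACCESSX()
 * implementation that only understands the classic permission bits can
 * coarsen the request first, per the contract described above:
 *
 *	accmode_t accmode = ap->a_accmode;
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return (error);
 *	if (accmode == 0)
 *		return (0);
 *	// continue with the usual VEXEC/VREAD/VWRITE/VADMIN/VAPPEND checks
 */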

/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted. Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode returned "
					    "by VFS_CACHEDROOT and the one cached "
					    "(%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct mount_pcpu *mpcpu;
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp, mpcpu))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp, mpcpu);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp, mpcpu);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}

struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * ops > 0 guarantees there is nobody who can see this vnode
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */
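
/*
 * Typical iteration pattern (a sketch, assuming the standard macros from
 * sys/mount.h; the filtering shown is purely illustrative):
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		// vp is returned with its interlock held.
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		...
 *		VI_UNLOCK(vp);
 *	}
 *
 * A caller that bails out of the loop early must use
 * MNT_VNODE_FOREACH_ALL_ABORT() to dispose of the marker.
 */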

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
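
/*
 * Typical iteration pattern (a sketch, assuming the standard macros from
 * sys/mount.h):
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, cb, cbarg) {
 *		// Only vnodes for which cb(vp, cbarg) returned non-zero
 *		// are visited; vp arrives with its interlock held.
 *		...
 *		VI_UNLOCK(vp);
 *	}
 *
 * MNT_VNODE_FOREACH_LAZY_ABORT() must be used when leaving the loop early.
 */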

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired. On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note we may be racing against vdrop which transitioned the hold
	 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine;
	 * if we are the only user after we get the interlock we will just
	 * vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}

static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode. Note we may encounter a
		 * long string of vnodes we don't care about and hog the list
		 * as a result. Check for it and requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		VNPASS(vp->v_mount == mp, vp);
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}
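
/*
 * Callback sketch for the lazy iterator (illustrative only; the callback
 * name and the condition are hypothetical). The callback is invoked with
 * the mount list lock held and without the vnode interlock, and only
 * decides whether the iterator should go through the trouble of locking
 * the vnode:
 *
 *	static int
 *	example_want_vnode(struct vnode *vp, void *arg __unused)
 *	{
 *
 *		return (vp->v_object != NULL &&
 *		    vm_object_mightbedirty(vp->v_object));
 *	}
 */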

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as in modify as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}
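
/*
 * Usage sketch for the vn_seqc_write_*() interface above (illustrative
 * only; reader-side helpers such as vn_seqc_read_any() and
 * vn_seqc_consistent() live in sys/vnode.h).
 *
 * Writer side, with the vnode held:
 *
 *	vn_seqc_write_begin(vp);
 *	// ... modify state that lockless consumers may inspect ...
 *	vn_seqc_write_end(vp);
 *
 * Lockless (SMR) reader side:
 *
 *	seqc_t seqc;
 *
 *	seqc = vn_seqc_read_any(vp);
 *	if (seqc_in_modify(seqc))
 *		return (false);		// fall back to the locked path
 *	// ... read the fields of interest ...
 *	if (!vn_seqc_consistent(vp, seqc))
 *		return (false);		// a modification raced us; retry
 */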