/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp, bool isvnlru);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
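 *
 * The counter itself is manipulated with atomics; as noted in the comment
 * above vnode_list_mtx below, that lock nominally covers it as well.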
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;

static u_long recycles_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets");

static u_long recycles_free_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t direct_recycles_free_count;
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
    &direct_recycles_free_count,
    "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD,
    &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static bool vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru and
	 * getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.
		 * Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
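 *
 * As a quick sanity check of the figure below: 512GB is 512 * 1024 * 1024
 * KB, and dividing by the 64:1 ratio gives the 8M vnode cap encoded in
 * MAXVNODES_MAX.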
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ	148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ	92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, the vfs_busy-enforced lock is ordered before the
 * lock of any vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.
	 * If the thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we
 * have to check what we got, and take the slow path if so.
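 *
 * For illustration, the cache index is derived purely from the fsid:
 * hash = val[0] ^ val[1], folded as (hash >> 16 ^ hash) and masked to
 * FSID_CACHE_SIZE - 1 (see the code below).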
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
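 *
 * A sketch of the resulting layout (see the loop below): val[1] holds the
 * raw vfc_typenum, while val[0] is built with makedev(255, ...) from the
 * low byte of the type number and the two bytes of mntid_base placed in
 * separate byte positions.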
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it, making the pick broken
 * - since ZFS has its own aging policy, it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *                       entries if this argument is true
 * @param trigger        Only reclaim vnodes with fewer than this many resident
 *                       pages.
 * @param target         How many vnodes to reclaim.
 * @return               The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * for more free vnodes, not reduce their count.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		recycles_count++;
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_free_per_call = 10000;
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine (legacy)");
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
    &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to recycle requested amount of free vnodes.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_free_per_call)
		count = max_free_per_call;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled while it claims otherwise to the
		 * caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out if writes
		 * are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp, isvnlru);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
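 * This is because vnlru_free_impl drops the lock on every exit path; the
 * wrappers below only assert that it is no longer held on return.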
 */
static int
vnlru_free_locked_direct(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_locked_vnlru(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free_vnlru(int count)
{

	mtx_lock(&vnode_list_mtx);
	return (vnlru_free_locked_vnlru(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp, true);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11;	/* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru awakened due to vnode shortage");

#define	VNLRU_COUNT_SLOP 100

/*
 * The main freevnodes counter is only updated when a per-CPU counter
 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are conditionally
 * walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by a significant margin in practice.
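 *
 * Roughly speaking, the total can be off by up to about
 * VNLRU_FREEVNODES_SLOP per CPU between rollups, hence the conditional
 * per-CPU walk in vnlru_read_freevnodes().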
 */
#define VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes, rfreevnodes_old;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);
	rfreevnodes_old = atomic_load_long(&freevnodes_old);

	if (rfreevnodes > rfreevnodes_old)
		slop = rfreevnodes - rfreevnodes_old;
	else
		slop = rfreevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	CPU_FOREACH(cpu) {
		rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	atomic_store_long(&freevnodes_old, rfreevnodes);
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		vnlruproc_kicks++;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlru_read_freevnodes() > wantfreevnodes)
		return;

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

	if (vnlruproc_sig) {
		vnlruproc_sig = 0;
		wakeup(&vnlruproc_sig);
	}
	msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in presence of tons of free
 * vnodes, but without having to rewrite the machinery at this time.  Said
 * behavior boils down to continuously trying to reclaim all kinds of vnodes
 * (cycling through all levels of "force") when the count is transiently above
 * limit.  This happens a lot when all vnodes are used up and vn_alloc
 * speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per each tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds [it got further down with later
 * changes].
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling), the above only tries to get it out of the
 * way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
static long
vnlru_proc_light_pick(void)
{
	u_long rnumvnodes, rfreevnodes;

	if (vstir || vnlruproc_sig == 1)
		return (-1);

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = vnlru_read_freevnodes();

	/*
	 * vnode limit might have changed and now we may be at a significant
	 * excess.  Bail if we can't sort it out with free vnodes.
	 *
	 * Due to atomic updates the count can legitimately go above
	 * the limit for a short period, don't bother doing anything in
	 * that case.
	 */
	if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
		    rfreevnodes <= wantfreevnodes) {
			return (-1);
		}

		return (rnumvnodes - desiredvnodes);
	}

	/*
	 * Don't try to reach wantfreevnodes target if there are too few vnodes
	 * to begin with.
	 */
	if (rnumvnodes < wantfreevnodes) {
		return (0);
	}

	if (rfreevnodes < wantfreevnodes) {
		return (-1);
	}

	return (0);
}

static bool
vnlru_proc_light(void)
{
	long freecount;

	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);

	if (freecount != 0) {
		vnlru_free_vnlru(freecount);
	}

	mtx_lock(&vnode_list_mtx);
	vnlru_proc_sleep();
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (true);
}

static u_long uma_reclaim_calls;
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS,
    &uma_reclaim_calls, 0, "Number of calls to uma_reclaim");

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);

		if (force == 0 && vnlru_proc_light())
			continue;

		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding free vnodes.
1765 */ 1766 if (rnumvnodes > desiredvnodes + 10) { 1767 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes); 1768 mtx_lock(&vnode_list_mtx); 1769 rnumvnodes = atomic_load_long(&numvnodes); 1770 } 1771 /* 1772 * Sleep if the vnode cache is in a good state. This is 1773 * when it is not over-full and has space for about a 4% 1774 * or 9% expansion (by growing its size or inexcessively 1775 * reducing free vnode count). Otherwise, try to reclaim 1776 * space for a 10% expansion. 1777 */ 1778 if (vstir && force == 0) { 1779 force = 1; 1780 vstir = false; 1781 } 1782 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1783 vnlru_proc_sleep(); 1784 continue; 1785 } 1786 rfreevnodes = vnlru_read_freevnodes(); 1787 1788 onumvnodes = rnumvnodes; 1789 /* 1790 * Calculate parameters for recycling. These are the same 1791 * throughout the loop to give some semblance of fairness. 1792 * The trigger point is to avoid recycling vnodes with lots 1793 * of resident pages. We aren't trying to free memory; we 1794 * are trying to recycle or at least free vnodes. 1795 */ 1796 if (rnumvnodes <= desiredvnodes) 1797 usevnodes = rnumvnodes - rfreevnodes; 1798 else 1799 usevnodes = rnumvnodes; 1800 if (usevnodes <= 0) 1801 usevnodes = 1; 1802 /* 1803 * The trigger value is chosen to give a conservatively 1804 * large value to ensure that it alone doesn't prevent 1805 * making progress. The value can easily be so large that 1806 * it is effectively infinite in some congested and 1807 * misconfigured cases, and this is necessary. Normally 1808 * it is about 8 to 100 (pages), which is quite large. 1809 */ 1810 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1811 if (force < 2) 1812 trigger = vsmalltrigger; 1813 reclaim_nc_src = force >= 3; 1814 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1815 target = target / 10 + 1; 1816 done = vlrureclaim(reclaim_nc_src, trigger, target); 1817 mtx_unlock(&vnode_list_mtx); 1818 /* 1819 * Total number of vnodes can transiently go slightly above the 1820 * limit (see vn_alloc_hard), no need to call uma_reclaim if 1821 * this happens. 1822 */ 1823 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes && 1824 numvnodes <= desiredvnodes) { 1825 uma_reclaim_calls++; 1826 uma_reclaim(UMA_RECLAIM_DRAIN); 1827 } 1828 if (done == 0) { 1829 if (force == 0 || force == 1) { 1830 force = 2; 1831 continue; 1832 } 1833 if (force == 2) { 1834 force = 3; 1835 continue; 1836 } 1837 want_reread = true; 1838 force = 0; 1839 vnlru_nowhere++; 1840 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1841 } else { 1842 want_reread = true; 1843 kern_yield(PRI_USER); 1844 } 1845 } 1846 } 1847 1848 static struct kproc_desc vnlru_kp = { 1849 "vnlru", 1850 vnlru_proc, 1851 &vnlruproc 1852 }; 1853 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1854 &vnlru_kp); 1855 1856 /* 1857 * Routines having to do with the management of the vnode table. 1858 */ 1859 1860 /* 1861 * Try to recycle a freed vnode. 1862 */ 1863 static int 1864 vtryrecycle(struct vnode *vp, bool isvnlru) 1865 { 1866 struct mount *vnmp; 1867 1868 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1869 VNPASS(vp->v_holdcnt > 0, vp); 1870 /* 1871 * This vnode may be found and locked via some other list; if so we 1872 * can't recycle it yet.
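 *
 * The recycling protocol below, in outline: take the vnode lock with
 * LK_NOWAIT, enter vn_start_write() so a suspending filesystem is left
 * alone, then take the interlock and re-check v_usecount; only if the
 * vnode is still unused is it doomed via vgonel().  Every exit path
 * releases the caller's hold through vdrop_recycle()/vdropl_recycle().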
1873 */ 1874 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1875 CTR2(KTR_VFS, 1876 "%s: impossible to recycle, vp %p lock is already held", 1877 __func__, vp); 1878 vdrop_recycle(vp); 1879 return (EWOULDBLOCK); 1880 } 1881 /* 1882 * Don't recycle if its filesystem is being suspended. 1883 */ 1884 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1885 VOP_UNLOCK(vp); 1886 CTR2(KTR_VFS, 1887 "%s: impossible to recycle, cannot start the write for %p", 1888 __func__, vp); 1889 vdrop_recycle(vp); 1890 return (EBUSY); 1891 } 1892 /* 1893 * If we got this far, we need to acquire the interlock and see if 1894 * anyone picked up this vnode from another list. If not, we will 1895 * mark it with DOOMED via vgonel() so that anyone who does find it 1896 * will skip over it. 1897 */ 1898 VI_LOCK(vp); 1899 if (vp->v_usecount) { 1900 VOP_UNLOCK(vp); 1901 vdropl_recycle(vp); 1902 vn_finished_write(vnmp); 1903 CTR2(KTR_VFS, 1904 "%s: impossible to recycle, %p is already referenced", 1905 __func__, vp); 1906 return (EBUSY); 1907 } 1908 if (!VN_IS_DOOMED(vp)) { 1909 if (isvnlru) 1910 recycles_free_count++; 1911 else 1912 counter_u64_add(direct_recycles_free_count, 1); 1913 vgonel(vp); 1914 } 1915 VOP_UNLOCK(vp); 1916 vdropl_recycle(vp); 1917 vn_finished_write(vnmp); 1918 return (0); 1919 } 1920 1921 /* 1922 * Allocate a new vnode. 1923 * 1924 * The operation never returns an error. Returning an error was disabled 1925 * in r145385 (dated 2005) with the following comment: 1926 * 1927 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1928 * 1929 * Given the age of this commit (almost 15 years at the time of writing this 1930 * comment) restoring the ability to fail requires a significant audit of 1931 * all codepaths. 1932 * 1933 * The routine can try to free a vnode or stall for up to 1 second waiting for 1934 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1935 */ 1936 static u_long vn_alloc_cyclecount; 1937 static u_long vn_alloc_sleeps; 1938 1939 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1940 "Number of times vnode allocation blocked waiting on vnlru"); 1941 1942 static struct vnode * __noinline 1943 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped) 1944 { 1945 u_long rfreevnodes; 1946 1947 if (bumped) { 1948 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) { 1949 atomic_subtract_long(&numvnodes, 1); 1950 bumped = false; 1951 } 1952 } 1953 1954 mtx_lock(&vnode_list_mtx); 1955 1956 if (vn_alloc_cyclecount != 0) { 1957 rnumvnodes = atomic_load_long(&numvnodes); 1958 if (rnumvnodes + 1 < desiredvnodes) { 1959 vn_alloc_cyclecount = 0; 1960 mtx_unlock(&vnode_list_mtx); 1961 goto alloc; 1962 } 1963 1964 rfreevnodes = vnlru_read_freevnodes(); 1965 if (rfreevnodes < wantfreevnodes) { 1966 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1967 vn_alloc_cyclecount = 0; 1968 vstir = true; 1969 } 1970 } else { 1971 vn_alloc_cyclecount = 0; 1972 } 1973 } 1974 1975 /* 1976 * Grow the vnode cache if it will not be above its target max after 1977 * growing. Otherwise, if there is at least one free vnode, try to 1978 * reclaim 1 item from it before growing the cache (possibly above its 1979 * target max if the reclamation failed or is delayed). 1980 */ 1981 if (vnlru_free_locked_direct(1) > 0) 1982 goto alloc; 1983 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1984 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1985 /* 1986 * Wait for space for a new vnode. 
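 *
 * The wait protocol below: drop our speculative numvnodes bump (if still
 * held), kick vnlru and sleep on vnlruproc_sig for up to a second.  On
 * wakeup, reclaim a single free vnode if the cache is still over the limit
 * and more than one free vnode is around; either way fall through to the
 * M_WAITOK allocation at the "alloc" label.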
1987 */ 1988 if (bumped) { 1989 atomic_subtract_long(&numvnodes, 1); 1990 bumped = false; 1991 } 1992 mtx_lock(&vnode_list_mtx); 1993 vnlru_kick_locked(); 1994 vn_alloc_sleeps++; 1995 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1996 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1997 vnlru_read_freevnodes() > 1) 1998 vnlru_free_locked_direct(1); 1999 else 2000 mtx_unlock(&vnode_list_mtx); 2001 } 2002 alloc: 2003 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 2004 if (!bumped) 2005 atomic_add_long(&numvnodes, 1); 2006 vnlru_kick_cond(); 2007 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2008 } 2009 2010 static struct vnode * 2011 vn_alloc(struct mount *mp) 2012 { 2013 u_long rnumvnodes; 2014 2015 if (__predict_false(vn_alloc_cyclecount != 0)) 2016 return (vn_alloc_hard(mp, 0, false)); 2017 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 2018 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 2019 return (vn_alloc_hard(mp, rnumvnodes, true)); 2020 } 2021 2022 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2023 } 2024 2025 static void 2026 vn_free(struct vnode *vp) 2027 { 2028 2029 atomic_subtract_long(&numvnodes, 1); 2030 uma_zfree_smr(vnode_zone, vp); 2031 } 2032 2033 /* 2034 * Allocate a new vnode. 2035 */ 2036 int 2037 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 2038 struct vnode **vpp) 2039 { 2040 struct vnode *vp; 2041 struct thread *td; 2042 struct lock_object *lo; 2043 2044 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 2045 2046 KASSERT(vops->registered, 2047 ("%s: not registered vector op %p\n", __func__, vops)); 2048 cache_validate_vop_vector(mp, vops); 2049 2050 td = curthread; 2051 if (td->td_vp_reserved != NULL) { 2052 vp = td->td_vp_reserved; 2053 td->td_vp_reserved = NULL; 2054 } else { 2055 vp = vn_alloc(mp); 2056 } 2057 counter_u64_add(vnodes_created, 1); 2058 2059 vn_set_state(vp, VSTATE_UNINITIALIZED); 2060 2061 /* 2062 * Locks are given the generic name "vnode" when created. 2063 * Follow the historic practice of using the filesystem 2064 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc. 2065 * 2066 * Locks live in a witness group keyed on their name. Thus, 2067 * when a lock is renamed, it must also move from the witness 2068 * group of its old name to the witness group of its new name. 2069 * 2070 * The change only needs to be made when the vnode moves 2071 * from one filesystem type to another. We ensure that each 2072 * filesystem uses a single static name pointer for its tag so 2073 * that we can compare pointers rather than doing a strcmp(). 2074 */ 2075 lo = &vp->v_vnlock->lock_object; 2076 #ifdef WITNESS 2077 if (lo->lo_name != tag) { 2078 #endif 2079 lo->lo_name = tag; 2080 #ifdef WITNESS 2081 WITNESS_DESTROY(lo); 2082 WITNESS_INIT(lo, tag); 2083 } 2084 #endif 2085 /* 2086 * By default, don't allow shared locks unless filesystems opt-in. 2087 */ 2088 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 2089 /* 2090 * Finalize various vnode identity bits.
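 *
 * What a typical caller does next is outside this file; as a hedged sketch
 * ("myfs", myfs_vnodeops and "ip" are placeholders, not real symbols), a
 * VFS_VGET()-style consumer usually fills in the identity fields and puts
 * the vnode on the per-mount list before publishing it:
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vp->v_data = ip;
 *	vp->v_type = VREG;
 *	error = insmntque(vp, mp);	(reclaims the vnode on failure)
 *
 * See insmntque()/insmntque1() below for the failure semantics.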
2091 */ 2092 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2093 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2094 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2095 vp->v_type = VNON; 2096 vp->v_op = vops; 2097 vp->v_irflag = 0; 2098 v_init_counters(vp); 2099 vn_seqc_init(vp); 2100 vp->v_bufobj.bo_ops = &buf_ops_bio; 2101 #ifdef DIAGNOSTIC 2102 if (mp == NULL && vops != &dead_vnodeops) 2103 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2104 #endif 2105 #ifdef MAC 2106 mac_vnode_init(vp); 2107 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2108 mac_vnode_associate_singlelabel(mp, vp); 2109 #endif 2110 if (mp != NULL) { 2111 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2112 } 2113 2114 /* 2115 * For the filesystems which do not use vfs_hash_insert(), 2116 * still initialize v_hash to have vfs_hash_index() useful. 2117 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2118 * its own hashing. 2119 */ 2120 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2121 2122 *vpp = vp; 2123 return (0); 2124 } 2125 2126 void 2127 getnewvnode_reserve(void) 2128 { 2129 struct thread *td; 2130 2131 td = curthread; 2132 MPASS(td->td_vp_reserved == NULL); 2133 td->td_vp_reserved = vn_alloc(NULL); 2134 } 2135 2136 void 2137 getnewvnode_drop_reserve(void) 2138 { 2139 struct thread *td; 2140 2141 td = curthread; 2142 if (td->td_vp_reserved != NULL) { 2143 vn_free(td->td_vp_reserved); 2144 td->td_vp_reserved = NULL; 2145 } 2146 } 2147 2148 static void __noinline 2149 freevnode(struct vnode *vp) 2150 { 2151 struct bufobj *bo; 2152 2153 /* 2154 * The vnode has been marked for destruction, so free it. 2155 * 2156 * The vnode will be returned to the zone where it will 2157 * normally remain until it is needed for another vnode. We 2158 * need to cleanup (or verify that the cleanup has already 2159 * been done) any residual data left from its current use 2160 * so as not to contaminate the freshly allocated vnode. 2161 */ 2162 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2163 /* 2164 * Paired with vgone. 2165 */ 2166 vn_seqc_write_end_free(vp); 2167 2168 bo = &vp->v_bufobj; 2169 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2170 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2171 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2172 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2173 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2174 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2175 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2176 ("clean blk trie not empty")); 2177 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2178 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2179 ("dirty blk trie not empty")); 2180 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2181 ("Dangling rangelock waiters")); 2182 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2183 ("Leaked inactivation")); 2184 VI_UNLOCK(vp); 2185 cache_assert_no_entries(vp); 2186 2187 #ifdef MAC 2188 mac_vnode_destroy(vp); 2189 #endif 2190 if (vp->v_pollinfo != NULL) { 2191 /* 2192 * Use LK_NOWAIT to shut up witness about the lock. We may get 2193 * here while having another vnode locked when trying to 2194 * satisfy a lookup and needing to recycle. 
2195 */ 2196 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2197 destroy_vpollinfo(vp->v_pollinfo); 2198 VOP_UNLOCK(vp); 2199 vp->v_pollinfo = NULL; 2200 } 2201 vp->v_mountedhere = NULL; 2202 vp->v_unpcb = NULL; 2203 vp->v_rdev = NULL; 2204 vp->v_fifoinfo = NULL; 2205 vp->v_iflag = 0; 2206 vp->v_vflag = 0; 2207 bo->bo_flag = 0; 2208 vn_free(vp); 2209 } 2210 2211 /* 2212 * Delete from old mount point vnode list, if on one. 2213 */ 2214 static void 2215 delmntque(struct vnode *vp) 2216 { 2217 struct mount *mp; 2218 2219 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2220 2221 mp = vp->v_mount; 2222 MNT_ILOCK(mp); 2223 VI_LOCK(vp); 2224 vp->v_mount = NULL; 2225 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2226 ("bad mount point vnode list size")); 2227 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2228 mp->mnt_nvnodelistsize--; 2229 MNT_REL(mp); 2230 MNT_IUNLOCK(mp); 2231 /* 2232 * The caller expects the interlock to be still held. 2233 */ 2234 ASSERT_VI_LOCKED(vp, __func__); 2235 } 2236 2237 static int 2238 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2239 { 2240 2241 KASSERT(vp->v_mount == NULL, 2242 ("insmntque: vnode already on per mount vnode list")); 2243 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2244 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2245 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2246 } else { 2247 KASSERT(!dtr, 2248 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2249 __func__)); 2250 } 2251 2252 /* 2253 * We acquire the vnode interlock early to ensure that the 2254 * vnode cannot be recycled by another process releasing a 2255 * holdcnt on it before we get it on both the vnode list 2256 * and the active vnode list. The mount mutex protects only 2257 * manipulation of the vnode list and the vnode freelist 2258 * mutex protects only manipulation of the active vnode list. 2259 * Hence the need to hold the vnode interlock throughout. 2260 */ 2261 MNT_ILOCK(mp); 2262 VI_LOCK(vp); 2263 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2264 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2265 mp->mnt_nvnodelistsize == 0)) && 2266 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2267 VI_UNLOCK(vp); 2268 MNT_IUNLOCK(mp); 2269 if (dtr) { 2270 vp->v_data = NULL; 2271 vp->v_op = &dead_vnodeops; 2272 vgone(vp); 2273 vput(vp); 2274 } 2275 return (EBUSY); 2276 } 2277 vp->v_mount = mp; 2278 MNT_REF(mp); 2279 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2280 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2281 ("neg mount point vnode list size")); 2282 mp->mnt_nvnodelistsize++; 2283 VI_UNLOCK(vp); 2284 MNT_IUNLOCK(mp); 2285 return (0); 2286 } 2287 2288 /* 2289 * Insert into list of vnodes for the new mount point, if available. 2290 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2291 * leaves handling of the vnode to the caller. 2292 */ 2293 int 2294 insmntque(struct vnode *vp, struct mount *mp) 2295 { 2296 return (insmntque1_int(vp, mp, true)); 2297 } 2298 2299 int 2300 insmntque1(struct vnode *vp, struct mount *mp) 2301 { 2302 return (insmntque1_int(vp, mp, false)); 2303 } 2304 2305 /* 2306 * Flush out and invalidate all buffers associated with a bufobj 2307 * Called with the underlying object locked. 
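 *
 * "Underlying object locked" refers to the vnode lock rather than the bufobj
 * lock (see the ASSERT_VOP_LOCKED() in vinvalbuf() below); BO_LOCK() and
 * BO_UNLOCK() are acquired and released internally by this routine.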
2308 */ 2309 int 2310 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2311 { 2312 int error; 2313 2314 BO_LOCK(bo); 2315 if (flags & V_SAVE) { 2316 error = bufobj_wwait(bo, slpflag, slptimeo); 2317 if (error) { 2318 BO_UNLOCK(bo); 2319 return (error); 2320 } 2321 if (bo->bo_dirty.bv_cnt > 0) { 2322 BO_UNLOCK(bo); 2323 do { 2324 error = BO_SYNC(bo, MNT_WAIT); 2325 } while (error == ERELOOKUP); 2326 if (error != 0) 2327 return (error); 2328 BO_LOCK(bo); 2329 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2330 BO_UNLOCK(bo); 2331 return (EBUSY); 2332 } 2333 } 2334 } 2335 /* 2336 * If you alter this loop please notice that interlock is dropped and 2337 * reacquired in flushbuflist. Special care is needed to ensure that 2338 * no race conditions occur from this. 2339 */ 2340 do { 2341 error = flushbuflist(&bo->bo_clean, 2342 flags, bo, slpflag, slptimeo); 2343 if (error == 0 && !(flags & V_CLEANONLY)) 2344 error = flushbuflist(&bo->bo_dirty, 2345 flags, bo, slpflag, slptimeo); 2346 if (error != 0 && error != EAGAIN) { 2347 BO_UNLOCK(bo); 2348 return (error); 2349 } 2350 } while (error != 0); 2351 2352 /* 2353 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2354 * have write I/O in-progress but if there is a VM object then the 2355 * VM object can also have read-I/O in-progress. 2356 */ 2357 do { 2358 bufobj_wwait(bo, 0, 0); 2359 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2360 BO_UNLOCK(bo); 2361 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2362 BO_LOCK(bo); 2363 } 2364 } while (bo->bo_numoutput > 0); 2365 BO_UNLOCK(bo); 2366 2367 /* 2368 * Destroy the copy in the VM cache, too. 2369 */ 2370 if (bo->bo_object != NULL && 2371 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2372 VM_OBJECT_WLOCK(bo->bo_object); 2373 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2374 OBJPR_CLEANONLY : 0); 2375 VM_OBJECT_WUNLOCK(bo->bo_object); 2376 } 2377 2378 #ifdef INVARIANTS 2379 BO_LOCK(bo); 2380 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2381 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2382 bo->bo_clean.bv_cnt > 0)) 2383 panic("vinvalbuf: flush failed"); 2384 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2385 bo->bo_dirty.bv_cnt > 0) 2386 panic("vinvalbuf: flush dirty failed"); 2387 BO_UNLOCK(bo); 2388 #endif 2389 return (0); 2390 } 2391 2392 /* 2393 * Flush out and invalidate all buffers associated with a vnode. 2394 * Called with the underlying object locked. 2395 */ 2396 int 2397 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2398 { 2399 2400 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2401 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2402 if (vp->v_object != NULL && vp->v_object->handle != vp) 2403 return (0); 2404 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2405 } 2406 2407 /* 2408 * Flush out buffers on the specified list. 2409 * 2410 */ 2411 static int 2412 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2413 int slptimeo) 2414 { 2415 struct buf *bp, *nbp; 2416 int retval, error; 2417 daddr_t lblkno; 2418 b_xflags_t xflags; 2419 2420 ASSERT_BO_WLOCKED(bo); 2421 2422 retval = 0; 2423 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2424 /* 2425 * If we are flushing both V_NORMAL and V_ALT buffers then 2426 * do not skip any buffers. If we are flushing only V_NORMAL 2427 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2428 * flushing only V_ALT buffers then skip buffers not marked 2429 * as BX_ALTDATA. 2430 */ 2431 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2432 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2433 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2434 continue; 2435 } 2436 if (nbp != NULL) { 2437 lblkno = nbp->b_lblkno; 2438 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2439 } 2440 retval = EAGAIN; 2441 error = BUF_TIMELOCK(bp, 2442 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2443 "flushbuf", slpflag, slptimeo); 2444 if (error) { 2445 BO_LOCK(bo); 2446 return (error != ENOLCK ? error : EAGAIN); 2447 } 2448 KASSERT(bp->b_bufobj == bo, 2449 ("bp %p wrong b_bufobj %p should be %p", 2450 bp, bp->b_bufobj, bo)); 2451 /* 2452 * XXX Since there are no node locks for NFS, I 2453 * believe there is a slight chance that a delayed 2454 * write will occur while sleeping just above, so 2455 * check for it. 2456 */ 2457 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2458 (flags & V_SAVE)) { 2459 bremfree(bp); 2460 bp->b_flags |= B_ASYNC; 2461 bwrite(bp); 2462 BO_LOCK(bo); 2463 return (EAGAIN); /* XXX: why not loop ? */ 2464 } 2465 bremfree(bp); 2466 bp->b_flags |= (B_INVAL | B_RELBUF); 2467 bp->b_flags &= ~B_ASYNC; 2468 brelse(bp); 2469 BO_LOCK(bo); 2470 if (nbp == NULL) 2471 break; 2472 nbp = gbincore(bo, lblkno); 2473 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2474 != xflags) 2475 break; /* nbp invalid */ 2476 } 2477 return (retval); 2478 } 2479 2480 int 2481 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2482 { 2483 struct buf *bp; 2484 int error; 2485 daddr_t lblkno; 2486 2487 ASSERT_BO_LOCKED(bo); 2488 2489 for (lblkno = startn;;) { 2490 again: 2491 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2492 if (bp == NULL || bp->b_lblkno >= endn || 2493 bp->b_lblkno < startn) 2494 break; 2495 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2496 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2497 if (error != 0) { 2498 BO_RLOCK(bo); 2499 if (error == ENOLCK) 2500 goto again; 2501 return (error); 2502 } 2503 KASSERT(bp->b_bufobj == bo, 2504 ("bp %p wrong b_bufobj %p should be %p", 2505 bp, bp->b_bufobj, bo)); 2506 lblkno = bp->b_lblkno + 1; 2507 if ((bp->b_flags & B_MANAGED) == 0) 2508 bremfree(bp); 2509 bp->b_flags |= B_RELBUF; 2510 /* 2511 * In the VMIO case, use the B_NOREUSE flag to hint that the 2512 * pages backing each buffer in the range are unlikely to be 2513 * reused. Dirty buffers will have the hint applied once 2514 * they've been written. 2515 */ 2516 if ((bp->b_flags & B_VMIO) != 0) 2517 bp->b_flags |= B_NOREUSE; 2518 brelse(bp); 2519 BO_RLOCK(bo); 2520 } 2521 return (0); 2522 } 2523 2524 /* 2525 * Truncate a file's buffer and pages to a specified length. This 2526 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2527 * sync activity. 2528 */ 2529 int 2530 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2531 { 2532 struct buf *bp, *nbp; 2533 struct bufobj *bo; 2534 daddr_t startlbn; 2535 2536 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2537 vp, blksize, (uintmax_t)length); 2538 2539 /* 2540 * Round up to the *next* lbn. 
2541 */ 2542 startlbn = howmany(length, blksize); 2543 2544 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2545 2546 bo = &vp->v_bufobj; 2547 restart_unlocked: 2548 BO_LOCK(bo); 2549 2550 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2551 ; 2552 2553 if (length > 0) { 2554 restartsync: 2555 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2556 if (bp->b_lblkno > 0) 2557 continue; 2558 /* 2559 * Since we hold the vnode lock this should only 2560 * fail if we're racing with the buf daemon. 2561 */ 2562 if (BUF_LOCK(bp, 2563 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2564 BO_LOCKPTR(bo)) == ENOLCK) 2565 goto restart_unlocked; 2566 2567 VNASSERT((bp->b_flags & B_DELWRI), vp, 2568 ("buf(%p) on dirty queue without DELWRI", bp)); 2569 2570 bremfree(bp); 2571 bawrite(bp); 2572 BO_LOCK(bo); 2573 goto restartsync; 2574 } 2575 } 2576 2577 bufobj_wwait(bo, 0, 0); 2578 BO_UNLOCK(bo); 2579 vnode_pager_setsize(vp, length); 2580 2581 return (0); 2582 } 2583 2584 /* 2585 * Invalidate the cached pages of a file's buffer within the range of block 2586 * numbers [startlbn, endlbn). 2587 */ 2588 void 2589 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2590 int blksize) 2591 { 2592 struct bufobj *bo; 2593 off_t start, end; 2594 2595 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2596 2597 start = blksize * startlbn; 2598 end = blksize * endlbn; 2599 2600 bo = &vp->v_bufobj; 2601 BO_LOCK(bo); 2602 MPASS(blksize == bo->bo_bsize); 2603 2604 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2605 ; 2606 2607 BO_UNLOCK(bo); 2608 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2609 } 2610 2611 static int 2612 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2613 daddr_t startlbn, daddr_t endlbn) 2614 { 2615 struct buf *bp, *nbp; 2616 bool anyfreed; 2617 2618 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2619 ASSERT_BO_LOCKED(bo); 2620 2621 do { 2622 anyfreed = false; 2623 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2624 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2625 continue; 2626 if (BUF_LOCK(bp, 2627 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2628 BO_LOCKPTR(bo)) == ENOLCK) { 2629 BO_LOCK(bo); 2630 return (EAGAIN); 2631 } 2632 2633 bremfree(bp); 2634 bp->b_flags |= B_INVAL | B_RELBUF; 2635 bp->b_flags &= ~B_ASYNC; 2636 brelse(bp); 2637 anyfreed = true; 2638 2639 BO_LOCK(bo); 2640 if (nbp != NULL && 2641 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2642 nbp->b_vp != vp || 2643 (nbp->b_flags & B_DELWRI) != 0)) 2644 return (EAGAIN); 2645 } 2646 2647 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2648 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2649 continue; 2650 if (BUF_LOCK(bp, 2651 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2652 BO_LOCKPTR(bo)) == ENOLCK) { 2653 BO_LOCK(bo); 2654 return (EAGAIN); 2655 } 2656 bremfree(bp); 2657 bp->b_flags |= B_INVAL | B_RELBUF; 2658 bp->b_flags &= ~B_ASYNC; 2659 brelse(bp); 2660 anyfreed = true; 2661 2662 BO_LOCK(bo); 2663 if (nbp != NULL && 2664 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2665 (nbp->b_vp != vp) || 2666 (nbp->b_flags & B_DELWRI) == 0)) 2667 return (EAGAIN); 2668 } 2669 } while (anyfreed); 2670 return (0); 2671 } 2672 2673 static void 2674 buf_vlist_remove(struct buf *bp) 2675 { 2676 struct bufv *bv; 2677 b_xflags_t flags; 2678 2679 flags = bp->b_xflags; 2680 2681 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2682 ASSERT_BO_WLOCKED(bp->b_bufobj); 2683 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2684 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2685 ("%s: buffer %p has invalid queue state", __func__, bp)); 2686 2687 if ((flags & BX_VNDIRTY) != 0) 2688 bv = &bp->b_bufobj->bo_dirty; 2689 else 2690 bv = &bp->b_bufobj->bo_clean; 2691 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2692 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2693 bv->bv_cnt--; 2694 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2695 } 2696 2697 /* 2698 * Add the buffer to the sorted clean or dirty block list. 2699 * 2700 * NOTE: xflags is passed as a constant, optimizing this inline function! 2701 */ 2702 static void 2703 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2704 { 2705 struct bufv *bv; 2706 struct buf *n; 2707 int error; 2708 2709 ASSERT_BO_WLOCKED(bo); 2710 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2711 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2712 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2713 ("dead bo %p", bo)); 2714 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2715 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2716 bp->b_xflags |= xflags; 2717 if (xflags & BX_VNDIRTY) 2718 bv = &bo->bo_dirty; 2719 else 2720 bv = &bo->bo_clean; 2721 2722 /* 2723 * Keep the list ordered. Optimize empty list insertion. Assume 2724 * we tend to grow at the tail so lookup_le should usually be cheaper 2725 * than _ge. 2726 */ 2727 if (bv->bv_cnt == 0 || 2728 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2729 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2730 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2731 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2732 else 2733 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2734 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2735 if (error) 2736 panic("buf_vlist_add: Preallocated nodes insufficient."); 2737 bv->bv_cnt++; 2738 } 2739 2740 /* 2741 * Look up a buffer using the buffer tries. 2742 */ 2743 struct buf * 2744 gbincore(struct bufobj *bo, daddr_t lblkno) 2745 { 2746 struct buf *bp; 2747 2748 ASSERT_BO_LOCKED(bo); 2749 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2750 if (bp != NULL) 2751 return (bp); 2752 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2753 } 2754 2755 /* 2756 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2757 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2758 * stability of the result. Like other lockless lookups, the found buf may 2759 * already be invalid by the time this function returns. 2760 */ 2761 struct buf * 2762 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2763 { 2764 struct buf *bp; 2765 2766 ASSERT_BO_UNLOCKED(bo); 2767 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2768 if (bp != NULL) 2769 return (bp); 2770 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2771 } 2772 2773 /* 2774 * Associate a buffer with a vnode. 2775 */ 2776 void 2777 bgetvp(struct vnode *vp, struct buf *bp) 2778 { 2779 struct bufobj *bo; 2780 2781 bo = &vp->v_bufobj; 2782 ASSERT_BO_WLOCKED(bo); 2783 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2784 2785 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2786 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2787 ("bgetvp: bp already attached! %p", bp)); 2788 2789 vhold(vp); 2790 bp->b_vp = vp; 2791 bp->b_bufobj = bo; 2792 /* 2793 * Insert onto list for new vnode. 
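 *
 * A freshly associated buffer always starts on the clean list; it migrates
 * to the dirty list via reassignbuf() once B_DELWRI is set, and the
 * association is undone by brelvp() below.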
2794 */ 2795 buf_vlist_add(bp, bo, BX_VNCLEAN); 2796 } 2797 2798 /* 2799 * Disassociate a buffer from a vnode. 2800 */ 2801 void 2802 brelvp(struct buf *bp) 2803 { 2804 struct bufobj *bo; 2805 struct vnode *vp; 2806 2807 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2808 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2809 2810 /* 2811 * Delete from old vnode list, if on one. 2812 */ 2813 vp = bp->b_vp; /* XXX */ 2814 bo = bp->b_bufobj; 2815 BO_LOCK(bo); 2816 buf_vlist_remove(bp); 2817 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2818 bo->bo_flag &= ~BO_ONWORKLST; 2819 mtx_lock(&sync_mtx); 2820 LIST_REMOVE(bo, bo_synclist); 2821 syncer_worklist_len--; 2822 mtx_unlock(&sync_mtx); 2823 } 2824 bp->b_vp = NULL; 2825 bp->b_bufobj = NULL; 2826 BO_UNLOCK(bo); 2827 vdrop(vp); 2828 } 2829 2830 /* 2831 * Add an item to the syncer work queue. 2832 */ 2833 static void 2834 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2835 { 2836 int slot; 2837 2838 ASSERT_BO_WLOCKED(bo); 2839 2840 mtx_lock(&sync_mtx); 2841 if (bo->bo_flag & BO_ONWORKLST) 2842 LIST_REMOVE(bo, bo_synclist); 2843 else { 2844 bo->bo_flag |= BO_ONWORKLST; 2845 syncer_worklist_len++; 2846 } 2847 2848 if (delay > syncer_maxdelay - 2) 2849 delay = syncer_maxdelay - 2; 2850 slot = (syncer_delayno + delay) & syncer_mask; 2851 2852 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2853 mtx_unlock(&sync_mtx); 2854 } 2855 2856 static int 2857 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2858 { 2859 int error, len; 2860 2861 mtx_lock(&sync_mtx); 2862 len = syncer_worklist_len - sync_vnode_count; 2863 mtx_unlock(&sync_mtx); 2864 error = SYSCTL_OUT(req, &len, sizeof(len)); 2865 return (error); 2866 } 2867 2868 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2869 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2870 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2871 2872 static struct proc *updateproc; 2873 static void sched_sync(void); 2874 static struct kproc_desc up_kp = { 2875 "syncer", 2876 sched_sync, 2877 &updateproc 2878 }; 2879 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2880 2881 static int 2882 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2883 { 2884 struct vnode *vp; 2885 struct mount *mp; 2886 2887 *bo = LIST_FIRST(slp); 2888 if (*bo == NULL) 2889 return (0); 2890 vp = bo2vnode(*bo); 2891 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2892 return (1); 2893 /* 2894 * We use vhold in case the vnode does not 2895 * successfully sync. vhold prevents the vnode from 2896 * going away when we unlock the sync_mtx so that 2897 * we can acquire the vnode interlock. 2898 */ 2899 vholdl(vp); 2900 mtx_unlock(&sync_mtx); 2901 VI_UNLOCK(vp); 2902 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2903 vdrop(vp); 2904 mtx_lock(&sync_mtx); 2905 return (*bo == LIST_FIRST(slp)); 2906 } 2907 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2908 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2909 ("suspended mp syncing vp %p", vp)); 2910 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2911 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2912 VOP_UNLOCK(vp); 2913 vn_finished_write(mp); 2914 BO_LOCK(*bo); 2915 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2916 /* 2917 * Put us back on the worklist. The worklist 2918 * routine will remove us from our current 2919 * position and then add us back in at a later 2920 * position. 
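 *
 * The re-add goes through the same delay wheel as the original
 * insertion: vn_syncer_add_to_worklist() hashes the bufobj into
 * slot (syncer_delayno + delay) & syncer_mask, and since
 * sched_sync() advances syncer_delayno one slot per pass (roughly
 * once per second while running), a delay of syncdelay means the
 * vnode comes up for syncing again about syncdelay seconds later.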
2921 */ 2922 vn_syncer_add_to_worklist(*bo, syncdelay); 2923 } 2924 BO_UNLOCK(*bo); 2925 vdrop(vp); 2926 mtx_lock(&sync_mtx); 2927 return (0); 2928 } 2929 2930 static int first_printf = 1; 2931 2932 /* 2933 * System filesystem synchronizer daemon. 2934 */ 2935 static void 2936 sched_sync(void) 2937 { 2938 struct synclist *next, *slp; 2939 struct bufobj *bo; 2940 long starttime; 2941 struct thread *td = curthread; 2942 int last_work_seen; 2943 int net_worklist_len; 2944 int syncer_final_iter; 2945 int error; 2946 2947 last_work_seen = 0; 2948 syncer_final_iter = 0; 2949 syncer_state = SYNCER_RUNNING; 2950 starttime = time_uptime; 2951 td->td_pflags |= TDP_NORUNNINGBUF; 2952 2953 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2954 SHUTDOWN_PRI_LAST); 2955 2956 mtx_lock(&sync_mtx); 2957 for (;;) { 2958 if (syncer_state == SYNCER_FINAL_DELAY && 2959 syncer_final_iter == 0) { 2960 mtx_unlock(&sync_mtx); 2961 kproc_suspend_check(td->td_proc); 2962 mtx_lock(&sync_mtx); 2963 } 2964 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2965 if (syncer_state != SYNCER_RUNNING && 2966 starttime != time_uptime) { 2967 if (first_printf) { 2968 printf("\nSyncing disks, vnodes remaining... "); 2969 first_printf = 0; 2970 } 2971 printf("%d ", net_worklist_len); 2972 } 2973 starttime = time_uptime; 2974 2975 /* 2976 * Push files whose dirty time has expired. Be careful 2977 * of interrupt race on slp queue. 2978 * 2979 * Skip over empty worklist slots when shutting down. 2980 */ 2981 do { 2982 slp = &syncer_workitem_pending[syncer_delayno]; 2983 syncer_delayno += 1; 2984 if (syncer_delayno == syncer_maxdelay) 2985 syncer_delayno = 0; 2986 next = &syncer_workitem_pending[syncer_delayno]; 2987 /* 2988 * If the worklist has wrapped since the 2989 * it was emptied of all but syncer vnodes, 2990 * switch to the FINAL_DELAY state and run 2991 * for one more second. 2992 */ 2993 if (syncer_state == SYNCER_SHUTTING_DOWN && 2994 net_worklist_len == 0 && 2995 last_work_seen == syncer_delayno) { 2996 syncer_state = SYNCER_FINAL_DELAY; 2997 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2998 } 2999 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 3000 syncer_worklist_len > 0); 3001 3002 /* 3003 * Keep track of the last time there was anything 3004 * on the worklist other than syncer vnodes. 3005 * Return to the SHUTTING_DOWN state if any 3006 * new work appears. 3007 */ 3008 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 3009 last_work_seen = syncer_delayno; 3010 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 3011 syncer_state = SYNCER_SHUTTING_DOWN; 3012 while (!LIST_EMPTY(slp)) { 3013 error = sync_vnode(slp, &bo, td); 3014 if (error == 1) { 3015 LIST_REMOVE(bo, bo_synclist); 3016 LIST_INSERT_HEAD(next, bo, bo_synclist); 3017 continue; 3018 } 3019 3020 if (first_printf == 0) { 3021 /* 3022 * Drop the sync mutex, because some watchdog 3023 * drivers need to sleep while patting 3024 */ 3025 mtx_unlock(&sync_mtx); 3026 wdog_kern_pat(WD_LASTVAL); 3027 mtx_lock(&sync_mtx); 3028 } 3029 } 3030 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 3031 syncer_final_iter--; 3032 /* 3033 * The variable rushjob allows the kernel to speed up the 3034 * processing of the filesystem syncer process. A rushjob 3035 * value of N tells the filesystem syncer to process the next 3036 * N seconds worth of work on its queue ASAP. 
Currently rushjob 3037 * is used by the soft update code to speed up the filesystem 3038 * syncer process when the incore state is getting so far 3039 * ahead of the disk that the kernel memory pool is being 3040 * threatened with exhaustion. 3041 */ 3042 if (rushjob > 0) { 3043 rushjob -= 1; 3044 continue; 3045 } 3046 /* 3047 * Just sleep for a short period of time between 3048 * iterations when shutting down to allow some I/O 3049 * to happen. 3050 * 3051 * If it has taken us less than a second to process the 3052 * current work, then wait. Otherwise start right over 3053 * again. We can still lose time if any single round 3054 * takes more than two seconds, but it does not really 3055 * matter as we are just trying to generally pace the 3056 * filesystem activity. 3057 */ 3058 if (syncer_state != SYNCER_RUNNING || 3059 time_uptime == starttime) { 3060 thread_lock(td); 3061 sched_prio(td, PPAUSE); 3062 thread_unlock(td); 3063 } 3064 if (syncer_state != SYNCER_RUNNING) 3065 cv_timedwait(&sync_wakeup, &sync_mtx, 3066 hz / SYNCER_SHUTDOWN_SPEEDUP); 3067 else if (time_uptime == starttime) 3068 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3069 } 3070 } 3071 3072 /* 3073 * Request the syncer daemon to speed up its work. 3074 * We never push it to speed up more than half of its 3075 * normal turn time, otherwise it could take over the cpu. 3076 */ 3077 int 3078 speedup_syncer(void) 3079 { 3080 int ret = 0; 3081 3082 mtx_lock(&sync_mtx); 3083 if (rushjob < syncdelay / 2) { 3084 rushjob += 1; 3085 stat_rush_requests += 1; 3086 ret = 1; 3087 } 3088 mtx_unlock(&sync_mtx); 3089 cv_broadcast(&sync_wakeup); 3090 return (ret); 3091 } 3092 3093 /* 3094 * Tell the syncer to speed up its work and run though its work 3095 * list several times, then tell it to shut down. 3096 */ 3097 static void 3098 syncer_shutdown(void *arg, int howto) 3099 { 3100 3101 if (howto & RB_NOSYNC) 3102 return; 3103 mtx_lock(&sync_mtx); 3104 syncer_state = SYNCER_SHUTTING_DOWN; 3105 rushjob = 0; 3106 mtx_unlock(&sync_mtx); 3107 cv_broadcast(&sync_wakeup); 3108 kproc_shutdown(arg, howto); 3109 } 3110 3111 void 3112 syncer_suspend(void) 3113 { 3114 3115 syncer_shutdown(updateproc, 0); 3116 } 3117 3118 void 3119 syncer_resume(void) 3120 { 3121 3122 mtx_lock(&sync_mtx); 3123 first_printf = 1; 3124 syncer_state = SYNCER_RUNNING; 3125 mtx_unlock(&sync_mtx); 3126 cv_broadcast(&sync_wakeup); 3127 kproc_resume(updateproc); 3128 } 3129 3130 /* 3131 * Move the buffer between the clean and dirty lists of its vnode. 3132 */ 3133 void 3134 reassignbuf(struct buf *bp) 3135 { 3136 struct vnode *vp; 3137 struct bufobj *bo; 3138 int delay; 3139 #ifdef INVARIANTS 3140 struct bufv *bv; 3141 #endif 3142 3143 vp = bp->b_vp; 3144 bo = bp->b_bufobj; 3145 3146 KASSERT((bp->b_flags & B_PAGING) == 0, 3147 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3148 3149 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3150 bp, bp->b_vp, bp->b_flags); 3151 3152 BO_LOCK(bo); 3153 buf_vlist_remove(bp); 3154 3155 /* 3156 * If dirty, put on list of dirty buffers; otherwise insert onto list 3157 * of clean buffers. 
3158 */ 3159 if (bp->b_flags & B_DELWRI) { 3160 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3161 switch (vp->v_type) { 3162 case VDIR: 3163 delay = dirdelay; 3164 break; 3165 case VCHR: 3166 delay = metadelay; 3167 break; 3168 default: 3169 delay = filedelay; 3170 } 3171 vn_syncer_add_to_worklist(bo, delay); 3172 } 3173 buf_vlist_add(bp, bo, BX_VNDIRTY); 3174 } else { 3175 buf_vlist_add(bp, bo, BX_VNCLEAN); 3176 3177 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3178 mtx_lock(&sync_mtx); 3179 LIST_REMOVE(bo, bo_synclist); 3180 syncer_worklist_len--; 3181 mtx_unlock(&sync_mtx); 3182 bo->bo_flag &= ~BO_ONWORKLST; 3183 } 3184 } 3185 #ifdef INVARIANTS 3186 bv = &bo->bo_clean; 3187 bp = TAILQ_FIRST(&bv->bv_hd); 3188 KASSERT(bp == NULL || bp->b_bufobj == bo, 3189 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3190 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3191 KASSERT(bp == NULL || bp->b_bufobj == bo, 3192 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3193 bv = &bo->bo_dirty; 3194 bp = TAILQ_FIRST(&bv->bv_hd); 3195 KASSERT(bp == NULL || bp->b_bufobj == bo, 3196 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3197 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3198 KASSERT(bp == NULL || bp->b_bufobj == bo, 3199 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3200 #endif 3201 BO_UNLOCK(bo); 3202 } 3203 3204 static void 3205 v_init_counters(struct vnode *vp) 3206 { 3207 3208 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3209 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3210 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3211 3212 refcount_init(&vp->v_holdcnt, 1); 3213 refcount_init(&vp->v_usecount, 1); 3214 } 3215 3216 /* 3217 * Get a usecount on a vnode. 3218 * 3219 * vget and vget_finish may fail to lock the vnode if they lose a race against 3220 * it being doomed. LK_RETRY can be passed in flags to lock it anyway. 3221 * 3222 * Consumers which don't guarantee liveness of the vnode can use SMR to 3223 * try to get a reference. Note this operation can fail since the vnode 3224 * may be awaiting getting freed by the time they get to it. 
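 *
 * A lockless consumer therefore follows a two-step pattern along these lines
 * (an illustrative sketch, not a fixed API contract):
 *
 *	vfs_smr_enter();
 *	vp = <lockless lookup>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<restart the lookup>;
 *	error = vget_finish(vp, flags, vs);
 *
 * vget_prep_smr() must be called inside the SMR section (it asserts as much)
 * and vget_finish() consumes whatever reference was obtained, releasing it
 * via vget_abort() if locking fails.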
3225 */ 3226 enum vgetstate 3227 vget_prep_smr(struct vnode *vp) 3228 { 3229 enum vgetstate vs; 3230 3231 VFS_SMR_ASSERT_ENTERED(); 3232 3233 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3234 vs = VGET_USECOUNT; 3235 } else { 3236 if (vhold_smr(vp)) 3237 vs = VGET_HOLDCNT; 3238 else 3239 vs = VGET_NONE; 3240 } 3241 return (vs); 3242 } 3243 3244 enum vgetstate 3245 vget_prep(struct vnode *vp) 3246 { 3247 enum vgetstate vs; 3248 3249 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3250 vs = VGET_USECOUNT; 3251 } else { 3252 vhold(vp); 3253 vs = VGET_HOLDCNT; 3254 } 3255 return (vs); 3256 } 3257 3258 void 3259 vget_abort(struct vnode *vp, enum vgetstate vs) 3260 { 3261 3262 switch (vs) { 3263 case VGET_USECOUNT: 3264 vrele(vp); 3265 break; 3266 case VGET_HOLDCNT: 3267 vdrop(vp); 3268 break; 3269 default: 3270 __assert_unreachable(); 3271 } 3272 } 3273 3274 int 3275 vget(struct vnode *vp, int flags) 3276 { 3277 enum vgetstate vs; 3278 3279 vs = vget_prep(vp); 3280 return (vget_finish(vp, flags, vs)); 3281 } 3282 3283 int 3284 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3285 { 3286 int error; 3287 3288 if ((flags & LK_INTERLOCK) != 0) 3289 ASSERT_VI_LOCKED(vp, __func__); 3290 else 3291 ASSERT_VI_UNLOCKED(vp, __func__); 3292 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3293 VNPASS(vp->v_holdcnt > 0, vp); 3294 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3295 3296 error = vn_lock(vp, flags); 3297 if (__predict_false(error != 0)) { 3298 vget_abort(vp, vs); 3299 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3300 vp); 3301 return (error); 3302 } 3303 3304 vget_finish_ref(vp, vs); 3305 return (0); 3306 } 3307 3308 void 3309 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3310 { 3311 int old; 3312 3313 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3314 VNPASS(vp->v_holdcnt > 0, vp); 3315 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3316 3317 if (vs == VGET_USECOUNT) 3318 return; 3319 3320 /* 3321 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3322 * the vnode around. Otherwise someone else lended their hold count and 3323 * we have to drop ours. 3324 */ 3325 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3326 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3327 if (old != 0) { 3328 #ifdef INVARIANTS 3329 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3330 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3331 #else 3332 refcount_release(&vp->v_holdcnt); 3333 #endif 3334 } 3335 } 3336 3337 void 3338 vref(struct vnode *vp) 3339 { 3340 enum vgetstate vs; 3341 3342 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3343 vs = vget_prep(vp); 3344 vget_finish_ref(vp, vs); 3345 } 3346 3347 void 3348 vrefact(struct vnode *vp) 3349 { 3350 3351 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3352 #ifdef INVARIANTS 3353 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3354 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3355 #else 3356 refcount_acquire(&vp->v_usecount); 3357 #endif 3358 } 3359 3360 void 3361 vlazy(struct vnode *vp) 3362 { 3363 struct mount *mp; 3364 3365 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3366 3367 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3368 return; 3369 /* 3370 * We may get here for inactive routines after the vnode got doomed. 
3371 */ 3372 if (VN_IS_DOOMED(vp)) 3373 return; 3374 mp = vp->v_mount; 3375 mtx_lock(&mp->mnt_listmtx); 3376 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3377 vp->v_mflag |= VMP_LAZYLIST; 3378 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3379 mp->mnt_lazyvnodelistsize++; 3380 } 3381 mtx_unlock(&mp->mnt_listmtx); 3382 } 3383 3384 static void 3385 vunlazy(struct vnode *vp) 3386 { 3387 struct mount *mp; 3388 3389 ASSERT_VI_LOCKED(vp, __func__); 3390 VNPASS(!VN_IS_DOOMED(vp), vp); 3391 3392 mp = vp->v_mount; 3393 mtx_lock(&mp->mnt_listmtx); 3394 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3395 /* 3396 * Don't remove the vnode from the lazy list if another thread 3397 * has increased the hold count. It may have re-enqueued the 3398 * vnode to the lazy list and is now responsible for its 3399 * removal. 3400 */ 3401 if (vp->v_holdcnt == 0) { 3402 vp->v_mflag &= ~VMP_LAZYLIST; 3403 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3404 mp->mnt_lazyvnodelistsize--; 3405 } 3406 mtx_unlock(&mp->mnt_listmtx); 3407 } 3408 3409 /* 3410 * This routine is only meant to be called from vgonel prior to dooming 3411 * the vnode. 3412 */ 3413 static void 3414 vunlazy_gone(struct vnode *vp) 3415 { 3416 struct mount *mp; 3417 3418 ASSERT_VOP_ELOCKED(vp, __func__); 3419 ASSERT_VI_LOCKED(vp, __func__); 3420 VNPASS(!VN_IS_DOOMED(vp), vp); 3421 3422 if (vp->v_mflag & VMP_LAZYLIST) { 3423 mp = vp->v_mount; 3424 mtx_lock(&mp->mnt_listmtx); 3425 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3426 vp->v_mflag &= ~VMP_LAZYLIST; 3427 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3428 mp->mnt_lazyvnodelistsize--; 3429 mtx_unlock(&mp->mnt_listmtx); 3430 } 3431 } 3432 3433 static void 3434 vdefer_inactive(struct vnode *vp) 3435 { 3436 3437 ASSERT_VI_LOCKED(vp, __func__); 3438 VNPASS(vp->v_holdcnt > 0, vp); 3439 if (VN_IS_DOOMED(vp)) { 3440 vdropl(vp); 3441 return; 3442 } 3443 if (vp->v_iflag & VI_DEFINACT) { 3444 VNPASS(vp->v_holdcnt > 1, vp); 3445 vdropl(vp); 3446 return; 3447 } 3448 if (vp->v_usecount > 0) { 3449 vp->v_iflag &= ~VI_OWEINACT; 3450 vdropl(vp); 3451 return; 3452 } 3453 vlazy(vp); 3454 vp->v_iflag |= VI_DEFINACT; 3455 VI_UNLOCK(vp); 3456 atomic_add_long(&deferred_inact, 1); 3457 } 3458 3459 static void 3460 vdefer_inactive_unlocked(struct vnode *vp) 3461 { 3462 3463 VI_LOCK(vp); 3464 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3465 vdropl(vp); 3466 return; 3467 } 3468 vdefer_inactive(vp); 3469 } 3470 3471 enum vput_op { VRELE, VPUT, VUNREF }; 3472 3473 /* 3474 * Handle ->v_usecount transitioning to 0. 3475 * 3476 * By releasing the last usecount we take ownership of the hold count which 3477 * provides liveness of the vnode, meaning we have to vdrop. 3478 * 3479 * For all vnodes we may need to perform inactive processing. It requires an 3480 * exclusive lock on the vnode, while it is legal to call here with only a 3481 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3482 * inactive processing gets deferred to the syncer. 3483 * 3484 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3485 * on the lock being held all the way until VOP_INACTIVE. This in particular 3486 * happens with UFS which adds half-constructed vnodes to the hash, where they 3487 * can be found by other code. 
3488 */ 3489 static void 3490 vput_final(struct vnode *vp, enum vput_op func) 3491 { 3492 int error; 3493 bool want_unlock; 3494 3495 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3496 VNPASS(vp->v_holdcnt > 0, vp); 3497 3498 VI_LOCK(vp); 3499 3500 /* 3501 * By the time we got here someone else might have transitioned 3502 * the count back to > 0. 3503 */ 3504 if (vp->v_usecount > 0) 3505 goto out; 3506 3507 /* 3508 * If the vnode is doomed vgone already performed inactive processing 3509 * (if needed). 3510 */ 3511 if (VN_IS_DOOMED(vp)) 3512 goto out; 3513 3514 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3515 goto out; 3516 3517 if (vp->v_iflag & VI_DOINGINACT) 3518 goto out; 3519 3520 /* 3521 * Locking operations here will drop the interlock and possibly the 3522 * vnode lock, opening a window where the vnode can get doomed all the 3523 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3524 * perform inactive. 3525 */ 3526 vp->v_iflag |= VI_OWEINACT; 3527 want_unlock = false; 3528 error = 0; 3529 switch (func) { 3530 case VRELE: 3531 switch (VOP_ISLOCKED(vp)) { 3532 case LK_EXCLUSIVE: 3533 break; 3534 case LK_EXCLOTHER: 3535 case 0: 3536 want_unlock = true; 3537 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3538 VI_LOCK(vp); 3539 break; 3540 default: 3541 /* 3542 * The lock has at least one sharer, but we have no way 3543 * to conclude whether this is us. Play it safe and 3544 * defer processing. 3545 */ 3546 error = EAGAIN; 3547 break; 3548 } 3549 break; 3550 case VPUT: 3551 want_unlock = true; 3552 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3553 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3554 LK_NOWAIT); 3555 VI_LOCK(vp); 3556 } 3557 break; 3558 case VUNREF: 3559 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3560 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3561 VI_LOCK(vp); 3562 } 3563 break; 3564 } 3565 if (error == 0) { 3566 if (func == VUNREF) { 3567 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3568 ("recursive vunref")); 3569 vp->v_vflag |= VV_UNREF; 3570 } 3571 for (;;) { 3572 error = vinactive(vp); 3573 if (want_unlock) 3574 VOP_UNLOCK(vp); 3575 if (error != ERELOOKUP || !want_unlock) 3576 break; 3577 VOP_LOCK(vp, LK_EXCLUSIVE); 3578 } 3579 if (func == VUNREF) 3580 vp->v_vflag &= ~VV_UNREF; 3581 vdropl(vp); 3582 } else { 3583 vdefer_inactive(vp); 3584 } 3585 return; 3586 out: 3587 if (func == VPUT) 3588 VOP_UNLOCK(vp); 3589 vdropl(vp); 3590 } 3591 3592 /* 3593 * Decrement ->v_usecount for a vnode. 3594 * 3595 * Releasing the last use count requires additional processing, see vput_final 3596 * above for details. 3597 * 3598 * Comment above each variant denotes lock state on entry and exit. 
3599 */ 3600 3601 /* 3602 * in: any 3603 * out: same as passed in 3604 */ 3605 void 3606 vrele(struct vnode *vp) 3607 { 3608 3609 ASSERT_VI_UNLOCKED(vp, __func__); 3610 if (!refcount_release(&vp->v_usecount)) 3611 return; 3612 vput_final(vp, VRELE); 3613 } 3614 3615 /* 3616 * in: locked 3617 * out: unlocked 3618 */ 3619 void 3620 vput(struct vnode *vp) 3621 { 3622 3623 ASSERT_VOP_LOCKED(vp, __func__); 3624 ASSERT_VI_UNLOCKED(vp, __func__); 3625 if (!refcount_release(&vp->v_usecount)) { 3626 VOP_UNLOCK(vp); 3627 return; 3628 } 3629 vput_final(vp, VPUT); 3630 } 3631 3632 /* 3633 * in: locked 3634 * out: locked 3635 */ 3636 void 3637 vunref(struct vnode *vp) 3638 { 3639 3640 ASSERT_VOP_LOCKED(vp, __func__); 3641 ASSERT_VI_UNLOCKED(vp, __func__); 3642 if (!refcount_release(&vp->v_usecount)) 3643 return; 3644 vput_final(vp, VUNREF); 3645 } 3646 3647 void 3648 vhold(struct vnode *vp) 3649 { 3650 int old; 3651 3652 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3653 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3654 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3655 ("%s: wrong hold count %d", __func__, old)); 3656 if (old == 0) 3657 vfs_freevnodes_dec(); 3658 } 3659 3660 void 3661 vholdnz(struct vnode *vp) 3662 { 3663 3664 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3665 #ifdef INVARIANTS 3666 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3667 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3668 ("%s: wrong hold count %d", __func__, old)); 3669 #else 3670 atomic_add_int(&vp->v_holdcnt, 1); 3671 #endif 3672 } 3673 3674 /* 3675 * Grab a hold count unless the vnode is freed. 3676 * 3677 * Only use this routine if vfs smr is the only protection you have against 3678 * freeing the vnode. 3679 * 3680 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3681 * is not set. After the flag is set the vnode becomes immutable to anyone but 3682 * the thread which managed to set the flag. 3683 * 3684 * It may be tempting to replace the loop with: 3685 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3686 * if (count & VHOLD_NO_SMR) { 3687 * backpedal and error out; 3688 * } 3689 * 3690 * However, while this is more performant, it hinders debugging by eliminating 3691 * the previously mentioned invariant. 3692 */ 3693 bool 3694 vhold_smr(struct vnode *vp) 3695 { 3696 int count; 3697 3698 VFS_SMR_ASSERT_ENTERED(); 3699 3700 count = atomic_load_int(&vp->v_holdcnt); 3701 for (;;) { 3702 if (count & VHOLD_NO_SMR) { 3703 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3704 ("non-zero hold count with flags %d\n", count)); 3705 return (false); 3706 } 3707 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3708 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3709 if (count == 0) 3710 vfs_freevnodes_dec(); 3711 return (true); 3712 } 3713 } 3714 } 3715 3716 /* 3717 * Hold a free vnode for recycling. 3718 * 3719 * Note: vnode_init references this comment. 3720 * 3721 * Attempts to recycle only need the global vnode list lock and have no use for 3722 * SMR. 3723 * 3724 * However, vnodes get inserted into the global list before they get fully 3725 * initialized and stay there until UMA decides to free the memory. This in 3726 * particular means the target can be found before it becomes usable and after 3727 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3728 * VHOLD_NO_SMR. 3729 * 3730 * Note: the vnode may gain more references after we transition the count 0->1. 
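 *
 * Callers cope with that by re-checking once the vnode is locked;
 * vtryrecycle() above, for instance, re-examines v_usecount under the
 * interlock and backs off if the vnode got picked up in the meantime.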
3731 */ 3732 static bool 3733 vhold_recycle_free(struct vnode *vp) 3734 { 3735 int count; 3736 3737 mtx_assert(&vnode_list_mtx, MA_OWNED); 3738 3739 count = atomic_load_int(&vp->v_holdcnt); 3740 for (;;) { 3741 if (count & VHOLD_NO_SMR) { 3742 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3743 ("non-zero hold count with flags %d\n", count)); 3744 return (false); 3745 } 3746 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3747 if (count > 0) { 3748 return (false); 3749 } 3750 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3751 vfs_freevnodes_dec(); 3752 return (true); 3753 } 3754 } 3755 } 3756 3757 static void __noinline 3758 vdbatch_process(struct vdbatch *vd) 3759 { 3760 struct vnode *vp; 3761 int i; 3762 3763 mtx_assert(&vd->lock, MA_OWNED); 3764 MPASS(curthread->td_pinned > 0); 3765 MPASS(vd->index == VDBATCH_SIZE); 3766 3767 /* 3768 * Attempt to requeue the passed batch, but give up easily. 3769 * 3770 * Despite batching the mechanism is prone to transient *significant* 3771 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3772 * if multiple CPUs get here (one real-world example is highly parallel 3773 * do-nothing make, which will stat *tons* of vnodes). Since it is 3774 * quasi-LRU (read: not that great even if fully honoured) just dodge 3775 * the problem. Parties which don't like it are welcome to implement 3776 * something better. 3777 */ 3778 critical_enter(); 3779 if (mtx_trylock(&vnode_list_mtx)) { 3780 for (i = 0; i < VDBATCH_SIZE; i++) { 3781 vp = vd->tab[i]; 3782 vd->tab[i] = NULL; 3783 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3784 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3785 MPASS(vp->v_dbatchcpu != NOCPU); 3786 vp->v_dbatchcpu = NOCPU; 3787 } 3788 mtx_unlock(&vnode_list_mtx); 3789 } else { 3790 counter_u64_add(vnode_skipped_requeues, 1); 3791 3792 for (i = 0; i < VDBATCH_SIZE; i++) { 3793 vp = vd->tab[i]; 3794 vd->tab[i] = NULL; 3795 MPASS(vp->v_dbatchcpu != NOCPU); 3796 vp->v_dbatchcpu = NOCPU; 3797 } 3798 } 3799 vd->index = 0; 3800 critical_exit(); 3801 } 3802 3803 static void 3804 vdbatch_enqueue(struct vnode *vp) 3805 { 3806 struct vdbatch *vd; 3807 3808 ASSERT_VI_LOCKED(vp, __func__); 3809 VNPASS(!VN_IS_DOOMED(vp), vp); 3810 3811 if (vp->v_dbatchcpu != NOCPU) { 3812 VI_UNLOCK(vp); 3813 return; 3814 } 3815 3816 sched_pin(); 3817 vd = DPCPU_PTR(vd); 3818 mtx_lock(&vd->lock); 3819 MPASS(vd->index < VDBATCH_SIZE); 3820 MPASS(vd->tab[vd->index] == NULL); 3821 /* 3822 * A hack: we depend on being pinned so that we know what to put in 3823 * ->v_dbatchcpu. 3824 */ 3825 vp->v_dbatchcpu = curcpu; 3826 vd->tab[vd->index] = vp; 3827 vd->index++; 3828 VI_UNLOCK(vp); 3829 if (vd->index == VDBATCH_SIZE) 3830 vdbatch_process(vd); 3831 mtx_unlock(&vd->lock); 3832 sched_unpin(); 3833 } 3834 3835 /* 3836 * This routine must only be called for vnodes which are about to be 3837 * deallocated. Supporting dequeue for arbitrary vnodes would require 3838 * validating that the locked batch matches.
3839 */ 3840 static void 3841 vdbatch_dequeue(struct vnode *vp) 3842 { 3843 struct vdbatch *vd; 3844 int i; 3845 short cpu; 3846 3847 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3848 3849 cpu = vp->v_dbatchcpu; 3850 if (cpu == NOCPU) 3851 return; 3852 3853 vd = DPCPU_ID_PTR(cpu, vd); 3854 mtx_lock(&vd->lock); 3855 for (i = 0; i < vd->index; i++) { 3856 if (vd->tab[i] != vp) 3857 continue; 3858 vp->v_dbatchcpu = NOCPU; 3859 vd->index--; 3860 vd->tab[i] = vd->tab[vd->index]; 3861 vd->tab[vd->index] = NULL; 3862 break; 3863 } 3864 mtx_unlock(&vd->lock); 3865 /* 3866 * Either we dequeued the vnode above or the target CPU beat us to it. 3867 */ 3868 MPASS(vp->v_dbatchcpu == NOCPU); 3869 } 3870 3871 /* 3872 * Drop the hold count of the vnode. 3873 * 3874 * It will only get freed if this is the last hold *and* it has been vgone'd. 3875 * 3876 * Because the vnode vm object keeps a hold reference on the vnode if 3877 * there is at least one resident non-cached page, the vnode cannot 3878 * leave the active list without the page cleanup done. 3879 */ 3880 static void __noinline 3881 vdropl_final(struct vnode *vp) 3882 { 3883 3884 ASSERT_VI_LOCKED(vp, __func__); 3885 VNPASS(VN_IS_DOOMED(vp), vp); 3886 /* 3887 * Set the VHOLD_NO_SMR flag. 3888 * 3889 * We may be racing against vhold_smr. If they win we can just pretend 3890 * we never got this far, they will vdrop later. 3891 */ 3892 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3893 vfs_freevnodes_inc(); 3894 VI_UNLOCK(vp); 3895 /* 3896 * We lost the aforementioned race. Any subsequent access is 3897 * invalid as they might have managed to vdropl on their own. 3898 */ 3899 return; 3900 } 3901 /* 3902 * Don't bump freevnodes as this one is going away. 3903 */ 3904 freevnode(vp); 3905 } 3906 3907 void 3908 vdrop(struct vnode *vp) 3909 { 3910 3911 ASSERT_VI_UNLOCKED(vp, __func__); 3912 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3913 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3914 return; 3915 VI_LOCK(vp); 3916 vdropl(vp); 3917 } 3918 3919 static void __always_inline 3920 vdropl_impl(struct vnode *vp, bool enqueue) 3921 { 3922 3923 ASSERT_VI_LOCKED(vp, __func__); 3924 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3925 if (!refcount_release(&vp->v_holdcnt)) { 3926 VI_UNLOCK(vp); 3927 return; 3928 } 3929 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3930 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3931 if (VN_IS_DOOMED(vp)) { 3932 vdropl_final(vp); 3933 return; 3934 } 3935 3936 vfs_freevnodes_inc(); 3937 if (vp->v_mflag & VMP_LAZYLIST) { 3938 vunlazy(vp); 3939 } 3940 3941 if (!enqueue) { 3942 VI_UNLOCK(vp); 3943 return; 3944 } 3945 3946 /* 3947 * Also unlocks the interlock. We can't assert on it as we 3948 * released our hold and by now the vnode might have been 3949 * freed. 3950 */ 3951 vdbatch_enqueue(vp); 3952 } 3953 3954 void 3955 vdropl(struct vnode *vp) 3956 { 3957 3958 vdropl_impl(vp, true); 3959 } 3960 3961 /* 3962 * vdrop a vnode when recycling 3963 * 3964 * This is a special case routine only to be used when recycling, differs from 3965 * regular vdrop by not requeieing the vnode on LRU. 3966 * 3967 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3968 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3969 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3970 * loop which can last for as long as writes are frozen. 
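 *
 * Leaving the vnode where it is on the list means the next vnlru pass
 * moves on to other candidates instead of chewing on the same set; this
 * is why vtryrecycle and the vnlru free path are expected to pair their
 * holds with vdrop_recycle/vdropl_recycle rather than plain vdrop.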
3971 */ 3972 static void 3973 vdropl_recycle(struct vnode *vp) 3974 { 3975 3976 vdropl_impl(vp, false); 3977 } 3978 3979 static void 3980 vdrop_recycle(struct vnode *vp) 3981 { 3982 3983 VI_LOCK(vp); 3984 vdropl_recycle(vp); 3985 } 3986 3987 /* 3988 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3989 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3990 */ 3991 static int 3992 vinactivef(struct vnode *vp) 3993 { 3994 struct vm_object *obj; 3995 int error; 3996 3997 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3998 ASSERT_VI_LOCKED(vp, "vinactive"); 3999 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 4000 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4001 vp->v_iflag |= VI_DOINGINACT; 4002 vp->v_iflag &= ~VI_OWEINACT; 4003 VI_UNLOCK(vp); 4004 /* 4005 * Before moving off the active list, we must be sure that any 4006 * modified pages are converted into the vnode's dirty 4007 * buffers, since these will no longer be checked once the 4008 * vnode is on the inactive list. 4009 * 4010 * The write-out of the dirty pages is asynchronous. At the 4011 * point that VOP_INACTIVE() is called, there could still be 4012 * pending I/O and dirty pages in the object. 4013 */ 4014 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4015 vm_object_mightbedirty(obj)) { 4016 VM_OBJECT_WLOCK(obj); 4017 vm_object_page_clean(obj, 0, 0, 0); 4018 VM_OBJECT_WUNLOCK(obj); 4019 } 4020 error = VOP_INACTIVE(vp); 4021 VI_LOCK(vp); 4022 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 4023 vp->v_iflag &= ~VI_DOINGINACT; 4024 return (error); 4025 } 4026 4027 int 4028 vinactive(struct vnode *vp) 4029 { 4030 4031 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4032 ASSERT_VI_LOCKED(vp, "vinactive"); 4033 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4034 4035 if ((vp->v_iflag & VI_OWEINACT) == 0) 4036 return (0); 4037 if (vp->v_iflag & VI_DOINGINACT) 4038 return (0); 4039 if (vp->v_usecount > 0) { 4040 vp->v_iflag &= ~VI_OWEINACT; 4041 return (0); 4042 } 4043 return (vinactivef(vp)); 4044 } 4045 4046 /* 4047 * Remove any vnodes in the vnode table belonging to mount point mp. 4048 * 4049 * If FORCECLOSE is not specified, there should not be any active ones, 4050 * return error if any are found (nb: this is a user error, not a 4051 * system error). If FORCECLOSE is specified, detach any active vnodes 4052 * that are found. 4053 * 4054 * If WRITECLOSE is set, only flush out regular file vnodes open for 4055 * writing. 4056 * 4057 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4058 * 4059 * `rootrefs' specifies the base reference count for the root vnode 4060 * of this filesystem. The root vnode is considered busy if its 4061 * v_usecount exceeds this value. On a successful return, vflush(, td) 4062 * will call vrele() on the root vnode exactly rootrefs times. 4063 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4064 * be zero. 4065 */ 4066 #ifdef DIAGNOSTIC 4067 static int busyprt = 0; /* print out busy vnodes */ 4068 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4069 #endif 4070 4071 int 4072 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4073 { 4074 struct vnode *vp, *mvp, *rootvp = NULL; 4075 struct vattr vattr; 4076 int busy = 0, error; 4077 4078 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4079 rootrefs, flags); 4080 if (rootrefs > 0) { 4081 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4082 ("vflush: bad args")); 4083 /* 4084 * Get the filesystem root vnode. 
We can vput() it 4085 * immediately, since with rootrefs > 0, it won't go away. 4086 */ 4087 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 4088 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 4089 __func__, error); 4090 return (error); 4091 } 4092 vput(rootvp); 4093 } 4094 loop: 4095 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 4096 vholdl(vp); 4097 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 4098 if (error) { 4099 vdrop(vp); 4100 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4101 goto loop; 4102 } 4103 /* 4104 * Skip over a vnodes marked VV_SYSTEM. 4105 */ 4106 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 4107 VOP_UNLOCK(vp); 4108 vdrop(vp); 4109 continue; 4110 } 4111 /* 4112 * If WRITECLOSE is set, flush out unlinked but still open 4113 * files (even if open only for reading) and regular file 4114 * vnodes open for writing. 4115 */ 4116 if (flags & WRITECLOSE) { 4117 if (vp->v_object != NULL) { 4118 VM_OBJECT_WLOCK(vp->v_object); 4119 vm_object_page_clean(vp->v_object, 0, 0, 0); 4120 VM_OBJECT_WUNLOCK(vp->v_object); 4121 } 4122 do { 4123 error = VOP_FSYNC(vp, MNT_WAIT, td); 4124 } while (error == ERELOOKUP); 4125 if (error != 0) { 4126 VOP_UNLOCK(vp); 4127 vdrop(vp); 4128 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 4129 return (error); 4130 } 4131 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 4132 VI_LOCK(vp); 4133 4134 if ((vp->v_type == VNON || 4135 (error == 0 && vattr.va_nlink > 0)) && 4136 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 4137 VOP_UNLOCK(vp); 4138 vdropl(vp); 4139 continue; 4140 } 4141 } else 4142 VI_LOCK(vp); 4143 /* 4144 * With v_usecount == 0, all we need to do is clear out the 4145 * vnode data structures and we are done. 4146 * 4147 * If FORCECLOSE is set, forcibly close the vnode. 4148 */ 4149 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 4150 vgonel(vp); 4151 } else { 4152 busy++; 4153 #ifdef DIAGNOSTIC 4154 if (busyprt) 4155 vn_printf(vp, "vflush: busy vnode "); 4156 #endif 4157 } 4158 VOP_UNLOCK(vp); 4159 vdropl(vp); 4160 } 4161 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 4162 /* 4163 * If just the root vnode is busy, and if its refcount 4164 * is equal to `rootrefs', then go ahead and kill it. 4165 */ 4166 VI_LOCK(rootvp); 4167 KASSERT(busy > 0, ("vflush: not busy")); 4168 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4169 ("vflush: usecount %d < rootrefs %d", 4170 rootvp->v_usecount, rootrefs)); 4171 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4172 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4173 vgone(rootvp); 4174 VOP_UNLOCK(rootvp); 4175 busy = 0; 4176 } else 4177 VI_UNLOCK(rootvp); 4178 } 4179 if (busy) { 4180 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4181 busy); 4182 return (EBUSY); 4183 } 4184 for (; rootrefs > 0; rootrefs--) 4185 vrele(rootvp); 4186 return (0); 4187 } 4188 4189 /* 4190 * Recycle an unused vnode. 4191 */ 4192 int 4193 vrecycle(struct vnode *vp) 4194 { 4195 int recycled; 4196 4197 VI_LOCK(vp); 4198 recycled = vrecyclel(vp); 4199 VI_UNLOCK(vp); 4200 return (recycled); 4201 } 4202 4203 /* 4204 * vrecycle, with the vp interlock held. 4205 */ 4206 int 4207 vrecyclel(struct vnode *vp) 4208 { 4209 int recycled; 4210 4211 ASSERT_VOP_ELOCKED(vp, __func__); 4212 ASSERT_VI_LOCKED(vp, __func__); 4213 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4214 recycled = 0; 4215 if (vp->v_usecount == 0) { 4216 recycled = 1; 4217 vgonel(vp); 4218 } 4219 return (recycled); 4220 } 4221 4222 /* 4223 * Eliminate all activity associated with a vnode 4224 * in preparation for reuse. 
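 *
 * Callers are expected to hold the vnode lock exclusively plus a hold
 * reference; a minimal sketch (the forced-unmount case in vflush() above
 * follows the same shape):
 *
 *	vhold(vp);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vgone(vp);
 *	VOP_UNLOCK(vp);
 *	vdrop(vp);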
4225 */ 4226 void 4227 vgone(struct vnode *vp) 4228 { 4229 VI_LOCK(vp); 4230 vgonel(vp); 4231 VI_UNLOCK(vp); 4232 } 4233 4234 /* 4235 * Notify upper mounts about reclaimed or unlinked vnode. 4236 */ 4237 void 4238 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4239 { 4240 struct mount *mp; 4241 struct mount_upper_node *ump; 4242 4243 mp = atomic_load_ptr(&vp->v_mount); 4244 if (mp == NULL) 4245 return; 4246 if (TAILQ_EMPTY(&mp->mnt_notify)) 4247 return; 4248 4249 MNT_ILOCK(mp); 4250 mp->mnt_upper_pending++; 4251 KASSERT(mp->mnt_upper_pending > 0, 4252 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4253 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4254 MNT_IUNLOCK(mp); 4255 switch (event) { 4256 case VFS_NOTIFY_UPPER_RECLAIM: 4257 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4258 break; 4259 case VFS_NOTIFY_UPPER_UNLINK: 4260 VFS_UNLINK_LOWERVP(ump->mp, vp); 4261 break; 4262 } 4263 MNT_ILOCK(mp); 4264 } 4265 mp->mnt_upper_pending--; 4266 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4267 mp->mnt_upper_pending == 0) { 4268 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4269 wakeup(&mp->mnt_uppers); 4270 } 4271 MNT_IUNLOCK(mp); 4272 } 4273 4274 /* 4275 * vgone, with the vp interlock held. 4276 */ 4277 static void 4278 vgonel(struct vnode *vp) 4279 { 4280 struct thread *td; 4281 struct mount *mp; 4282 vm_object_t object; 4283 bool active, doinginact, oweinact; 4284 4285 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4286 ASSERT_VI_LOCKED(vp, "vgonel"); 4287 VNASSERT(vp->v_holdcnt, vp, 4288 ("vgonel: vp %p has no reference.", vp)); 4289 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4290 td = curthread; 4291 4292 /* 4293 * Don't vgonel if we're already doomed. 4294 */ 4295 if (VN_IS_DOOMED(vp)) { 4296 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4297 vn_get_state(vp) == VSTATE_DEAD, vp); 4298 return; 4299 } 4300 /* 4301 * Paired with freevnode. 4302 */ 4303 vn_seqc_write_begin_locked(vp); 4304 vunlazy_gone(vp); 4305 vn_irflag_set_locked(vp, VIRF_DOOMED); 4306 vn_set_state(vp, VSTATE_DESTROYING); 4307 4308 /* 4309 * Check to see if the vnode is in use. If so, we have to 4310 * call VOP_CLOSE() and VOP_INACTIVE(). 4311 * 4312 * It could be that VOP_INACTIVE() requested reclamation, in 4313 * which case we should avoid recursion, so check 4314 * VI_DOINGINACT. This is not precise but good enough. 4315 */ 4316 active = vp->v_usecount > 0; 4317 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4318 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4319 4320 /* 4321 * If we need to do inactive VI_OWEINACT will be set. 4322 */ 4323 if (vp->v_iflag & VI_DEFINACT) { 4324 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4325 vp->v_iflag &= ~VI_DEFINACT; 4326 vdropl(vp); 4327 } else { 4328 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4329 VI_UNLOCK(vp); 4330 } 4331 cache_purge_vgone(vp); 4332 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4333 4334 /* 4335 * If purging an active vnode, it must be closed and 4336 * deactivated before being reclaimed. 4337 */ 4338 if (active) 4339 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4340 if (!doinginact) { 4341 do { 4342 if (oweinact || active) { 4343 VI_LOCK(vp); 4344 vinactivef(vp); 4345 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4346 VI_UNLOCK(vp); 4347 } 4348 } while (oweinact); 4349 } 4350 if (vp->v_type == VSOCK) 4351 vfs_unp_reclaim(vp); 4352 4353 /* 4354 * Clean out any buffers associated with the vnode. 4355 * If the flush fails, just toss the buffers. 
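 * The V_SAVE pass below tries to write the dirty buffers out first; only
 * if that fails (e.g. the underlying device is gone) is everything
 * discarded by the unconditional vinvalbuf() loop.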
4356 */ 4357 mp = NULL; 4358 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4359 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4360 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4361 while (vinvalbuf(vp, 0, 0, 0) != 0) 4362 ; 4363 } 4364 4365 BO_LOCK(&vp->v_bufobj); 4366 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4367 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4368 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4369 vp->v_bufobj.bo_clean.bv_cnt == 0, 4370 ("vp %p bufobj not invalidated", vp)); 4371 4372 /* 4373 * For VMIO bufobj, BO_DEAD is set later, or in 4374 * vm_object_terminate() after the object's page queue is 4375 * flushed. 4376 */ 4377 object = vp->v_bufobj.bo_object; 4378 if (object == NULL) 4379 vp->v_bufobj.bo_flag |= BO_DEAD; 4380 BO_UNLOCK(&vp->v_bufobj); 4381 4382 /* 4383 * Handle the VM part. Tmpfs handles v_object on its own (the 4384 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4385 * should not touch the object borrowed from the lower vnode 4386 * (the handle check). 4387 */ 4388 if (object != NULL && object->type == OBJT_VNODE && 4389 object->handle == vp) 4390 vnode_destroy_vobject(vp); 4391 4392 /* 4393 * Reclaim the vnode. 4394 */ 4395 if (VOP_RECLAIM(vp)) 4396 panic("vgone: cannot reclaim"); 4397 if (mp != NULL) 4398 vn_finished_secondary_write(mp); 4399 VNASSERT(vp->v_object == NULL, vp, 4400 ("vop_reclaim left v_object vp=%p", vp)); 4401 /* 4402 * Clear the advisory locks and wake up waiting threads. 4403 */ 4404 if (vp->v_lockf != NULL) { 4405 (void)VOP_ADVLOCKPURGE(vp); 4406 vp->v_lockf = NULL; 4407 } 4408 /* 4409 * Delete from old mount point vnode list. 4410 */ 4411 if (vp->v_mount == NULL) { 4412 VI_LOCK(vp); 4413 } else { 4414 delmntque(vp); 4415 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4416 } 4417 /* 4418 * Done with purge, reset to the standard lock and invalidate 4419 * the vnode. 4420 */ 4421 vp->v_vnlock = &vp->v_lock; 4422 vp->v_op = &dead_vnodeops; 4423 vp->v_type = VBAD; 4424 vn_set_state(vp, VSTATE_DEAD); 4425 } 4426 4427 /* 4428 * Print out a description of a vnode. 4429 */ 4430 static const char *const vtypename[] = { 4431 [VNON] = "VNON", 4432 [VREG] = "VREG", 4433 [VDIR] = "VDIR", 4434 [VBLK] = "VBLK", 4435 [VCHR] = "VCHR", 4436 [VLNK] = "VLNK", 4437 [VSOCK] = "VSOCK", 4438 [VFIFO] = "VFIFO", 4439 [VBAD] = "VBAD", 4440 [VMARKER] = "VMARKER", 4441 }; 4442 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4443 "vnode type name not added to vtypename"); 4444 4445 static const char *const vstatename[] = { 4446 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4447 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4448 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4449 [VSTATE_DEAD] = "VSTATE_DEAD", 4450 }; 4451 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4452 "vnode state name not added to vstatename"); 4453 4454 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4455 "new hold count flag not added to vn_printf"); 4456 4457 void 4458 vn_printf(struct vnode *vp, const char *fmt, ...) 
4459 { 4460 va_list ap; 4461 char buf[256], buf2[16]; 4462 u_long flags; 4463 u_int holdcnt; 4464 short irflag; 4465 4466 va_start(ap, fmt); 4467 vprintf(fmt, ap); 4468 va_end(ap); 4469 printf("%p: ", (void *)vp); 4470 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4471 vstatename[vp->v_state], vp->v_op); 4472 holdcnt = atomic_load_int(&vp->v_holdcnt); 4473 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4474 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4475 vp->v_seqc_users); 4476 switch (vp->v_type) { 4477 case VDIR: 4478 printf(" mountedhere %p\n", vp->v_mountedhere); 4479 break; 4480 case VCHR: 4481 printf(" rdev %p\n", vp->v_rdev); 4482 break; 4483 case VSOCK: 4484 printf(" socket %p\n", vp->v_unpcb); 4485 break; 4486 case VFIFO: 4487 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4488 break; 4489 default: 4490 printf("\n"); 4491 break; 4492 } 4493 buf[0] = '\0'; 4494 buf[1] = '\0'; 4495 if (holdcnt & VHOLD_NO_SMR) 4496 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4497 printf(" hold count flags (%s)\n", buf + 1); 4498 4499 buf[0] = '\0'; 4500 buf[1] = '\0'; 4501 irflag = vn_irflag_read(vp); 4502 if (irflag & VIRF_DOOMED) 4503 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4504 if (irflag & VIRF_PGREAD) 4505 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4506 if (irflag & VIRF_MOUNTPOINT) 4507 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4508 if (irflag & VIRF_TEXT_REF) 4509 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4510 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4511 if (flags != 0) { 4512 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4513 strlcat(buf, buf2, sizeof(buf)); 4514 } 4515 if (vp->v_vflag & VV_ROOT) 4516 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4517 if (vp->v_vflag & VV_ISTTY) 4518 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4519 if (vp->v_vflag & VV_NOSYNC) 4520 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4521 if (vp->v_vflag & VV_ETERNALDEV) 4522 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4523 if (vp->v_vflag & VV_CACHEDLABEL) 4524 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4525 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4526 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4527 if (vp->v_vflag & VV_COPYONWRITE) 4528 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4529 if (vp->v_vflag & VV_SYSTEM) 4530 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4531 if (vp->v_vflag & VV_PROCDEP) 4532 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4533 if (vp->v_vflag & VV_DELETED) 4534 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4535 if (vp->v_vflag & VV_MD) 4536 strlcat(buf, "|VV_MD", sizeof(buf)); 4537 if (vp->v_vflag & VV_FORCEINSMQ) 4538 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4539 if (vp->v_vflag & VV_READLINK) 4540 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4541 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4542 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4543 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4544 if (flags != 0) { 4545 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4546 strlcat(buf, buf2, sizeof(buf)); 4547 } 4548 if (vp->v_iflag & VI_MOUNT) 4549 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4550 if (vp->v_iflag & VI_DOINGINACT) 4551 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4552 if (vp->v_iflag & VI_OWEINACT) 4553 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4554 if (vp->v_iflag & VI_DEFINACT) 4555 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4556 if (vp->v_iflag & VI_FOPENING) 4557 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4558 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4559 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4560 if (flags != 0) { 4561 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4562 strlcat(buf, buf2, sizeof(buf)); 4563 } 4564 if (vp->v_mflag & VMP_LAZYLIST) 4565 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4566 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4567 if (flags != 0) { 4568 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4569 strlcat(buf, buf2, sizeof(buf)); 4570 } 4571 printf(" flags (%s)", buf + 1); 4572 if (mtx_owned(VI_MTX(vp))) 4573 printf(" VI_LOCKed"); 4574 printf("\n"); 4575 if (vp->v_object != NULL) 4576 printf(" v_object %p ref %d pages %d " 4577 "cleanbuf %d dirtybuf %d\n", 4578 vp->v_object, vp->v_object->ref_count, 4579 vp->v_object->resident_page_count, 4580 vp->v_bufobj.bo_clean.bv_cnt, 4581 vp->v_bufobj.bo_dirty.bv_cnt); 4582 printf(" "); 4583 lockmgr_printinfo(vp->v_vnlock); 4584 if (vp->v_data != NULL) 4585 VOP_PRINT(vp); 4586 } 4587 4588 #ifdef DDB 4589 /* 4590 * List all of the locked vnodes in the system. 4591 * Called when debugging the kernel. 4592 */ 4593 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4594 { 4595 struct mount *mp; 4596 struct vnode *vp; 4597 4598 /* 4599 * Note: because this is DDB, we can't obey the locking semantics 4600 * for these structures, which means we could catch an inconsistent 4601 * state and dereference a nasty pointer. Not much to be done 4602 * about that. 4603 */ 4604 db_printf("Locked vnodes\n"); 4605 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4606 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4607 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4608 vn_printf(vp, "vnode "); 4609 } 4610 } 4611 } 4612 4613 /* 4614 * Show details about the given vnode. 4615 */ 4616 DB_SHOW_COMMAND(vnode, db_show_vnode) 4617 { 4618 struct vnode *vp; 4619 4620 if (!have_addr) 4621 return; 4622 vp = (struct vnode *)addr; 4623 vn_printf(vp, "vnode "); 4624 } 4625 4626 /* 4627 * Show details about the given mount point. 4628 */ 4629 DB_SHOW_COMMAND(mount, db_show_mount) 4630 { 4631 struct mount *mp; 4632 struct vfsopt *opt; 4633 struct statfs *sp; 4634 struct vnode *vp; 4635 char buf[512]; 4636 uint64_t mflags; 4637 u_int flags; 4638 4639 if (!have_addr) { 4640 /* No address given, print short info about all mount points. 
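 * A typical ddb session (the address here is made up):
 *	show mount
 *	show mount 0xfffff80004a5c000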
*/ 4641 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4642 db_printf("%p %s on %s (%s)\n", mp, 4643 mp->mnt_stat.f_mntfromname, 4644 mp->mnt_stat.f_mntonname, 4645 mp->mnt_stat.f_fstypename); 4646 if (db_pager_quit) 4647 break; 4648 } 4649 db_printf("\nMore info: show mount <addr>\n"); 4650 return; 4651 } 4652 4653 mp = (struct mount *)addr; 4654 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4655 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4656 4657 buf[0] = '\0'; 4658 mflags = mp->mnt_flag; 4659 #define MNT_FLAG(flag) do { \ 4660 if (mflags & (flag)) { \ 4661 if (buf[0] != '\0') \ 4662 strlcat(buf, ", ", sizeof(buf)); \ 4663 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4664 mflags &= ~(flag); \ 4665 } \ 4666 } while (0) 4667 MNT_FLAG(MNT_RDONLY); 4668 MNT_FLAG(MNT_SYNCHRONOUS); 4669 MNT_FLAG(MNT_NOEXEC); 4670 MNT_FLAG(MNT_NOSUID); 4671 MNT_FLAG(MNT_NFS4ACLS); 4672 MNT_FLAG(MNT_UNION); 4673 MNT_FLAG(MNT_ASYNC); 4674 MNT_FLAG(MNT_SUIDDIR); 4675 MNT_FLAG(MNT_SOFTDEP); 4676 MNT_FLAG(MNT_NOSYMFOLLOW); 4677 MNT_FLAG(MNT_GJOURNAL); 4678 MNT_FLAG(MNT_MULTILABEL); 4679 MNT_FLAG(MNT_ACLS); 4680 MNT_FLAG(MNT_NOATIME); 4681 MNT_FLAG(MNT_NOCLUSTERR); 4682 MNT_FLAG(MNT_NOCLUSTERW); 4683 MNT_FLAG(MNT_SUJ); 4684 MNT_FLAG(MNT_EXRDONLY); 4685 MNT_FLAG(MNT_EXPORTED); 4686 MNT_FLAG(MNT_DEFEXPORTED); 4687 MNT_FLAG(MNT_EXPORTANON); 4688 MNT_FLAG(MNT_EXKERB); 4689 MNT_FLAG(MNT_EXPUBLIC); 4690 MNT_FLAG(MNT_LOCAL); 4691 MNT_FLAG(MNT_QUOTA); 4692 MNT_FLAG(MNT_ROOTFS); 4693 MNT_FLAG(MNT_USER); 4694 MNT_FLAG(MNT_IGNORE); 4695 MNT_FLAG(MNT_UPDATE); 4696 MNT_FLAG(MNT_DELEXPORT); 4697 MNT_FLAG(MNT_RELOAD); 4698 MNT_FLAG(MNT_FORCE); 4699 MNT_FLAG(MNT_SNAPSHOT); 4700 MNT_FLAG(MNT_BYFSID); 4701 #undef MNT_FLAG 4702 if (mflags != 0) { 4703 if (buf[0] != '\0') 4704 strlcat(buf, ", ", sizeof(buf)); 4705 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4706 "0x%016jx", mflags); 4707 } 4708 db_printf(" mnt_flag = %s\n", buf); 4709 4710 buf[0] = '\0'; 4711 flags = mp->mnt_kern_flag; 4712 #define MNT_KERN_FLAG(flag) do { \ 4713 if (flags & (flag)) { \ 4714 if (buf[0] != '\0') \ 4715 strlcat(buf, ", ", sizeof(buf)); \ 4716 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4717 flags &= ~(flag); \ 4718 } \ 4719 } while (0) 4720 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4721 MNT_KERN_FLAG(MNTK_ASYNC); 4722 MNT_KERN_FLAG(MNTK_SOFTDEP); 4723 MNT_KERN_FLAG(MNTK_NOMSYNC); 4724 MNT_KERN_FLAG(MNTK_DRAINING); 4725 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4726 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4727 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4728 MNT_KERN_FLAG(MNTK_NO_IOPF); 4729 MNT_KERN_FLAG(MNTK_RECURSE); 4730 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4731 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4732 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4733 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4734 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4735 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4736 MNT_KERN_FLAG(MNTK_NOASYNC); 4737 MNT_KERN_FLAG(MNTK_UNMOUNT); 4738 MNT_KERN_FLAG(MNTK_MWAIT); 4739 MNT_KERN_FLAG(MNTK_SUSPEND); 4740 MNT_KERN_FLAG(MNTK_SUSPEND2); 4741 MNT_KERN_FLAG(MNTK_SUSPENDED); 4742 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4743 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4744 #undef MNT_KERN_FLAG 4745 if (flags != 0) { 4746 if (buf[0] != '\0') 4747 strlcat(buf, ", ", sizeof(buf)); 4748 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4749 "0x%08x", flags); 4750 } 4751 db_printf(" mnt_kern_flag = %s\n", buf); 4752 4753 db_printf(" mnt_opt = "); 4754 opt = TAILQ_FIRST(mp->mnt_opt); 4755 if (opt != NULL) { 4756 db_printf("%s", opt->name); 4757 opt = TAILQ_NEXT(opt, link); 4758 while (opt != 
NULL) { 4759 db_printf(", %s", opt->name); 4760 opt = TAILQ_NEXT(opt, link); 4761 } 4762 } 4763 db_printf("\n"); 4764 4765 sp = &mp->mnt_stat; 4766 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4767 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4768 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4769 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4770 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4771 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4772 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4773 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4774 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4775 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4776 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4777 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4778 4779 db_printf(" mnt_cred = { uid=%u ruid=%u", 4780 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4781 if (jailed(mp->mnt_cred)) 4782 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4783 db_printf(" }\n"); 4784 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4785 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4786 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4787 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4788 db_printf(" mnt_lazyvnodelistsize = %d\n", 4789 mp->mnt_lazyvnodelistsize); 4790 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4791 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4792 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4793 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4794 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4795 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4796 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4797 db_printf(" mnt_secondary_accwrites = %d\n", 4798 mp->mnt_secondary_accwrites); 4799 db_printf(" mnt_gjprovider = %s\n", 4800 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4801 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4802 4803 db_printf("\n\nList of active vnodes\n"); 4804 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4805 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4806 vn_printf(vp, "vnode "); 4807 if (db_pager_quit) 4808 break; 4809 } 4810 } 4811 db_printf("\n\nList of inactive vnodes\n"); 4812 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4813 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4814 vn_printf(vp, "vnode "); 4815 if (db_pager_quit) 4816 break; 4817 } 4818 } 4819 } 4820 #endif /* DDB */ 4821 4822 /* 4823 * Fill in a struct xvfsconf based on a struct vfsconf. 4824 */ 4825 static int 4826 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4827 { 4828 struct xvfsconf xvfsp; 4829 4830 bzero(&xvfsp, sizeof(xvfsp)); 4831 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4832 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4833 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4834 xvfsp.vfc_flags = vfsp->vfc_flags; 4835 /* 4836 * These are unused in userland, we keep them 4837 * to not break binary compatibility. 
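 *
 * A sketch of the userland consumer this stays compatible with (roughly
 * what getvfsbyname(3) does; error handling omitted):
 *
 *	size_t buflen;
 *	struct xvfsconf *xvfsp;
 *
 *	sysctlbyname("vfs.conflist", NULL, &buflen, NULL, 0);
 *	xvfsp = malloc(buflen);
 *	sysctlbyname("vfs.conflist", xvfsp, &buflen, NULL, 0);
 *	iterate over buflen / sizeof(*xvfsp) entries;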
4838 */ 4839 xvfsp.vfc_vfsops = NULL; 4840 xvfsp.vfc_next = NULL; 4841 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4842 } 4843 4844 #ifdef COMPAT_FREEBSD32 4845 struct xvfsconf32 { 4846 uint32_t vfc_vfsops; 4847 char vfc_name[MFSNAMELEN]; 4848 int32_t vfc_typenum; 4849 int32_t vfc_refcount; 4850 int32_t vfc_flags; 4851 uint32_t vfc_next; 4852 }; 4853 4854 static int 4855 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4856 { 4857 struct xvfsconf32 xvfsp; 4858 4859 bzero(&xvfsp, sizeof(xvfsp)); 4860 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4861 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4862 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4863 xvfsp.vfc_flags = vfsp->vfc_flags; 4864 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4865 } 4866 #endif 4867 4868 /* 4869 * Top level filesystem related information gathering. 4870 */ 4871 static int 4872 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4873 { 4874 struct vfsconf *vfsp; 4875 int error; 4876 4877 error = 0; 4878 vfsconf_slock(); 4879 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4880 #ifdef COMPAT_FREEBSD32 4881 if (req->flags & SCTL_MASK32) 4882 error = vfsconf2x32(req, vfsp); 4883 else 4884 #endif 4885 error = vfsconf2x(req, vfsp); 4886 if (error) 4887 break; 4888 } 4889 vfsconf_sunlock(); 4890 return (error); 4891 } 4892 4893 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4894 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4895 "S,xvfsconf", "List of all configured filesystems"); 4896 4897 #ifndef BURN_BRIDGES 4898 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4899 4900 static int 4901 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4902 { 4903 int *name = (int *)arg1 - 1; /* XXX */ 4904 u_int namelen = arg2 + 1; /* XXX */ 4905 struct vfsconf *vfsp; 4906 4907 log(LOG_WARNING, "userland calling deprecated sysctl, " 4908 "please rebuild world\n"); 4909 4910 #if 1 || defined(COMPAT_PRELITE2) 4911 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4912 if (namelen == 1) 4913 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4914 #endif 4915 4916 switch (name[1]) { 4917 case VFS_MAXTYPENUM: 4918 if (namelen != 2) 4919 return (ENOTDIR); 4920 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4921 case VFS_CONF: 4922 if (namelen != 3) 4923 return (ENOTDIR); /* overloaded */ 4924 vfsconf_slock(); 4925 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4926 if (vfsp->vfc_typenum == name[2]) 4927 break; 4928 } 4929 vfsconf_sunlock(); 4930 if (vfsp == NULL) 4931 return (EOPNOTSUPP); 4932 #ifdef COMPAT_FREEBSD32 4933 if (req->flags & SCTL_MASK32) 4934 return (vfsconf2x32(req, vfsp)); 4935 else 4936 #endif 4937 return (vfsconf2x(req, vfsp)); 4938 } 4939 return (EOPNOTSUPP); 4940 } 4941 4942 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4943 CTLFLAG_MPSAFE, vfs_sysctl, 4944 "Generic filesystem"); 4945 4946 #if 1 || defined(COMPAT_PRELITE2) 4947 4948 static int 4949 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4950 { 4951 int error; 4952 struct vfsconf *vfsp; 4953 struct ovfsconf ovfs; 4954 4955 vfsconf_slock(); 4956 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4957 bzero(&ovfs, sizeof(ovfs)); 4958 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4959 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4960 ovfs.vfc_index = vfsp->vfc_typenum; 4961 ovfs.vfc_refcount = vfsp->vfc_refcount; 4962 ovfs.vfc_flags = vfsp->vfc_flags; 4963 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4964 if (error != 0) { 4965 vfsconf_sunlock(); 4966 return (error); 4967 } 4968 } 4969 vfsconf_sunlock(); 4970 return (0); 4971 } 4972 4973 #endif /* 1 || COMPAT_PRELITE2 */ 4974 #endif /* !BURN_BRIDGES */ 4975 4976 static void 4977 unmount_or_warn(struct mount *mp) 4978 { 4979 int error; 4980 4981 error = dounmount(mp, MNT_FORCE, curthread); 4982 if (error != 0) { 4983 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4984 if (error == EBUSY) 4985 printf("BUSY)\n"); 4986 else 4987 printf("%d)\n", error); 4988 } 4989 } 4990 4991 /* 4992 * Unmount all filesystems. The list is traversed in reverse order 4993 * of mounting to avoid dependencies. 4994 */ 4995 void 4996 vfs_unmountall(void) 4997 { 4998 struct mount *mp, *tmp; 4999 5000 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 5001 5002 /* 5003 * Since this only runs when rebooting, it is not interlocked. 5004 */ 5005 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 5006 vfs_ref(mp); 5007 5008 /* 5009 * Forcibly unmounting "/dev" before "/" would prevent clean 5010 * unmount of the latter. 
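 * rootdevmp is therefore skipped in this pass and unmounted last,
 * after the loop has drained everything else.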
5011 */ 5012 if (mp == rootdevmp) 5013 continue; 5014 5015 unmount_or_warn(mp); 5016 } 5017 5018 if (rootdevmp != NULL) 5019 unmount_or_warn(rootdevmp); 5020 } 5021 5022 static void 5023 vfs_deferred_inactive(struct vnode *vp, int lkflags) 5024 { 5025 5026 ASSERT_VI_LOCKED(vp, __func__); 5027 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 5028 if ((vp->v_iflag & VI_OWEINACT) == 0) { 5029 vdropl(vp); 5030 return; 5031 } 5032 if (vn_lock(vp, lkflags) == 0) { 5033 VI_LOCK(vp); 5034 vinactive(vp); 5035 VOP_UNLOCK(vp); 5036 vdropl(vp); 5037 return; 5038 } 5039 vdefer_inactive_unlocked(vp); 5040 } 5041 5042 static int 5043 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5044 { 5045 5046 return (vp->v_iflag & VI_DEFINACT); 5047 } 5048 5049 static void __noinline 5050 vfs_periodic_inactive(struct mount *mp, int flags) 5051 { 5052 struct vnode *vp, *mvp; 5053 int lkflags; 5054 5055 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5056 if (flags != MNT_WAIT) 5057 lkflags |= LK_NOWAIT; 5058 5059 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5060 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5061 VI_UNLOCK(vp); 5062 continue; 5063 } 5064 vp->v_iflag &= ~VI_DEFINACT; 5065 vfs_deferred_inactive(vp, lkflags); 5066 } 5067 } 5068 5069 static inline bool 5070 vfs_want_msync(struct vnode *vp) 5071 { 5072 struct vm_object *obj; 5073 5074 /* 5075 * This test may be performed without any locks held. 5076 * We rely on vm_object's type stability. 5077 */ 5078 if (vp->v_vflag & VV_NOSYNC) 5079 return (false); 5080 obj = vp->v_object; 5081 return (obj != NULL && vm_object_mightbedirty(obj)); 5082 } 5083 5084 static int 5085 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5086 { 5087 5088 if (vp->v_vflag & VV_NOSYNC) 5089 return (false); 5090 if (vp->v_iflag & VI_DEFINACT) 5091 return (true); 5092 return (vfs_want_msync(vp)); 5093 } 5094 5095 static void __noinline 5096 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5097 { 5098 struct vnode *vp, *mvp; 5099 struct vm_object *obj; 5100 int lkflags, objflags; 5101 bool seen_defer; 5102 5103 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5104 if (flags != MNT_WAIT) { 5105 lkflags |= LK_NOWAIT; 5106 objflags = OBJPC_NOSYNC; 5107 } else { 5108 objflags = OBJPC_SYNC; 5109 } 5110 5111 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5112 seen_defer = false; 5113 if (vp->v_iflag & VI_DEFINACT) { 5114 vp->v_iflag &= ~VI_DEFINACT; 5115 seen_defer = true; 5116 } 5117 if (!vfs_want_msync(vp)) { 5118 if (seen_defer) 5119 vfs_deferred_inactive(vp, lkflags); 5120 else 5121 VI_UNLOCK(vp); 5122 continue; 5123 } 5124 if (vget(vp, lkflags) == 0) { 5125 obj = vp->v_object; 5126 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 5127 VM_OBJECT_WLOCK(obj); 5128 vm_object_page_clean(obj, 0, 0, objflags); 5129 VM_OBJECT_WUNLOCK(obj); 5130 } 5131 vput(vp); 5132 if (seen_defer) 5133 vdrop(vp); 5134 } else { 5135 if (seen_defer) 5136 vdefer_inactive_unlocked(vp); 5137 } 5138 } 5139 } 5140 5141 void 5142 vfs_periodic(struct mount *mp, int flags) 5143 { 5144 5145 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5146 5147 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5148 vfs_periodic_inactive(mp, flags); 5149 else 5150 vfs_periodic_msync_inactive(mp, flags); 5151 } 5152 5153 static void 5154 destroy_vpollinfo_free(struct vpollinfo *vi) 5155 { 5156 5157 knlist_destroy(&vi->vpi_selinfo.si_note); 5158 mtx_destroy(&vi->vpi_lock); 5159 free(vi, M_VNODEPOLL); 5160 } 5161 5162 static void 5163 destroy_vpollinfo(struct vpollinfo 
*vi) 5164 { 5165 5166 knlist_clear(&vi->vpi_selinfo.si_note, 1); 5167 seldrain(&vi->vpi_selinfo); 5168 destroy_vpollinfo_free(vi); 5169 } 5170 5171 /* 5172 * Initialize per-vnode helper structure to hold poll-related state. 5173 */ 5174 void 5175 v_addpollinfo(struct vnode *vp) 5176 { 5177 struct vpollinfo *vi; 5178 5179 if (vp->v_pollinfo != NULL) 5180 return; 5181 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO); 5182 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 5183 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 5184 vfs_knlunlock, vfs_knl_assert_lock); 5185 VI_LOCK(vp); 5186 if (vp->v_pollinfo != NULL) { 5187 VI_UNLOCK(vp); 5188 destroy_vpollinfo_free(vi); 5189 return; 5190 } 5191 vp->v_pollinfo = vi; 5192 VI_UNLOCK(vp); 5193 } 5194 5195 /* 5196 * Record a process's interest in events which might happen to 5197 * a vnode. Because poll uses the historic select-style interface 5198 * internally, this routine serves as both the ``check for any 5199 * pending events'' and the ``record my interest in future events'' 5200 * functions. (These are done together, while the lock is held, 5201 * to avoid race conditions.) 5202 */ 5203 int 5204 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 5205 { 5206 5207 v_addpollinfo(vp); 5208 mtx_lock(&vp->v_pollinfo->vpi_lock); 5209 if (vp->v_pollinfo->vpi_revents & events) { 5210 /* 5211 * This leaves events we are not interested 5212 * in available for the other process which 5213 * presumably had requested them 5214 * (otherwise they would never have been 5215 * recorded). 5216 */ 5217 events &= vp->v_pollinfo->vpi_revents; 5218 vp->v_pollinfo->vpi_revents &= ~events; 5219 5220 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5221 return (events); 5222 } 5223 vp->v_pollinfo->vpi_events |= events; 5224 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 5225 mtx_unlock(&vp->v_pollinfo->vpi_lock); 5226 return (0); 5227 } 5228 5229 /* 5230 * Routine to create and manage a filesystem syncer vnode. 5231 */ 5232 #define sync_close ((int (*)(struct vop_close_args *))nullop) 5233 static int sync_fsync(struct vop_fsync_args *); 5234 static int sync_inactive(struct vop_inactive_args *); 5235 static int sync_reclaim(struct vop_reclaim_args *); 5236 5237 static struct vop_vector sync_vnodeops = { 5238 .vop_bypass = VOP_EOPNOTSUPP, 5239 .vop_close = sync_close, 5240 .vop_fsync = sync_fsync, 5241 .vop_getwritemount = vop_stdgetwritemount, 5242 .vop_inactive = sync_inactive, 5243 .vop_need_inactive = vop_stdneed_inactive, 5244 .vop_reclaim = sync_reclaim, 5245 .vop_lock1 = vop_stdlock, 5246 .vop_unlock = vop_stdunlock, 5247 .vop_islocked = vop_stdislocked, 5248 .vop_fplookup_vexec = VOP_EAGAIN, 5249 .vop_fplookup_symlink = VOP_EAGAIN, 5250 }; 5251 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5252 5253 /* 5254 * Create a new filesystem syncer vnode for the specified mount point.
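 *
 * The scatter logic below hands out slot offsets in a bisecting pattern;
 * e.g. assuming the common default syncer_maxdelay of 32, successive
 * calls get next = 16, 8, 24, 4, 12, 20, 28, 2, ... so worklist slots
 * fill in evenly rather than clustering (syncer_maxdelay is tunable, the
 * value of 32 is only an example).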
5255 */ 5256 void 5257 vfs_allocate_syncvnode(struct mount *mp) 5258 { 5259 struct vnode *vp; 5260 struct bufobj *bo; 5261 static long start, incr, next; 5262 int error; 5263 5264 /* Allocate a new vnode */ 5265 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5266 if (error != 0) 5267 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5268 vp->v_type = VNON; 5269 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5270 vp->v_vflag |= VV_FORCEINSMQ; 5271 error = insmntque1(vp, mp); 5272 if (error != 0) 5273 panic("vfs_allocate_syncvnode: insmntque() failed"); 5274 vp->v_vflag &= ~VV_FORCEINSMQ; 5275 vn_set_state(vp, VSTATE_CONSTRUCTED); 5276 VOP_UNLOCK(vp); 5277 /* 5278 * Place the vnode onto the syncer worklist. We attempt to 5279 * scatter them about on the list so that they will go off 5280 * at evenly distributed times even if all the filesystems 5281 * are mounted at once. 5282 */ 5283 next += incr; 5284 if (next == 0 || next > syncer_maxdelay) { 5285 start /= 2; 5286 incr /= 2; 5287 if (start == 0) { 5288 start = syncer_maxdelay / 2; 5289 incr = syncer_maxdelay; 5290 } 5291 next = start; 5292 } 5293 bo = &vp->v_bufobj; 5294 BO_LOCK(bo); 5295 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5296 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 5297 mtx_lock(&sync_mtx); 5298 sync_vnode_count++; 5299 if (mp->mnt_syncer == NULL) { 5300 mp->mnt_syncer = vp; 5301 vp = NULL; 5302 } 5303 mtx_unlock(&sync_mtx); 5304 BO_UNLOCK(bo); 5305 if (vp != NULL) { 5306 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5307 vgone(vp); 5308 vput(vp); 5309 } 5310 } 5311 5312 void 5313 vfs_deallocate_syncvnode(struct mount *mp) 5314 { 5315 struct vnode *vp; 5316 5317 mtx_lock(&sync_mtx); 5318 vp = mp->mnt_syncer; 5319 if (vp != NULL) 5320 mp->mnt_syncer = NULL; 5321 mtx_unlock(&sync_mtx); 5322 if (vp != NULL) 5323 vrele(vp); 5324 } 5325 5326 /* 5327 * Do a lazy sync of the filesystem. 5328 */ 5329 static int 5330 sync_fsync(struct vop_fsync_args *ap) 5331 { 5332 struct vnode *syncvp = ap->a_vp; 5333 struct mount *mp = syncvp->v_mount; 5334 int error, save; 5335 struct bufobj *bo; 5336 5337 /* 5338 * We only need to do something if this is a lazy evaluation. 5339 */ 5340 if (ap->a_waitfor != MNT_LAZY) 5341 return (0); 5342 5343 /* 5344 * Move ourselves to the back of the sync list. 5345 */ 5346 bo = &syncvp->v_bufobj; 5347 BO_LOCK(bo); 5348 vn_syncer_add_to_worklist(bo, syncdelay); 5349 BO_UNLOCK(bo); 5350 5351 /* 5352 * Walk the list of vnodes pushing all that are dirty and 5353 * not already on the sync list. 5354 */ 5355 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5356 return (0); 5357 VOP_UNLOCK(syncvp); 5358 save = curthread_pflags_set(TDP_SYNCIO); 5359 /* 5360 * The filesystem at hand may be idle with free vnodes stored in the 5361 * batch. Return them instead of letting them stay there indefinitely. 5362 */ 5363 vfs_periodic(mp, MNT_NOWAIT); 5364 error = VFS_SYNC(mp, MNT_LAZY); 5365 curthread_pflags_restore(save); 5366 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY); 5367 vfs_unbusy(mp); 5368 return (error); 5369 } 5370 5371 /* 5372 * The syncer vnode is no longer referenced. 5373 */ 5374 static int 5375 sync_inactive(struct vop_inactive_args *ap) 5376 { 5377 5378 vgone(ap->a_vp); 5379 return (0); 5380 } 5381 5382 /* 5383 * The syncer vnode is no longer needed and is being decommissioned. 5384 * 5385 * Modifications to the worklist must be protected by sync_mtx.
5386 */ 5387 static int 5388 sync_reclaim(struct vop_reclaim_args *ap) 5389 { 5390 struct vnode *vp = ap->a_vp; 5391 struct bufobj *bo; 5392 5393 bo = &vp->v_bufobj; 5394 BO_LOCK(bo); 5395 mtx_lock(&sync_mtx); 5396 if (vp->v_mount->mnt_syncer == vp) 5397 vp->v_mount->mnt_syncer = NULL; 5398 if (bo->bo_flag & BO_ONWORKLST) { 5399 LIST_REMOVE(bo, bo_synclist); 5400 syncer_worklist_len--; 5401 sync_vnode_count--; 5402 bo->bo_flag &= ~BO_ONWORKLST; 5403 } 5404 mtx_unlock(&sync_mtx); 5405 BO_UNLOCK(bo); 5406 5407 return (0); 5408 } 5409 5410 int 5411 vn_need_pageq_flush(struct vnode *vp) 5412 { 5413 struct vm_object *obj; 5414 5415 obj = vp->v_object; 5416 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5417 vm_object_mightbedirty(obj)); 5418 } 5419 5420 /* 5421 * Check if vnode represents a disk device 5422 */ 5423 bool 5424 vn_isdisk_error(struct vnode *vp, int *errp) 5425 { 5426 int error; 5427 5428 if (vp->v_type != VCHR) { 5429 error = ENOTBLK; 5430 goto out; 5431 } 5432 error = 0; 5433 dev_lock(); 5434 if (vp->v_rdev == NULL) 5435 error = ENXIO; 5436 else if (vp->v_rdev->si_devsw == NULL) 5437 error = ENXIO; 5438 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5439 error = ENOTBLK; 5440 dev_unlock(); 5441 out: 5442 *errp = error; 5443 return (error == 0); 5444 } 5445 5446 bool 5447 vn_isdisk(struct vnode *vp) 5448 { 5449 int error; 5450 5451 return (vn_isdisk_error(vp, &error)); 5452 } 5453 5454 /* 5455 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5456 * the comment above cache_fplookup for details. 5457 */ 5458 int 5459 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5460 { 5461 int error; 5462 5463 VFS_SMR_ASSERT_ENTERED(); 5464 5465 /* Check the owner. */ 5466 if (cred->cr_uid == file_uid) { 5467 if (file_mode & S_IXUSR) 5468 return (0); 5469 goto out_error; 5470 } 5471 5472 /* Otherwise, check the groups (first match) */ 5473 if (groupmember(file_gid, cred)) { 5474 if (file_mode & S_IXGRP) 5475 return (0); 5476 goto out_error; 5477 } 5478 5479 /* Otherwise, check everyone else. */ 5480 if (file_mode & S_IXOTH) 5481 return (0); 5482 out_error: 5483 /* 5484 * Permission check failed, but it is possible denial will get overwritten 5485 * (e.g., when root is traversing through a 700 directory owned by someone 5486 * else). 5487 * 5488 * vaccess() calls priv_check_cred which in turn can descent into MAC 5489 * modules overriding this result. It's quite unclear what semantics 5490 * are allowed for them to operate, thus for safety we don't call them 5491 * from within the SMR section. This also means if any such modules 5492 * are present, we have to let the regular lookup decide. 5493 */ 5494 error = priv_check_cred_vfs_lookup_nomac(cred); 5495 switch (error) { 5496 case 0: 5497 return (0); 5498 case EAGAIN: 5499 /* 5500 * MAC modules present. 5501 */ 5502 return (EAGAIN); 5503 case EPERM: 5504 return (EACCES); 5505 default: 5506 return (error); 5507 } 5508 } 5509 5510 /* 5511 * Common filesystem object access control check routine. Accepts a 5512 * vnode's type, "mode", uid and gid, requested access mode, and credentials. 5513 * Returns 0 on success, or an errno on failure. 
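 *
 * A typical filesystem VOP_ACCESS implementation bottoms out here; a
 * rough sketch, where "node" stands in for the filesystem's private
 * inode data:
 *
 *	return (vaccess(vp->v_type, node->mode, node->uid, node->gid,
 *	    ap->a_accmode, ap->a_cred));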
5514 */ 5515 int 5516 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5517 accmode_t accmode, struct ucred *cred) 5518 { 5519 accmode_t dac_granted; 5520 accmode_t priv_granted; 5521 5522 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5523 ("invalid bit in accmode")); 5524 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5525 ("VAPPEND without VWRITE")); 5526 5527 /* 5528 * Look for a normal, non-privileged way to access the file/directory 5529 * as requested. If it exists, go with that. 5530 */ 5531 5532 dac_granted = 0; 5533 5534 /* Check the owner. */ 5535 if (cred->cr_uid == file_uid) { 5536 dac_granted |= VADMIN; 5537 if (file_mode & S_IXUSR) 5538 dac_granted |= VEXEC; 5539 if (file_mode & S_IRUSR) 5540 dac_granted |= VREAD; 5541 if (file_mode & S_IWUSR) 5542 dac_granted |= (VWRITE | VAPPEND); 5543 5544 if ((accmode & dac_granted) == accmode) 5545 return (0); 5546 5547 goto privcheck; 5548 } 5549 5550 /* Otherwise, check the groups (first match) */ 5551 if (groupmember(file_gid, cred)) { 5552 if (file_mode & S_IXGRP) 5553 dac_granted |= VEXEC; 5554 if (file_mode & S_IRGRP) 5555 dac_granted |= VREAD; 5556 if (file_mode & S_IWGRP) 5557 dac_granted |= (VWRITE | VAPPEND); 5558 5559 if ((accmode & dac_granted) == accmode) 5560 return (0); 5561 5562 goto privcheck; 5563 } 5564 5565 /* Otherwise, check everyone else. */ 5566 if (file_mode & S_IXOTH) 5567 dac_granted |= VEXEC; 5568 if (file_mode & S_IROTH) 5569 dac_granted |= VREAD; 5570 if (file_mode & S_IWOTH) 5571 dac_granted |= (VWRITE | VAPPEND); 5572 if ((accmode & dac_granted) == accmode) 5573 return (0); 5574 5575 privcheck: 5576 /* 5577 * Build a privilege mask to determine if the set of privileges 5578 * satisfies the requirements when combined with the granted mask 5579 * from above. For each privilege, if the privilege is required, 5580 * bitwise or the request type onto the priv_granted mask. 5581 */ 5582 priv_granted = 0; 5583 5584 if (type == VDIR) { 5585 /* 5586 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5587 * requests, instead of PRIV_VFS_EXEC. 5588 */ 5589 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5590 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5591 priv_granted |= VEXEC; 5592 } else { 5593 /* 5594 * Ensure that at least one execute bit is on. Otherwise, 5595 * a privileged user will always succeed, and we don't want 5596 * this to happen unless the file really is executable. 5597 */ 5598 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5599 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5600 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5601 priv_granted |= VEXEC; 5602 } 5603 5604 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5605 !priv_check_cred(cred, PRIV_VFS_READ)) 5606 priv_granted |= VREAD; 5607 5608 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5609 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5610 priv_granted |= (VWRITE | VAPPEND); 5611 5612 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5613 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5614 priv_granted |= VADMIN; 5615 5616 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5617 return (0); 5618 } 5619 5620 return ((accmode & VADMIN) ? EPERM : EACCES); 5621 } 5622 5623 /* 5624 * Credential check based on process requesting service, and per-attribute 5625 * permissions. 
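 *
 * Filesystem extattr VOPs are the expected callers; a rough sketch
 * (use VWRITE for set/delete operations):
 *
 *	error = extattr_check_cred(vp, ap->a_attrnamespace, ap->a_cred,
 *	    ap->a_td, VREAD);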
5626 */ 5627 int 5628 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5629 struct thread *td, accmode_t accmode) 5630 { 5631 5632 /* 5633 * Kernel-invoked always succeeds. 5634 */ 5635 if (cred == NOCRED) 5636 return (0); 5637 5638 /* 5639 * Do not allow privileged processes in jail to directly manipulate 5640 * system attributes. 5641 */ 5642 switch (attrnamespace) { 5643 case EXTATTR_NAMESPACE_SYSTEM: 5644 /* Potentially should be: return (EPERM); */ 5645 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5646 case EXTATTR_NAMESPACE_USER: 5647 return (VOP_ACCESS(vp, accmode, cred, td)); 5648 default: 5649 return (EPERM); 5650 } 5651 } 5652 5653 #ifdef DEBUG_VFS_LOCKS 5654 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5655 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5656 "Drop into debugger on lock violation"); 5657 5658 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5659 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5660 0, "Check for interlock across VOPs"); 5661 5662 int vfs_badlock_print = 1; /* Print lock violations. */ 5663 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5664 0, "Print lock violations"); 5665 5666 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5667 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5668 0, "Print vnode details on lock violations"); 5669 5670 #ifdef KDB 5671 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5672 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5673 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5674 #endif 5675 5676 static void 5677 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5678 { 5679 5680 #ifdef KDB 5681 if (vfs_badlock_backtrace) 5682 kdb_backtrace(); 5683 #endif 5684 if (vfs_badlock_vnode) 5685 vn_printf(vp, "vnode "); 5686 if (vfs_badlock_print) 5687 printf("%s: %p %s\n", str, (void *)vp, msg); 5688 if (vfs_badlock_ddb) 5689 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5690 } 5691 5692 void 5693 assert_vi_locked(struct vnode *vp, const char *str) 5694 { 5695 5696 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5697 vfs_badlock("interlock is not locked but should be", str, vp); 5698 } 5699 5700 void 5701 assert_vi_unlocked(struct vnode *vp, const char *str) 5702 { 5703 5704 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5705 vfs_badlock("interlock is locked but should not be", str, vp); 5706 } 5707 5708 void 5709 assert_vop_locked(struct vnode *vp, const char *str) 5710 { 5711 if (KERNEL_PANICKED() || vp == NULL) 5712 return; 5713 5714 #ifdef WITNESS 5715 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5716 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5717 #else 5718 int locked = VOP_ISLOCKED(vp); 5719 if (locked == 0 || locked == LK_EXCLOTHER) 5720 #endif 5721 vfs_badlock("is not locked but should be", str, vp); 5722 } 5723 5724 void 5725 assert_vop_unlocked(struct vnode *vp, const char *str) 5726 { 5727 if (KERNEL_PANICKED() || vp == NULL) 5728 return; 5729 5730 #ifdef WITNESS 5731 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5732 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5733 #else 5734 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5735 #endif 5736 vfs_badlock("is locked but should not be", str, vp); 5737 } 5738 5739 void 5740 assert_vop_elocked(struct vnode *vp, const char *str) 5741 { 5742 if (KERNEL_PANICKED() || vp == 
NULL) 5743 return; 5744 5745 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5746 vfs_badlock("is not exclusive locked but should be", str, vp); 5747 } 5748 #endif /* DEBUG_VFS_LOCKS */ 5749 5750 void 5751 vop_rename_fail(struct vop_rename_args *ap) 5752 { 5753 5754 if (ap->a_tvp != NULL) 5755 vput(ap->a_tvp); 5756 if (ap->a_tdvp == ap->a_tvp) 5757 vrele(ap->a_tdvp); 5758 else 5759 vput(ap->a_tdvp); 5760 vrele(ap->a_fdvp); 5761 vrele(ap->a_fvp); 5762 } 5763 5764 void 5765 vop_rename_pre(void *ap) 5766 { 5767 struct vop_rename_args *a = ap; 5768 5769 #ifdef DEBUG_VFS_LOCKS 5770 if (a->a_tvp) 5771 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5772 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5773 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5774 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5775 5776 /* Check the source (from). */ 5777 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5778 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5779 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5780 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5781 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5782 5783 /* Check the target. */ 5784 if (a->a_tvp) 5785 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5786 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5787 #endif 5788 /* 5789 * It may be tempting to add vn_seqc_write_begin/end calls here and 5790 * in vop_rename_post but that's not going to work out since some 5791 * filesystems relookup vnodes mid-rename. This is probably a bug. 5792 * 5793 * For now filesystems are expected to do the relevant calls after they 5794 * decide what vnodes to operate on. 5795 */ 5796 if (a->a_tdvp != a->a_fdvp) 5797 vhold(a->a_fdvp); 5798 if (a->a_tvp != a->a_fvp) 5799 vhold(a->a_fvp); 5800 vhold(a->a_tdvp); 5801 if (a->a_tvp) 5802 vhold(a->a_tvp); 5803 } 5804 5805 #ifdef DEBUG_VFS_LOCKS 5806 void 5807 vop_fplookup_vexec_debugpre(void *ap __unused) 5808 { 5809 5810 VFS_SMR_ASSERT_ENTERED(); 5811 } 5812 5813 void 5814 vop_fplookup_vexec_debugpost(void *ap, int rc) 5815 { 5816 struct vop_fplookup_vexec_args *a; 5817 struct vnode *vp; 5818 5819 a = ap; 5820 vp = a->a_vp; 5821 5822 VFS_SMR_ASSERT_ENTERED(); 5823 if (rc == EOPNOTSUPP) 5824 VNPASS(VN_IS_DOOMED(vp), vp); 5825 } 5826 5827 void 5828 vop_fplookup_symlink_debugpre(void *ap __unused) 5829 { 5830 5831 VFS_SMR_ASSERT_ENTERED(); 5832 } 5833 5834 void 5835 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5836 { 5837 5838 VFS_SMR_ASSERT_ENTERED(); 5839 } 5840 5841 static void 5842 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5843 { 5844 if (vp->v_type == VCHR) 5845 ; 5846 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5847 ASSERT_VOP_LOCKED(vp, name); 5848 else 5849 ASSERT_VOP_ELOCKED(vp, name); 5850 } 5851 5852 void 5853 vop_fsync_debugpre(void *a) 5854 { 5855 struct vop_fsync_args *ap; 5856 5857 ap = a; 5858 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5859 } 5860 5861 void 5862 vop_fsync_debugpost(void *a, int rc __unused) 5863 { 5864 struct vop_fsync_args *ap; 5865 5866 ap = a; 5867 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5868 } 5869 5870 void 5871 vop_fdatasync_debugpre(void *a) 5872 { 5873 struct vop_fdatasync_args *ap; 5874 5875 ap = a; 5876 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5877 } 5878 5879 void 5880 vop_fdatasync_debugpost(void *a, int rc __unused) 5881 { 5882 struct vop_fdatasync_args *ap; 5883 5884 ap = a; 5885 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5886 } 5887 5888 void 5889 
vop_strategy_debugpre(void *ap) 5890 { 5891 struct vop_strategy_args *a; 5892 struct buf *bp; 5893 5894 a = ap; 5895 bp = a->a_bp; 5896 5897 /* 5898 * Cluster ops lock their component buffers but not the IO container. 5899 */ 5900 if ((bp->b_flags & B_CLUSTER) != 0) 5901 return; 5902 5903 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5904 if (vfs_badlock_print) 5905 printf( 5906 "VOP_STRATEGY: bp is not locked but should be\n"); 5907 if (vfs_badlock_ddb) 5908 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5909 } 5910 } 5911 5912 void 5913 vop_lock_debugpre(void *ap) 5914 { 5915 struct vop_lock1_args *a = ap; 5916 5917 if ((a->a_flags & LK_INTERLOCK) == 0) 5918 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5919 else 5920 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5921 } 5922 5923 void 5924 vop_lock_debugpost(void *ap, int rc) 5925 { 5926 struct vop_lock1_args *a = ap; 5927 5928 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5929 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5930 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5931 } 5932 5933 void 5934 vop_unlock_debugpre(void *ap) 5935 { 5936 struct vop_unlock_args *a = ap; 5937 struct vnode *vp = a->a_vp; 5938 5939 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5940 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5941 } 5942 5943 void 5944 vop_need_inactive_debugpre(void *ap) 5945 { 5946 struct vop_need_inactive_args *a = ap; 5947 5948 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5949 } 5950 5951 void 5952 vop_need_inactive_debugpost(void *ap, int rc) 5953 { 5954 struct vop_need_inactive_args *a = ap; 5955 5956 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5957 } 5958 #endif 5959 5960 void 5961 vop_create_pre(void *ap) 5962 { 5963 struct vop_create_args *a; 5964 struct vnode *dvp; 5965 5966 a = ap; 5967 dvp = a->a_dvp; 5968 vn_seqc_write_begin(dvp); 5969 } 5970 5971 void 5972 vop_create_post(void *ap, int rc) 5973 { 5974 struct vop_create_args *a; 5975 struct vnode *dvp; 5976 5977 a = ap; 5978 dvp = a->a_dvp; 5979 vn_seqc_write_end(dvp); 5980 if (!rc) 5981 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5982 } 5983 5984 void 5985 vop_whiteout_pre(void *ap) 5986 { 5987 struct vop_whiteout_args *a; 5988 struct vnode *dvp; 5989 5990 a = ap; 5991 dvp = a->a_dvp; 5992 vn_seqc_write_begin(dvp); 5993 } 5994 5995 void 5996 vop_whiteout_post(void *ap, int rc) 5997 { 5998 struct vop_whiteout_args *a; 5999 struct vnode *dvp; 6000 6001 a = ap; 6002 dvp = a->a_dvp; 6003 vn_seqc_write_end(dvp); 6004 } 6005 6006 void 6007 vop_deleteextattr_pre(void *ap) 6008 { 6009 struct vop_deleteextattr_args *a; 6010 struct vnode *vp; 6011 6012 a = ap; 6013 vp = a->a_vp; 6014 vn_seqc_write_begin(vp); 6015 } 6016 6017 void 6018 vop_deleteextattr_post(void *ap, int rc) 6019 { 6020 struct vop_deleteextattr_args *a; 6021 struct vnode *vp; 6022 6023 a = ap; 6024 vp = a->a_vp; 6025 vn_seqc_write_end(vp); 6026 if (!rc) 6027 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 6028 } 6029 6030 void 6031 vop_link_pre(void *ap) 6032 { 6033 struct vop_link_args *a; 6034 struct vnode *vp, *tdvp; 6035 6036 a = ap; 6037 vp = a->a_vp; 6038 tdvp = a->a_tdvp; 6039 vn_seqc_write_begin(vp); 6040 vn_seqc_write_begin(tdvp); 6041 } 6042 6043 void 6044 vop_link_post(void *ap, int rc) 6045 { 6046 struct vop_link_args *a; 6047 struct vnode *vp, *tdvp; 6048 6049 a = ap; 6050 vp = a->a_vp; 6051 tdvp = a->a_tdvp; 6052 vn_seqc_write_end(vp); 6053 vn_seqc_write_end(tdvp); 6054 if (!rc) { 6055 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6056 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6057 } 6058 } 6059 6060 void 6061 vop_mkdir_pre(void *ap) 6062 { 6063 struct 
vop_mkdir_args *a; 6064 struct vnode *dvp; 6065 6066 a = ap; 6067 dvp = a->a_dvp; 6068 vn_seqc_write_begin(dvp); 6069 } 6070 6071 void 6072 vop_mkdir_post(void *ap, int rc) 6073 { 6074 struct vop_mkdir_args *a; 6075 struct vnode *dvp; 6076 6077 a = ap; 6078 dvp = a->a_dvp; 6079 vn_seqc_write_end(dvp); 6080 if (!rc) 6081 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6082 } 6083 6084 #ifdef DEBUG_VFS_LOCKS 6085 void 6086 vop_mkdir_debugpost(void *ap, int rc) 6087 { 6088 struct vop_mkdir_args *a; 6089 6090 a = ap; 6091 if (!rc) 6092 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 6093 } 6094 #endif 6095 6096 void 6097 vop_mknod_pre(void *ap) 6098 { 6099 struct vop_mknod_args *a; 6100 struct vnode *dvp; 6101 6102 a = ap; 6103 dvp = a->a_dvp; 6104 vn_seqc_write_begin(dvp); 6105 } 6106 6107 void 6108 vop_mknod_post(void *ap, int rc) 6109 { 6110 struct vop_mknod_args *a; 6111 struct vnode *dvp; 6112 6113 a = ap; 6114 dvp = a->a_dvp; 6115 vn_seqc_write_end(dvp); 6116 if (!rc) 6117 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6118 } 6119 6120 void 6121 vop_reclaim_post(void *ap, int rc) 6122 { 6123 struct vop_reclaim_args *a; 6124 struct vnode *vp; 6125 6126 a = ap; 6127 vp = a->a_vp; 6128 ASSERT_VOP_IN_SEQC(vp); 6129 if (!rc) 6130 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 6131 } 6132 6133 void 6134 vop_remove_pre(void *ap) 6135 { 6136 struct vop_remove_args *a; 6137 struct vnode *dvp, *vp; 6138 6139 a = ap; 6140 dvp = a->a_dvp; 6141 vp = a->a_vp; 6142 vn_seqc_write_begin(dvp); 6143 vn_seqc_write_begin(vp); 6144 } 6145 6146 void 6147 vop_remove_post(void *ap, int rc) 6148 { 6149 struct vop_remove_args *a; 6150 struct vnode *dvp, *vp; 6151 6152 a = ap; 6153 dvp = a->a_dvp; 6154 vp = a->a_vp; 6155 vn_seqc_write_end(dvp); 6156 vn_seqc_write_end(vp); 6157 if (!rc) { 6158 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6159 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6160 } 6161 } 6162 6163 void 6164 vop_rename_post(void *ap, int rc) 6165 { 6166 struct vop_rename_args *a = ap; 6167 long hint; 6168 6169 if (!rc) { 6170 hint = NOTE_WRITE; 6171 if (a->a_fdvp == a->a_tdvp) { 6172 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6173 hint |= NOTE_LINK; 6174 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6175 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6176 } else { 6177 hint |= NOTE_EXTEND; 6178 if (a->a_fvp->v_type == VDIR) 6179 hint |= NOTE_LINK; 6180 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6181 6182 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6183 a->a_tvp->v_type == VDIR) 6184 hint &= ~NOTE_LINK; 6185 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6186 } 6187 6188 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6189 if (a->a_tvp) 6190 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6191 } 6192 if (a->a_tdvp != a->a_fdvp) 6193 vdrop(a->a_fdvp); 6194 if (a->a_tvp != a->a_fvp) 6195 vdrop(a->a_fvp); 6196 vdrop(a->a_tdvp); 6197 if (a->a_tvp) 6198 vdrop(a->a_tvp); 6199 } 6200 6201 void 6202 vop_rmdir_pre(void *ap) 6203 { 6204 struct vop_rmdir_args *a; 6205 struct vnode *dvp, *vp; 6206 6207 a = ap; 6208 dvp = a->a_dvp; 6209 vp = a->a_vp; 6210 vn_seqc_write_begin(dvp); 6211 vn_seqc_write_begin(vp); 6212 } 6213 6214 void 6215 vop_rmdir_post(void *ap, int rc) 6216 { 6217 struct vop_rmdir_args *a; 6218 struct vnode *dvp, *vp; 6219 6220 a = ap; 6221 dvp = a->a_dvp; 6222 vp = a->a_vp; 6223 vn_seqc_write_end(dvp); 6224 vn_seqc_write_end(vp); 6225 if (!rc) { 6226 vp->v_vflag |= VV_UNLINKED; 6227 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6228 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6229 } 6230 } 6231 6232 void 6233 vop_setattr_pre(void *ap) 6234 { 6235 struct vop_setattr_args *a; 6236 
struct vnode *vp; 6237 6238 a = ap; 6239 vp = a->a_vp; 6240 vn_seqc_write_begin(vp); 6241 } 6242 6243 void 6244 vop_setattr_post(void *ap, int rc) 6245 { 6246 struct vop_setattr_args *a; 6247 struct vnode *vp; 6248 6249 a = ap; 6250 vp = a->a_vp; 6251 vn_seqc_write_end(vp); 6252 if (!rc) 6253 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6254 } 6255 6256 void 6257 vop_setacl_pre(void *ap) 6258 { 6259 struct vop_setacl_args *a; 6260 struct vnode *vp; 6261 6262 a = ap; 6263 vp = a->a_vp; 6264 vn_seqc_write_begin(vp); 6265 } 6266 6267 void 6268 vop_setacl_post(void *ap, int rc __unused) 6269 { 6270 struct vop_setacl_args *a; 6271 struct vnode *vp; 6272 6273 a = ap; 6274 vp = a->a_vp; 6275 vn_seqc_write_end(vp); 6276 } 6277 6278 void 6279 vop_setextattr_pre(void *ap) 6280 { 6281 struct vop_setextattr_args *a; 6282 struct vnode *vp; 6283 6284 a = ap; 6285 vp = a->a_vp; 6286 vn_seqc_write_begin(vp); 6287 } 6288 6289 void 6290 vop_setextattr_post(void *ap, int rc) 6291 { 6292 struct vop_setextattr_args *a; 6293 struct vnode *vp; 6294 6295 a = ap; 6296 vp = a->a_vp; 6297 vn_seqc_write_end(vp); 6298 if (!rc) 6299 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6300 } 6301 6302 void 6303 vop_symlink_pre(void *ap) 6304 { 6305 struct vop_symlink_args *a; 6306 struct vnode *dvp; 6307 6308 a = ap; 6309 dvp = a->a_dvp; 6310 vn_seqc_write_begin(dvp); 6311 } 6312 6313 void 6314 vop_symlink_post(void *ap, int rc) 6315 { 6316 struct vop_symlink_args *a; 6317 struct vnode *dvp; 6318 6319 a = ap; 6320 dvp = a->a_dvp; 6321 vn_seqc_write_end(dvp); 6322 if (!rc) 6323 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6324 } 6325 6326 void 6327 vop_open_post(void *ap, int rc) 6328 { 6329 struct vop_open_args *a = ap; 6330 6331 if (!rc) 6332 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6333 } 6334 6335 void 6336 vop_close_post(void *ap, int rc) 6337 { 6338 struct vop_close_args *a = ap; 6339 6340 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6341 !VN_IS_DOOMED(a->a_vp))) { 6342 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6343 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6344 } 6345 } 6346 6347 void 6348 vop_read_post(void *ap, int rc) 6349 { 6350 struct vop_read_args *a = ap; 6351 6352 if (!rc) 6353 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6354 } 6355 6356 void 6357 vop_read_pgcache_post(void *ap, int rc) 6358 { 6359 struct vop_read_pgcache_args *a = ap; 6360 6361 if (!rc) 6362 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6363 } 6364 6365 void 6366 vop_readdir_post(void *ap, int rc) 6367 { 6368 struct vop_readdir_args *a = ap; 6369 6370 if (!rc) 6371 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6372 } 6373 6374 static struct knlist fs_knlist; 6375 6376 static void 6377 vfs_event_init(void *arg) 6378 { 6379 knlist_init_mtx(&fs_knlist, NULL); 6380 } 6381 /* XXX - correct order? 
*/ 6382 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6383 6384 void 6385 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6386 { 6387 6388 KNOTE_UNLOCKED(&fs_knlist, event); 6389 } 6390 6391 static int filt_fsattach(struct knote *kn); 6392 static void filt_fsdetach(struct knote *kn); 6393 static int filt_fsevent(struct knote *kn, long hint); 6394 6395 struct filterops fs_filtops = { 6396 .f_isfd = 0, 6397 .f_attach = filt_fsattach, 6398 .f_detach = filt_fsdetach, 6399 .f_event = filt_fsevent 6400 }; 6401 6402 static int 6403 filt_fsattach(struct knote *kn) 6404 { 6405 6406 kn->kn_flags |= EV_CLEAR; 6407 knlist_add(&fs_knlist, kn, 0); 6408 return (0); 6409 } 6410 6411 static void 6412 filt_fsdetach(struct knote *kn) 6413 { 6414 6415 knlist_remove(&fs_knlist, kn, 0); 6416 } 6417 6418 static int 6419 filt_fsevent(struct knote *kn, long hint) 6420 { 6421 6422 kn->kn_fflags |= kn->kn_sfflags & hint; 6423 6424 return (kn->kn_fflags != 0); 6425 } 6426 6427 static int 6428 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6429 { 6430 struct vfsidctl vc; 6431 int error; 6432 struct mount *mp; 6433 6434 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6435 if (error) 6436 return (error); 6437 if (vc.vc_vers != VFS_CTL_VERS1) 6438 return (EINVAL); 6439 mp = vfs_getvfs(&vc.vc_fsid); 6440 if (mp == NULL) 6441 return (ENOENT); 6442 /* ensure that a specific sysctl goes to the right filesystem. */ 6443 if (strcmp(vc.vc_fstypename, "*") != 0 && 6444 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6445 vfs_rel(mp); 6446 return (EINVAL); 6447 } 6448 VCTLTOREQ(&vc, req); 6449 error = VFS_SYSCTL(mp, vc.vc_op, req); 6450 vfs_rel(mp); 6451 return (error); 6452 } 6453 6454 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6455 NULL, 0, sysctl_vfs_ctl, "", 6456 "Sysctl by fsid"); 6457 6458 /* 6459 * Function to initialize a va_filerev field sensibly. 6460 * XXX: Wouldn't a random number make a lot more sense ?? 
6461 */ 6462 u_quad_t 6463 init_va_filerev(void) 6464 { 6465 struct bintime bt; 6466 6467 getbinuptime(&bt); 6468 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6469 } 6470 6471 static int filt_vfsread(struct knote *kn, long hint); 6472 static int filt_vfswrite(struct knote *kn, long hint); 6473 static int filt_vfsvnode(struct knote *kn, long hint); 6474 static void filt_vfsdetach(struct knote *kn); 6475 static struct filterops vfsread_filtops = { 6476 .f_isfd = 1, 6477 .f_detach = filt_vfsdetach, 6478 .f_event = filt_vfsread 6479 }; 6480 static struct filterops vfswrite_filtops = { 6481 .f_isfd = 1, 6482 .f_detach = filt_vfsdetach, 6483 .f_event = filt_vfswrite 6484 }; 6485 static struct filterops vfsvnode_filtops = { 6486 .f_isfd = 1, 6487 .f_detach = filt_vfsdetach, 6488 .f_event = filt_vfsvnode 6489 }; 6490 6491 static void 6492 vfs_knllock(void *arg) 6493 { 6494 struct vnode *vp = arg; 6495 6496 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6497 } 6498 6499 static void 6500 vfs_knlunlock(void *arg) 6501 { 6502 struct vnode *vp = arg; 6503 6504 VOP_UNLOCK(vp); 6505 } 6506 6507 static void 6508 vfs_knl_assert_lock(void *arg, int what) 6509 { 6510 #ifdef DEBUG_VFS_LOCKS 6511 struct vnode *vp = arg; 6512 6513 if (what == LA_LOCKED) 6514 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6515 else 6516 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6517 #endif 6518 } 6519 6520 int 6521 vfs_kqfilter(struct vop_kqfilter_args *ap) 6522 { 6523 struct vnode *vp = ap->a_vp; 6524 struct knote *kn = ap->a_kn; 6525 struct knlist *knl; 6526 6527 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6528 kn->kn_filter != EVFILT_WRITE), 6529 ("READ/WRITE filter on a FIFO leaked through")); 6530 switch (kn->kn_filter) { 6531 case EVFILT_READ: 6532 kn->kn_fop = &vfsread_filtops; 6533 break; 6534 case EVFILT_WRITE: 6535 kn->kn_fop = &vfswrite_filtops; 6536 break; 6537 case EVFILT_VNODE: 6538 kn->kn_fop = &vfsvnode_filtops; 6539 break; 6540 default: 6541 return (EINVAL); 6542 } 6543 6544 kn->kn_hook = (caddr_t)vp; 6545 6546 v_addpollinfo(vp); 6547 if (vp->v_pollinfo == NULL) 6548 return (ENOMEM); 6549 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6550 vhold(vp); 6551 knlist_add(knl, kn, 0); 6552 6553 return (0); 6554 } 6555 6556 /* 6557 * Detach knote from vnode 6558 */ 6559 static void 6560 filt_vfsdetach(struct knote *kn) 6561 { 6562 struct vnode *vp = (struct vnode *)kn->kn_hook; 6563 6564 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6565 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6566 vdrop(vp); 6567 } 6568 6569 /*ARGSUSED*/ 6570 static int 6571 filt_vfsread(struct knote *kn, long hint) 6572 { 6573 struct vnode *vp = (struct vnode *)kn->kn_hook; 6574 off_t size; 6575 int res; 6576 6577 /* 6578 * filesystem is gone, so set the EOF flag and schedule 6579 * the knote for deletion. 
6580 */ 6581 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6582 VI_LOCK(vp); 6583 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6584 VI_UNLOCK(vp); 6585 return (1); 6586 } 6587 6588 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6589 return (0); 6590 6591 VI_LOCK(vp); 6592 kn->kn_data = size - kn->kn_fp->f_offset; 6593 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6594 VI_UNLOCK(vp); 6595 return (res); 6596 } 6597 6598 /*ARGSUSED*/ 6599 static int 6600 filt_vfswrite(struct knote *kn, long hint) 6601 { 6602 struct vnode *vp = (struct vnode *)kn->kn_hook; 6603 6604 VI_LOCK(vp); 6605 6606 /* 6607 * filesystem is gone, so set the EOF flag and schedule 6608 * the knote for deletion. 6609 */ 6610 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6611 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6612 6613 kn->kn_data = 0; 6614 VI_UNLOCK(vp); 6615 return (1); 6616 } 6617 6618 static int 6619 filt_vfsvnode(struct knote *kn, long hint) 6620 { 6621 struct vnode *vp = (struct vnode *)kn->kn_hook; 6622 int res; 6623 6624 VI_LOCK(vp); 6625 if (kn->kn_sfflags & hint) 6626 kn->kn_fflags |= hint; 6627 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6628 kn->kn_flags |= EV_EOF; 6629 VI_UNLOCK(vp); 6630 return (1); 6631 } 6632 res = (kn->kn_fflags != 0); 6633 VI_UNLOCK(vp); 6634 return (res); 6635 } 6636 6637 int 6638 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6639 { 6640 int error; 6641 6642 if (dp->d_reclen > ap->a_uio->uio_resid) 6643 return (ENAMETOOLONG); 6644 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6645 if (error) { 6646 if (ap->a_ncookies != NULL) { 6647 if (ap->a_cookies != NULL) 6648 free(ap->a_cookies, M_TEMP); 6649 ap->a_cookies = NULL; 6650 *ap->a_ncookies = 0; 6651 } 6652 return (error); 6653 } 6654 if (ap->a_ncookies == NULL) 6655 return (0); 6656 6657 KASSERT(ap->a_cookies, 6658 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6659 6660 *ap->a_cookies = realloc(*ap->a_cookies, 6661 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6662 (*ap->a_cookies)[*ap->a_ncookies] = off; 6663 *ap->a_ncookies += 1; 6664 return (0); 6665 } 6666 6667 /* 6668 * The purpose of this routine is to remove granularity from accmode_t, 6669 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6670 * VADMIN and VAPPEND. 6671 * 6672 * If it returns 0, the caller is supposed to continue with the usual 6673 * access checks using 'accmode' as modified by this routine. If it 6674 * returns nonzero value, the caller is supposed to return that value 6675 * as errno. 6676 * 6677 * Note that after this routine runs, accmode may be zero. 6678 */ 6679 int 6680 vfs_unixify_accmode(accmode_t *accmode) 6681 { 6682 /* 6683 * There is no way to specify explicit "deny" rule using 6684 * file mode or POSIX.1e ACLs. 6685 */ 6686 if (*accmode & VEXPLICIT_DENY) { 6687 *accmode = 0; 6688 return (0); 6689 } 6690 6691 /* 6692 * None of these can be translated into usual access bits. 6693 * Also, the common case for NFSv4 ACLs is to not contain 6694 * either of these bits. Caller should check for VWRITE 6695 * on the containing directory instead. 6696 */ 6697 if (*accmode & (VDELETE_CHILD | VDELETE)) 6698 return (EPERM); 6699 6700 if (*accmode & VADMIN_PERMS) { 6701 *accmode &= ~VADMIN_PERMS; 6702 *accmode |= VADMIN; 6703 } 6704 6705 /* 6706 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6707 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
6708 */ 6709 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6710 6711 return (0); 6712 } 6713 6714 /* 6715 * Clear out a doomed vnode (if any) and replace it with a new one as long 6716 * as the fs is not being unmounted. Return the root vnode to the caller. 6717 */ 6718 static int __noinline 6719 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6720 { 6721 struct vnode *vp; 6722 int error; 6723 6724 restart: 6725 if (mp->mnt_rootvnode != NULL) { 6726 MNT_ILOCK(mp); 6727 vp = mp->mnt_rootvnode; 6728 if (vp != NULL) { 6729 if (!VN_IS_DOOMED(vp)) { 6730 vrefact(vp); 6731 MNT_IUNLOCK(mp); 6732 error = vn_lock(vp, flags); 6733 if (error == 0) { 6734 *vpp = vp; 6735 return (0); 6736 } 6737 vrele(vp); 6738 goto restart; 6739 } 6740 /* 6741 * Clear the old one. 6742 */ 6743 mp->mnt_rootvnode = NULL; 6744 } 6745 MNT_IUNLOCK(mp); 6746 if (vp != NULL) { 6747 vfs_op_barrier_wait(mp); 6748 vrele(vp); 6749 } 6750 } 6751 error = VFS_CACHEDROOT(mp, flags, vpp); 6752 if (error != 0) 6753 return (error); 6754 if (mp->mnt_vfs_ops == 0) { 6755 MNT_ILOCK(mp); 6756 if (mp->mnt_vfs_ops != 0) { 6757 MNT_IUNLOCK(mp); 6758 return (0); 6759 } 6760 if (mp->mnt_rootvnode == NULL) { 6761 vrefact(*vpp); 6762 mp->mnt_rootvnode = *vpp; 6763 } else { 6764 if (mp->mnt_rootvnode != *vpp) { 6765 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6766 panic("%s: mismatch between vnode returned " 6767 " by VFS_CACHEDROOT and the one cached " 6768 " (%p != %p)", 6769 __func__, *vpp, mp->mnt_rootvnode); 6770 } 6771 } 6772 } 6773 MNT_IUNLOCK(mp); 6774 } 6775 return (0); 6776 } 6777 6778 int 6779 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6780 { 6781 struct mount_pcpu *mpcpu; 6782 struct vnode *vp; 6783 int error; 6784 6785 if (!vfs_op_thread_enter(mp, mpcpu)) 6786 return (vfs_cache_root_fallback(mp, flags, vpp)); 6787 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6788 if (vp == NULL || VN_IS_DOOMED(vp)) { 6789 vfs_op_thread_exit(mp, mpcpu); 6790 return (vfs_cache_root_fallback(mp, flags, vpp)); 6791 } 6792 vrefact(vp); 6793 vfs_op_thread_exit(mp, mpcpu); 6794 error = vn_lock(vp, flags); 6795 if (error != 0) { 6796 vrele(vp); 6797 return (vfs_cache_root_fallback(mp, flags, vpp)); 6798 } 6799 *vpp = vp; 6800 return (0); 6801 } 6802 6803 struct vnode * 6804 vfs_cache_root_clear(struct mount *mp) 6805 { 6806 struct vnode *vp; 6807 6808 /* 6809 * ops > 0 guarantees there is nobody who can see this vnode 6810 */ 6811 MPASS(mp->mnt_vfs_ops > 0); 6812 vp = mp->mnt_rootvnode; 6813 if (vp != NULL) 6814 vn_seqc_write_begin(vp); 6815 mp->mnt_rootvnode = NULL; 6816 return (vp); 6817 } 6818 6819 void 6820 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6821 { 6822 6823 MPASS(mp->mnt_vfs_ops > 0); 6824 vrefact(vp); 6825 mp->mnt_rootvnode = vp; 6826 } 6827 6828 /* 6829 * These are helper functions for filesystems to traverse all 6830 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6831 * 6832 * This interface replaces MNT_VNODE_FOREACH. 6833 */ 6834 6835 struct vnode * 6836 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6837 { 6838 struct vnode *vp; 6839 6840 maybe_yield(); 6841 MNT_ILOCK(mp); 6842 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6843 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6844 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6845 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
*/ 6846 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6847 continue; 6848 VI_LOCK(vp); 6849 if (VN_IS_DOOMED(vp)) { 6850 VI_UNLOCK(vp); 6851 continue; 6852 } 6853 break; 6854 } 6855 if (vp == NULL) { 6856 __mnt_vnode_markerfree_all(mvp, mp); 6857 /* MNT_IUNLOCK(mp); -- done in above function */ 6858 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6859 return (NULL); 6860 } 6861 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6862 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6863 MNT_IUNLOCK(mp); 6864 return (vp); 6865 } 6866 6867 struct vnode * 6868 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6869 { 6870 struct vnode *vp; 6871 6872 *mvp = vn_alloc_marker(mp); 6873 MNT_ILOCK(mp); 6874 MNT_REF(mp); 6875 6876 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6877 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6878 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6879 continue; 6880 VI_LOCK(vp); 6881 if (VN_IS_DOOMED(vp)) { 6882 VI_UNLOCK(vp); 6883 continue; 6884 } 6885 break; 6886 } 6887 if (vp == NULL) { 6888 MNT_REL(mp); 6889 MNT_IUNLOCK(mp); 6890 vn_free_marker(*mvp); 6891 *mvp = NULL; 6892 return (NULL); 6893 } 6894 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6895 MNT_IUNLOCK(mp); 6896 return (vp); 6897 } 6898 6899 void 6900 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6901 { 6902 6903 if (*mvp == NULL) { 6904 MNT_IUNLOCK(mp); 6905 return; 6906 } 6907 6908 mtx_assert(MNT_MTX(mp), MA_OWNED); 6909 6910 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6911 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6912 MNT_REL(mp); 6913 MNT_IUNLOCK(mp); 6914 vn_free_marker(*mvp); 6915 *mvp = NULL; 6916 } 6917 6918 /* 6919 * These are helper functions for filesystems to traverse their 6920 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6921 */ 6922 static void 6923 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6924 { 6925 6926 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6927 6928 MNT_ILOCK(mp); 6929 MNT_REL(mp); 6930 MNT_IUNLOCK(mp); 6931 vn_free_marker(*mvp); 6932 *mvp = NULL; 6933 } 6934 6935 /* 6936 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6937 * conventional lock order during mnt_vnode_next_lazy iteration. 6938 * 6939 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6940 * The list lock is dropped and reacquired. On success, both locks are held. 6941 * On failure, the mount vnode list lock is held but the vnode interlock is 6942 * not, and the procedure may have yielded. 6943 */ 6944 static bool 6945 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6946 struct vnode *vp) 6947 { 6948 6949 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6950 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6951 ("%s: bad marker", __func__)); 6952 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6953 ("%s: inappropriate vnode", __func__)); 6954 ASSERT_VI_UNLOCKED(vp, __func__); 6955 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6956 6957 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6958 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6959 6960 /* 6961 * Note we may be racing against vdrop which transitioned the hold 6962 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6963 * if we are the only user after we get the interlock we will just 6964 * vdrop. 
6965 */ 6966 vhold(vp); 6967 mtx_unlock(&mp->mnt_listmtx); 6968 VI_LOCK(vp); 6969 if (VN_IS_DOOMED(vp)) { 6970 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6971 goto out_lost; 6972 } 6973 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6974 /* 6975 * There is nothing to do if we are the last user. 6976 */ 6977 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6978 goto out_lost; 6979 mtx_lock(&mp->mnt_listmtx); 6980 return (true); 6981 out_lost: 6982 vdropl(vp); 6983 maybe_yield(); 6984 mtx_lock(&mp->mnt_listmtx); 6985 return (false); 6986 } 6987 6988 static struct vnode * 6989 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6990 void *cbarg) 6991 { 6992 struct vnode *vp; 6993 6994 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6995 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6996 restart: 6997 vp = TAILQ_NEXT(*mvp, v_lazylist); 6998 while (vp != NULL) { 6999 if (vp->v_type == VMARKER) { 7000 vp = TAILQ_NEXT(vp, v_lazylist); 7001 continue; 7002 } 7003 /* 7004 * See if we want to process the vnode. Note we may encounter a 7005 * long string of vnodes we don't care about and hog the list 7006 * as a result. Check for it and requeue the marker. 7007 */ 7008 VNPASS(!VN_IS_DOOMED(vp), vp); 7009 if (!cb(vp, cbarg)) { 7010 if (!should_yield()) { 7011 vp = TAILQ_NEXT(vp, v_lazylist); 7012 continue; 7013 } 7014 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 7015 v_lazylist); 7016 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 7017 v_lazylist); 7018 mtx_unlock(&mp->mnt_listmtx); 7019 kern_yield(PRI_USER); 7020 mtx_lock(&mp->mnt_listmtx); 7021 goto restart; 7022 } 7023 /* 7024 * Try-lock because this is the wrong lock order. 7025 */ 7026 if (!VI_TRYLOCK(vp) && 7027 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 7028 goto restart; 7029 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 7030 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 7031 ("alien vnode on the lazy list %p %p", vp, mp)); 7032 VNPASS(vp->v_mount == mp, vp); 7033 VNPASS(!VN_IS_DOOMED(vp), vp); 7034 break; 7035 } 7036 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7037 7038 /* Check if we are done */ 7039 if (vp == NULL) { 7040 mtx_unlock(&mp->mnt_listmtx); 7041 mnt_vnode_markerfree_lazy(mvp, mp); 7042 return (NULL); 7043 } 7044 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7045 mtx_unlock(&mp->mnt_listmtx); 7046 ASSERT_VI_LOCKED(vp, "lazy iter"); 7047 return (vp); 7048 } 7049 7050 struct vnode * 7051 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7052 void *cbarg) 7053 { 7054 7055 maybe_yield(); 7056 mtx_lock(&mp->mnt_listmtx); 7057 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7058 } 7059 7060 struct vnode * 7061 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7062 void *cbarg) 7063 { 7064 struct vnode *vp; 7065 7066 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7067 return (NULL); 7068 7069 *mvp = vn_alloc_marker(mp); 7070 MNT_ILOCK(mp); 7071 MNT_REF(mp); 7072 MNT_IUNLOCK(mp); 7073 7074 mtx_lock(&mp->mnt_listmtx); 7075 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7076 if (vp == NULL) { 7077 mtx_unlock(&mp->mnt_listmtx); 7078 mnt_vnode_markerfree_lazy(mvp, mp); 7079 return (NULL); 7080 } 7081 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7082 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7083 } 7084 7085 void 7086 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7087 { 7088 7089 if (*mvp == NULL) 7090 return; 7091 7092 mtx_lock(&mp->mnt_listmtx); 7093 
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as being in modify for as long as it remains accessible
 * via SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}
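/*
 * Editor's illustrative sketch, not part of the original file and compiled
 * out on purpose: how a lockless reader running under vfs_smr can pair the
 * sequence counter maintained by the vn_seqc_write_*() helpers above with
 * an SMR section to detect that a vnode changed while its fields were being
 * read.  The hypothetical example_mount_snapshot_smr() assumes the
 * vn_seqc_read_any() and vn_seqc_consistent() wrappers from sys/vnode.h;
 * the field being copied is only for illustration.
 */
#if 0
static bool
example_mount_snapshot_smr(struct vnode *vp, struct mount **mpp)
{
	seqc_t seqc;

	vfs_smr_enter();
	seqc = vn_seqc_read_any(vp);
	if (seqc_in_modify(seqc)) {
		/* A writer is between write_begin and write_end; bail. */
		vfs_smr_exit();
		return (false);
	}
	*mpp = vp->v_mount;
	if (!vn_seqc_consistent(vp, seqc)) {
		/* The vnode was modified mid-read; discard the snapshot. */
		vfs_smr_exit();
		return (false);
	}
	vfs_smr_exit();
	return (true);
}
#endif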
void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}

int
vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, __func__);
	error = VOP_GETATTR(vp, &vattr, cred);
	if (__predict_true(error == 0)) {
		if (vattr.va_size <= OFF_MAX)
			*size = vattr.va_size;
		else
			error = EFBIG;
	}
	return (error);
}

int
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
{
	int error;

	VOP_LOCK(vp, LK_SHARED);
	error = vn_getsize_locked(vp, size, cred);
	VOP_UNLOCK(vp);
	return (error);
}

#ifdef INVARIANTS
void
vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
{

	switch (vp->v_state) {
	case VSTATE_UNINITIALIZED:
		switch (state) {
		case VSTATE_CONSTRUCTED:
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_CONSTRUCTED:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DESTROYING:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DEAD:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DEAD:
		switch (state) {
		case VSTATE_UNINITIALIZED:
			return;
		default:
			break;
		}
		break;
	}

	vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
	panic("invalid state transition %d -> %d\n", vp->v_state, state);
}
#endif
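/*
 * Editor's illustrative sketch, not part of the original file and compiled
 * out on purpose: how a filesystem typically consumes the per-mount vnode
 * traversal helpers implemented earlier in this file through the
 * MNT_VNODE_FOREACH_ALL() and MNT_VNODE_FOREACH_ALL_ABORT() macros from
 * sys/mount.h.  The macro hands each vnode to the loop body with its
 * interlock held; the body must drop it, either directly or by passing
 * LK_INTERLOCK to vget().  The hypothetical example_sync_regular_files()
 * keeps error handling deliberately simple.
 */
#if 0
static int
example_sync_regular_files(struct mount *mp)
{
	struct vnode *vp, *mvp;
	off_t size;
	int error;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		/* vget() consumes the interlock, even on failure. */
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT);
		if (error != 0)
			continue;
		if (vn_getsize_locked(vp, &size, curthread->td_ucred) == 0 &&
		    size > 0)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		vput(vp);
		if (error != 0) {
			/* Stop early; the _ABORT variant frees the marker. */
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
	}
	return (0);
}
#endif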