1 /*- 2 * Copyright (c) 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 35 */ 36 37 /* 38 * External virtual filesystem routines 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 #include "opt_compat.h" 45 #include "opt_ddb.h" 46 #include "opt_watchdog.h" 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/bio.h> 51 #include <sys/buf.h> 52 #include <sys/condvar.h> 53 #include <sys/conf.h> 54 #include <sys/dirent.h> 55 #include <sys/event.h> 56 #include <sys/eventhandler.h> 57 #include <sys/extattr.h> 58 #include <sys/file.h> 59 #include <sys/fcntl.h> 60 #include <sys/jail.h> 61 #include <sys/kdb.h> 62 #include <sys/kernel.h> 63 #include <sys/kthread.h> 64 #include <sys/lockf.h> 65 #include <sys/malloc.h> 66 #include <sys/mount.h> 67 #include <sys/namei.h> 68 #include <sys/pctrie.h> 69 #include <sys/priv.h> 70 #include <sys/reboot.h> 71 #include <sys/rwlock.h> 72 #include <sys/sched.h> 73 #include <sys/sleepqueue.h> 74 #include <sys/smp.h> 75 #include <sys/stat.h> 76 #include <sys/sysctl.h> 77 #include <sys/syslog.h> 78 #include <sys/vmmeter.h> 79 #include <sys/vnode.h> 80 #include <sys/watchdog.h> 81 82 #include <machine/stdarg.h> 83 84 #include <security/mac/mac_framework.h> 85 86 #include <vm/vm.h> 87 #include <vm/vm_object.h> 88 #include <vm/vm_extern.h> 89 #include <vm/pmap.h> 90 #include <vm/vm_map.h> 91 #include <vm/vm_page.h> 92 #include <vm/vm_kern.h> 93 #include <vm/uma.h> 94 95 #ifdef DDB 96 #include <ddb/ddb.h> 97 #endif 98 99 static void delmntque(struct vnode *vp); 100 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 101 int slpflag, int slptimeo); 102 static void syncer_shutdown(void *arg, int howto); 103 static int vtryrecycle(struct vnode *vp); 104 static void v_incr_usecount(struct vnode *); 105 static void v_decr_usecount(struct vnode *); 106 static void v_decr_useonly(struct vnode *); 107 static void v_upgrade_usecount(struct vnode *); 108 static void vnlru_free(int); 109 static void vgonel(struct vnode *); 110 static void vfs_knllock(void *arg); 111 static void vfs_knlunlock(void *arg); 112 static void vfs_knl_assert_locked(void *arg); 113 static void vfs_knl_assert_unlocked(void *arg); 114 static void destroy_vpollinfo(struct vpollinfo *vi); 115 116 /* 117 * Number of vnodes in existence. Increased whenever getnewvnode() 118 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. 119 */ 120 static unsigned long numvnodes; 121 122 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 123 "Number of vnodes in existence"); 124 125 /* 126 * Conversion tables for conversion from vnode types to inode formats 127 * and back. 128 */ 129 enum vtype iftovt_tab[16] = { 130 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 131 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, 132 }; 133 int vttoif_tab[10] = { 134 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 135 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 136 }; 137 138 /* 139 * List of vnodes that are ready for recycling. 140 */ 141 static TAILQ_HEAD(freelst, vnode) vnode_free_list; 142 143 /* 144 * Free vnode target. Free vnodes may simply be files which have been stat'd 145 * but not read. This is somewhat common, and a small cache of such files 146 * should be kept to avoid recreation costs. 147 */ 148 static u_long wantfreevnodes; 149 SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, ""); 150 /* Number of vnodes in the free list. 
 */
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0,
    "Number of vnodes in the free list");

static int vlru_allow_cache_src;
SYSCTL_INT(_vfs, OID_AUTO, vlru_allow_cache_src, CTLFLAG_RW,
    &vlru_allow_cache_src, 0, "Allow vlru to reclaim source vnode");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
    "Number of calls to reassignbuf");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;
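/*
 * The three UMA zones above (buf_trie_zone, vnode_zone and
 * vnodepoll_zone) are created in vntblinit() below.
 */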
/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at
 * a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return uma_zalloc(buf_trie_zone, M_NOWAIT);
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);
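/*
 * buf_trie_alloc() uses M_NOWAIT and may therefore return NULL under
 * memory pressure; buf_vlist_add() below relies on the nodes
 * preallocated for buf_trie_zone in vntblinit() so that insertions
 * into the trie cannot fail.
 */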
/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of physical pages to vnodes approaches sixteen to one.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16))
#endif
static void
vntblinit(void *dummy __unused)
{
	u_int i;
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to physical pages
	 * is one to four until desiredvnodes exceeds 98,304.  Thereafter, the
	 * marginal ratio of desiredvnodes to physical pages is one to
	 * sixteen.  However, desiredvnodes is limited by the kernel's heap
	 * size.  The memory required by desiredvnodes vnodes and vm objects
	 * may not exceed one seventh of the kernel's heap size.
	 */
	physvnodes = maxproc + vm_cnt.v_page_count / 16 + 3 * min(98304 * 4,
	    vm_cnt.v_page_count) / 16;
	virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) +
	    sizeof(struct vnode)));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
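/*
 * Worked example (sizes purely illustrative): if sizeof(struct vnode)
 * were 480 bytes, the loop above would leave vnsz2log at 8, i.e.
 * floor(log2(480)), and getnewvnode() below derives v_hash by shifting
 * the vnode address right by that amount.
 */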
384 * 385 * When traversing across mounts, the system follows that lock order: 386 * 387 * C->A->B 388 * | 389 * +->F->D->E 390 * 391 * The lookup() process for namei("/var") illustrates the process: 392 * VOP_LOOKUP() obtains B while A is held 393 * vfs_busy() obtains a shared lock on F while A and B are held 394 * vput() releases lock on B 395 * vput() releases lock on A 396 * VFS_ROOT() obtains lock on D while shared lock on F is held 397 * vfs_unbusy() releases shared lock on F 398 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 399 * Attempt to lock A (instead of vp_crossmp) while D is held would 400 * violate the global order, causing deadlocks. 401 * 402 * dounmount() locks B while F is drained. 403 */ 404 int 405 vfs_busy(struct mount *mp, int flags) 406 { 407 408 MPASS((flags & ~MBF_MASK) == 0); 409 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 410 411 MNT_ILOCK(mp); 412 MNT_REF(mp); 413 /* 414 * If mount point is currenly being unmounted, sleep until the 415 * mount point fate is decided. If thread doing the unmounting fails, 416 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 417 * that this mount point has survived the unmount attempt and vfs_busy 418 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 419 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 420 * about to be really destroyed. vfs_busy needs to release its 421 * reference on the mount point in this case and return with ENOENT, 422 * telling the caller that mount mount it tried to busy is no longer 423 * valid. 424 */ 425 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 426 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 427 MNT_REL(mp); 428 MNT_IUNLOCK(mp); 429 CTR1(KTR_VFS, "%s: failed busying before sleeping", 430 __func__); 431 return (ENOENT); 432 } 433 if (flags & MBF_MNTLSTLOCK) 434 mtx_unlock(&mountlist_mtx); 435 mp->mnt_kern_flag |= MNTK_MWAIT; 436 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 437 if (flags & MBF_MNTLSTLOCK) 438 mtx_lock(&mountlist_mtx); 439 MNT_ILOCK(mp); 440 } 441 if (flags & MBF_MNTLSTLOCK) 442 mtx_unlock(&mountlist_mtx); 443 mp->mnt_lockref++; 444 MNT_IUNLOCK(mp); 445 return (0); 446 } 447 448 /* 449 * Free a busy filesystem. 450 */ 451 void 452 vfs_unbusy(struct mount *mp) 453 { 454 455 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 456 MNT_ILOCK(mp); 457 MNT_REL(mp); 458 KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref")); 459 mp->mnt_lockref--; 460 if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 461 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 462 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 463 mp->mnt_kern_flag &= ~MNTK_DRAINING; 464 wakeup(&mp->mnt_lockref); 465 } 466 MNT_IUNLOCK(mp); 467 } 468 469 /* 470 * Lookup a mount point by filesystem identifier. 471 */ 472 struct mount * 473 vfs_getvfs(fsid_t *fsid) 474 { 475 struct mount *mp; 476 477 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 478 mtx_lock(&mountlist_mtx); 479 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 480 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 481 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 482 vfs_ref(mp); 483 mtx_unlock(&mountlist_mtx); 484 return (mp); 485 } 486 } 487 mtx_unlock(&mountlist_mtx); 488 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 489 return ((struct mount *) 0); 490 } 491 492 /* 493 * Lookup a mount point by filesystem identifier, busying it before 494 * returning. 
495 * 496 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 497 * cache for popular filesystem identifiers. The cache is lockess, using 498 * the fact that struct mount's are never freed. In worst case we may 499 * get pointer to unmounted or even different filesystem, so we have to 500 * check what we got, and go slow way if so. 501 */ 502 struct mount * 503 vfs_busyfs(fsid_t *fsid) 504 { 505 #define FSID_CACHE_SIZE 256 506 typedef struct mount * volatile vmp_t; 507 static vmp_t cache[FSID_CACHE_SIZE]; 508 struct mount *mp; 509 int error; 510 uint32_t hash; 511 512 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 513 hash = fsid->val[0] ^ fsid->val[1]; 514 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 515 mp = cache[hash]; 516 if (mp == NULL || 517 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 518 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 519 goto slow; 520 if (vfs_busy(mp, 0) != 0) { 521 cache[hash] = NULL; 522 goto slow; 523 } 524 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 525 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 526 return (mp); 527 else 528 vfs_unbusy(mp); 529 530 slow: 531 mtx_lock(&mountlist_mtx); 532 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 533 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 534 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 535 error = vfs_busy(mp, MBF_MNTLSTLOCK); 536 if (error) { 537 cache[hash] = NULL; 538 mtx_unlock(&mountlist_mtx); 539 return (NULL); 540 } 541 cache[hash] = mp; 542 return (mp); 543 } 544 } 545 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 546 mtx_unlock(&mountlist_mtx); 547 return ((struct mount *) 0); 548 } 549 550 /* 551 * Check if a user can access privileged mount options. 552 */ 553 int 554 vfs_suser(struct mount *mp, struct thread *td) 555 { 556 int error; 557 558 /* 559 * If the thread is jailed, but this is not a jail-friendly file 560 * system, deny immediately. 561 */ 562 if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred)) 563 return (EPERM); 564 565 /* 566 * If the file system was mounted outside the jail of the calling 567 * thread, deny immediately. 568 */ 569 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 570 return (EPERM); 571 572 /* 573 * If file system supports delegated administration, we don't check 574 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 575 * by the file system itself. 576 * If this is not the user that did original mount, we check for 577 * the PRIV_VFS_MOUNT_OWNER privilege. 578 */ 579 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 580 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 581 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 582 return (error); 583 } 584 return (0); 585 } 586 587 /* 588 * Get a new unique fsid. Try to make its val[0] unique, since this value 589 * will be used to create fake device numbers for stat(). Also try (but 590 * not so hard) make its val[0] unique mod 2^16, since some emulators only 591 * support 16-bit device numbers. We end up with unique val[0]'s for the 592 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 593 * 594 * Keep in mind that several mounts may be running in parallel. Starting 595 * the search one past where the previous search terminated is both a 596 * micro-optimization and a defense against returning the same fsid to 597 * different mounts. 
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
713 */ 714 static int 715 vlrureclaim(struct mount *mp) 716 { 717 struct vnode *vp; 718 int done; 719 int trigger; 720 int usevnodes; 721 int count; 722 723 /* 724 * Calculate the trigger point, don't allow user 725 * screwups to blow us up. This prevents us from 726 * recycling vnodes with lots of resident pages. We 727 * aren't trying to free memory, we are trying to 728 * free vnodes. 729 */ 730 usevnodes = desiredvnodes; 731 if (usevnodes <= 0) 732 usevnodes = 1; 733 trigger = vm_cnt.v_page_count * 2 / usevnodes; 734 done = 0; 735 vn_start_write(NULL, &mp, V_WAIT); 736 MNT_ILOCK(mp); 737 count = mp->mnt_nvnodelistsize / 10 + 1; 738 while (count != 0) { 739 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 740 while (vp != NULL && vp->v_type == VMARKER) 741 vp = TAILQ_NEXT(vp, v_nmntvnodes); 742 if (vp == NULL) 743 break; 744 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 745 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 746 --count; 747 if (!VI_TRYLOCK(vp)) 748 goto next_iter; 749 /* 750 * If it's been deconstructed already, it's still 751 * referenced, or it exceeds the trigger, skip it. 752 */ 753 if (vp->v_usecount || 754 (!vlru_allow_cache_src && 755 !LIST_EMPTY(&(vp)->v_cache_src)) || 756 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && 757 vp->v_object->resident_page_count > trigger)) { 758 VI_UNLOCK(vp); 759 goto next_iter; 760 } 761 MNT_IUNLOCK(mp); 762 vholdl(vp); 763 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { 764 vdrop(vp); 765 goto next_iter_mntunlocked; 766 } 767 VI_LOCK(vp); 768 /* 769 * v_usecount may have been bumped after VOP_LOCK() dropped 770 * the vnode interlock and before it was locked again. 771 * 772 * It is not necessary to recheck VI_DOOMED because it can 773 * only be set by another thread that holds both the vnode 774 * lock and vnode interlock. If another thread has the 775 * vnode lock before we get to VOP_LOCK() and obtains the 776 * vnode interlock after VOP_LOCK() drops the vnode 777 * interlock, the other thread will be unable to drop the 778 * vnode lock before our VOP_LOCK() call fails. 779 */ 780 if (vp->v_usecount || 781 (!vlru_allow_cache_src && 782 !LIST_EMPTY(&(vp)->v_cache_src)) || 783 (vp->v_object != NULL && 784 vp->v_object->resident_page_count > trigger)) { 785 VOP_UNLOCK(vp, LK_INTERLOCK); 786 vdrop(vp); 787 goto next_iter_mntunlocked; 788 } 789 KASSERT((vp->v_iflag & VI_DOOMED) == 0, 790 ("VI_DOOMED unexpectedly detected in vlrureclaim()")); 791 vgonel(vp); 792 VOP_UNLOCK(vp, 0); 793 vdropl(vp); 794 done++; 795 next_iter_mntunlocked: 796 if (!should_yield()) 797 goto relock_mnt; 798 goto yield; 799 next_iter: 800 if (!should_yield()) 801 continue; 802 MNT_IUNLOCK(mp); 803 yield: 804 kern_yield(PRI_USER); 805 relock_mnt: 806 MNT_ILOCK(mp); 807 } 808 MNT_IUNLOCK(mp); 809 vn_finished_write(mp); 810 return done; 811 } 812 813 /* 814 * Attempt to keep the free list at wantfreevnodes length. 815 */ 816 static void 817 vnlru_free(int count) 818 { 819 struct vnode *vp; 820 821 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 822 for (; count > 0; count--) { 823 vp = TAILQ_FIRST(&vnode_free_list); 824 /* 825 * The list can be modified while the free_list_mtx 826 * has been dropped and vp could be NULL here. 
		 */
		if (!vp)
			break;
		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		KASSERT((vp->v_iflag & VI_FREE) != 0,
		    ("Removing vnode not on freelist"));
		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
		    ("Mangling active vnode"));
		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
		/*
		 * Don't recycle if we can't get the interlock.
		 */
		if (!VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
			continue;
		}
		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
		    vp, ("vp inconsistent on freelist"));

		/*
		 * The clear of VI_FREE prevents activation of the
		 * vnode.  There is no sense in putting the vnode on
		 * the mount point active list, only to remove it
		 * later during recycling.  Inline the relevant part
		 * of vholdl(), to avoid triggering assertions or
		 * activating.
		 */
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		vp->v_holdcnt++;

		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		/*
		 * If the recycling succeeded, this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done;
	struct proc *p = vnlruproc;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (freevnodes > wantfreevnodes)
			vnlru_free(freevnodes - wantfreevnodes);
		if (numvnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);
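/*
 * Tuning note (values illustrative): the vnlru thread above sleeps while
 * numvnodes stays at or below 9/10 of desiredvnodes, so raising
 * kern.maxvnodes, e.g. "sysctl kern.maxvnodes=500000", also raises the
 * point at which this reclamation kicks in.
 */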
/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0)
		vgonel(vp);
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Wait for available vnodes.
 */
static int
getnewvnode_wait(int suspended)
{

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (numvnodes > desiredvnodes) {
		if (suspended) {
			/*
			 * The file system is being suspended; we cannot
			 * risk a deadlock here, so allocate a new vnode
			 * anyway.
			 */
			if (freevnodes > wantfreevnodes)
				vnlru_free(freevnodes - wantfreevnodes);
			return (0);
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
	}
	return (numvnodes > desiredvnodes ? ENFILE : 0);
}

void
getnewvnode_reserve(u_int count)
{
	struct thread *td;

	td = curthread;
	/* First try to be quick and racy. */
	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
		td->td_vp_reserv += count;
		return;
	} else
		atomic_subtract_long(&numvnodes, count);

	mtx_lock(&vnode_free_list_mtx);
	while (count > 0) {
		if (getnewvnode_wait(0) == 0) {
			count--;
			td->td_vp_reserv++;
			atomic_add_long(&numvnodes, 1);
		}
	}
	mtx_unlock(&vnode_free_list_mtx);
}

void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct thread *td;
	int error;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	/*
	 * Lend our context to reclaim vnodes if they've exceeded the max.
1082 */ 1083 if (freevnodes > wantfreevnodes) 1084 vnlru_free(1); 1085 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & 1086 MNTK_SUSPEND)); 1087 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ 1088 if (error != 0) { 1089 mtx_unlock(&vnode_free_list_mtx); 1090 return (error); 1091 } 1092 #endif 1093 atomic_add_long(&numvnodes, 1); 1094 mtx_unlock(&vnode_free_list_mtx); 1095 alloc: 1096 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO); 1097 /* 1098 * Setup locks. 1099 */ 1100 vp->v_vnlock = &vp->v_lock; 1101 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 1102 /* 1103 * By default, don't allow shared locks unless filesystems 1104 * opt-in. 1105 */ 1106 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE | LK_IS_VNODE); 1107 /* 1108 * Initialize bufobj. 1109 */ 1110 bo = &vp->v_bufobj; 1111 bo->__bo_vnode = vp; 1112 rw_init(BO_LOCKPTR(bo), "bufobj interlock"); 1113 bo->bo_ops = &buf_ops_bio; 1114 bo->bo_private = vp; 1115 TAILQ_INIT(&bo->bo_clean.bv_hd); 1116 TAILQ_INIT(&bo->bo_dirty.bv_hd); 1117 /* 1118 * Initialize namecache. 1119 */ 1120 LIST_INIT(&vp->v_cache_src); 1121 TAILQ_INIT(&vp->v_cache_dst); 1122 /* 1123 * Finalize various vnode identity bits. 1124 */ 1125 vp->v_type = VNON; 1126 vp->v_tag = tag; 1127 vp->v_op = vops; 1128 v_incr_usecount(vp); 1129 vp->v_data = NULL; 1130 #ifdef MAC 1131 mac_vnode_init(vp); 1132 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1133 mac_vnode_associate_singlelabel(mp, vp); 1134 else if (mp == NULL && vops != &dead_vnodeops) 1135 printf("NULL mp in getnewvnode()\n"); 1136 #endif 1137 if (mp != NULL) { 1138 bo->bo_bsize = mp->mnt_stat.f_iosize; 1139 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1140 vp->v_vflag |= VV_NOKNOTE; 1141 } 1142 rangelock_init(&vp->v_rl); 1143 1144 /* 1145 * For the filesystems which do not use vfs_hash_insert(), 1146 * still initialize v_hash to have vfs_hash_index() useful. 1147 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1148 * its own hashing. 1149 */ 1150 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1151 1152 *vpp = vp; 1153 return (0); 1154 } 1155 1156 /* 1157 * Delete from old mount point vnode list, if on one. 1158 */ 1159 static void 1160 delmntque(struct vnode *vp) 1161 { 1162 struct mount *mp; 1163 int active; 1164 1165 mp = vp->v_mount; 1166 if (mp == NULL) 1167 return; 1168 MNT_ILOCK(mp); 1169 VI_LOCK(vp); 1170 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1171 ("Active vnode list size %d > Vnode list size %d", 1172 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1173 active = vp->v_iflag & VI_ACTIVE; 1174 vp->v_iflag &= ~VI_ACTIVE; 1175 if (active) { 1176 mtx_lock(&vnode_free_list_mtx); 1177 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1178 mp->mnt_activevnodelistsize--; 1179 mtx_unlock(&vnode_free_list_mtx); 1180 } 1181 vp->v_mount = NULL; 1182 VI_UNLOCK(vp); 1183 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1184 ("bad mount point vnode list size")); 1185 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1186 mp->mnt_nvnodelistsize--; 1187 MNT_REL(mp); 1188 MNT_IUNLOCK(mp); 1189 } 1190 1191 static void 1192 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1193 { 1194 1195 vp->v_data = NULL; 1196 vp->v_op = &dead_vnodeops; 1197 vgone(vp); 1198 vput(vp); 1199 } 1200 1201 /* 1202 * Insert into list of vnodes for the new mount point, if available. 
1203 */ 1204 int 1205 insmntque1(struct vnode *vp, struct mount *mp, 1206 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1207 { 1208 1209 KASSERT(vp->v_mount == NULL, 1210 ("insmntque: vnode already on per mount vnode list")); 1211 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1212 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1213 1214 /* 1215 * We acquire the vnode interlock early to ensure that the 1216 * vnode cannot be recycled by another process releasing a 1217 * holdcnt on it before we get it on both the vnode list 1218 * and the active vnode list. The mount mutex protects only 1219 * manipulation of the vnode list and the vnode freelist 1220 * mutex protects only manipulation of the active vnode list. 1221 * Hence the need to hold the vnode interlock throughout. 1222 */ 1223 MNT_ILOCK(mp); 1224 VI_LOCK(vp); 1225 if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 && 1226 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1227 mp->mnt_nvnodelistsize == 0)) && 1228 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1229 VI_UNLOCK(vp); 1230 MNT_IUNLOCK(mp); 1231 if (dtr != NULL) 1232 dtr(vp, dtr_arg); 1233 return (EBUSY); 1234 } 1235 vp->v_mount = mp; 1236 MNT_REF(mp); 1237 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1238 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1239 ("neg mount point vnode list size")); 1240 mp->mnt_nvnodelistsize++; 1241 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1242 ("Activating already active vnode")); 1243 vp->v_iflag |= VI_ACTIVE; 1244 mtx_lock(&vnode_free_list_mtx); 1245 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1246 mp->mnt_activevnodelistsize++; 1247 mtx_unlock(&vnode_free_list_mtx); 1248 VI_UNLOCK(vp); 1249 MNT_IUNLOCK(mp); 1250 return (0); 1251 } 1252 1253 int 1254 insmntque(struct vnode *vp, struct mount *mp) 1255 { 1256 1257 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1258 } 1259 1260 /* 1261 * Flush out and invalidate all buffers associated with a bufobj 1262 * Called with the underlying object locked. 1263 */ 1264 int 1265 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1266 { 1267 int error; 1268 1269 BO_LOCK(bo); 1270 if (flags & V_SAVE) { 1271 error = bufobj_wwait(bo, slpflag, slptimeo); 1272 if (error) { 1273 BO_UNLOCK(bo); 1274 return (error); 1275 } 1276 if (bo->bo_dirty.bv_cnt > 0) { 1277 BO_UNLOCK(bo); 1278 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1279 return (error); 1280 /* 1281 * XXX We could save a lock/unlock if this was only 1282 * enabled under INVARIANTS 1283 */ 1284 BO_LOCK(bo); 1285 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1286 panic("vinvalbuf: dirty bufs"); 1287 } 1288 } 1289 /* 1290 * If you alter this loop please notice that interlock is dropped and 1291 * reacquired in flushbuflist. Special care is needed to ensure that 1292 * no race conditions occur from this. 1293 */ 1294 do { 1295 error = flushbuflist(&bo->bo_clean, 1296 flags, bo, slpflag, slptimeo); 1297 if (error == 0 && !(flags & V_CLEANONLY)) 1298 error = flushbuflist(&bo->bo_dirty, 1299 flags, bo, slpflag, slptimeo); 1300 if (error != 0 && error != EAGAIN) { 1301 BO_UNLOCK(bo); 1302 return (error); 1303 } 1304 } while (error != 0); 1305 1306 /* 1307 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1308 * have write I/O in-progress but if there is a VM object then the 1309 * VM object can also have read-I/O in-progress. 
1310 */ 1311 do { 1312 bufobj_wwait(bo, 0, 0); 1313 BO_UNLOCK(bo); 1314 if (bo->bo_object != NULL) { 1315 VM_OBJECT_WLOCK(bo->bo_object); 1316 vm_object_pip_wait(bo->bo_object, "bovlbx"); 1317 VM_OBJECT_WUNLOCK(bo->bo_object); 1318 } 1319 BO_LOCK(bo); 1320 } while (bo->bo_numoutput > 0); 1321 BO_UNLOCK(bo); 1322 1323 /* 1324 * Destroy the copy in the VM cache, too. 1325 */ 1326 if (bo->bo_object != NULL && 1327 (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) { 1328 VM_OBJECT_WLOCK(bo->bo_object); 1329 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1330 OBJPR_CLEANONLY : 0); 1331 VM_OBJECT_WUNLOCK(bo->bo_object); 1332 } 1333 1334 #ifdef INVARIANTS 1335 BO_LOCK(bo); 1336 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 && 1337 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) 1338 panic("vinvalbuf: flush failed"); 1339 BO_UNLOCK(bo); 1340 #endif 1341 return (0); 1342 } 1343 1344 /* 1345 * Flush out and invalidate all buffers associated with a vnode. 1346 * Called with the underlying object locked. 1347 */ 1348 int 1349 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1350 { 1351 1352 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1353 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1354 if (vp->v_object != NULL && vp->v_object->handle != vp) 1355 return (0); 1356 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1357 } 1358 1359 /* 1360 * Flush out buffers on the specified list. 1361 * 1362 */ 1363 static int 1364 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1365 int slptimeo) 1366 { 1367 struct buf *bp, *nbp; 1368 int retval, error; 1369 daddr_t lblkno; 1370 b_xflags_t xflags; 1371 1372 ASSERT_BO_WLOCKED(bo); 1373 1374 retval = 0; 1375 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1376 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) || 1377 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) { 1378 continue; 1379 } 1380 lblkno = 0; 1381 xflags = 0; 1382 if (nbp != NULL) { 1383 lblkno = nbp->b_lblkno; 1384 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 1385 } 1386 retval = EAGAIN; 1387 error = BUF_TIMELOCK(bp, 1388 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 1389 "flushbuf", slpflag, slptimeo); 1390 if (error) { 1391 BO_LOCK(bo); 1392 return (error != ENOLCK ? error : EAGAIN); 1393 } 1394 KASSERT(bp->b_bufobj == bo, 1395 ("bp %p wrong b_bufobj %p should be %p", 1396 bp, bp->b_bufobj, bo)); 1397 if (bp->b_bufobj != bo) { /* XXX: necessary ? */ 1398 BUF_UNLOCK(bp); 1399 BO_LOCK(bo); 1400 return (EAGAIN); 1401 } 1402 /* 1403 * XXX Since there are no node locks for NFS, I 1404 * believe there is a slight chance that a delayed 1405 * write will occur while sleeping just above, so 1406 * check for it. 1407 */ 1408 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 1409 (flags & V_SAVE)) { 1410 bremfree(bp); 1411 bp->b_flags |= B_ASYNC; 1412 bwrite(bp); 1413 BO_LOCK(bo); 1414 return (EAGAIN); /* XXX: why not loop ? */ 1415 } 1416 bremfree(bp); 1417 bp->b_flags |= (B_INVAL | B_RELBUF); 1418 bp->b_flags &= ~B_ASYNC; 1419 brelse(bp); 1420 BO_LOCK(bo); 1421 if (nbp != NULL && 1422 (nbp->b_bufobj != bo || 1423 nbp->b_lblkno != lblkno || 1424 (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags)) 1425 break; /* nbp invalid */ 1426 } 1427 return (retval); 1428 } 1429 1430 /* 1431 * Truncate a file's buffer and pages to a specified length. This 1432 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 1433 * sync activity. 
1434 */ 1435 int 1436 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize) 1437 { 1438 struct buf *bp, *nbp; 1439 int anyfreed; 1440 int trunclbn; 1441 struct bufobj *bo; 1442 1443 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__, 1444 vp, cred, blksize, (uintmax_t)length); 1445 1446 /* 1447 * Round up to the *next* lbn. 1448 */ 1449 trunclbn = (length + blksize - 1) / blksize; 1450 1451 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 1452 restart: 1453 bo = &vp->v_bufobj; 1454 BO_LOCK(bo); 1455 anyfreed = 1; 1456 for (;anyfreed;) { 1457 anyfreed = 0; 1458 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 1459 if (bp->b_lblkno < trunclbn) 1460 continue; 1461 if (BUF_LOCK(bp, 1462 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1463 BO_LOCKPTR(bo)) == ENOLCK) 1464 goto restart; 1465 1466 bremfree(bp); 1467 bp->b_flags |= (B_INVAL | B_RELBUF); 1468 bp->b_flags &= ~B_ASYNC; 1469 brelse(bp); 1470 anyfreed = 1; 1471 1472 BO_LOCK(bo); 1473 if (nbp != NULL && 1474 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 1475 (nbp->b_vp != vp) || 1476 (nbp->b_flags & B_DELWRI))) { 1477 BO_UNLOCK(bo); 1478 goto restart; 1479 } 1480 } 1481 1482 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1483 if (bp->b_lblkno < trunclbn) 1484 continue; 1485 if (BUF_LOCK(bp, 1486 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1487 BO_LOCKPTR(bo)) == ENOLCK) 1488 goto restart; 1489 bremfree(bp); 1490 bp->b_flags |= (B_INVAL | B_RELBUF); 1491 bp->b_flags &= ~B_ASYNC; 1492 brelse(bp); 1493 anyfreed = 1; 1494 1495 BO_LOCK(bo); 1496 if (nbp != NULL && 1497 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 1498 (nbp->b_vp != vp) || 1499 (nbp->b_flags & B_DELWRI) == 0)) { 1500 BO_UNLOCK(bo); 1501 goto restart; 1502 } 1503 } 1504 } 1505 1506 if (length > 0) { 1507 restartsync: 1508 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 1509 if (bp->b_lblkno > 0) 1510 continue; 1511 /* 1512 * Since we hold the vnode lock this should only 1513 * fail if we're racing with the buf daemon. 1514 */ 1515 if (BUF_LOCK(bp, 1516 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 1517 BO_LOCKPTR(bo)) == ENOLCK) { 1518 goto restart; 1519 } 1520 VNASSERT((bp->b_flags & B_DELWRI), vp, 1521 ("buf(%p) on dirty queue without DELWRI", bp)); 1522 1523 bremfree(bp); 1524 bawrite(bp); 1525 BO_LOCK(bo); 1526 goto restartsync; 1527 } 1528 } 1529 1530 bufobj_wwait(bo, 0, 0); 1531 BO_UNLOCK(bo); 1532 vnode_pager_setsize(vp, length); 1533 1534 return (0); 1535 } 1536 1537 static void 1538 buf_vlist_remove(struct buf *bp) 1539 { 1540 struct bufv *bv; 1541 1542 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 1543 ASSERT_BO_WLOCKED(bp->b_bufobj); 1544 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 1545 (BX_VNDIRTY|BX_VNCLEAN), 1546 ("buf_vlist_remove: Buf %p is on two lists", bp)); 1547 if (bp->b_xflags & BX_VNDIRTY) 1548 bv = &bp->b_bufobj->bo_dirty; 1549 else 1550 bv = &bp->b_bufobj->bo_clean; 1551 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 1552 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 1553 bv->bv_cnt--; 1554 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 1555 } 1556 1557 /* 1558 * Add the buffer to the sorted clean or dirty block list. 1559 * 1560 * NOTE: xflags is passed as a constant, optimizing this inline function! 
1561 */ 1562 static void 1563 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 1564 { 1565 struct bufv *bv; 1566 struct buf *n; 1567 int error; 1568 1569 ASSERT_BO_WLOCKED(bo); 1570 KASSERT((bo->bo_flag & BO_DEAD) == 0, ("dead bo %p", bo)); 1571 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 1572 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 1573 bp->b_xflags |= xflags; 1574 if (xflags & BX_VNDIRTY) 1575 bv = &bo->bo_dirty; 1576 else 1577 bv = &bo->bo_clean; 1578 1579 /* 1580 * Keep the list ordered. Optimize empty list insertion. Assume 1581 * we tend to grow at the tail so lookup_le should usually be cheaper 1582 * than _ge. 1583 */ 1584 if (bv->bv_cnt == 0 || 1585 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 1586 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 1587 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 1588 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 1589 else 1590 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 1591 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 1592 if (error) 1593 panic("buf_vlist_add: Preallocated nodes insufficient."); 1594 bv->bv_cnt++; 1595 } 1596 1597 /* 1598 * Lookup a buffer using the splay tree. Note that we specifically avoid 1599 * shadow buffers used in background bitmap writes. 1600 * 1601 * This code isn't quite efficient as it could be because we are maintaining 1602 * two sorted lists and do not know which list the block resides in. 1603 * 1604 * During a "make buildworld" the desired buffer is found at one of 1605 * the roots more than 60% of the time. Thus, checking both roots 1606 * before performing either splay eliminates unnecessary splays on the 1607 * first tree splayed. 1608 */ 1609 struct buf * 1610 gbincore(struct bufobj *bo, daddr_t lblkno) 1611 { 1612 struct buf *bp; 1613 1614 ASSERT_BO_LOCKED(bo); 1615 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 1616 if (bp != NULL) 1617 return (bp); 1618 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 1619 } 1620 1621 /* 1622 * Associate a buffer with a vnode. 1623 */ 1624 void 1625 bgetvp(struct vnode *vp, struct buf *bp) 1626 { 1627 struct bufobj *bo; 1628 1629 bo = &vp->v_bufobj; 1630 ASSERT_BO_WLOCKED(bo); 1631 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 1632 1633 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 1634 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 1635 ("bgetvp: bp already attached! %p", bp)); 1636 1637 vhold(vp); 1638 bp->b_vp = vp; 1639 bp->b_bufobj = bo; 1640 /* 1641 * Insert onto list for new vnode. 1642 */ 1643 buf_vlist_add(bp, bo, BX_VNCLEAN); 1644 } 1645 1646 /* 1647 * Disassociate a buffer from a vnode. 1648 */ 1649 void 1650 brelvp(struct buf *bp) 1651 { 1652 struct bufobj *bo; 1653 struct vnode *vp; 1654 1655 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 1656 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 1657 1658 /* 1659 * Delete from old vnode list, if on one. 
1660 */ 1661 vp = bp->b_vp; /* XXX */ 1662 bo = bp->b_bufobj; 1663 BO_LOCK(bo); 1664 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1665 buf_vlist_remove(bp); 1666 else 1667 panic("brelvp: Buffer %p not on queue.", bp); 1668 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 1669 bo->bo_flag &= ~BO_ONWORKLST; 1670 mtx_lock(&sync_mtx); 1671 LIST_REMOVE(bo, bo_synclist); 1672 syncer_worklist_len--; 1673 mtx_unlock(&sync_mtx); 1674 } 1675 bp->b_vp = NULL; 1676 bp->b_bufobj = NULL; 1677 BO_UNLOCK(bo); 1678 vdrop(vp); 1679 } 1680 1681 /* 1682 * Add an item to the syncer work queue. 1683 */ 1684 static void 1685 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 1686 { 1687 int slot; 1688 1689 ASSERT_BO_WLOCKED(bo); 1690 1691 mtx_lock(&sync_mtx); 1692 if (bo->bo_flag & BO_ONWORKLST) 1693 LIST_REMOVE(bo, bo_synclist); 1694 else { 1695 bo->bo_flag |= BO_ONWORKLST; 1696 syncer_worklist_len++; 1697 } 1698 1699 if (delay > syncer_maxdelay - 2) 1700 delay = syncer_maxdelay - 2; 1701 slot = (syncer_delayno + delay) & syncer_mask; 1702 1703 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 1704 mtx_unlock(&sync_mtx); 1705 } 1706 1707 static int 1708 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 1709 { 1710 int error, len; 1711 1712 mtx_lock(&sync_mtx); 1713 len = syncer_worklist_len - sync_vnode_count; 1714 mtx_unlock(&sync_mtx); 1715 error = SYSCTL_OUT(req, &len, sizeof(len)); 1716 return (error); 1717 } 1718 1719 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, 1720 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 1721 1722 static struct proc *updateproc; 1723 static void sched_sync(void); 1724 static struct kproc_desc up_kp = { 1725 "syncer", 1726 sched_sync, 1727 &updateproc 1728 }; 1729 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 1730 1731 static int 1732 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 1733 { 1734 struct vnode *vp; 1735 struct mount *mp; 1736 1737 *bo = LIST_FIRST(slp); 1738 if (*bo == NULL) 1739 return (0); 1740 vp = (*bo)->__bo_vnode; /* XXX */ 1741 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 1742 return (1); 1743 /* 1744 * We use vhold in case the vnode does not 1745 * successfully sync. vhold prevents the vnode from 1746 * going away when we unlock the sync_mtx so that 1747 * we can acquire the vnode interlock. 1748 */ 1749 vholdl(vp); 1750 mtx_unlock(&sync_mtx); 1751 VI_UNLOCK(vp); 1752 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1753 vdrop(vp); 1754 mtx_lock(&sync_mtx); 1755 return (*bo == LIST_FIRST(slp)); 1756 } 1757 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1758 (void) VOP_FSYNC(vp, MNT_LAZY, td); 1759 VOP_UNLOCK(vp, 0); 1760 vn_finished_write(mp); 1761 BO_LOCK(*bo); 1762 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 1763 /* 1764 * Put us back on the worklist. The worklist 1765 * routine will remove us from our current 1766 * position and then add us back in at a later 1767 * position. 1768 */ 1769 vn_syncer_add_to_worklist(*bo, syncdelay); 1770 } 1771 BO_UNLOCK(*bo); 1772 vdrop(vp); 1773 mtx_lock(&sync_mtx); 1774 return (0); 1775 } 1776 1777 static int first_printf = 1; 1778 1779 /* 1780 * System filesystem synchronizer daemon. 
1781 */ 1782 static void 1783 sched_sync(void) 1784 { 1785 struct synclist *next, *slp; 1786 struct bufobj *bo; 1787 long starttime; 1788 struct thread *td = curthread; 1789 int last_work_seen; 1790 int net_worklist_len; 1791 int syncer_final_iter; 1792 int error; 1793 1794 last_work_seen = 0; 1795 syncer_final_iter = 0; 1796 syncer_state = SYNCER_RUNNING; 1797 starttime = time_uptime; 1798 td->td_pflags |= TDP_NORUNNINGBUF; 1799 1800 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 1801 SHUTDOWN_PRI_LAST); 1802 1803 mtx_lock(&sync_mtx); 1804 for (;;) { 1805 if (syncer_state == SYNCER_FINAL_DELAY && 1806 syncer_final_iter == 0) { 1807 mtx_unlock(&sync_mtx); 1808 kproc_suspend_check(td->td_proc); 1809 mtx_lock(&sync_mtx); 1810 } 1811 net_worklist_len = syncer_worklist_len - sync_vnode_count; 1812 if (syncer_state != SYNCER_RUNNING && 1813 starttime != time_uptime) { 1814 if (first_printf) { 1815 printf("\nSyncing disks, vnodes remaining..."); 1816 first_printf = 0; 1817 } 1818 printf("%d ", net_worklist_len); 1819 } 1820 starttime = time_uptime; 1821 1822 /* 1823 * Push files whose dirty time has expired. Be careful 1824 * of interrupt race on slp queue. 1825 * 1826 * Skip over empty worklist slots when shutting down. 1827 */ 1828 do { 1829 slp = &syncer_workitem_pending[syncer_delayno]; 1830 syncer_delayno += 1; 1831 if (syncer_delayno == syncer_maxdelay) 1832 syncer_delayno = 0; 1833 next = &syncer_workitem_pending[syncer_delayno]; 1834 /* 1835 * If the worklist has wrapped since the 1836 * it was emptied of all but syncer vnodes, 1837 * switch to the FINAL_DELAY state and run 1838 * for one more second. 1839 */ 1840 if (syncer_state == SYNCER_SHUTTING_DOWN && 1841 net_worklist_len == 0 && 1842 last_work_seen == syncer_delayno) { 1843 syncer_state = SYNCER_FINAL_DELAY; 1844 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 1845 } 1846 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 1847 syncer_worklist_len > 0); 1848 1849 /* 1850 * Keep track of the last time there was anything 1851 * on the worklist other than syncer vnodes. 1852 * Return to the SHUTTING_DOWN state if any 1853 * new work appears. 1854 */ 1855 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 1856 last_work_seen = syncer_delayno; 1857 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 1858 syncer_state = SYNCER_SHUTTING_DOWN; 1859 while (!LIST_EMPTY(slp)) { 1860 error = sync_vnode(slp, &bo, td); 1861 if (error == 1) { 1862 LIST_REMOVE(bo, bo_synclist); 1863 LIST_INSERT_HEAD(next, bo, bo_synclist); 1864 continue; 1865 } 1866 1867 if (first_printf == 0) { 1868 /* 1869 * Drop the sync mutex, because some watchdog 1870 * drivers need to sleep while patting 1871 */ 1872 mtx_unlock(&sync_mtx); 1873 wdog_kern_pat(WD_LASTVAL); 1874 mtx_lock(&sync_mtx); 1875 } 1876 1877 } 1878 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 1879 syncer_final_iter--; 1880 /* 1881 * The variable rushjob allows the kernel to speed up the 1882 * processing of the filesystem syncer process. A rushjob 1883 * value of N tells the filesystem syncer to process the next 1884 * N seconds worth of work on its queue ASAP. Currently rushjob 1885 * is used by the soft update code to speed up the filesystem 1886 * syncer process when the incore state is getting so far 1887 * ahead of the disk that the kernel memory pool is being 1888 * threatened with exhaustion. 
1889 */ 1890 if (rushjob > 0) { 1891 rushjob -= 1; 1892 continue; 1893 } 1894 /* 1895 * Just sleep for a short period of time between 1896 * iterations when shutting down to allow some I/O 1897 * to happen. 1898 * 1899 * If it has taken us less than a second to process the 1900 * current work, then wait. Otherwise start right over 1901 * again. We can still lose time if any single round 1902 * takes more than two seconds, but it does not really 1903 * matter as we are just trying to generally pace the 1904 * filesystem activity. 1905 */ 1906 if (syncer_state != SYNCER_RUNNING || 1907 time_uptime == starttime) { 1908 thread_lock(td); 1909 sched_prio(td, PPAUSE); 1910 thread_unlock(td); 1911 } 1912 if (syncer_state != SYNCER_RUNNING) 1913 cv_timedwait(&sync_wakeup, &sync_mtx, 1914 hz / SYNCER_SHUTDOWN_SPEEDUP); 1915 else if (time_uptime == starttime) 1916 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 1917 } 1918 } 1919 1920 /* 1921 * Request the syncer daemon to speed up its work. 1922 * We never push it to speed up more than half of its 1923 * normal turn time, otherwise it could take over the cpu. 1924 */ 1925 int 1926 speedup_syncer(void) 1927 { 1928 int ret = 0; 1929 1930 mtx_lock(&sync_mtx); 1931 if (rushjob < syncdelay / 2) { 1932 rushjob += 1; 1933 stat_rush_requests += 1; 1934 ret = 1; 1935 } 1936 mtx_unlock(&sync_mtx); 1937 cv_broadcast(&sync_wakeup); 1938 return (ret); 1939 } 1940 1941 /* 1942 * Tell the syncer to speed up its work and run through its work 1943 * list several times, then tell it to shut down. 1944 */ 1945 static void 1946 syncer_shutdown(void *arg, int howto) 1947 { 1948 1949 if (howto & RB_NOSYNC) 1950 return; 1951 mtx_lock(&sync_mtx); 1952 syncer_state = SYNCER_SHUTTING_DOWN; 1953 rushjob = 0; 1954 mtx_unlock(&sync_mtx); 1955 cv_broadcast(&sync_wakeup); 1956 kproc_shutdown(arg, howto); 1957 } 1958 1959 void 1960 syncer_suspend(void) 1961 { 1962 1963 syncer_shutdown(updateproc, 0); 1964 } 1965 1966 void 1967 syncer_resume(void) 1968 { 1969 1970 mtx_lock(&sync_mtx); 1971 first_printf = 1; 1972 syncer_state = SYNCER_RUNNING; 1973 mtx_unlock(&sync_mtx); 1974 cv_broadcast(&sync_wakeup); 1975 kproc_resume(updateproc); 1976 } 1977 1978 /* 1979 * Reassign a buffer from one vnode to another. 1980 * Used to assign file specific control information 1981 * (indirect blocks) to the vnode to which they belong. 1982 */ 1983 void 1984 reassignbuf(struct buf *bp) 1985 { 1986 struct vnode *vp; 1987 struct bufobj *bo; 1988 int delay; 1989 #ifdef INVARIANTS 1990 struct bufv *bv; 1991 #endif 1992 1993 vp = bp->b_vp; 1994 bo = bp->b_bufobj; 1995 ++reassignbufcalls; 1996 1997 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 1998 bp, bp->b_vp, bp->b_flags); 1999 /* 2000 * B_PAGING flagged buffers cannot be reassigned because their vp 2001 * is not fully linked in. 2002 */ 2003 if (bp->b_flags & B_PAGING) 2004 panic("cannot reassign paging buffer"); 2005 2006 /* 2007 * Delete from old vnode list, if on one. 2008 */ 2009 BO_LOCK(bo); 2010 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2011 buf_vlist_remove(bp); 2012 else 2013 panic("reassignbuf: Buffer %p not on queue.", bp); 2014 /* 2015 * If dirty, put on list of dirty buffers; otherwise insert onto list 2016 * of clean buffers.
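* * This path is typically reached when a buffer's delayed-write status changes, e.g. from bdirty() or bundirty() in the buffer cache (an assumption about the usual callers, not something enforced here). In the dirty case below the vnode is also queued on the syncer wheel with a per-type delay, using the dirdelay, metadelay and filedelay tunables declared earlier in this file; with the usual defaults metadata is given a slightly shorter delay than regular file data.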
2017 */ 2018 if (bp->b_flags & B_DELWRI) { 2019 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2020 switch (vp->v_type) { 2021 case VDIR: 2022 delay = dirdelay; 2023 break; 2024 case VCHR: 2025 delay = metadelay; 2026 break; 2027 default: 2028 delay = filedelay; 2029 } 2030 vn_syncer_add_to_worklist(bo, delay); 2031 } 2032 buf_vlist_add(bp, bo, BX_VNDIRTY); 2033 } else { 2034 buf_vlist_add(bp, bo, BX_VNCLEAN); 2035 2036 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2037 mtx_lock(&sync_mtx); 2038 LIST_REMOVE(bo, bo_synclist); 2039 syncer_worklist_len--; 2040 mtx_unlock(&sync_mtx); 2041 bo->bo_flag &= ~BO_ONWORKLST; 2042 } 2043 } 2044 #ifdef INVARIANTS 2045 bv = &bo->bo_clean; 2046 bp = TAILQ_FIRST(&bv->bv_hd); 2047 KASSERT(bp == NULL || bp->b_bufobj == bo, 2048 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2049 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2050 KASSERT(bp == NULL || bp->b_bufobj == bo, 2051 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2052 bv = &bo->bo_dirty; 2053 bp = TAILQ_FIRST(&bv->bv_hd); 2054 KASSERT(bp == NULL || bp->b_bufobj == bo, 2055 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2056 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2057 KASSERT(bp == NULL || bp->b_bufobj == bo, 2058 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2059 #endif 2060 BO_UNLOCK(bo); 2061 } 2062 2063 /* 2064 * Increment the use and hold counts on the vnode, taking care to reference 2065 * the driver's usecount if this is a chardev. The vholdl() will remove 2066 * the vnode from the free list if it is presently free. Requires the 2067 * vnode interlock and returns with it held. 2068 */ 2069 static void 2070 v_incr_usecount(struct vnode *vp) 2071 { 2072 2073 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2074 vholdl(vp); 2075 vp->v_usecount++; 2076 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2077 dev_lock(); 2078 vp->v_rdev->si_usecount++; 2079 dev_unlock(); 2080 } 2081 } 2082 2083 /* 2084 * Turn a holdcnt into a use+holdcnt such that only one call to 2085 * v_decr_usecount is needed. 2086 */ 2087 static void 2088 v_upgrade_usecount(struct vnode *vp) 2089 { 2090 2091 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2092 vp->v_usecount++; 2093 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2094 dev_lock(); 2095 vp->v_rdev->si_usecount++; 2096 dev_unlock(); 2097 } 2098 } 2099 2100 /* 2101 * Decrement the vnode use and hold count along with the driver's usecount 2102 * if this is a chardev. The vdropl() below releases the vnode interlock 2103 * as it may free the vnode. 2104 */ 2105 static void 2106 v_decr_usecount(struct vnode *vp) 2107 { 2108 2109 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2110 VNASSERT(vp->v_usecount > 0, vp, 2111 ("v_decr_usecount: negative usecount")); 2112 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2113 vp->v_usecount--; 2114 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2115 dev_lock(); 2116 vp->v_rdev->si_usecount--; 2117 dev_unlock(); 2118 } 2119 vdropl(vp); 2120 } 2121 2122 /* 2123 * Decrement only the use count and driver use count. This is intended to 2124 * be paired with a follow on vdropl() to release the remaining hold count. 2125 * In this way we may vgone() a vnode with a 0 usecount without risk of 2126 * having it end up on a free list because the hold count is kept above 0. 
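* * The intended pairing, as used by vputx() below, is roughly: *	v_decr_useonly(vp);	(use count drops, hold count stays) *	... lock the vnode and perform VOP_INACTIVE / vgone() ... *	vdropl(vp);	(release the hold, possibly freeing) * Keeping the hold count above zero for the duration prevents the vnode from landing on the free list while inactive processing is still in flight.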
2127 */ 2128 static void 2129 v_decr_useonly(struct vnode *vp) 2130 { 2131 2132 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2133 VNASSERT(vp->v_usecount > 0, vp, 2134 ("v_decr_useonly: negative usecount")); 2135 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2136 vp->v_usecount--; 2137 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2138 dev_lock(); 2139 vp->v_rdev->si_usecount--; 2140 dev_unlock(); 2141 } 2142 } 2143 2144 /* 2145 * Grab a particular vnode from the free list, increment its 2146 * reference count and lock it. VI_DOOMED is set if the vnode 2147 * is being destroyed. Only callers who specify LK_RETRY will 2148 * see doomed vnodes. If inactive processing was delayed in 2149 * vput try to do it here. 2150 */ 2151 int 2152 vget(struct vnode *vp, int flags, struct thread *td) 2153 { 2154 int error; 2155 2156 error = 0; 2157 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2158 ("vget: invalid lock operation")); 2159 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2160 2161 if ((flags & LK_INTERLOCK) == 0) 2162 VI_LOCK(vp); 2163 vholdl(vp); 2164 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2165 vdrop(vp); 2166 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2167 vp); 2168 return (error); 2169 } 2170 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2171 panic("vget: vn_lock failed to return ENOENT\n"); 2172 VI_LOCK(vp); 2173 /* Upgrade our holdcnt to a usecount. */ 2174 v_upgrade_usecount(vp); 2175 /* 2176 * We don't guarantee that any particular close will 2177 * trigger inactive processing so just make a best effort 2178 * here at preventing a reference to a removed file. If 2179 * we don't succeed no harm is done. 2180 */ 2181 if (vp->v_iflag & VI_OWEINACT) { 2182 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2183 (flags & LK_NOWAIT) == 0) 2184 vinactive(vp, td); 2185 vp->v_iflag &= ~VI_OWEINACT; 2186 } 2187 VI_UNLOCK(vp); 2188 return (0); 2189 } 2190 2191 /* 2192 * Increase the reference count of a vnode. 2193 */ 2194 void 2195 vref(struct vnode *vp) 2196 { 2197 2198 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2199 VI_LOCK(vp); 2200 v_incr_usecount(vp); 2201 VI_UNLOCK(vp); 2202 } 2203 2204 /* 2205 * Return reference count of a vnode. 2206 * 2207 * The results of this call are only guaranteed when some mechanism other 2208 * than the VI lock is used to stop other processes from gaining references 2209 * to the vnode. This may be the case if the caller holds the only reference. 2210 * This is also useful when stale data is acceptable as race conditions may 2211 * be accounted for by some other means. 2212 */ 2213 int 2214 vrefcnt(struct vnode *vp) 2215 { 2216 int usecnt; 2217 2218 VI_LOCK(vp); 2219 usecnt = vp->v_usecount; 2220 VI_UNLOCK(vp); 2221 2222 return (usecnt); 2223 } 2224 2225 #define VPUTX_VRELE 1 2226 #define VPUTX_VPUT 2 2227 #define VPUTX_VUNREF 3 2228 2229 static void 2230 vputx(struct vnode *vp, int func) 2231 { 2232 int error; 2233 2234 KASSERT(vp != NULL, ("vputx: null vp")); 2235 if (func == VPUTX_VUNREF) 2236 ASSERT_VOP_LOCKED(vp, "vunref"); 2237 else if (func == VPUTX_VPUT) 2238 ASSERT_VOP_LOCKED(vp, "vput"); 2239 else 2240 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2241 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2242 VI_LOCK(vp); 2243 2244 /* Skip this v_writecount check if we're going to panic below. 
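* Roughly speaking, every open for writing holds a use reference as well as bumping v_writecount, so a use count at or below the write count normally means a vn_close() was missed; the check is skipped when the use count is already below 1 because the negative-refcount panic further down fires instead.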
*/ 2245 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2246 ("vputx: missed vn_close")); 2247 error = 0; 2248 2249 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2250 vp->v_usecount == 1)) { 2251 if (func == VPUTX_VPUT) 2252 VOP_UNLOCK(vp, 0); 2253 v_decr_usecount(vp); 2254 return; 2255 } 2256 2257 if (vp->v_usecount != 1) { 2258 vprint("vputx: negative ref count", vp); 2259 panic("vputx: negative ref cnt"); 2260 } 2261 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2262 /* 2263 * We want to hold the vnode until the inactive finishes to 2264 * prevent vgone() races. We drop the use count here and the 2265 * hold count below when we're done. 2266 */ 2267 v_decr_useonly(vp); 2268 /* 2269 * We must call VOP_INACTIVE with the node locked. Mark 2270 * as VI_DOINGINACT to avoid recursion. 2271 */ 2272 vp->v_iflag |= VI_OWEINACT; 2273 switch (func) { 2274 case VPUTX_VRELE: 2275 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2276 VI_LOCK(vp); 2277 break; 2278 case VPUTX_VPUT: 2279 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2280 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2281 LK_NOWAIT); 2282 VI_LOCK(vp); 2283 } 2284 break; 2285 case VPUTX_VUNREF: 2286 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2287 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2288 VI_LOCK(vp); 2289 } 2290 break; 2291 } 2292 if (vp->v_usecount > 0) 2293 vp->v_iflag &= ~VI_OWEINACT; 2294 if (error == 0) { 2295 if (vp->v_iflag & VI_OWEINACT) 2296 vinactive(vp, curthread); 2297 if (func != VPUTX_VUNREF) 2298 VOP_UNLOCK(vp, 0); 2299 } 2300 vdropl(vp); 2301 } 2302 2303 /* 2304 * Vnode put/release. 2305 * If count drops to zero, call inactive routine and return to freelist. 2306 */ 2307 void 2308 vrele(struct vnode *vp) 2309 { 2310 2311 vputx(vp, VPUTX_VRELE); 2312 } 2313 2314 /* 2315 * Release an already locked vnode. This gives the same effect as 2316 * unlock+vrele(), but takes less time and avoids releasing and 2317 * re-acquiring the lock (as vrele() acquires the lock internally). 2318 */ 2319 void 2320 vput(struct vnode *vp) 2321 { 2322 2323 vputx(vp, VPUTX_VPUT); 2324 } 2325 2326 /* 2327 * Release an exclusively locked vnode. Do not unlock the vnode lock. 2328 */ 2329 void 2330 vunref(struct vnode *vp) 2331 { 2332 2333 vputx(vp, VPUTX_VUNREF); 2334 } 2335 2336 /* 2337 * Somebody doesn't want the vnode recycled. 2338 */ 2339 void 2340 vhold(struct vnode *vp) 2341 { 2342 2343 VI_LOCK(vp); 2344 vholdl(vp); 2345 VI_UNLOCK(vp); 2346 } 2347 2348 /* 2349 * Increase the hold count and activate if this is the first reference. 2350 */ 2351 void 2352 vholdl(struct vnode *vp) 2353 { 2354 struct mount *mp; 2355 2356 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2357 #ifdef INVARIANTS 2358 /* getnewvnode() calls v_incr_usecount() without holding interlock. */ 2359 if (vp->v_type != VNON || vp->v_data != NULL) { 2360 ASSERT_VI_LOCKED(vp, "vholdl"); 2361 VNASSERT(vp->v_holdcnt > 0 || (vp->v_iflag & VI_FREE) != 0, 2362 vp, ("vholdl: free vnode is held")); 2363 } 2364 #endif 2365 vp->v_holdcnt++; 2366 if ((vp->v_iflag & VI_FREE) == 0) 2367 return; 2368 VNASSERT(vp->v_holdcnt == 1, vp, ("vholdl: wrong hold count")); 2369 VNASSERT(vp->v_op != NULL, vp, ("vholdl: vnode already reclaimed.")); 2370 /* 2371 * Remove a vnode from the free list, mark it as in use, 2372 * and put it on the active list.
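* * The inverse transition, from the active list back onto the free list, is performed by vdropl() below when the last hold is released. A typical hold/drop pairing is the one in sync_vnode() above: *	vholdl(vp); *	... drop other locks, do work that may sleep ... *	vdrop(vp); * which keeps the vnode from being recycled or freed while no other reference is held.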
2373 */ 2374 mtx_lock(&vnode_free_list_mtx); 2375 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2376 freevnodes--; 2377 vp->v_iflag &= ~(VI_FREE|VI_AGE); 2378 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2379 ("Activating already active vnode")); 2380 vp->v_iflag |= VI_ACTIVE; 2381 mp = vp->v_mount; 2382 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2383 mp->mnt_activevnodelistsize++; 2384 mtx_unlock(&vnode_free_list_mtx); 2385 } 2386 2387 /* 2388 * Note that there is one less who cares about this vnode. 2389 * vdrop() is the opposite of vhold(). 2390 */ 2391 void 2392 vdrop(struct vnode *vp) 2393 { 2394 2395 VI_LOCK(vp); 2396 vdropl(vp); 2397 } 2398 2399 /* 2400 * Drop the hold count of the vnode. If this is the last reference to 2401 * the vnode we place it on the free list unless it has been vgone'd 2402 * (marked VI_DOOMED) in which case we will free it. 2403 */ 2404 void 2405 vdropl(struct vnode *vp) 2406 { 2407 struct bufobj *bo; 2408 struct mount *mp; 2409 int active; 2410 2411 ASSERT_VI_LOCKED(vp, "vdropl"); 2412 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2413 if (vp->v_holdcnt <= 0) 2414 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2415 vp->v_holdcnt--; 2416 VNASSERT(vp->v_holdcnt >= vp->v_usecount, vp, 2417 ("hold count less than use count")); 2418 if (vp->v_holdcnt > 0) { 2419 VI_UNLOCK(vp); 2420 return; 2421 } 2422 if ((vp->v_iflag & VI_DOOMED) == 0) { 2423 /* 2424 * Mark a vnode as free: remove it from its active list 2425 * and put it up for recycling on the freelist. 2426 */ 2427 VNASSERT(vp->v_op != NULL, vp, 2428 ("vdropl: vnode already reclaimed.")); 2429 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2430 ("vnode already free")); 2431 VNASSERT(vp->v_holdcnt == 0, vp, 2432 ("vdropl: freeing when we shouldn't")); 2433 active = vp->v_iflag & VI_ACTIVE; 2434 vp->v_iflag &= ~VI_ACTIVE; 2435 mp = vp->v_mount; 2436 mtx_lock(&vnode_free_list_mtx); 2437 if (active) { 2438 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, 2439 v_actfreelist); 2440 mp->mnt_activevnodelistsize--; 2441 } 2442 if (vp->v_iflag & VI_AGE) { 2443 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_actfreelist); 2444 } else { 2445 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 2446 } 2447 freevnodes++; 2448 vp->v_iflag &= ~VI_AGE; 2449 vp->v_iflag |= VI_FREE; 2450 mtx_unlock(&vnode_free_list_mtx); 2451 VI_UNLOCK(vp); 2452 return; 2453 } 2454 /* 2455 * The vnode has been marked for destruction, so free it. 
2456 */ 2457 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2458 atomic_subtract_long(&numvnodes, 1); 2459 bo = &vp->v_bufobj; 2460 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2461 ("cleaned vnode still on the free list.")); 2462 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2463 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2464 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2465 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2466 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2467 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2468 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2469 ("clean blk trie not empty")); 2470 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2471 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2472 ("dirty blk trie not empty")); 2473 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2474 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2475 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2476 VI_UNLOCK(vp); 2477 #ifdef MAC 2478 mac_vnode_destroy(vp); 2479 #endif 2480 if (vp->v_pollinfo != NULL) 2481 destroy_vpollinfo(vp->v_pollinfo); 2482 #ifdef INVARIANTS 2483 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 2484 vp->v_op = NULL; 2485 #endif 2486 rangelock_destroy(&vp->v_rl); 2487 lockdestroy(vp->v_vnlock); 2488 mtx_destroy(&vp->v_interlock); 2489 rw_destroy(BO_LOCKPTR(bo)); 2490 uma_zfree(vnode_zone, vp); 2491 } 2492 2493 /* 2494 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2495 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2496 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2497 * failed lock upgrade. 2498 */ 2499 void 2500 vinactive(struct vnode *vp, struct thread *td) 2501 { 2502 struct vm_object *obj; 2503 2504 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2505 ASSERT_VI_LOCKED(vp, "vinactive"); 2506 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2507 ("vinactive: recursed on VI_DOINGINACT")); 2508 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2509 vp->v_iflag |= VI_DOINGINACT; 2510 vp->v_iflag &= ~VI_OWEINACT; 2511 VI_UNLOCK(vp); 2512 /* 2513 * Before moving off the active list, we must be sure that any 2514 * modified pages are on the vnode's dirty list since these will 2515 * no longer be checked once the vnode is on the inactive list. 2516 * Because the vnode vm object keeps a hold reference on the vnode 2517 * if there is at least one resident non-cached page, the vnode 2518 * cannot leave the active list without the page cleanup done. 2519 */ 2520 obj = vp->v_object; 2521 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 2522 VM_OBJECT_WLOCK(obj); 2523 vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC); 2524 VM_OBJECT_WUNLOCK(obj); 2525 } 2526 VOP_INACTIVE(vp, td); 2527 VI_LOCK(vp); 2528 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2529 ("vinactive: lost VI_DOINGINACT")); 2530 vp->v_iflag &= ~VI_DOINGINACT; 2531 } 2532 2533 /* 2534 * Remove any vnodes in the vnode table belonging to mount point mp. 2535 * 2536 * If FORCECLOSE is not specified, there should not be any active ones, 2537 * return error if any are found (nb: this is a user error, not a 2538 * system error). If FORCECLOSE is specified, detach any active vnodes 2539 * that are found. 2540 * 2541 * If WRITECLOSE is set, only flush out regular file vnodes open for 2542 * writing. 
2543 * 2544 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2545 * 2546 * `rootrefs' specifies the base reference count for the root vnode 2547 * of this filesystem. The root vnode is considered busy if its 2548 * v_usecount exceeds this value. On a successful return, vflush(, td) 2549 * will call vrele() on the root vnode exactly rootrefs times. 2550 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2551 * be zero. 2552 */ 2553 #ifdef DIAGNOSTIC 2554 static int busyprt = 0; /* print out busy vnodes */ 2555 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 2556 #endif 2557 2558 int 2559 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2560 { 2561 struct vnode *vp, *mvp, *rootvp = NULL; 2562 struct vattr vattr; 2563 int busy = 0, error; 2564 2565 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 2566 rootrefs, flags); 2567 if (rootrefs > 0) { 2568 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2569 ("vflush: bad args")); 2570 /* 2571 * Get the filesystem root vnode. We can vput() it 2572 * immediately, since with rootrefs > 0, it won't go away. 2573 */ 2574 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 2575 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 2576 __func__, error); 2577 return (error); 2578 } 2579 vput(rootvp); 2580 } 2581 loop: 2582 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 2583 vholdl(vp); 2584 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2585 if (error) { 2586 vdrop(vp); 2587 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2588 goto loop; 2589 } 2590 /* 2591 * Skip over vnodes marked VV_SYSTEM. 2592 */ 2593 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2594 VOP_UNLOCK(vp, 0); 2595 vdrop(vp); 2596 continue; 2597 } 2598 /* 2599 * If WRITECLOSE is set, flush out unlinked but still open 2600 * files (even if open only for reading) and regular file 2601 * vnodes open for writing. 2602 */ 2603 if (flags & WRITECLOSE) { 2604 if (vp->v_object != NULL) { 2605 VM_OBJECT_WLOCK(vp->v_object); 2606 vm_object_page_clean(vp->v_object, 0, 0, 0); 2607 VM_OBJECT_WUNLOCK(vp->v_object); 2608 } 2609 error = VOP_FSYNC(vp, MNT_WAIT, td); 2610 if (error != 0) { 2611 VOP_UNLOCK(vp, 0); 2612 vdrop(vp); 2613 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2614 return (error); 2615 } 2616 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2617 VI_LOCK(vp); 2618 2619 if ((vp->v_type == VNON || 2620 (error == 0 && vattr.va_nlink > 0)) && 2621 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2622 VOP_UNLOCK(vp, 0); 2623 vdropl(vp); 2624 continue; 2625 } 2626 } else 2627 VI_LOCK(vp); 2628 /* 2629 * With v_usecount == 0, all we need to do is clear out the 2630 * vnode data structures and we are done. 2631 * 2632 * If FORCECLOSE is set, forcibly close the vnode. 2633 */ 2634 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2635 VNASSERT(vp->v_usecount == 0 || 2636 (vp->v_type != VCHR && vp->v_type != VBLK), vp, 2637 ("device VNODE %p is FORCECLOSED", vp)); 2638 vgonel(vp); 2639 } else { 2640 busy++; 2641 #ifdef DIAGNOSTIC 2642 if (busyprt) 2643 vprint("vflush: busy vnode", vp); 2644 #endif 2645 } 2646 VOP_UNLOCK(vp, 0); 2647 vdropl(vp); 2648 } 2649 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2650 /* 2651 * If just the root vnode is busy, and if its refcount 2652 * is equal to `rootrefs', then go ahead and kill it.
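* * As an illustration (a hypothetical unmount handler, not a fixed interface): a filesystem that still holds one long-lived reference on its root vnode might call *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td); * so the check below forgives the case where that root reference is the only remaining use, and the vrele() loop at the end of this function drops the caller's rootrefs references.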
2653 */ 2654 VI_LOCK(rootvp); 2655 KASSERT(busy > 0, ("vflush: not busy")); 2656 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2657 ("vflush: usecount %d < rootrefs %d", 2658 rootvp->v_usecount, rootrefs)); 2659 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2660 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2661 vgone(rootvp); 2662 VOP_UNLOCK(rootvp, 0); 2663 busy = 0; 2664 } else 2665 VI_UNLOCK(rootvp); 2666 } 2667 if (busy) { 2668 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 2669 busy); 2670 return (EBUSY); 2671 } 2672 for (; rootrefs > 0; rootrefs--) 2673 vrele(rootvp); 2674 return (0); 2675 } 2676 2677 /* 2678 * Recycle an unused vnode to the front of the free list. 2679 */ 2680 int 2681 vrecycle(struct vnode *vp) 2682 { 2683 int recycled; 2684 2685 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2686 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2687 recycled = 0; 2688 VI_LOCK(vp); 2689 if (vp->v_usecount == 0) { 2690 recycled = 1; 2691 vgonel(vp); 2692 } 2693 VI_UNLOCK(vp); 2694 return (recycled); 2695 } 2696 2697 /* 2698 * Eliminate all activity associated with a vnode 2699 * in preparation for reuse. 2700 */ 2701 void 2702 vgone(struct vnode *vp) 2703 { 2704 VI_LOCK(vp); 2705 vgonel(vp); 2706 VI_UNLOCK(vp); 2707 } 2708 2709 static void 2710 notify_lowervp_vfs_dummy(struct mount *mp __unused, 2711 struct vnode *lowervp __unused) 2712 { 2713 } 2714 2715 /* 2716 * Notify upper mounts about reclaimed or unlinked vnode. 2717 */ 2718 void 2719 vfs_notify_upper(struct vnode *vp, int event) 2720 { 2721 static struct vfsops vgonel_vfsops = { 2722 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 2723 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 2724 }; 2725 struct mount *mp, *ump, *mmp; 2726 2727 mp = vp->v_mount; 2728 if (mp == NULL) 2729 return; 2730 2731 MNT_ILOCK(mp); 2732 if (TAILQ_EMPTY(&mp->mnt_uppers)) 2733 goto unlock; 2734 MNT_IUNLOCK(mp); 2735 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 2736 mmp->mnt_op = &vgonel_vfsops; 2737 mmp->mnt_kern_flag |= MNTK_MARKER; 2738 MNT_ILOCK(mp); 2739 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 2740 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 2741 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 2742 ump = TAILQ_NEXT(ump, mnt_upper_link); 2743 continue; 2744 } 2745 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 2746 MNT_IUNLOCK(mp); 2747 switch (event) { 2748 case VFS_NOTIFY_UPPER_RECLAIM: 2749 VFS_RECLAIM_LOWERVP(ump, vp); 2750 break; 2751 case VFS_NOTIFY_UPPER_UNLINK: 2752 VFS_UNLINK_LOWERVP(ump, vp); 2753 break; 2754 default: 2755 KASSERT(0, ("invalid event %d", event)); 2756 break; 2757 } 2758 MNT_ILOCK(mp); 2759 ump = TAILQ_NEXT(mmp, mnt_upper_link); 2760 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 2761 } 2762 free(mmp, M_TEMP); 2763 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 2764 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 2765 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 2766 wakeup(&mp->mnt_uppers); 2767 } 2768 unlock: 2769 MNT_IUNLOCK(mp); 2770 } 2771 2772 /* 2773 * vgone, with the vp interlock held. 2774 */ 2775 void 2776 vgonel(struct vnode *vp) 2777 { 2778 struct thread *td; 2779 int oweinact; 2780 int active; 2781 struct mount *mp; 2782 2783 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2784 ASSERT_VI_LOCKED(vp, "vgonel"); 2785 VNASSERT(vp->v_holdcnt, vp, 2786 ("vgonel: vp %p has no reference.", vp)); 2787 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2788 td = curthread; 2789 2790 /* 2791 * Don't vgonel if we're already doomed. 
2792 */ 2793 if (vp->v_iflag & VI_DOOMED) 2794 return; 2795 vp->v_iflag |= VI_DOOMED; 2796 2797 /* 2798 * Check to see if the vnode is in use. If so, we have to call 2799 * VOP_CLOSE() and VOP_INACTIVE(). 2800 */ 2801 active = vp->v_usecount; 2802 oweinact = (vp->v_iflag & VI_OWEINACT); 2803 VI_UNLOCK(vp); 2804 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 2805 2806 /* 2807 * If purging an active vnode, it must be closed and 2808 * deactivated before being reclaimed. 2809 */ 2810 if (active) 2811 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2812 if (oweinact || active) { 2813 VI_LOCK(vp); 2814 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2815 vinactive(vp, td); 2816 VI_UNLOCK(vp); 2817 } 2818 if (vp->v_type == VSOCK) 2819 vfs_unp_reclaim(vp); 2820 2821 /* 2822 * Clean out any buffers associated with the vnode. 2823 * If the flush fails, just toss the buffers. 2824 */ 2825 mp = NULL; 2826 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2827 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2828 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 2829 while (vinvalbuf(vp, 0, 0, 0) != 0) 2830 ; 2831 } 2832 #ifdef INVARIANTS 2833 BO_LOCK(&vp->v_bufobj); 2834 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 2835 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 2836 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 2837 vp->v_bufobj.bo_clean.bv_cnt == 0, 2838 ("vp %p bufobj not invalidated", vp)); 2839 vp->v_bufobj.bo_flag |= BO_DEAD; 2840 BO_UNLOCK(&vp->v_bufobj); 2841 #endif 2842 2843 /* 2844 * Reclaim the vnode. 2845 */ 2846 if (VOP_RECLAIM(vp, td)) 2847 panic("vgone: cannot reclaim"); 2848 if (mp != NULL) 2849 vn_finished_secondary_write(mp); 2850 VNASSERT(vp->v_object == NULL, vp, 2851 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2852 /* 2853 * Clear the advisory locks and wake up waiting threads. 2854 */ 2855 (void)VOP_ADVLOCKPURGE(vp); 2856 /* 2857 * Delete from old mount point vnode list. 2858 */ 2859 delmntque(vp); 2860 cache_purge(vp); 2861 /* 2862 * Done with purge, reset to the standard lock and invalidate 2863 * the vnode. 2864 */ 2865 VI_LOCK(vp); 2866 vp->v_vnlock = &vp->v_lock; 2867 vp->v_op = &dead_vnodeops; 2868 vp->v_tag = "none"; 2869 vp->v_type = VBAD; 2870 } 2871 2872 /* 2873 * Calculate the total number of references to a special device. 2874 */ 2875 int 2876 vcount(struct vnode *vp) 2877 { 2878 int count; 2879 2880 dev_lock(); 2881 count = vp->v_rdev->si_usecount; 2882 dev_unlock(); 2883 return (count); 2884 } 2885 2886 /* 2887 * Same as above, but using the struct cdev *as argument 2888 */ 2889 int 2890 count_dev(struct cdev *dev) 2891 { 2892 int count; 2893 2894 dev_lock(); 2895 count = dev->si_usecount; 2896 dev_unlock(); 2897 return(count); 2898 } 2899 2900 /* 2901 * Print out a description of a vnode. 2902 */ 2903 static char *typename[] = 2904 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 2905 "VMARKER"}; 2906 2907 void 2908 vn_printf(struct vnode *vp, const char *fmt, ...) 
2909 { 2910 va_list ap; 2911 char buf[256], buf2[16]; 2912 u_long flags; 2913 2914 va_start(ap, fmt); 2915 vprintf(fmt, ap); 2916 va_end(ap); 2917 printf("%p: ", (void *)vp); 2918 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 2919 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 2920 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 2921 buf[0] = '\0'; 2922 buf[1] = '\0'; 2923 if (vp->v_vflag & VV_ROOT) 2924 strlcat(buf, "|VV_ROOT", sizeof(buf)); 2925 if (vp->v_vflag & VV_ISTTY) 2926 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 2927 if (vp->v_vflag & VV_NOSYNC) 2928 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 2929 if (vp->v_vflag & VV_ETERNALDEV) 2930 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 2931 if (vp->v_vflag & VV_CACHEDLABEL) 2932 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 2933 if (vp->v_vflag & VV_TEXT) 2934 strlcat(buf, "|VV_TEXT", sizeof(buf)); 2935 if (vp->v_vflag & VV_COPYONWRITE) 2936 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 2937 if (vp->v_vflag & VV_SYSTEM) 2938 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 2939 if (vp->v_vflag & VV_PROCDEP) 2940 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 2941 if (vp->v_vflag & VV_NOKNOTE) 2942 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 2943 if (vp->v_vflag & VV_DELETED) 2944 strlcat(buf, "|VV_DELETED", sizeof(buf)); 2945 if (vp->v_vflag & VV_MD) 2946 strlcat(buf, "|VV_MD", sizeof(buf)); 2947 if (vp->v_vflag & VV_FORCEINSMQ) 2948 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 2949 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 2950 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 2951 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 2952 if (flags != 0) { 2953 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 2954 strlcat(buf, buf2, sizeof(buf)); 2955 } 2956 if (vp->v_iflag & VI_MOUNT) 2957 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 2958 if (vp->v_iflag & VI_AGE) 2959 strlcat(buf, "|VI_AGE", sizeof(buf)); 2960 if (vp->v_iflag & VI_DOOMED) 2961 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 2962 if (vp->v_iflag & VI_FREE) 2963 strlcat(buf, "|VI_FREE", sizeof(buf)); 2964 if (vp->v_iflag & VI_ACTIVE) 2965 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 2966 if (vp->v_iflag & VI_DOINGINACT) 2967 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 2968 if (vp->v_iflag & VI_OWEINACT) 2969 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 2970 flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE | 2971 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 2972 if (flags != 0) { 2973 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 2974 strlcat(buf, buf2, sizeof(buf)); 2975 } 2976 printf(" flags (%s)\n", buf + 1); 2977 if (mtx_owned(VI_MTX(vp))) 2978 printf(" VI_LOCKed"); 2979 if (vp->v_object != NULL) 2980 printf(" v_object %p ref %d pages %d " 2981 "cleanbuf %d dirtybuf %d\n", 2982 vp->v_object, vp->v_object->ref_count, 2983 vp->v_object->resident_page_count, 2984 vp->v_bufobj.bo_dirty.bv_cnt, 2985 vp->v_bufobj.bo_clean.bv_cnt); 2986 printf(" "); 2987 lockmgr_printinfo(vp->v_vnlock); 2988 if (vp->v_data != NULL) 2989 VOP_PRINT(vp); 2990 } 2991 2992 #ifdef DDB 2993 /* 2994 * List all of the locked vnodes in the system. 2995 * Called when debugging the kernel. 2996 */ 2997 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2998 { 2999 struct mount *mp; 3000 struct vnode *vp; 3001 3002 /* 3003 * Note: because this is DDB, we can't obey the locking semantics 3004 * for these structures, which means we could catch an inconsistent 3005 * state and dereference a nasty pointer. 
Not much to be done 3006 * about that. 3007 */ 3008 db_printf("Locked vnodes\n"); 3009 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3010 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3011 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3012 vprint("", vp); 3013 } 3014 } 3015 } 3016 3017 /* 3018 * Show details about the given vnode. 3019 */ 3020 DB_SHOW_COMMAND(vnode, db_show_vnode) 3021 { 3022 struct vnode *vp; 3023 3024 if (!have_addr) 3025 return; 3026 vp = (struct vnode *)addr; 3027 vn_printf(vp, "vnode "); 3028 } 3029 3030 /* 3031 * Show details about the given mount point. 3032 */ 3033 DB_SHOW_COMMAND(mount, db_show_mount) 3034 { 3035 struct mount *mp; 3036 struct vfsopt *opt; 3037 struct statfs *sp; 3038 struct vnode *vp; 3039 char buf[512]; 3040 uint64_t mflags; 3041 u_int flags; 3042 3043 if (!have_addr) { 3044 /* No address given, print short info about all mount points. */ 3045 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3046 db_printf("%p %s on %s (%s)\n", mp, 3047 mp->mnt_stat.f_mntfromname, 3048 mp->mnt_stat.f_mntonname, 3049 mp->mnt_stat.f_fstypename); 3050 if (db_pager_quit) 3051 break; 3052 } 3053 db_printf("\nMore info: show mount <addr>\n"); 3054 return; 3055 } 3056 3057 mp = (struct mount *)addr; 3058 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3059 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3060 3061 buf[0] = '\0'; 3062 mflags = mp->mnt_flag; 3063 #define MNT_FLAG(flag) do { \ 3064 if (mflags & (flag)) { \ 3065 if (buf[0] != '\0') \ 3066 strlcat(buf, ", ", sizeof(buf)); \ 3067 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3068 mflags &= ~(flag); \ 3069 } \ 3070 } while (0) 3071 MNT_FLAG(MNT_RDONLY); 3072 MNT_FLAG(MNT_SYNCHRONOUS); 3073 MNT_FLAG(MNT_NOEXEC); 3074 MNT_FLAG(MNT_NOSUID); 3075 MNT_FLAG(MNT_NFS4ACLS); 3076 MNT_FLAG(MNT_UNION); 3077 MNT_FLAG(MNT_ASYNC); 3078 MNT_FLAG(MNT_SUIDDIR); 3079 MNT_FLAG(MNT_SOFTDEP); 3080 MNT_FLAG(MNT_NOSYMFOLLOW); 3081 MNT_FLAG(MNT_GJOURNAL); 3082 MNT_FLAG(MNT_MULTILABEL); 3083 MNT_FLAG(MNT_ACLS); 3084 MNT_FLAG(MNT_NOATIME); 3085 MNT_FLAG(MNT_NOCLUSTERR); 3086 MNT_FLAG(MNT_NOCLUSTERW); 3087 MNT_FLAG(MNT_SUJ); 3088 MNT_FLAG(MNT_EXRDONLY); 3089 MNT_FLAG(MNT_EXPORTED); 3090 MNT_FLAG(MNT_DEFEXPORTED); 3091 MNT_FLAG(MNT_EXPORTANON); 3092 MNT_FLAG(MNT_EXKERB); 3093 MNT_FLAG(MNT_EXPUBLIC); 3094 MNT_FLAG(MNT_LOCAL); 3095 MNT_FLAG(MNT_QUOTA); 3096 MNT_FLAG(MNT_ROOTFS); 3097 MNT_FLAG(MNT_USER); 3098 MNT_FLAG(MNT_IGNORE); 3099 MNT_FLAG(MNT_UPDATE); 3100 MNT_FLAG(MNT_DELEXPORT); 3101 MNT_FLAG(MNT_RELOAD); 3102 MNT_FLAG(MNT_FORCE); 3103 MNT_FLAG(MNT_SNAPSHOT); 3104 MNT_FLAG(MNT_BYFSID); 3105 #undef MNT_FLAG 3106 if (mflags != 0) { 3107 if (buf[0] != '\0') 3108 strlcat(buf, ", ", sizeof(buf)); 3109 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3110 "0x%016jx", mflags); 3111 } 3112 db_printf(" mnt_flag = %s\n", buf); 3113 3114 buf[0] = '\0'; 3115 flags = mp->mnt_kern_flag; 3116 #define MNT_KERN_FLAG(flag) do { \ 3117 if (flags & (flag)) { \ 3118 if (buf[0] != '\0') \ 3119 strlcat(buf, ", ", sizeof(buf)); \ 3120 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3121 flags &= ~(flag); \ 3122 } \ 3123 } while (0) 3124 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3125 MNT_KERN_FLAG(MNTK_ASYNC); 3126 MNT_KERN_FLAG(MNTK_SOFTDEP); 3127 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3128 MNT_KERN_FLAG(MNTK_DRAINING); 3129 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3130 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3131 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3132 MNT_KERN_FLAG(MNTK_NO_IOPF); 3133 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3134 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3135 
MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3136 MNT_KERN_FLAG(MNTK_MARKER); 3137 MNT_KERN_FLAG(MNTK_NOASYNC); 3138 MNT_KERN_FLAG(MNTK_UNMOUNT); 3139 MNT_KERN_FLAG(MNTK_MWAIT); 3140 MNT_KERN_FLAG(MNTK_SUSPEND); 3141 MNT_KERN_FLAG(MNTK_SUSPEND2); 3142 MNT_KERN_FLAG(MNTK_SUSPENDED); 3143 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3144 MNT_KERN_FLAG(MNTK_NOKNOTE); 3145 #undef MNT_KERN_FLAG 3146 if (flags != 0) { 3147 if (buf[0] != '\0') 3148 strlcat(buf, ", ", sizeof(buf)); 3149 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3150 "0x%08x", flags); 3151 } 3152 db_printf(" mnt_kern_flag = %s\n", buf); 3153 3154 db_printf(" mnt_opt = "); 3155 opt = TAILQ_FIRST(mp->mnt_opt); 3156 if (opt != NULL) { 3157 db_printf("%s", opt->name); 3158 opt = TAILQ_NEXT(opt, link); 3159 while (opt != NULL) { 3160 db_printf(", %s", opt->name); 3161 opt = TAILQ_NEXT(opt, link); 3162 } 3163 } 3164 db_printf("\n"); 3165 3166 sp = &mp->mnt_stat; 3167 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3168 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3169 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3170 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3171 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3172 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3173 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3174 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3175 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3176 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3177 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3178 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3179 3180 db_printf(" mnt_cred = { uid=%u ruid=%u", 3181 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3182 if (jailed(mp->mnt_cred)) 3183 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3184 db_printf(" }\n"); 3185 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3186 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3187 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3188 db_printf(" mnt_activevnodelistsize = %d\n", 3189 mp->mnt_activevnodelistsize); 3190 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3191 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3192 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3193 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3194 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3195 db_printf(" mnt_secondary_accwrites = %d\n", 3196 mp->mnt_secondary_accwrites); 3197 db_printf(" mnt_gjprovider = %s\n", 3198 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 3199 3200 db_printf("\n\nList of active vnodes\n"); 3201 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3202 if (vp->v_type != VMARKER) { 3203 vn_printf(vp, "vnode "); 3204 if (db_pager_quit) 3205 break; 3206 } 3207 } 3208 db_printf("\n\nList of inactive vnodes\n"); 3209 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3210 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3211 vn_printf(vp, "vnode "); 3212 if (db_pager_quit) 3213 break; 3214 } 3215 } 3216 } 3217 #endif /* DDB */ 3218 3219 /* 3220 * Fill in a struct xvfsconf based on a struct vfsconf. 
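* * The xvfsconf records built here are what userland receives from the vfs.conflist sysctl defined below; a rough userland sketch (sizing and error handling elided): *	struct xvfsconf xvf[32]; *	size_t len = sizeof(xvf); *	if (sysctlbyname("vfs.conflist", xvf, &len, NULL, 0) == 0) *		printf("%zu filesystems registered\n", len / sizeof(xvf[0]));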
3221 */ 3222 static int 3223 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3224 { 3225 struct xvfsconf xvfsp; 3226 3227 bzero(&xvfsp, sizeof(xvfsp)); 3228 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3229 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3230 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3231 xvfsp.vfc_flags = vfsp->vfc_flags; 3232 /* 3233 * These are unused in userland, we keep them 3234 * to not break binary compatibility. 3235 */ 3236 xvfsp.vfc_vfsops = NULL; 3237 xvfsp.vfc_next = NULL; 3238 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3239 } 3240 3241 #ifdef COMPAT_FREEBSD32 3242 struct xvfsconf32 { 3243 uint32_t vfc_vfsops; 3244 char vfc_name[MFSNAMELEN]; 3245 int32_t vfc_typenum; 3246 int32_t vfc_refcount; 3247 int32_t vfc_flags; 3248 uint32_t vfc_next; 3249 }; 3250 3251 static int 3252 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3253 { 3254 struct xvfsconf32 xvfsp; 3255 3256 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3257 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3258 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3259 xvfsp.vfc_flags = vfsp->vfc_flags; 3260 xvfsp.vfc_vfsops = 0; 3261 xvfsp.vfc_next = 0; 3262 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3263 } 3264 #endif 3265 3266 /* 3267 * Top level filesystem related information gathering. 3268 */ 3269 static int 3270 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3271 { 3272 struct vfsconf *vfsp; 3273 int error; 3274 3275 error = 0; 3276 vfsconf_slock(); 3277 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3278 #ifdef COMPAT_FREEBSD32 3279 if (req->flags & SCTL_MASK32) 3280 error = vfsconf2x32(req, vfsp); 3281 else 3282 #endif 3283 error = vfsconf2x(req, vfsp); 3284 if (error) 3285 break; 3286 } 3287 vfsconf_sunlock(); 3288 return (error); 3289 } 3290 3291 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3292 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3293 "S,xvfsconf", "List of all configured filesystems"); 3294 3295 #ifndef BURN_BRIDGES 3296 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3297 3298 static int 3299 vfs_sysctl(SYSCTL_HANDLER_ARGS) 3300 { 3301 int *name = (int *)arg1 - 1; /* XXX */ 3302 u_int namelen = arg2 + 1; /* XXX */ 3303 struct vfsconf *vfsp; 3304 3305 log(LOG_WARNING, "userland calling deprecated sysctl, " 3306 "please rebuild world\n"); 3307 3308 #if 1 || defined(COMPAT_PRELITE2) 3309 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 3310 if (namelen == 1) 3311 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3312 #endif 3313 3314 switch (name[1]) { 3315 case VFS_MAXTYPENUM: 3316 if (namelen != 2) 3317 return (ENOTDIR); 3318 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3319 case VFS_CONF: 3320 if (namelen != 3) 3321 return (ENOTDIR); /* overloaded */ 3322 vfsconf_slock(); 3323 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3324 if (vfsp->vfc_typenum == name[2]) 3325 break; 3326 } 3327 vfsconf_sunlock(); 3328 if (vfsp == NULL) 3329 return (EOPNOTSUPP); 3330 #ifdef COMPAT_FREEBSD32 3331 if (req->flags & SCTL_MASK32) 3332 return (vfsconf2x32(req, vfsp)); 3333 else 3334 #endif 3335 return (vfsconf2x(req, vfsp)); 3336 } 3337 return (EOPNOTSUPP); 3338 } 3339 3340 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3341 CTLFLAG_MPSAFE, vfs_sysctl, 3342 "Generic filesystem"); 3343 3344 #if 1 || defined(COMPAT_PRELITE2) 3345 3346 static int 3347 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3348 { 3349 int error; 3350 struct vfsconf *vfsp; 3351 struct ovfsconf ovfs; 3352 3353 vfsconf_slock(); 3354 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3355 bzero(&ovfs, sizeof(ovfs)); 3356 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3357 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3358 ovfs.vfc_index = vfsp->vfc_typenum; 3359 ovfs.vfc_refcount = vfsp->vfc_refcount; 3360 ovfs.vfc_flags = vfsp->vfc_flags; 3361 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3362 if (error != 0) { 3363 vfsconf_sunlock(); 3364 return (error); 3365 } 3366 } 3367 vfsconf_sunlock(); 3368 return (0); 3369 } 3370 3371 #endif /* 1 || COMPAT_PRELITE2 */ 3372 #endif /* !BURN_BRIDGES */ 3373 3374 #define KINFO_VNODESLOP 10 3375 #ifdef notyet 3376 /* 3377 * Dump vnode list (via sysctl). 3378 */ 3379 /* ARGSUSED */ 3380 static int 3381 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3382 { 3383 struct xvnode *xvn; 3384 struct mount *mp; 3385 struct vnode *vp; 3386 int error, len, n; 3387 3388 /* 3389 * Stale numvnodes access is not fatal here. 3390 */ 3391 req->lock = 0; 3392 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3393 if (!req->oldptr) 3394 /* Make an estimate */ 3395 return (SYSCTL_OUT(req, 0, len)); 3396 3397 error = sysctl_wire_old_buffer(req, 0); 3398 if (error != 0) 3399 return (error); 3400 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3401 n = 0; 3402 mtx_lock(&mountlist_mtx); 3403 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3404 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3405 continue; 3406 MNT_ILOCK(mp); 3407 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3408 if (n == len) 3409 break; 3410 vref(vp); 3411 xvn[n].xv_size = sizeof *xvn; 3412 xvn[n].xv_vnode = vp; 3413 xvn[n].xv_id = 0; /* XXX compat */ 3414 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3415 XV_COPY(usecount); 3416 XV_COPY(writecount); 3417 XV_COPY(holdcnt); 3418 XV_COPY(mount); 3419 XV_COPY(numoutput); 3420 XV_COPY(type); 3421 #undef XV_COPY 3422 xvn[n].xv_flag = vp->v_vflag; 3423 3424 switch (vp->v_type) { 3425 case VREG: 3426 case VDIR: 3427 case VLNK: 3428 break; 3429 case VBLK: 3430 case VCHR: 3431 if (vp->v_rdev == NULL) { 3432 vrele(vp); 3433 continue; 3434 } 3435 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3436 break; 3437 case VSOCK: 3438 xvn[n].xv_socket = vp->v_socket; 3439 break; 3440 case VFIFO: 3441 xvn[n].xv_fifo = vp->v_fifoinfo; 3442 break; 3443 case VNON: 3444 case VBAD: 3445 default: 3446 /* shouldn't happen? 
*/ 3447 vrele(vp); 3448 continue; 3449 } 3450 vrele(vp); 3451 ++n; 3452 } 3453 MNT_IUNLOCK(mp); 3454 mtx_lock(&mountlist_mtx); 3455 vfs_unbusy(mp); 3456 if (n == len) 3457 break; 3458 } 3459 mtx_unlock(&mountlist_mtx); 3460 3461 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3462 free(xvn, M_TEMP); 3463 return (error); 3464 } 3465 3466 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3467 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 3468 ""); 3469 #endif 3470 3471 /* 3472 * Unmount all filesystems. The list is traversed in reverse order 3473 * of mounting to avoid dependencies. 3474 */ 3475 void 3476 vfs_unmountall(void) 3477 { 3478 struct mount *mp; 3479 struct thread *td; 3480 int error; 3481 3482 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 3483 td = curthread; 3484 3485 /* 3486 * Since this only runs when rebooting, it is not interlocked. 3487 */ 3488 while(!TAILQ_EMPTY(&mountlist)) { 3489 mp = TAILQ_LAST(&mountlist, mntlist); 3490 error = dounmount(mp, MNT_FORCE, td); 3491 if (error) { 3492 TAILQ_REMOVE(&mountlist, mp, mnt_list); 3493 /* 3494 * XXX: Due to the way in which we mount the root 3495 * file system off of devfs, devfs will generate a 3496 * "busy" warning when we try to unmount it before 3497 * the root. Don't print a warning as a result in 3498 * order to avoid false positive errors that may 3499 * cause needless upset. 3500 */ 3501 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3502 printf("unmount of %s failed (", 3503 mp->mnt_stat.f_mntonname); 3504 if (error == EBUSY) 3505 printf("BUSY)\n"); 3506 else 3507 printf("%d)\n", error); 3508 } 3509 } else { 3510 /* The unmount has removed mp from the mountlist */ 3511 } 3512 } 3513 } 3514 3515 /* 3516 * Perform msync on all vnodes under a mount point. 3517 * The mount point must be locked. 3518 */ 3519 void 3520 vfs_msync(struct mount *mp, int flags) 3521 { 3522 struct vnode *vp, *mvp; 3523 struct vm_object *obj; 3524 3525 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 3526 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 3527 obj = vp->v_object; 3528 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 3529 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3530 if (!vget(vp, 3531 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3532 curthread)) { 3533 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3534 vput(vp); 3535 continue; 3536 } 3537 3538 obj = vp->v_object; 3539 if (obj != NULL) { 3540 VM_OBJECT_WLOCK(obj); 3541 vm_object_page_clean(obj, 0, 0, 3542 flags == MNT_WAIT ? 3543 OBJPC_SYNC : OBJPC_NOSYNC); 3544 VM_OBJECT_WUNLOCK(obj); 3545 } 3546 vput(vp); 3547 } 3548 } else 3549 VI_UNLOCK(vp); 3550 } 3551 } 3552 3553 static void 3554 destroy_vpollinfo_free(struct vpollinfo *vi) 3555 { 3556 3557 knlist_destroy(&vi->vpi_selinfo.si_note); 3558 mtx_destroy(&vi->vpi_lock); 3559 uma_zfree(vnodepoll_zone, vi); 3560 } 3561 3562 static void 3563 destroy_vpollinfo(struct vpollinfo *vi) 3564 { 3565 3566 knlist_clear(&vi->vpi_selinfo.si_note, 1); 3567 seldrain(&vi->vpi_selinfo); 3568 destroy_vpollinfo_free(vi); 3569 } 3570 3571 /* 3572 * Initialize per-vnode helper structure to hold poll-related state.
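* * Note the allocate-then-recheck pattern in the function below: the vpollinfo is allocated and initialized without the vnode interlock held, since uma_zalloc(..., M_WAITOK) may sleep, and a second check under VI_LOCK() discards the new structure via destroy_vpollinfo_free() if another thread installed one first.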
3573 */ 3574 void 3575 v_addpollinfo(struct vnode *vp) 3576 { 3577 struct vpollinfo *vi; 3578 3579 if (vp->v_pollinfo != NULL) 3580 return; 3581 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3582 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3583 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3584 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 3585 VI_LOCK(vp); 3586 if (vp->v_pollinfo != NULL) { 3587 VI_UNLOCK(vp); 3588 destroy_vpollinfo_free(vi); 3589 return; 3590 } 3591 vp->v_pollinfo = vi; 3592 VI_UNLOCK(vp); 3593 } 3594 3595 /* 3596 * Record a process's interest in events which might happen to 3597 * a vnode. Because poll uses the historic select-style interface 3598 * internally, this routine serves as both the ``check for any 3599 * pending events'' and the ``record my interest in future events'' 3600 * functions. (These are done together, while the lock is held, 3601 * to avoid race conditions.) 3602 */ 3603 int 3604 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3605 { 3606 3607 v_addpollinfo(vp); 3608 mtx_lock(&vp->v_pollinfo->vpi_lock); 3609 if (vp->v_pollinfo->vpi_revents & events) { 3610 /* 3611 * This leaves events we are not interested 3612 * in available for the other process 3613 * which presumably had requested them 3614 * (otherwise they would never have been 3615 * recorded). 3616 */ 3617 events &= vp->v_pollinfo->vpi_revents; 3618 vp->v_pollinfo->vpi_revents &= ~events; 3619 3620 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3621 return (events); 3622 } 3623 vp->v_pollinfo->vpi_events |= events; 3624 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3625 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3626 return (0); 3627 } 3628 3629 /* 3630 * Routine to create and manage a filesystem syncer vnode. 3631 */ 3632 #define sync_close ((int (*)(struct vop_close_args *))nullop) 3633 static int sync_fsync(struct vop_fsync_args *); 3634 static int sync_inactive(struct vop_inactive_args *); 3635 static int sync_reclaim(struct vop_reclaim_args *); 3636 3637 static struct vop_vector sync_vnodeops = { 3638 .vop_bypass = VOP_EOPNOTSUPP, 3639 .vop_close = sync_close, /* close */ 3640 .vop_fsync = sync_fsync, /* fsync */ 3641 .vop_inactive = sync_inactive, /* inactive */ 3642 .vop_reclaim = sync_reclaim, /* reclaim */ 3643 .vop_lock1 = vop_stdlock, /* lock */ 3644 .vop_unlock = vop_stdunlock, /* unlock */ 3645 .vop_islocked = vop_stdislocked, /* islocked */ 3646 }; 3647 3648 /* 3649 * Create a new filesystem syncer vnode for the specified mount point. 3650 */ 3651 void 3652 vfs_allocate_syncvnode(struct mount *mp) 3653 { 3654 struct vnode *vp; 3655 struct bufobj *bo; 3656 static long start, incr, next; 3657 int error; 3658 3659 /* Allocate a new vnode */ 3660 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 3661 if (error != 0) 3662 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 3663 vp->v_type = VNON; 3664 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3665 vp->v_vflag |= VV_FORCEINSMQ; 3666 error = insmntque(vp, mp); 3667 if (error != 0) 3668 panic("vfs_allocate_syncvnode: insmntque() failed"); 3669 vp->v_vflag &= ~VV_FORCEINSMQ; 3670 VOP_UNLOCK(vp, 0); 3671 /* 3672 * Place the vnode onto the syncer worklist. We attempt to 3673 * scatter them about on the list so that they will go off 3674 * at evenly distributed times even if all the filesystems 3675 * are mounted at once.
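* * As a worked example (assuming the default 32-slot wheel): the static start/incr/next variables below generate a bit-reversal-like sequence, handing successive syncer vnodes offsets 16, 8, 24, 4, 12, 20, 28, 2, 6, ... so per-mount syncer activity is spread across the wheel instead of clustering in a single second.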
3676 */ 3677 next += incr; 3678 if (next == 0 || next > syncer_maxdelay) { 3679 start /= 2; 3680 incr /= 2; 3681 if (start == 0) { 3682 start = syncer_maxdelay / 2; 3683 incr = syncer_maxdelay; 3684 } 3685 next = start; 3686 } 3687 bo = &vp->v_bufobj; 3688 BO_LOCK(bo); 3689 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3690 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3691 mtx_lock(&sync_mtx); 3692 sync_vnode_count++; 3693 if (mp->mnt_syncer == NULL) { 3694 mp->mnt_syncer = vp; 3695 vp = NULL; 3696 } 3697 mtx_unlock(&sync_mtx); 3698 BO_UNLOCK(bo); 3699 if (vp != NULL) { 3700 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3701 vgone(vp); 3702 vput(vp); 3703 } 3704 } 3705 3706 void 3707 vfs_deallocate_syncvnode(struct mount *mp) 3708 { 3709 struct vnode *vp; 3710 3711 mtx_lock(&sync_mtx); 3712 vp = mp->mnt_syncer; 3713 if (vp != NULL) 3714 mp->mnt_syncer = NULL; 3715 mtx_unlock(&sync_mtx); 3716 if (vp != NULL) 3717 vrele(vp); 3718 } 3719 3720 /* 3721 * Do a lazy sync of the filesystem. 3722 */ 3723 static int 3724 sync_fsync(struct vop_fsync_args *ap) 3725 { 3726 struct vnode *syncvp = ap->a_vp; 3727 struct mount *mp = syncvp->v_mount; 3728 int error, save; 3729 struct bufobj *bo; 3730 3731 /* 3732 * We only need to do something if this is a lazy evaluation. 3733 */ 3734 if (ap->a_waitfor != MNT_LAZY) 3735 return (0); 3736 3737 /* 3738 * Move ourselves to the back of the sync list. 3739 */ 3740 bo = &syncvp->v_bufobj; 3741 BO_LOCK(bo); 3742 vn_syncer_add_to_worklist(bo, syncdelay); 3743 BO_UNLOCK(bo); 3744 3745 /* 3746 * Walk the list of vnodes pushing all that are dirty and 3747 * not already on the sync list. 3748 */ 3749 if (vfs_busy(mp, MBF_NOWAIT) != 0) 3750 return (0); 3751 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3752 vfs_unbusy(mp); 3753 return (0); 3754 } 3755 save = curthread_pflags_set(TDP_SYNCIO); 3756 vfs_msync(mp, MNT_NOWAIT); 3757 error = VFS_SYNC(mp, MNT_LAZY); 3758 curthread_pflags_restore(save); 3759 vn_finished_write(mp); 3760 vfs_unbusy(mp); 3761 return (error); 3762 } 3763 3764 /* 3765 * The syncer vnode is no longer referenced. 3766 */ 3767 static int 3768 sync_inactive(struct vop_inactive_args *ap) 3769 { 3770 3771 vgone(ap->a_vp); 3772 return (0); 3773 } 3774 3775 /* 3776 * The syncer vnode is no longer needed and is being decommissioned. 3777 * 3778 * Modifications to the worklist must be protected by sync_mtx.
3779 */ 3780 static int 3781 sync_reclaim(struct vop_reclaim_args *ap) 3782 { 3783 struct vnode *vp = ap->a_vp; 3784 struct bufobj *bo; 3785 3786 bo = &vp->v_bufobj; 3787 BO_LOCK(bo); 3788 mtx_lock(&sync_mtx); 3789 if (vp->v_mount->mnt_syncer == vp) 3790 vp->v_mount->mnt_syncer = NULL; 3791 if (bo->bo_flag & BO_ONWORKLST) { 3792 LIST_REMOVE(bo, bo_synclist); 3793 syncer_worklist_len--; 3794 sync_vnode_count--; 3795 bo->bo_flag &= ~BO_ONWORKLST; 3796 } 3797 mtx_unlock(&sync_mtx); 3798 BO_UNLOCK(bo); 3799 3800 return (0); 3801 } 3802 3803 /* 3804 * Check if vnode represents a disk device 3805 */ 3806 int 3807 vn_isdisk(struct vnode *vp, int *errp) 3808 { 3809 int error; 3810 3811 if (vp->v_type != VCHR) { 3812 error = ENOTBLK; 3813 goto out; 3814 } 3815 error = 0; 3816 dev_lock(); 3817 if (vp->v_rdev == NULL) 3818 error = ENXIO; 3819 else if (vp->v_rdev->si_devsw == NULL) 3820 error = ENXIO; 3821 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3822 error = ENOTBLK; 3823 dev_unlock(); 3824 out: 3825 if (errp != NULL) 3826 *errp = error; 3827 return (error == 0); 3828 } 3829 3830 /* 3831 * Common filesystem object access control check routine. Accepts a 3832 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3833 * and optional call-by-reference privused argument allowing vaccess() 3834 * to indicate to the caller whether privilege was used to satisfy the 3835 * request (obsoleted). Returns 0 on success, or an errno on failure. 3836 */ 3837 int 3838 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3839 accmode_t accmode, struct ucred *cred, int *privused) 3840 { 3841 accmode_t dac_granted; 3842 accmode_t priv_granted; 3843 3844 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 3845 ("invalid bit in accmode")); 3846 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 3847 ("VAPPEND without VWRITE")); 3848 3849 /* 3850 * Look for a normal, non-privileged way to access the file/directory 3851 * as requested. If it exists, go with that. 3852 */ 3853 3854 if (privused != NULL) 3855 *privused = 0; 3856 3857 dac_granted = 0; 3858 3859 /* Check the owner. */ 3860 if (cred->cr_uid == file_uid) { 3861 dac_granted |= VADMIN; 3862 if (file_mode & S_IXUSR) 3863 dac_granted |= VEXEC; 3864 if (file_mode & S_IRUSR) 3865 dac_granted |= VREAD; 3866 if (file_mode & S_IWUSR) 3867 dac_granted |= (VWRITE | VAPPEND); 3868 3869 if ((accmode & dac_granted) == accmode) 3870 return (0); 3871 3872 goto privcheck; 3873 } 3874 3875 /* Otherwise, check the groups (first match) */ 3876 if (groupmember(file_gid, cred)) { 3877 if (file_mode & S_IXGRP) 3878 dac_granted |= VEXEC; 3879 if (file_mode & S_IRGRP) 3880 dac_granted |= VREAD; 3881 if (file_mode & S_IWGRP) 3882 dac_granted |= (VWRITE | VAPPEND); 3883 3884 if ((accmode & dac_granted) == accmode) 3885 return (0); 3886 3887 goto privcheck; 3888 } 3889 3890 /* Otherwise, check everyone else. */ 3891 if (file_mode & S_IXOTH) 3892 dac_granted |= VEXEC; 3893 if (file_mode & S_IROTH) 3894 dac_granted |= VREAD; 3895 if (file_mode & S_IWOTH) 3896 dac_granted |= (VWRITE | VAPPEND); 3897 if ((accmode & dac_granted) == accmode) 3898 return (0); 3899 3900 privcheck: 3901 /* 3902 * Build a privilege mask to determine if the set of privileges 3903 * satisfies the requirements when combined with the granted mask 3904 * from above. For each privilege, if the privilege is required, 3905 * bitwise or the request type onto the priv_granted mask. 
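* * As a worked example: a request for VREAD | VWRITE on a mode 0640 file owned by another uid but with a matching group grants only VREAD in the group branch above, so the request falls through to here and succeeds only if priv_check_cred(cred, PRIV_VFS_WRITE, 0) supplies the missing VWRITE; otherwise the function returns EACCES (or EPERM when VADMIN was requested).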
3906 */ 3907 priv_granted = 0; 3908 3909 if (type == VDIR) { 3910 /* 3911 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 3912 * requests, instead of PRIV_VFS_EXEC. 3913 */ 3914 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3915 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 3916 priv_granted |= VEXEC; 3917 } else { 3918 /* 3919 * Ensure that at least one execute bit is on. Otherwise, 3920 * a privileged user will always succeed, and we don't want 3921 * this to happen unless the file really is executable. 3922 */ 3923 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3924 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 3925 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 3926 priv_granted |= VEXEC; 3927 } 3928 3929 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 3930 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 3931 priv_granted |= VREAD; 3932 3933 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3934 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 3935 priv_granted |= (VWRITE | VAPPEND); 3936 3937 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3938 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 3939 priv_granted |= VADMIN; 3940 3941 if ((accmode & (priv_granted | dac_granted)) == accmode) { 3942 /* XXX audit: privilege used */ 3943 if (privused != NULL) 3944 *privused = 1; 3945 return (0); 3946 } 3947 3948 return ((accmode & VADMIN) ? EPERM : EACCES); 3949 } 3950 3951 /* 3952 * Credential check based on process requesting service, and per-attribute 3953 * permissions. 3954 */ 3955 int 3956 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 3957 struct thread *td, accmode_t accmode) 3958 { 3959 3960 /* 3961 * Kernel-invoked requests always succeed. 3962 */ 3963 if (cred == NOCRED) 3964 return (0); 3965 3966 /* 3967 * Do not allow privileged processes in jail to directly manipulate 3968 * system attributes. 3969 */ 3970 switch (attrnamespace) { 3971 case EXTATTR_NAMESPACE_SYSTEM: 3972 /* Potentially should be: return (EPERM); */ 3973 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 3974 case EXTATTR_NAMESPACE_USER: 3975 return (VOP_ACCESS(vp, accmode, cred, td)); 3976 default: 3977 return (EPERM); 3978 } 3979 } 3980 3981 #ifdef DEBUG_VFS_LOCKS 3982 /* 3983 * This only exists to suppress warnings from unlocked specfs accesses. It is 3984 * no longer ok to have an unlocked VFS. 3985 */ 3986 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 3987 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 3988 3989 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 3990 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 3991 "Drop into debugger on lock violation"); 3992 3993 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 3994 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 3995 0, "Check for interlock across VOPs"); 3996 3997 int vfs_badlock_print = 1; /* Print lock violations. */ 3998 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 3999 0, "Print lock violations"); 4000 4001 #ifdef KDB 4002 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations.
*/ 4003 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 4004 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 4005 #endif 4006 4007 static void 4008 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 4009 { 4010 4011 #ifdef KDB 4012 if (vfs_badlock_backtrace) 4013 kdb_backtrace(); 4014 #endif 4015 if (vfs_badlock_print) 4016 printf("%s: %p %s\n", str, (void *)vp, msg); 4017 if (vfs_badlock_ddb) 4018 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4019 } 4020 4021 void 4022 assert_vi_locked(struct vnode *vp, const char *str) 4023 { 4024 4025 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 4026 vfs_badlock("interlock is not locked but should be", str, vp); 4027 } 4028 4029 void 4030 assert_vi_unlocked(struct vnode *vp, const char *str) 4031 { 4032 4033 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4034 vfs_badlock("interlock is locked but should not be", str, vp); 4035 } 4036 4037 void 4038 assert_vop_locked(struct vnode *vp, const char *str) 4039 { 4040 int locked; 4041 4042 if (!IGNORE_LOCK(vp)) { 4043 locked = VOP_ISLOCKED(vp); 4044 if (locked == 0 || locked == LK_EXCLOTHER) 4045 vfs_badlock("is not locked but should be", str, vp); 4046 } 4047 } 4048 4049 void 4050 assert_vop_unlocked(struct vnode *vp, const char *str) 4051 { 4052 4053 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4054 vfs_badlock("is locked but should not be", str, vp); 4055 } 4056 4057 void 4058 assert_vop_elocked(struct vnode *vp, const char *str) 4059 { 4060 4061 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4062 vfs_badlock("is not exclusive locked but should be", str, vp); 4063 } 4064 4065 #if 0 4066 void 4067 assert_vop_elocked_other(struct vnode *vp, const char *str) 4068 { 4069 4070 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 4071 vfs_badlock("is not exclusive locked by another thread", 4072 str, vp); 4073 } 4074 4075 void 4076 assert_vop_slocked(struct vnode *vp, const char *str) 4077 { 4078 4079 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 4080 vfs_badlock("is not locked shared but should be", str, vp); 4081 } 4082 #endif /* 0 */ 4083 #endif /* DEBUG_VFS_LOCKS */ 4084 4085 void 4086 vop_rename_fail(struct vop_rename_args *ap) 4087 { 4088 4089 if (ap->a_tvp != NULL) 4090 vput(ap->a_tvp); 4091 if (ap->a_tdvp == ap->a_tvp) 4092 vrele(ap->a_tdvp); 4093 else 4094 vput(ap->a_tdvp); 4095 vrele(ap->a_fdvp); 4096 vrele(ap->a_fvp); 4097 } 4098 4099 void 4100 vop_rename_pre(void *ap) 4101 { 4102 struct vop_rename_args *a = ap; 4103 4104 #ifdef DEBUG_VFS_LOCKS 4105 if (a->a_tvp) 4106 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4107 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4108 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4109 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4110 4111 /* Check the source (from). */ 4112 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4113 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4114 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4115 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4116 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4117 4118 /* Check the target. 
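 * Both the target directory (tdvp) and the target vnode (tvp), if one
 * exists, are expected to be locked by the caller.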
*/ 4119 if (a->a_tvp) 4120 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4121 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4122 #endif 4123 if (a->a_tdvp != a->a_fdvp) 4124 vhold(a->a_fdvp); 4125 if (a->a_tvp != a->a_fvp) 4126 vhold(a->a_fvp); 4127 vhold(a->a_tdvp); 4128 if (a->a_tvp) 4129 vhold(a->a_tvp); 4130 } 4131 4132 void 4133 vop_strategy_pre(void *ap) 4134 { 4135 #ifdef DEBUG_VFS_LOCKS 4136 struct vop_strategy_args *a; 4137 struct buf *bp; 4138 4139 a = ap; 4140 bp = a->a_bp; 4141 4142 /* 4143 * Cluster ops lock their component buffers but not the IO container. 4144 */ 4145 if ((bp->b_flags & B_CLUSTER) != 0) 4146 return; 4147 4148 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4149 if (vfs_badlock_print) 4150 printf( 4151 "VOP_STRATEGY: bp is not locked but should be\n"); 4152 if (vfs_badlock_ddb) 4153 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4154 } 4155 #endif 4156 } 4157 4158 void 4159 vop_lock_pre(void *ap) 4160 { 4161 #ifdef DEBUG_VFS_LOCKS 4162 struct vop_lock1_args *a = ap; 4163 4164 if ((a->a_flags & LK_INTERLOCK) == 0) 4165 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4166 else 4167 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4168 #endif 4169 } 4170 4171 void 4172 vop_lock_post(void *ap, int rc) 4173 { 4174 #ifdef DEBUG_VFS_LOCKS 4175 struct vop_lock1_args *a = ap; 4176 4177 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4178 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4179 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4180 #endif 4181 } 4182 4183 void 4184 vop_unlock_pre(void *ap) 4185 { 4186 #ifdef DEBUG_VFS_LOCKS 4187 struct vop_unlock_args *a = ap; 4188 4189 if (a->a_flags & LK_INTERLOCK) 4190 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4191 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4192 #endif 4193 } 4194 4195 void 4196 vop_unlock_post(void *ap, int rc) 4197 { 4198 #ifdef DEBUG_VFS_LOCKS 4199 struct vop_unlock_args *a = ap; 4200 4201 if (a->a_flags & LK_INTERLOCK) 4202 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4203 #endif 4204 } 4205 4206 void 4207 vop_create_post(void *ap, int rc) 4208 { 4209 struct vop_create_args *a = ap; 4210 4211 if (!rc) 4212 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4213 } 4214 4215 void 4216 vop_deleteextattr_post(void *ap, int rc) 4217 { 4218 struct vop_deleteextattr_args *a = ap; 4219 4220 if (!rc) 4221 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4222 } 4223 4224 void 4225 vop_link_post(void *ap, int rc) 4226 { 4227 struct vop_link_args *a = ap; 4228 4229 if (!rc) { 4230 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4231 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4232 } 4233 } 4234 4235 void 4236 vop_mkdir_post(void *ap, int rc) 4237 { 4238 struct vop_mkdir_args *a = ap; 4239 4240 if (!rc) 4241 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4242 } 4243 4244 void 4245 vop_mknod_post(void *ap, int rc) 4246 { 4247 struct vop_mknod_args *a = ap; 4248 4249 if (!rc) 4250 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4251 } 4252 4253 void 4254 vop_remove_post(void *ap, int rc) 4255 { 4256 struct vop_remove_args *a = ap; 4257 4258 if (!rc) { 4259 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4260 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4261 } 4262 } 4263 4264 void 4265 vop_rename_post(void *ap, int rc) 4266 { 4267 struct vop_rename_args *a = ap; 4268 4269 if (!rc) { 4270 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 4271 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 4272 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4273 if (a->a_tvp) 4274 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4275 } 4276 if (a->a_tdvp != a->a_fdvp) 4277 vdrop(a->a_fdvp); 4278 if (a->a_tvp != 
a->a_fvp) 4279 vdrop(a->a_fvp); 4280 vdrop(a->a_tdvp); 4281 if (a->a_tvp) 4282 vdrop(a->a_tvp); 4283 } 4284 4285 void 4286 vop_rmdir_post(void *ap, int rc) 4287 { 4288 struct vop_rmdir_args *a = ap; 4289 4290 if (!rc) { 4291 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4292 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4293 } 4294 } 4295 4296 void 4297 vop_setattr_post(void *ap, int rc) 4298 { 4299 struct vop_setattr_args *a = ap; 4300 4301 if (!rc) 4302 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4303 } 4304 4305 void 4306 vop_setextattr_post(void *ap, int rc) 4307 { 4308 struct vop_setextattr_args *a = ap; 4309 4310 if (!rc) 4311 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4312 } 4313 4314 void 4315 vop_symlink_post(void *ap, int rc) 4316 { 4317 struct vop_symlink_args *a = ap; 4318 4319 if (!rc) 4320 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4321 } 4322 4323 static struct knlist fs_knlist; 4324 4325 static void 4326 vfs_event_init(void *arg) 4327 { 4328 knlist_init_mtx(&fs_knlist, NULL); 4329 } 4330 /* XXX - correct order? */ 4331 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4332 4333 void 4334 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4335 { 4336 4337 KNOTE_UNLOCKED(&fs_knlist, event); 4338 } 4339 4340 static int filt_fsattach(struct knote *kn); 4341 static void filt_fsdetach(struct knote *kn); 4342 static int filt_fsevent(struct knote *kn, long hint); 4343 4344 struct filterops fs_filtops = { 4345 .f_isfd = 0, 4346 .f_attach = filt_fsattach, 4347 .f_detach = filt_fsdetach, 4348 .f_event = filt_fsevent 4349 }; 4350 4351 static int 4352 filt_fsattach(struct knote *kn) 4353 { 4354 4355 kn->kn_flags |= EV_CLEAR; 4356 knlist_add(&fs_knlist, kn, 0); 4357 return (0); 4358 } 4359 4360 static void 4361 filt_fsdetach(struct knote *kn) 4362 { 4363 4364 knlist_remove(&fs_knlist, kn, 0); 4365 } 4366 4367 static int 4368 filt_fsevent(struct knote *kn, long hint) 4369 { 4370 4371 kn->kn_fflags |= hint; 4372 return (kn->kn_fflags != 0); 4373 } 4374 4375 static int 4376 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4377 { 4378 struct vfsidctl vc; 4379 int error; 4380 struct mount *mp; 4381 4382 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4383 if (error) 4384 return (error); 4385 if (vc.vc_vers != VFS_CTL_VERS1) 4386 return (EINVAL); 4387 mp = vfs_getvfs(&vc.vc_fsid); 4388 if (mp == NULL) 4389 return (ENOENT); 4390 /* ensure that a specific sysctl goes to the right filesystem. */ 4391 if (strcmp(vc.vc_fstypename, "*") != 0 && 4392 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4393 vfs_rel(mp); 4394 return (EINVAL); 4395 } 4396 VCTLTOREQ(&vc, req); 4397 error = VFS_SYSCTL(mp, vc.vc_op, req); 4398 vfs_rel(mp); 4399 return (error); 4400 } 4401 4402 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4403 NULL, 0, sysctl_vfs_ctl, "", 4404 "Sysctl by fsid"); 4405 4406 /* 4407 * Function to initialize a va_filerev field sensibly. 4408 * XXX: Wouldn't a random number make a lot more sense ?? 
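 * The value built below packs the boot-relative uptime: the seconds go in
 * the upper 32 bits and the top half of the binary fraction in the lower
 * 32 bits, so successive calls within one boot yield increasing values.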
4409 */ 4410 u_quad_t 4411 init_va_filerev(void) 4412 { 4413 struct bintime bt; 4414 4415 getbinuptime(&bt); 4416 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4417 } 4418 4419 static int filt_vfsread(struct knote *kn, long hint); 4420 static int filt_vfswrite(struct knote *kn, long hint); 4421 static int filt_vfsvnode(struct knote *kn, long hint); 4422 static void filt_vfsdetach(struct knote *kn); 4423 static struct filterops vfsread_filtops = { 4424 .f_isfd = 1, 4425 .f_detach = filt_vfsdetach, 4426 .f_event = filt_vfsread 4427 }; 4428 static struct filterops vfswrite_filtops = { 4429 .f_isfd = 1, 4430 .f_detach = filt_vfsdetach, 4431 .f_event = filt_vfswrite 4432 }; 4433 static struct filterops vfsvnode_filtops = { 4434 .f_isfd = 1, 4435 .f_detach = filt_vfsdetach, 4436 .f_event = filt_vfsvnode 4437 }; 4438 4439 static void 4440 vfs_knllock(void *arg) 4441 { 4442 struct vnode *vp = arg; 4443 4444 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4445 } 4446 4447 static void 4448 vfs_knlunlock(void *arg) 4449 { 4450 struct vnode *vp = arg; 4451 4452 VOP_UNLOCK(vp, 0); 4453 } 4454 4455 static void 4456 vfs_knl_assert_locked(void *arg) 4457 { 4458 #ifdef DEBUG_VFS_LOCKS 4459 struct vnode *vp = arg; 4460 4461 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 4462 #endif 4463 } 4464 4465 static void 4466 vfs_knl_assert_unlocked(void *arg) 4467 { 4468 #ifdef DEBUG_VFS_LOCKS 4469 struct vnode *vp = arg; 4470 4471 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 4472 #endif 4473 } 4474 4475 int 4476 vfs_kqfilter(struct vop_kqfilter_args *ap) 4477 { 4478 struct vnode *vp = ap->a_vp; 4479 struct knote *kn = ap->a_kn; 4480 struct knlist *knl; 4481 4482 switch (kn->kn_filter) { 4483 case EVFILT_READ: 4484 kn->kn_fop = &vfsread_filtops; 4485 break; 4486 case EVFILT_WRITE: 4487 kn->kn_fop = &vfswrite_filtops; 4488 break; 4489 case EVFILT_VNODE: 4490 kn->kn_fop = &vfsvnode_filtops; 4491 break; 4492 default: 4493 return (EINVAL); 4494 } 4495 4496 kn->kn_hook = (caddr_t)vp; 4497 4498 v_addpollinfo(vp); 4499 if (vp->v_pollinfo == NULL) 4500 return (ENOMEM); 4501 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 4502 vhold(vp); 4503 knlist_add(knl, kn, 0); 4504 4505 return (0); 4506 } 4507 4508 /* 4509 * Detach knote from vnode 4510 */ 4511 static void 4512 filt_vfsdetach(struct knote *kn) 4513 { 4514 struct vnode *vp = (struct vnode *)kn->kn_hook; 4515 4516 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 4517 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 4518 vdrop(vp); 4519 } 4520 4521 /*ARGSUSED*/ 4522 static int 4523 filt_vfsread(struct knote *kn, long hint) 4524 { 4525 struct vnode *vp = (struct vnode *)kn->kn_hook; 4526 struct vattr va; 4527 int res; 4528 4529 /* 4530 * filesystem is gone, so set the EOF flag and schedule 4531 * the knote for deletion. 4532 */ 4533 if (hint == NOTE_REVOKE) { 4534 VI_LOCK(vp); 4535 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4536 VI_UNLOCK(vp); 4537 return (1); 4538 } 4539 4540 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 4541 return (0); 4542 4543 VI_LOCK(vp); 4544 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 4545 res = (kn->kn_data != 0); 4546 VI_UNLOCK(vp); 4547 return (res); 4548 } 4549 4550 /*ARGSUSED*/ 4551 static int 4552 filt_vfswrite(struct knote *kn, long hint) 4553 { 4554 struct vnode *vp = (struct vnode *)kn->kn_hook; 4555 4556 VI_LOCK(vp); 4557 4558 /* 4559 * filesystem is gone, so set the EOF flag and schedule 4560 * the knote for deletion. 
4561 */ 4562 if (hint == NOTE_REVOKE) 4563 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4564 4565 kn->kn_data = 0; 4566 VI_UNLOCK(vp); 4567 return (1); 4568 } 4569 4570 static int 4571 filt_vfsvnode(struct knote *kn, long hint) 4572 { 4573 struct vnode *vp = (struct vnode *)kn->kn_hook; 4574 int res; 4575 4576 VI_LOCK(vp); 4577 if (kn->kn_sfflags & hint) 4578 kn->kn_fflags |= hint; 4579 if (hint == NOTE_REVOKE) { 4580 kn->kn_flags |= EV_EOF; 4581 VI_UNLOCK(vp); 4582 return (1); 4583 } 4584 res = (kn->kn_fflags != 0); 4585 VI_UNLOCK(vp); 4586 return (res); 4587 } 4588 4589 int 4590 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 4591 { 4592 int error; 4593 4594 if (dp->d_reclen > ap->a_uio->uio_resid) 4595 return (ENAMETOOLONG); 4596 error = uiomove(dp, dp->d_reclen, ap->a_uio); 4597 if (error) { 4598 if (ap->a_ncookies != NULL) { 4599 if (ap->a_cookies != NULL) 4600 free(ap->a_cookies, M_TEMP); 4601 ap->a_cookies = NULL; 4602 *ap->a_ncookies = 0; 4603 } 4604 return (error); 4605 } 4606 if (ap->a_ncookies == NULL) 4607 return (0); 4608 4609 KASSERT(ap->a_cookies, 4610 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 4611 4612 *ap->a_cookies = realloc(*ap->a_cookies, 4613 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 4614 (*ap->a_cookies)[*ap->a_ncookies] = off; 4615 return (0); 4616 } 4617 4618 /* 4619 * Mark for update the access time of the file if the filesystem 4620 * supports VOP_MARKATIME. This functionality is used by execve and 4621 * mmap, so we want to avoid the I/O implied by directly setting 4622 * va_atime for the sake of efficiency. 4623 */ 4624 void 4625 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 4626 { 4627 struct mount *mp; 4628 4629 mp = vp->v_mount; 4630 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 4631 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 4632 (void)VOP_MARKATIME(vp); 4633 } 4634 4635 /* 4636 * The purpose of this routine is to remove granularity from accmode_t, 4637 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 4638 * VADMIN and VAPPEND. 4639 * 4640 * If it returns 0, the caller is supposed to continue with the usual 4641 * access checks using 'accmode' as modified by this routine. If it 4642 * returns nonzero value, the caller is supposed to return that value 4643 * as errno. 4644 * 4645 * Note that after this routine runs, accmode may be zero. 4646 */ 4647 int 4648 vfs_unixify_accmode(accmode_t *accmode) 4649 { 4650 /* 4651 * There is no way to specify explicit "deny" rule using 4652 * file mode or POSIX.1e ACLs. 4653 */ 4654 if (*accmode & VEXPLICIT_DENY) { 4655 *accmode = 0; 4656 return (0); 4657 } 4658 4659 /* 4660 * None of these can be translated into usual access bits. 4661 * Also, the common case for NFSv4 ACLs is to not contain 4662 * either of these bits. Caller should check for VWRITE 4663 * on the containing directory instead. 4664 */ 4665 if (*accmode & (VDELETE_CHILD | VDELETE)) 4666 return (EPERM); 4667 4668 if (*accmode & VADMIN_PERMS) { 4669 *accmode &= ~VADMIN_PERMS; 4670 *accmode |= VADMIN; 4671 } 4672 4673 /* 4674 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 4675 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 4676 */ 4677 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 4678 4679 return (0); 4680 } 4681 4682 /* 4683 * These are helper functions for filesystems to traverse all 4684 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 4685 * 4686 * This interface replaces MNT_VNODE_FOREACH. 
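 *
 * A traversal typically looks like the sketch below (illustrative only;
 * the per-vnode work is filesystem-specific). Each vnode is returned with
 * its interlock held, so the loop body is responsible for releasing it:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		... examine or queue vp ...
 *		VI_UNLOCK(vp);
 *	}
 *
 * A traversal that stops early should use MNT_VNODE_FOREACH_ALL_ABORT()
 * to release the marker vnode.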
4687 */ 4688 4689 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 4690 4691 struct vnode * 4692 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 4693 { 4694 struct vnode *vp; 4695 4696 if (should_yield()) 4697 kern_yield(PRI_USER); 4698 MNT_ILOCK(mp); 4699 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4700 vp = TAILQ_NEXT(*mvp, v_nmntvnodes); 4701 while (vp != NULL && (vp->v_type == VMARKER || 4702 (vp->v_iflag & VI_DOOMED) != 0)) 4703 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4704 4705 /* Check if we are done */ 4706 if (vp == NULL) { 4707 __mnt_vnode_markerfree_all(mvp, mp); 4708 /* MNT_IUNLOCK(mp); -- done in above function */ 4709 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 4710 return (NULL); 4711 } 4712 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4713 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4714 VI_LOCK(vp); 4715 MNT_IUNLOCK(mp); 4716 return (vp); 4717 } 4718 4719 struct vnode * 4720 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 4721 { 4722 struct vnode *vp; 4723 4724 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4725 MNT_ILOCK(mp); 4726 MNT_REF(mp); 4727 (*mvp)->v_type = VMARKER; 4728 4729 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 4730 while (vp != NULL && (vp->v_type == VMARKER || 4731 (vp->v_iflag & VI_DOOMED) != 0)) 4732 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4733 4734 /* Check if we are done */ 4735 if (vp == NULL) { 4736 MNT_REL(mp); 4737 MNT_IUNLOCK(mp); 4738 free(*mvp, M_VNODE_MARKER); 4739 *mvp = NULL; 4740 return (NULL); 4741 } 4742 (*mvp)->v_mount = mp; 4743 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4744 VI_LOCK(vp); 4745 MNT_IUNLOCK(mp); 4746 return (vp); 4747 } 4748 4749 4750 void 4751 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 4752 { 4753 4754 if (*mvp == NULL) { 4755 MNT_IUNLOCK(mp); 4756 return; 4757 } 4758 4759 mtx_assert(MNT_MTX(mp), MA_OWNED); 4760 4761 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4762 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4763 MNT_REL(mp); 4764 MNT_IUNLOCK(mp); 4765 free(*mvp, M_VNODE_MARKER); 4766 *mvp = NULL; 4767 } 4768 4769 /* 4770 * These are helper functions for filesystems to traverse their 4771 * active vnodes. 
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 4772 */ 4773 static void 4774 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4775 { 4776 4777 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4778 4779 MNT_ILOCK(mp); 4780 MNT_REL(mp); 4781 MNT_IUNLOCK(mp); 4782 free(*mvp, M_VNODE_MARKER); 4783 *mvp = NULL; 4784 } 4785 4786 static struct vnode * 4787 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4788 { 4789 struct vnode *vp, *nvp; 4790 4791 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 4792 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4793 restart: 4794 vp = TAILQ_NEXT(*mvp, v_actfreelist); 4795 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4796 while (vp != NULL) { 4797 if (vp->v_type == VMARKER) { 4798 vp = TAILQ_NEXT(vp, v_actfreelist); 4799 continue; 4800 } 4801 if (!VI_TRYLOCK(vp)) { 4802 if (mp_ncpus == 1 || should_yield()) { 4803 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4804 mtx_unlock(&vnode_free_list_mtx); 4805 pause("vnacti", 1); 4806 mtx_lock(&vnode_free_list_mtx); 4807 goto restart; 4808 } 4809 continue; 4810 } 4811 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 4812 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 4813 ("alien vnode on the active list %p %p", vp, mp)); 4814 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 4815 break; 4816 nvp = TAILQ_NEXT(vp, v_actfreelist); 4817 VI_UNLOCK(vp); 4818 vp = nvp; 4819 } 4820 4821 /* Check if we are done */ 4822 if (vp == NULL) { 4823 mtx_unlock(&vnode_free_list_mtx); 4824 mnt_vnode_markerfree_active(mvp, mp); 4825 return (NULL); 4826 } 4827 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 4828 mtx_unlock(&vnode_free_list_mtx); 4829 ASSERT_VI_LOCKED(vp, "active iter"); 4830 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 4831 return (vp); 4832 } 4833 4834 struct vnode * 4835 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4836 { 4837 4838 if (should_yield()) 4839 kern_yield(PRI_USER); 4840 mtx_lock(&vnode_free_list_mtx); 4841 return (mnt_vnode_next_active(mvp, mp)); 4842 } 4843 4844 struct vnode * 4845 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 4846 { 4847 struct vnode *vp; 4848 4849 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4850 MNT_ILOCK(mp); 4851 MNT_REF(mp); 4852 MNT_IUNLOCK(mp); 4853 (*mvp)->v_type = VMARKER; 4854 (*mvp)->v_mount = mp; 4855 4856 mtx_lock(&vnode_free_list_mtx); 4857 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 4858 if (vp == NULL) { 4859 mtx_unlock(&vnode_free_list_mtx); 4860 mnt_vnode_markerfree_active(mvp, mp); 4861 return (NULL); 4862 } 4863 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4864 return (mnt_vnode_next_active(mvp, mp)); 4865 } 4866 4867 void 4868 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4869 { 4870 4871 if (*mvp == NULL) 4872 return; 4873 4874 mtx_lock(&vnode_free_list_mtx); 4875 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4876 mtx_unlock(&vnode_free_list_mtx); 4877 mnt_vnode_markerfree_active(mvp, mp); 4878 } 4879
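/*
 * Illustrative sketch (not compiled in): how a filesystem might use
 * MNT_VNODE_FOREACH_ACTIVE() to scan only its recently used vnodes.  The
 * function and its purpose are made up for the example; the iterator hands
 * back each vnode with its interlock held, which the body must release.
 */
#if 0
static int
example_count_dirty_active(struct mount *mp)
{
	struct vnode *vp, *mvp;
	int ndirty;

	ndirty = 0;
	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		/* Peek at the dirty buffer count under the interlock. */
		if (vp->v_bufobj.bo_dirty.bv_cnt > 0)
			ndirty++;
		VI_UNLOCK(vp);
	}
	return (ndirty);
}
#endif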