/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void delmntque(struct vnode *vp);
static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
	    int slpflag, int slptimeo);
static void syncer_shutdown(void *arg, int howto);
static int vtryrecycle(struct vnode *vp);
static void v_incr_usecount(struct vnode *);
static void v_decr_usecount(struct vnode *);
static void v_decr_useonly(struct vnode *);
static void v_upgrade_usecount(struct vnode *);
static void vnlru_free(int);
static void vgonel(struct vnode *);
static void vfs_knllock(void *arg);
static void vfs_knlunlock(void *arg);
static void vfs_knl_assert_locked(void *arg);
static void vfs_knl_assert_unlocked(void *arg);
static void destroy_vpollinfo(struct vpollinfo *vi);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
 */
static unsigned long numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Free vnode target.  Free vnodes may simply be files which have been stat'd
 * but not read.  This is somewhat common, and a small cache of such files
 * should be kept to avoid recreation costs.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0,
    "Number of vnodes in the free list");

static int vlru_allow_cache_src;
SYSCTL_INT(_vfs, OID_AUTO, vlru_allow_cache_src, CTLFLAG_RW,
    &vlru_allow_cache_src, 0, "Allow vlru to reclaim source vnode");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
    "Number of calls to reassignbuf");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates are
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return uma_zalloc(buf_trie_zone, M_NOWAIT);
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of physical pages to vnodes approaches sixteen to one.
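 *
 * For illustration, assuming 4 KB pages: the MAXVNODES_MAX value defined
 * below works out to 512 * (1G / 4K / 16) = 8,388,608 vnodes, which is
 * exactly the sixteen-to-one page-to-vnode ratio applied to 512GB of
 * physical memory.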
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16))
#endif
static void
vntblinit(void *dummy __unused)
{
	u_int i;
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to physical pages
	 * is one to four until desiredvnodes exceeds 98,304.  Thereafter, the
	 * marginal ratio of desiredvnodes to physical pages is one to
	 * sixteen.  However, desiredvnodes is limited by the kernel's heap
	 * size.  The memory required by desiredvnodes vnodes and vm objects
	 * may not exceed one seventh of the kernel's heap size.
	 */
	physvnodes = maxproc + vm_cnt.v_page_count / 16 + 3 * min(98304 * 4,
	    vm_cnt.v_page_count) / 16;
	virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) +
	    sizeof(struct vnode)));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	uma_prealloc(buf_trie_zone, nbuf);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *        |
 *        +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	MNT_ILOCK(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If the thread doing the unmounting
	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
	 * indicating that this mount point has survived the unmount attempt
	 * and vfs_busy should retry.  Otherwise the unmounter thread will set
	 * the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating that
	 * the mount point is about to be really destroyed.  vfs_busy needs to
	 * release its reference on the mount point in this case and return
	 * with ENOENT, telling the caller that the mount it tried to busy is
	 * no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
	mp->mnt_lockref--;
	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and take the slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL ||
	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	/*
	 * If the thread is jailed, but this is not a jail-friendly file
	 * system, deny immediately.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
		return (EPERM);

	/*
	 * If the file system was mounted outside the jail of the calling
	 * thread, deny immediately.
	 */
	if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
		return (EPERM);

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
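 *
 * For illustration (layout taken from the code below): val[1] holds the
 * raw vfc_typenum, while val[0] is makedev(255, minor) with the low byte
 * of the type in bits 24..31 of the minor number and the 16-bit
 * mntid_base split across bits 16..23 and 0..7.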
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to ms, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vm_cnt.v_page_count * 2 / usevnodes;
	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count != 0) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 */
		if (vp->v_usecount ||
		    (!vlru_allow_cache_src &&
		    !LIST_EMPTY(&(vp)->v_cache_src)) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!vlru_allow_cache_src &&
		    !LIST_EMPTY(&(vp)->v_cache_src)) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_USER);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

/*
 * Attempt to keep the free list at wantfreevnodes length.
 */
static void
vnlru_free(int count)
{
	struct vnode *vp;

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (!vp)
			break;
		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		KASSERT((vp->v_iflag & VI_FREE) != 0,
		    ("Removing vnode not on freelist"));
		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
		    ("Mangling active vnode"));
		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
		/*
		 * Don't recycle if we can't get the interlock.
		 */
		if (!VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
			continue;
		}
		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
		    vp, ("vp inconsistent on freelist"));

		/*
		 * The clear of VI_FREE prevents activation of the
		 * vnode.  There is no sense in putting the vnode on
		 * the mount point active list, only to remove it
		 * later during recycling.  Inline the relevant part
		 * of vholdl(), to avoid triggering assertions or
		 * activating.
		 */
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		vp->v_holdcnt++;

		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		/*
		 * If the recycle succeeded this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done;
	struct proc *p = vnlruproc;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (freevnodes > wantfreevnodes)
			vnlru_free(freevnodes - wantfreevnodes);
		if (numvnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_USER);
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0)
		vgonel(vp);
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Wait for available vnodes.
 */
static int
getnewvnode_wait(int suspended)
{

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	if (numvnodes > desiredvnodes) {
		if (suspended) {
			/*
			 * The file system is being suspended, we cannot risk a
			 * deadlock here, so allocate a new vnode anyway.
			 */
			if (freevnodes > wantfreevnodes)
				vnlru_free(freevnodes - wantfreevnodes);
			return (0);
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
	}
	return (numvnodes > desiredvnodes ? ENFILE : 0);
}

void
getnewvnode_reserve(u_int count)
{
	struct thread *td;

	td = curthread;
	/* First try to be quick and racy. */
	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
		td->td_vp_reserv += count;
		return;
	} else
		atomic_subtract_long(&numvnodes, count);

	mtx_lock(&vnode_free_list_mtx);
	while (count > 0) {
		if (getnewvnode_wait(0) == 0) {
			count--;
			td->td_vp_reserv++;
			atomic_add_long(&numvnodes, 1);
		}
	}
	mtx_unlock(&vnode_free_list_mtx);
}

void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
	td->td_vp_reserv = 0;
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct thread *td;
	int error;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	vp = NULL;
	td = curthread;
	if (td->td_vp_reserv > 0) {
		td->td_vp_reserv -= 1;
		goto alloc;
	}
	mtx_lock(&vnode_free_list_mtx);
	/*
	 * Lend our context to reclaim vnodes if they've exceeded the max.
	 */
	if (freevnodes > wantfreevnodes)
		vnlru_free(1);
	error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
	    MNTK_SUSPEND));
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
	if (error != 0) {
		mtx_unlock(&vnode_free_list_mtx);
		return (error);
	}
#endif
	atomic_add_long(&numvnodes, 1);
	mtx_unlock(&vnode_free_list_mtx);
alloc:
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems
	 * opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bo = &vp->v_bufobj;
	bo->__bo_vnode = vp;
	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
	bo->bo_ops = &buf_ops_bio;
	bo->bo_private = vp;
	TAILQ_INIT(&bo->bo_clean.bv_hd);
	TAILQ_INIT(&bo->bo_dirty.bv_hd);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Finalize various vnode identity bits.
	 */
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_incr_usecount(vp);
	vp->v_data = NULL;
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
	else if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode()\n");
#endif
	if (mp != NULL) {
		bo->bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}
	rangelock_init(&vp->v_rl);

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;
	int active;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
	    ("Active vnode list size %d > Vnode list size %d",
	     mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
	active = vp->v_iflag & VI_ACTIVE;
	vp->v_iflag &= ~VI_ACTIVE;
	if (active) {
		mtx_lock(&vnode_free_list_mtx);
		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
		mp->mnt_activevnodelistsize--;
		mtx_unlock(&vnode_free_list_mtx);
	}
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
	 * Hence the need to hold the vnode interlock throughout.
	 */
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) &&
	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		if (dtr != NULL)
			dtr(vp, dtr_arg);
		return (EBUSY);
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
	    ("Activating already active vnode"));
	vp->v_iflag |= VI_ACTIVE;
	mtx_lock(&vnode_free_list_mtx);
	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
	mp->mnt_activevnodelistsize++;
	mtx_unlock(&vnode_free_list_mtx);
	VI_UNLOCK(vp);
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0 && !(flags & V_CLEANONLY))
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		BO_UNLOCK(bo);
		if (bo->bo_object != NULL) {
			VM_OBJECT_WLOCK(bo->bo_object);
			vm_object_pip_wait(bo->bo_object, "bovlbx");
			VM_OBJECT_WUNLOCK(bo->bo_object);
		}
		BO_LOCK(bo);
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL &&
	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
		VM_OBJECT_WLOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
		    OBJPR_CLEANONLY : 0);
		VM_OBJECT_WUNLOCK(bo->bo_object);
	}

#ifdef INVARIANTS
	BO_LOCK(bo);
	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("vinvalbuf: flush failed");
	BO_UNLOCK(bo);
#endif
	return (0);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{

	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
	if (vp->v_object != NULL && vp->v_object->handle != vp)
		return (0);
	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
}

/*
 * Flush out buffers on the specified list.
 *
 */
static int
flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
    int slptimeo)
{
	struct buf *bp, *nbp;
	int retval, error;
	daddr_t lblkno;
	b_xflags_t xflags;

	ASSERT_BO_WLOCKED(bo);

	retval = 0;
	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		lblkno = 0;
		xflags = 0;
		if (nbp != NULL) {
			lblkno = nbp->b_lblkno;
			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
		}
		retval = EAGAIN;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			BO_LOCK(bo);
			return (error != ENOLCK ? error : EAGAIN);
		}
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			return (EAGAIN);
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			bremfree(bp);
			bp->b_flags |= B_ASYNC;
			bwrite(bp);
			BO_LOCK(bo);
			return (EAGAIN);	/* XXX: why not loop ? */
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		BO_LOCK(bo);
		if (nbp != NULL &&
		    (nbp->b_bufobj != bo ||
		    nbp->b_lblkno != lblkno ||
		    (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags))
			break;			/* nbp invalid */
	}
	return (retval);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize)
{
	struct buf *bp, *nbp;
	int anyfreed;
	int trunclbn;
	struct bufobj *bo;

	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
	    vp, cred, blksize, (uintmax_t)length);

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	bo = &vp->v_bufobj;
	BO_LOCK(bo);
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_LOCKPTR(bo)) == ENOLCK)
				goto restart;

			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;

			BO_LOCK(bo);
			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI))) {
				BO_UNLOCK(bo);
				goto restart;
			}
		}

		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_LOCKPTR(bo)) == ENOLCK)
				goto restart;
			bremfree(bp);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;

			BO_LOCK(bo);
			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI) == 0)) {
				BO_UNLOCK(bo);
				goto restart;
			}
		}
	}

	if (length > 0) {
restartsync:
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_LOCKPTR(bo)) == ENOLCK) {
				goto restart;
			}
			VNASSERT((bp->b_flags & B_DELWRI), vp,
			    ("buf(%p) on dirty queue without DELWRI", bp));

			bremfree(bp);
			bawrite(bp);
			BO_LOCK(bo);
			goto restartsync;
		}
	}

	bufobj_wwait(bo, 0, 0);
	BO_UNLOCK(bo);
	vnode_pager_setsize(vp, length);

	return (0);
}

static void
buf_vlist_remove(struct buf *bp)
{
	struct bufv *bv;

	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	ASSERT_BO_WLOCKED(bp->b_bufobj);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
	    (BX_VNDIRTY|BX_VNCLEAN),
	    ("buf_vlist_remove: Buf %p is on two lists", bp));
	if (bp->b_xflags & BX_VNDIRTY)
		bv = &bp->b_bufobj->bo_dirty;
	else
		bv = &bp->b_bufobj->bo_clean;
	BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
	bv->bv_cnt--;
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static void
buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
{
	struct bufv *bv;
	struct buf *n;
	int error;

	ASSERT_BO_WLOCKED(bo);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY)
		bv = &bo->bo_dirty;
	else
		bv = &bo->bo_clean;

	/*
	 * Keep the list ordered.  Optimize empty list insertion.  Assume
	 * we tend to grow at the tail so lookup_le should usually be cheaper
	 * than _ge.
	 */
	if (bv->bv_cnt == 0 ||
	    bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno)
		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
	else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL)
		TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
	else
		TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
	error = BUF_PCTRIE_INSERT(&bv->bv_root, bp);
	if (error)
		panic("buf_vlist_add: Preallocated nodes insufficient.");
	bv->bv_cnt++;
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct bufobj *bo, daddr_t lblkno)
{
	struct buf *bp;

	ASSERT_BO_LOCKED(bo);
	bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
	if (bp != NULL)
		return (bp);
	return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	struct bufobj *bo;

	bo = &vp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);
	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));

	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
	    ("bgetvp: bp already attached! %p", bp));

	vhold(vp);
	bp->b_vp = vp;
	bp->b_bufobj = bo;
	/*
	 * Insert onto list for new vnode.
	 */
	buf_vlist_add(bp, bo, BX_VNCLEAN);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct bufobj *bo;
	struct vnode *vp;

	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;		/* XXX */
	bo = bp->b_bufobj;
	BO_LOCK(bo);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	else
		panic("brelvp: Buffer %p not on queue.", bp);
	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
		bo->bo_flag &= ~BO_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(bo, bo_synclist);
		syncer_worklist_len--;
		mtx_unlock(&sync_mtx);
	}
	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	BO_UNLOCK(bo);
	vdrop(vp);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
{
	int slot;

	ASSERT_BO_WLOCKED(bo);

	mtx_lock(&sync_mtx);
	if (bo->bo_flag & BO_ONWORKLST)
		LIST_REMOVE(bo, bo_synclist);
	else {
		bo->bo_flag |= BO_ONWORKLST;
		syncer_worklist_len++;
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
	mtx_unlock(&sync_mtx);
}

static int
sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
{
	int error, len;

	mtx_lock(&sync_mtx);
	len = syncer_worklist_len - sync_vnode_count;
	mtx_unlock(&sync_mtx);
	error = SYSCTL_OUT(req, &len, sizeof(len));
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");

static struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);

static int
sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;

	*bo = LIST_FIRST(slp);
	if (*bo == NULL)
		return (0);
	vp = (*bo)->__bo_vnode;	/* XXX */
	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
		return (1);
	/*
	 * We use vhold in case the vnode does not
	 * successfully sync.  vhold prevents the vnode from
	 * going away when we unlock the sync_mtx so that
	 * we can acquire the vnode interlock.
	 */
	vholdl(vp);
	mtx_unlock(&sync_mtx);
	VI_UNLOCK(vp);
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		vdrop(vp);
		mtx_lock(&sync_mtx);
		return (*bo == LIST_FIRST(slp));
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	(void) VOP_FSYNC(vp, MNT_LAZY, td);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	BO_LOCK(*bo);
	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
		/*
		 * Put us back on the worklist.  The worklist
		 * routine will remove us from our current
		 * position and then add us back in at a later
		 * position.
		 */
		vn_syncer_add_to_worklist(*bo, syncdelay);
	}
	BO_UNLOCK(*bo);
	vdrop(vp);
	mtx_lock(&sync_mtx);
	return (0);
}

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *next, *slp;
	struct bufobj *bo;
	long starttime;
	struct thread *td = curthread;
	int last_work_seen;
	int net_worklist_len;
	int syncer_final_iter;
	int first_printf;
	int error;

	last_work_seen = 0;
	syncer_final_iter = 0;
	first_printf = 1;
	syncer_state = SYNCER_RUNNING;
	starttime = time_uptime;
	td->td_pflags |= TDP_NORUNNINGBUF;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	mtx_lock(&sync_mtx);
	for (;;) {
		if (syncer_state == SYNCER_FINAL_DELAY &&
		    syncer_final_iter == 0) {
			mtx_unlock(&sync_mtx);
			kproc_suspend_check(td->td_proc);
			mtx_lock(&sync_mtx);
		}
		net_worklist_len = syncer_worklist_len - sync_vnode_count;
		if (syncer_state != SYNCER_RUNNING &&
		    starttime != time_uptime) {
			if (first_printf) {
				printf("\nSyncing disks, vnodes remaining...");
				first_printf = 0;
			}
			printf("%d ", net_worklist_len);
		}
		starttime = time_uptime;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Skip over empty worklist slots when shutting down.
		 */
		do {
			slp = &syncer_workitem_pending[syncer_delayno];
			syncer_delayno += 1;
			if (syncer_delayno == syncer_maxdelay)
				syncer_delayno = 0;
			next = &syncer_workitem_pending[syncer_delayno];
			/*
			 * If the worklist has wrapped since it
			 * was emptied of all but syncer vnodes,
			 * switch to the FINAL_DELAY state and run
			 * for one more second.
			 */
			if (syncer_state == SYNCER_SHUTTING_DOWN &&
			    net_worklist_len == 0 &&
			    last_work_seen == syncer_delayno) {
				syncer_state = SYNCER_FINAL_DELAY;
				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
			}
		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
		    syncer_worklist_len > 0);

		/*
		 * Keep track of the last time there was anything
		 * on the worklist other than syncer vnodes.
		 * Return to the SHUTTING_DOWN state if any
		 * new work appears.
		 */
		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
			last_work_seen = syncer_delayno;
		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
			syncer_state = SYNCER_SHUTTING_DOWN;
		while (!LIST_EMPTY(slp)) {
			error = sync_vnode(slp, &bo, td);
			if (error == 1) {
				LIST_REMOVE(bo, bo_synclist);
				LIST_INSERT_HEAD(next, bo, bo_synclist);
				continue;
			}

			if (first_printf == 0) {
				/*
				 * Drop the sync mutex, because some watchdog
				 * drivers need to sleep while patting
				 */
				mtx_unlock(&sync_mtx);
				wdog_kern_pat(WD_LASTVAL);
				mtx_lock(&sync_mtx);
			}

		}
		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
			syncer_final_iter--;
		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
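		 *
		 * Within this file, rushjob is bumped only by
		 * speedup_syncer() below, which caps it at syncdelay / 2,
		 * and it is cleared again by syncer_shutdown().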
1888 */ 1889 if (rushjob > 0) { 1890 rushjob -= 1; 1891 continue; 1892 } 1893 /* 1894 * Just sleep for a short period of time between 1895 * iterations when shutting down to allow some I/O 1896 * to happen. 1897 * 1898 * If it has taken us less than a second to process the 1899 * current work, then wait. Otherwise start right over 1900 * again. We can still lose time if any single round 1901 * takes more than two seconds, but it does not really 1902 * matter as we are just trying to generally pace the 1903 * filesystem activity. 1904 */ 1905 if (syncer_state != SYNCER_RUNNING || 1906 time_uptime == starttime) { 1907 thread_lock(td); 1908 sched_prio(td, PPAUSE); 1909 thread_unlock(td); 1910 } 1911 if (syncer_state != SYNCER_RUNNING) 1912 cv_timedwait(&sync_wakeup, &sync_mtx, 1913 hz / SYNCER_SHUTDOWN_SPEEDUP); 1914 else if (time_uptime == starttime) 1915 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 1916 } 1917 } 1918 1919 /* 1920 * Request the syncer daemon to speed up its work. 1921 * We never push it to speed up more than half of its 1922 * normal turn time, otherwise it could take over the cpu. 1923 */ 1924 int 1925 speedup_syncer(void) 1926 { 1927 int ret = 0; 1928 1929 mtx_lock(&sync_mtx); 1930 if (rushjob < syncdelay / 2) { 1931 rushjob += 1; 1932 stat_rush_requests += 1; 1933 ret = 1; 1934 } 1935 mtx_unlock(&sync_mtx); 1936 cv_broadcast(&sync_wakeup); 1937 return (ret); 1938 } 1939 1940 /* 1941 * Tell the syncer to speed up its work and run though its work 1942 * list several times, then tell it to shut down. 1943 */ 1944 static void 1945 syncer_shutdown(void *arg, int howto) 1946 { 1947 1948 if (howto & RB_NOSYNC) 1949 return; 1950 mtx_lock(&sync_mtx); 1951 syncer_state = SYNCER_SHUTTING_DOWN; 1952 rushjob = 0; 1953 mtx_unlock(&sync_mtx); 1954 cv_broadcast(&sync_wakeup); 1955 kproc_shutdown(arg, howto); 1956 } 1957 1958 /* 1959 * Reassign a buffer from one vnode to another. 1960 * Used to assign file specific control information 1961 * (indirect blocks) to the vnode to which they belong. 1962 */ 1963 void 1964 reassignbuf(struct buf *bp) 1965 { 1966 struct vnode *vp; 1967 struct bufobj *bo; 1968 int delay; 1969 #ifdef INVARIANTS 1970 struct bufv *bv; 1971 #endif 1972 1973 vp = bp->b_vp; 1974 bo = bp->b_bufobj; 1975 ++reassignbufcalls; 1976 1977 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 1978 bp, bp->b_vp, bp->b_flags); 1979 /* 1980 * B_PAGING flagged buffers cannot be reassigned because their vp 1981 * is not fully linked in. 1982 */ 1983 if (bp->b_flags & B_PAGING) 1984 panic("cannot reassign paging buffer"); 1985 1986 /* 1987 * Delete from old vnode list, if on one. 1988 */ 1989 BO_LOCK(bo); 1990 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 1991 buf_vlist_remove(bp); 1992 else 1993 panic("reassignbuf: Buffer %p not on queue.", bp); 1994 /* 1995 * If dirty, put on list of dirty buffers; otherwise insert onto list 1996 * of clean buffers. 
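 *
 * Sketch of what the code below maintains (a restatement, not a new rule):
 *
 *      dirty  -> buf_vlist_add(bp, bo, BX_VNDIRTY) and, if the bufobj is
 *                not yet queued, vn_syncer_add_to_worklist(bo, delay);
 *      clean  -> buf_vlist_add(bp, bo, BX_VNCLEAN) and, once
 *                bo_dirty.bv_cnt drops to zero, removal from the worklist.
 *
 * The per-type delays (filedelay, dirdelay, metadelay) only stagger when
 * the syncer revisits the vnode; they do not change this bookkeeping.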
1997 */ 1998 if (bp->b_flags & B_DELWRI) { 1999 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2000 switch (vp->v_type) { 2001 case VDIR: 2002 delay = dirdelay; 2003 break; 2004 case VCHR: 2005 delay = metadelay; 2006 break; 2007 default: 2008 delay = filedelay; 2009 } 2010 vn_syncer_add_to_worklist(bo, delay); 2011 } 2012 buf_vlist_add(bp, bo, BX_VNDIRTY); 2013 } else { 2014 buf_vlist_add(bp, bo, BX_VNCLEAN); 2015 2016 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2017 mtx_lock(&sync_mtx); 2018 LIST_REMOVE(bo, bo_synclist); 2019 syncer_worklist_len--; 2020 mtx_unlock(&sync_mtx); 2021 bo->bo_flag &= ~BO_ONWORKLST; 2022 } 2023 } 2024 #ifdef INVARIANTS 2025 bv = &bo->bo_clean; 2026 bp = TAILQ_FIRST(&bv->bv_hd); 2027 KASSERT(bp == NULL || bp->b_bufobj == bo, 2028 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2029 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2030 KASSERT(bp == NULL || bp->b_bufobj == bo, 2031 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2032 bv = &bo->bo_dirty; 2033 bp = TAILQ_FIRST(&bv->bv_hd); 2034 KASSERT(bp == NULL || bp->b_bufobj == bo, 2035 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2036 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2037 KASSERT(bp == NULL || bp->b_bufobj == bo, 2038 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2039 #endif 2040 BO_UNLOCK(bo); 2041 } 2042 2043 /* 2044 * Increment the use and hold counts on the vnode, taking care to reference 2045 * the driver's usecount if this is a chardev. The vholdl() will remove 2046 * the vnode from the free list if it is presently free. Requires the 2047 * vnode interlock and returns with it held. 2048 */ 2049 static void 2050 v_incr_usecount(struct vnode *vp) 2051 { 2052 2053 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2054 vholdl(vp); 2055 vp->v_usecount++; 2056 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2057 dev_lock(); 2058 vp->v_rdev->si_usecount++; 2059 dev_unlock(); 2060 } 2061 } 2062 2063 /* 2064 * Turn a holdcnt into a use+holdcnt such that only one call to 2065 * v_decr_usecount is needed. 2066 */ 2067 static void 2068 v_upgrade_usecount(struct vnode *vp) 2069 { 2070 2071 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2072 vp->v_usecount++; 2073 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2074 dev_lock(); 2075 vp->v_rdev->si_usecount++; 2076 dev_unlock(); 2077 } 2078 } 2079 2080 /* 2081 * Decrement the vnode use and hold count along with the driver's usecount 2082 * if this is a chardev. The vdropl() below releases the vnode interlock 2083 * as it may free the vnode. 2084 */ 2085 static void 2086 v_decr_usecount(struct vnode *vp) 2087 { 2088 2089 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2090 VNASSERT(vp->v_usecount > 0, vp, 2091 ("v_decr_usecount: negative usecount")); 2092 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2093 vp->v_usecount--; 2094 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2095 dev_lock(); 2096 vp->v_rdev->si_usecount--; 2097 dev_unlock(); 2098 } 2099 vdropl(vp); 2100 } 2101 2102 /* 2103 * Decrement only the use count and driver use count. This is intended to 2104 * be paired with a follow on vdropl() to release the remaining hold count. 2105 * In this way we may vgone() a vnode with a 0 usecount without risk of 2106 * having it end up on a free list because the hold count is kept above 0. 
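 *
 * A sketch of the intended pairing (this is essentially what vputx() below
 * does; nothing here is a new interface):
 *
 *      VI_LOCK(vp);
 *      v_decr_useonly(vp);     /* usecount drops, holdcnt still > 0    */
 *      ...                     /* lock the vnode, run VOP_INACTIVE()   */
 *      vdropl(vp);             /* release the hold; may free the vnode */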
2107 */ 2108 static void 2109 v_decr_useonly(struct vnode *vp) 2110 { 2111 2112 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2113 VNASSERT(vp->v_usecount > 0, vp, 2114 ("v_decr_useonly: negative usecount")); 2115 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2116 vp->v_usecount--; 2117 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2118 dev_lock(); 2119 vp->v_rdev->si_usecount--; 2120 dev_unlock(); 2121 } 2122 } 2123 2124 /* 2125 * Grab a particular vnode from the free list, increment its 2126 * reference count and lock it. VI_DOOMED is set if the vnode 2127 * is being destroyed. Only callers who specify LK_RETRY will 2128 * see doomed vnodes. If inactive processing was delayed in 2129 * vput try to do it here. 2130 */ 2131 int 2132 vget(struct vnode *vp, int flags, struct thread *td) 2133 { 2134 int error; 2135 2136 error = 0; 2137 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2138 ("vget: invalid lock operation")); 2139 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2140 2141 if ((flags & LK_INTERLOCK) == 0) 2142 VI_LOCK(vp); 2143 vholdl(vp); 2144 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2145 vdrop(vp); 2146 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2147 vp); 2148 return (error); 2149 } 2150 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2151 panic("vget: vn_lock failed to return ENOENT\n"); 2152 VI_LOCK(vp); 2153 /* Upgrade our holdcnt to a usecount. */ 2154 v_upgrade_usecount(vp); 2155 /* 2156 * We don't guarantee that any particular close will 2157 * trigger inactive processing so just make a best effort 2158 * here at preventing a reference to a removed file. If 2159 * we don't succeed no harm is done. 2160 */ 2161 if (vp->v_iflag & VI_OWEINACT) { 2162 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2163 (flags & LK_NOWAIT) == 0) 2164 vinactive(vp, td); 2165 vp->v_iflag &= ~VI_OWEINACT; 2166 } 2167 VI_UNLOCK(vp); 2168 return (0); 2169 } 2170 2171 /* 2172 * Increase the reference count of a vnode. 2173 */ 2174 void 2175 vref(struct vnode *vp) 2176 { 2177 2178 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2179 VI_LOCK(vp); 2180 v_incr_usecount(vp); 2181 VI_UNLOCK(vp); 2182 } 2183 2184 /* 2185 * Return reference count of a vnode. 2186 * 2187 * The results of this call are only guaranteed when some mechanism other 2188 * than the VI lock is used to stop other processes from gaining references 2189 * to the vnode. This may be the case if the caller holds the only reference. 2190 * This is also useful when stale data is acceptable as race conditions may 2191 * be accounted for by some other means. 2192 */ 2193 int 2194 vrefcnt(struct vnode *vp) 2195 { 2196 int usecnt; 2197 2198 VI_LOCK(vp); 2199 usecnt = vp->v_usecount; 2200 VI_UNLOCK(vp); 2201 2202 return (usecnt); 2203 } 2204 2205 #define VPUTX_VRELE 1 2206 #define VPUTX_VPUT 2 2207 #define VPUTX_VUNREF 3 2208 2209 static void 2210 vputx(struct vnode *vp, int func) 2211 { 2212 int error; 2213 2214 KASSERT(vp != NULL, ("vputx: null vp")); 2215 if (func == VPUTX_VUNREF) 2216 ASSERT_VOP_LOCKED(vp, "vunref"); 2217 else if (func == VPUTX_VPUT) 2218 ASSERT_VOP_LOCKED(vp, "vput"); 2219 else 2220 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2221 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2222 VI_LOCK(vp); 2223 2224 /* Skip this v_writecount check if we're going to panic below. 
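 * The check itself encodes the expectation that a writer reference is
 * backed by a use reference and is released with vn_close() before the
 * final vput()/vrele(), roughly (a sketch of the usual lifecycle, not a
 * quote of vn_open()):
 *
 *      vn_open()  ->  use reference taken, v_writecount++ for FWRITE
 *      vn_close() ->  v_writecount--, then vput()
 *
 * so whenever a reference is dropped here, v_writecount should still be
 * strictly below v_usecount; tripping the assertion usually means a
 * missed vn_close().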
*/ 2225 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2226 ("vputx: missed vn_close")); 2227 error = 0; 2228 2229 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2230 vp->v_usecount == 1)) { 2231 if (func == VPUTX_VPUT) 2232 VOP_UNLOCK(vp, 0); 2233 v_decr_usecount(vp); 2234 return; 2235 } 2236 2237 if (vp->v_usecount != 1) { 2238 vprint("vputx: negative ref count", vp); 2239 panic("vputx: negative ref cnt"); 2240 } 2241 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2242 /* 2243 * We want to hold the vnode until the inactive finishes to 2244 * prevent vgone() races. We drop the use count here and the 2245 * hold count below when we're done. 2246 */ 2247 v_decr_useonly(vp); 2248 /* 2249 * We must call VOP_INACTIVE with the node locked. Mark 2250 * as VI_DOINGINACT to avoid recursion. 2251 */ 2252 vp->v_iflag |= VI_OWEINACT; 2253 switch (func) { 2254 case VPUTX_VRELE: 2255 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2256 VI_LOCK(vp); 2257 break; 2258 case VPUTX_VPUT: 2259 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2260 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2261 LK_NOWAIT); 2262 VI_LOCK(vp); 2263 } 2264 break; 2265 case VPUTX_VUNREF: 2266 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2267 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 2268 VI_LOCK(vp); 2269 } 2270 break; 2271 } 2272 if (vp->v_usecount > 0) 2273 vp->v_iflag &= ~VI_OWEINACT; 2274 if (error == 0) { 2275 if (vp->v_iflag & VI_OWEINACT) 2276 vinactive(vp, curthread); 2277 if (func != VPUTX_VUNREF) 2278 VOP_UNLOCK(vp, 0); 2279 } 2280 vdropl(vp); 2281 } 2282 2283 /* 2284 * Vnode put/release. 2285 * If count drops to zero, call inactive routine and return to freelist. 2286 */ 2287 void 2288 vrele(struct vnode *vp) 2289 { 2290 2291 vputx(vp, VPUTX_VRELE); 2292 } 2293 2294 /* 2295 * Release an already locked vnode. This give the same effects as 2296 * unlock+vrele(), but takes less time and avoids releasing and 2297 * re-aquiring the lock (as vrele() acquires the lock internally.) 2298 */ 2299 void 2300 vput(struct vnode *vp) 2301 { 2302 2303 vputx(vp, VPUTX_VPUT); 2304 } 2305 2306 /* 2307 * Release an exclusively locked vnode. Do not unlock the vnode lock. 2308 */ 2309 void 2310 vunref(struct vnode *vp) 2311 { 2312 2313 vputx(vp, VPUTX_VUNREF); 2314 } 2315 2316 /* 2317 * Somebody doesn't want the vnode recycled. 2318 */ 2319 void 2320 vhold(struct vnode *vp) 2321 { 2322 2323 VI_LOCK(vp); 2324 vholdl(vp); 2325 VI_UNLOCK(vp); 2326 } 2327 2328 /* 2329 * Increase the hold count and activate if this is the first reference. 2330 */ 2331 void 2332 vholdl(struct vnode *vp) 2333 { 2334 struct mount *mp; 2335 2336 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2337 #ifdef INVARIANTS 2338 /* getnewvnode() calls v_incr_usecount() without holding interlock. */ 2339 if (vp->v_type != VNON || vp->v_data != NULL) { 2340 ASSERT_VI_LOCKED(vp, "vholdl"); 2341 VNASSERT(vp->v_holdcnt > 0 || (vp->v_iflag & VI_FREE) != 0, 2342 vp, ("vholdl: free vnode is held")); 2343 } 2344 #endif 2345 vp->v_holdcnt++; 2346 if ((vp->v_iflag & VI_FREE) == 0) 2347 return; 2348 VNASSERT(vp->v_holdcnt == 1, vp, ("vholdl: wrong hold count")); 2349 VNASSERT(vp->v_op != NULL, vp, ("vholdl: vnode already reclaimed.")); 2350 /* 2351 * Remove a vnode from the free list, mark it as in use, 2352 * and put it on the active list. 
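 * Both lists share the v_actfreelist linkage, which is why the same TAILQ
 * field is removed from one list and inserted into the other below while
 * vnode_free_list_mtx is held.  The usual external entry point is simply
 *
 *      VI_LOCK(vp);
 *      vholdl(vp);             /* may migrate vp from free to active */
 *      VI_UNLOCK(vp);
 *
 * i.e. what vhold() above already does.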
2353 */ 2354 mtx_lock(&vnode_free_list_mtx); 2355 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 2356 freevnodes--; 2357 vp->v_iflag &= ~(VI_FREE|VI_AGE); 2358 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 2359 ("Activating already active vnode")); 2360 vp->v_iflag |= VI_ACTIVE; 2361 mp = vp->v_mount; 2362 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 2363 mp->mnt_activevnodelistsize++; 2364 mtx_unlock(&vnode_free_list_mtx); 2365 } 2366 2367 /* 2368 * Note that there is one less who cares about this vnode. 2369 * vdrop() is the opposite of vhold(). 2370 */ 2371 void 2372 vdrop(struct vnode *vp) 2373 { 2374 2375 VI_LOCK(vp); 2376 vdropl(vp); 2377 } 2378 2379 /* 2380 * Drop the hold count of the vnode. If this is the last reference to 2381 * the vnode we place it on the free list unless it has been vgone'd 2382 * (marked VI_DOOMED) in which case we will free it. 2383 */ 2384 void 2385 vdropl(struct vnode *vp) 2386 { 2387 struct bufobj *bo; 2388 struct mount *mp; 2389 int active; 2390 2391 ASSERT_VI_LOCKED(vp, "vdropl"); 2392 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2393 if (vp->v_holdcnt <= 0) 2394 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2395 vp->v_holdcnt--; 2396 VNASSERT(vp->v_holdcnt >= vp->v_usecount, vp, 2397 ("hold count less than use count")); 2398 if (vp->v_holdcnt > 0) { 2399 VI_UNLOCK(vp); 2400 return; 2401 } 2402 if ((vp->v_iflag & VI_DOOMED) == 0) { 2403 /* 2404 * Mark a vnode as free: remove it from its active list 2405 * and put it up for recycling on the freelist. 2406 */ 2407 VNASSERT(vp->v_op != NULL, vp, 2408 ("vdropl: vnode already reclaimed.")); 2409 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2410 ("vnode already free")); 2411 VNASSERT(vp->v_holdcnt == 0, vp, 2412 ("vdropl: freeing when we shouldn't")); 2413 active = vp->v_iflag & VI_ACTIVE; 2414 vp->v_iflag &= ~VI_ACTIVE; 2415 mp = vp->v_mount; 2416 mtx_lock(&vnode_free_list_mtx); 2417 if (active) { 2418 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, 2419 v_actfreelist); 2420 mp->mnt_activevnodelistsize--; 2421 } 2422 if (vp->v_iflag & VI_AGE) { 2423 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_actfreelist); 2424 } else { 2425 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 2426 } 2427 freevnodes++; 2428 vp->v_iflag &= ~VI_AGE; 2429 vp->v_iflag |= VI_FREE; 2430 mtx_unlock(&vnode_free_list_mtx); 2431 VI_UNLOCK(vp); 2432 return; 2433 } 2434 /* 2435 * The vnode has been marked for destruction, so free it. 
2436 */ 2437 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2438 atomic_subtract_long(&numvnodes, 1); 2439 bo = &vp->v_bufobj; 2440 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 2441 ("cleaned vnode still on the free list.")); 2442 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2443 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 2444 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2445 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2446 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2447 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2448 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2449 ("clean blk trie not empty")); 2450 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2451 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2452 ("dirty blk trie not empty")); 2453 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 2454 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 2455 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 2456 VI_UNLOCK(vp); 2457 #ifdef MAC 2458 mac_vnode_destroy(vp); 2459 #endif 2460 if (vp->v_pollinfo != NULL) 2461 destroy_vpollinfo(vp->v_pollinfo); 2462 #ifdef INVARIANTS 2463 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 2464 vp->v_op = NULL; 2465 #endif 2466 rangelock_destroy(&vp->v_rl); 2467 lockdestroy(vp->v_vnlock); 2468 mtx_destroy(&vp->v_interlock); 2469 rw_destroy(BO_LOCKPTR(bo)); 2470 uma_zfree(vnode_zone, vp); 2471 } 2472 2473 /* 2474 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2475 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2476 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2477 * failed lock upgrade. 2478 */ 2479 void 2480 vinactive(struct vnode *vp, struct thread *td) 2481 { 2482 struct vm_object *obj; 2483 2484 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2485 ASSERT_VI_LOCKED(vp, "vinactive"); 2486 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2487 ("vinactive: recursed on VI_DOINGINACT")); 2488 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2489 vp->v_iflag |= VI_DOINGINACT; 2490 vp->v_iflag &= ~VI_OWEINACT; 2491 VI_UNLOCK(vp); 2492 /* 2493 * Before moving off the active list, we must be sure that any 2494 * modified pages are on the vnode's dirty list since these will 2495 * no longer be checked once the vnode is on the inactive list. 2496 * Because the vnode vm object keeps a hold reference on the vnode 2497 * if there is at least one resident non-cached page, the vnode 2498 * cannot leave the active list without the page cleanup done. 2499 */ 2500 obj = vp->v_object; 2501 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) { 2502 VM_OBJECT_WLOCK(obj); 2503 vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC); 2504 VM_OBJECT_WUNLOCK(obj); 2505 } 2506 VOP_INACTIVE(vp, td); 2507 VI_LOCK(vp); 2508 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2509 ("vinactive: lost VI_DOINGINACT")); 2510 vp->v_iflag &= ~VI_DOINGINACT; 2511 } 2512 2513 /* 2514 * Remove any vnodes in the vnode table belonging to mount point mp. 2515 * 2516 * If FORCECLOSE is not specified, there should not be any active ones, 2517 * return error if any are found (nb: this is a user error, not a 2518 * system error). If FORCECLOSE is specified, detach any active vnodes 2519 * that are found. 2520 * 2521 * If WRITECLOSE is set, only flush out regular file vnodes open for 2522 * writing. 
2523 * 2524 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2525 * 2526 * `rootrefs' specifies the base reference count for the root vnode 2527 * of this filesystem. The root vnode is considered busy if its 2528 * v_usecount exceeds this value. On a successful return, vflush(, td) 2529 * will call vrele() on the root vnode exactly rootrefs times. 2530 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2531 * be zero. 2532 */ 2533 #ifdef DIAGNOSTIC 2534 static int busyprt = 0; /* print out busy vnodes */ 2535 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 2536 #endif 2537 2538 int 2539 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2540 { 2541 struct vnode *vp, *mvp, *rootvp = NULL; 2542 struct vattr vattr; 2543 int busy = 0, error; 2544 2545 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 2546 rootrefs, flags); 2547 if (rootrefs > 0) { 2548 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2549 ("vflush: bad args")); 2550 /* 2551 * Get the filesystem root vnode. We can vput() it 2552 * immediately, since with rootrefs > 0, it won't go away. 2553 */ 2554 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 2555 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 2556 __func__, error); 2557 return (error); 2558 } 2559 vput(rootvp); 2560 } 2561 loop: 2562 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 2563 vholdl(vp); 2564 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2565 if (error) { 2566 vdrop(vp); 2567 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2568 goto loop; 2569 } 2570 /* 2571 * Skip over a vnodes marked VV_SYSTEM. 2572 */ 2573 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2574 VOP_UNLOCK(vp, 0); 2575 vdrop(vp); 2576 continue; 2577 } 2578 /* 2579 * If WRITECLOSE is set, flush out unlinked but still open 2580 * files (even if open only for reading) and regular file 2581 * vnodes open for writing. 2582 */ 2583 if (flags & WRITECLOSE) { 2584 if (vp->v_object != NULL) { 2585 VM_OBJECT_WLOCK(vp->v_object); 2586 vm_object_page_clean(vp->v_object, 0, 0, 0); 2587 VM_OBJECT_WUNLOCK(vp->v_object); 2588 } 2589 error = VOP_FSYNC(vp, MNT_WAIT, td); 2590 if (error != 0) { 2591 VOP_UNLOCK(vp, 0); 2592 vdrop(vp); 2593 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 2594 return (error); 2595 } 2596 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2597 VI_LOCK(vp); 2598 2599 if ((vp->v_type == VNON || 2600 (error == 0 && vattr.va_nlink > 0)) && 2601 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2602 VOP_UNLOCK(vp, 0); 2603 vdropl(vp); 2604 continue; 2605 } 2606 } else 2607 VI_LOCK(vp); 2608 /* 2609 * With v_usecount == 0, all we need to do is clear out the 2610 * vnode data structures and we are done. 2611 * 2612 * If FORCECLOSE is set, forcibly close the vnode. 2613 */ 2614 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2615 VNASSERT(vp->v_usecount == 0 || 2616 (vp->v_type != VCHR && vp->v_type != VBLK), vp, 2617 ("device VNODE %p is FORCECLOSED", vp)); 2618 vgonel(vp); 2619 } else { 2620 busy++; 2621 #ifdef DIAGNOSTIC 2622 if (busyprt) 2623 vprint("vflush: busy vnode", vp); 2624 #endif 2625 } 2626 VOP_UNLOCK(vp, 0); 2627 vdropl(vp); 2628 } 2629 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2630 /* 2631 * If just the root vnode is busy, and if its refcount 2632 * is equal to `rootrefs', then go ahead and kill it. 
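 * A hypothetical caller-side sketch of the rootrefs accounting (the caller
 * and the count are illustrative, not taken from a specific filesystem):
 * an unmount path holding one long-lived reference on the root vnode could
 * call
 *
 *      error = vflush(mp, 1, 0, td);   /* hypothetical caller */
 *
 * The loop above then counts the root as busy (usecount == 1 == rootrefs),
 * it is vgone'd here, and vflush() finishes by issuing the promised single
 * vrele() on it before returning 0.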
2633 */ 2634 VI_LOCK(rootvp); 2635 KASSERT(busy > 0, ("vflush: not busy")); 2636 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2637 ("vflush: usecount %d < rootrefs %d", 2638 rootvp->v_usecount, rootrefs)); 2639 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2640 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2641 vgone(rootvp); 2642 VOP_UNLOCK(rootvp, 0); 2643 busy = 0; 2644 } else 2645 VI_UNLOCK(rootvp); 2646 } 2647 if (busy) { 2648 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 2649 busy); 2650 return (EBUSY); 2651 } 2652 for (; rootrefs > 0; rootrefs--) 2653 vrele(rootvp); 2654 return (0); 2655 } 2656 2657 /* 2658 * Recycle an unused vnode to the front of the free list. 2659 */ 2660 int 2661 vrecycle(struct vnode *vp) 2662 { 2663 int recycled; 2664 2665 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2666 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2667 recycled = 0; 2668 VI_LOCK(vp); 2669 if (vp->v_usecount == 0) { 2670 recycled = 1; 2671 vgonel(vp); 2672 } 2673 VI_UNLOCK(vp); 2674 return (recycled); 2675 } 2676 2677 /* 2678 * Eliminate all activity associated with a vnode 2679 * in preparation for reuse. 2680 */ 2681 void 2682 vgone(struct vnode *vp) 2683 { 2684 VI_LOCK(vp); 2685 vgonel(vp); 2686 VI_UNLOCK(vp); 2687 } 2688 2689 static void 2690 notify_lowervp_vfs_dummy(struct mount *mp __unused, 2691 struct vnode *lowervp __unused) 2692 { 2693 } 2694 2695 /* 2696 * Notify upper mounts about reclaimed or unlinked vnode. 2697 */ 2698 void 2699 vfs_notify_upper(struct vnode *vp, int event) 2700 { 2701 static struct vfsops vgonel_vfsops = { 2702 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 2703 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 2704 }; 2705 struct mount *mp, *ump, *mmp; 2706 2707 mp = vp->v_mount; 2708 if (mp == NULL) 2709 return; 2710 2711 MNT_ILOCK(mp); 2712 if (TAILQ_EMPTY(&mp->mnt_uppers)) 2713 goto unlock; 2714 MNT_IUNLOCK(mp); 2715 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 2716 mmp->mnt_op = &vgonel_vfsops; 2717 mmp->mnt_kern_flag |= MNTK_MARKER; 2718 MNT_ILOCK(mp); 2719 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 2720 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 2721 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 2722 ump = TAILQ_NEXT(ump, mnt_upper_link); 2723 continue; 2724 } 2725 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 2726 MNT_IUNLOCK(mp); 2727 switch (event) { 2728 case VFS_NOTIFY_UPPER_RECLAIM: 2729 VFS_RECLAIM_LOWERVP(ump, vp); 2730 break; 2731 case VFS_NOTIFY_UPPER_UNLINK: 2732 VFS_UNLINK_LOWERVP(ump, vp); 2733 break; 2734 default: 2735 KASSERT(0, ("invalid event %d", event)); 2736 break; 2737 } 2738 MNT_ILOCK(mp); 2739 ump = TAILQ_NEXT(mmp, mnt_upper_link); 2740 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 2741 } 2742 free(mmp, M_TEMP); 2743 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 2744 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 2745 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 2746 wakeup(&mp->mnt_uppers); 2747 } 2748 unlock: 2749 MNT_IUNLOCK(mp); 2750 } 2751 2752 /* 2753 * vgone, with the vp interlock held. 2754 */ 2755 void 2756 vgonel(struct vnode *vp) 2757 { 2758 struct thread *td; 2759 int oweinact; 2760 int active; 2761 struct mount *mp; 2762 2763 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2764 ASSERT_VI_LOCKED(vp, "vgonel"); 2765 VNASSERT(vp->v_holdcnt, vp, 2766 ("vgonel: vp %p has no reference.", vp)); 2767 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2768 td = curthread; 2769 2770 /* 2771 * Don't vgonel if we're already doomed. 
2772 */ 2773 if (vp->v_iflag & VI_DOOMED) 2774 return; 2775 vp->v_iflag |= VI_DOOMED; 2776 2777 /* 2778 * Check to see if the vnode is in use. If so, we have to call 2779 * VOP_CLOSE() and VOP_INACTIVE(). 2780 */ 2781 active = vp->v_usecount; 2782 oweinact = (vp->v_iflag & VI_OWEINACT); 2783 VI_UNLOCK(vp); 2784 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 2785 2786 /* 2787 * Clean out any buffers associated with the vnode. 2788 * If the flush fails, just toss the buffers. 2789 */ 2790 mp = NULL; 2791 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2792 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2793 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) 2794 vinvalbuf(vp, 0, 0, 0); 2795 2796 /* 2797 * If purging an active vnode, it must be closed and 2798 * deactivated before being reclaimed. 2799 */ 2800 if (active) 2801 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2802 if (oweinact || active) { 2803 VI_LOCK(vp); 2804 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2805 vinactive(vp, td); 2806 VI_UNLOCK(vp); 2807 } 2808 if (vp->v_type == VSOCK) 2809 vfs_unp_reclaim(vp); 2810 /* 2811 * Reclaim the vnode. 2812 */ 2813 if (VOP_RECLAIM(vp, td)) 2814 panic("vgone: cannot reclaim"); 2815 if (mp != NULL) 2816 vn_finished_secondary_write(mp); 2817 VNASSERT(vp->v_object == NULL, vp, 2818 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2819 /* 2820 * Clear the advisory locks and wake up waiting threads. 2821 */ 2822 (void)VOP_ADVLOCKPURGE(vp); 2823 /* 2824 * Delete from old mount point vnode list. 2825 */ 2826 delmntque(vp); 2827 cache_purge(vp); 2828 /* 2829 * Done with purge, reset to the standard lock and invalidate 2830 * the vnode. 2831 */ 2832 VI_LOCK(vp); 2833 vp->v_vnlock = &vp->v_lock; 2834 vp->v_op = &dead_vnodeops; 2835 vp->v_tag = "none"; 2836 vp->v_type = VBAD; 2837 } 2838 2839 /* 2840 * Calculate the total number of references to a special device. 2841 */ 2842 int 2843 vcount(struct vnode *vp) 2844 { 2845 int count; 2846 2847 dev_lock(); 2848 count = vp->v_rdev->si_usecount; 2849 dev_unlock(); 2850 return (count); 2851 } 2852 2853 /* 2854 * Same as above, but using the struct cdev *as argument 2855 */ 2856 int 2857 count_dev(struct cdev *dev) 2858 { 2859 int count; 2860 2861 dev_lock(); 2862 count = dev->si_usecount; 2863 dev_unlock(); 2864 return(count); 2865 } 2866 2867 /* 2868 * Print out a description of a vnode. 2869 */ 2870 static char *typename[] = 2871 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 2872 "VMARKER"}; 2873 2874 void 2875 vn_printf(struct vnode *vp, const char *fmt, ...) 
2876 { 2877 va_list ap; 2878 char buf[256], buf2[16]; 2879 u_long flags; 2880 2881 va_start(ap, fmt); 2882 vprintf(fmt, ap); 2883 va_end(ap); 2884 printf("%p: ", (void *)vp); 2885 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 2886 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 2887 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 2888 buf[0] = '\0'; 2889 buf[1] = '\0'; 2890 if (vp->v_vflag & VV_ROOT) 2891 strlcat(buf, "|VV_ROOT", sizeof(buf)); 2892 if (vp->v_vflag & VV_ISTTY) 2893 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 2894 if (vp->v_vflag & VV_NOSYNC) 2895 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 2896 if (vp->v_vflag & VV_ETERNALDEV) 2897 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 2898 if (vp->v_vflag & VV_CACHEDLABEL) 2899 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 2900 if (vp->v_vflag & VV_TEXT) 2901 strlcat(buf, "|VV_TEXT", sizeof(buf)); 2902 if (vp->v_vflag & VV_COPYONWRITE) 2903 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 2904 if (vp->v_vflag & VV_SYSTEM) 2905 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 2906 if (vp->v_vflag & VV_PROCDEP) 2907 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 2908 if (vp->v_vflag & VV_NOKNOTE) 2909 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 2910 if (vp->v_vflag & VV_DELETED) 2911 strlcat(buf, "|VV_DELETED", sizeof(buf)); 2912 if (vp->v_vflag & VV_MD) 2913 strlcat(buf, "|VV_MD", sizeof(buf)); 2914 if (vp->v_vflag & VV_FORCEINSMQ) 2915 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 2916 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 2917 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 2918 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 2919 if (flags != 0) { 2920 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 2921 strlcat(buf, buf2, sizeof(buf)); 2922 } 2923 if (vp->v_iflag & VI_MOUNT) 2924 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 2925 if (vp->v_iflag & VI_AGE) 2926 strlcat(buf, "|VI_AGE", sizeof(buf)); 2927 if (vp->v_iflag & VI_DOOMED) 2928 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 2929 if (vp->v_iflag & VI_FREE) 2930 strlcat(buf, "|VI_FREE", sizeof(buf)); 2931 if (vp->v_iflag & VI_ACTIVE) 2932 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 2933 if (vp->v_iflag & VI_DOINGINACT) 2934 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 2935 if (vp->v_iflag & VI_OWEINACT) 2936 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 2937 flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE | 2938 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); 2939 if (flags != 0) { 2940 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 2941 strlcat(buf, buf2, sizeof(buf)); 2942 } 2943 printf(" flags (%s)\n", buf + 1); 2944 if (mtx_owned(VI_MTX(vp))) 2945 printf(" VI_LOCKed"); 2946 if (vp->v_object != NULL) 2947 printf(" v_object %p ref %d pages %d " 2948 "cleanbuf %d dirtybuf %d\n", 2949 vp->v_object, vp->v_object->ref_count, 2950 vp->v_object->resident_page_count, 2951 vp->v_bufobj.bo_dirty.bv_cnt, 2952 vp->v_bufobj.bo_clean.bv_cnt); 2953 printf(" "); 2954 lockmgr_printinfo(vp->v_vnlock); 2955 if (vp->v_data != NULL) 2956 VOP_PRINT(vp); 2957 } 2958 2959 #ifdef DDB 2960 /* 2961 * List all of the locked vnodes in the system. 2962 * Called when debugging the kernel. 2963 */ 2964 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2965 { 2966 struct mount *mp; 2967 struct vnode *vp; 2968 2969 /* 2970 * Note: because this is DDB, we can't obey the locking semantics 2971 * for these structures, which means we could catch an inconsistent 2972 * state and dereference a nasty pointer. 
Not much to be done 2973 * about that. 2974 */ 2975 db_printf("Locked vnodes\n"); 2976 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2977 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2978 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 2979 vprint("", vp); 2980 } 2981 } 2982 } 2983 2984 /* 2985 * Show details about the given vnode. 2986 */ 2987 DB_SHOW_COMMAND(vnode, db_show_vnode) 2988 { 2989 struct vnode *vp; 2990 2991 if (!have_addr) 2992 return; 2993 vp = (struct vnode *)addr; 2994 vn_printf(vp, "vnode "); 2995 } 2996 2997 /* 2998 * Show details about the given mount point. 2999 */ 3000 DB_SHOW_COMMAND(mount, db_show_mount) 3001 { 3002 struct mount *mp; 3003 struct vfsopt *opt; 3004 struct statfs *sp; 3005 struct vnode *vp; 3006 char buf[512]; 3007 uint64_t mflags; 3008 u_int flags; 3009 3010 if (!have_addr) { 3011 /* No address given, print short info about all mount points. */ 3012 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3013 db_printf("%p %s on %s (%s)\n", mp, 3014 mp->mnt_stat.f_mntfromname, 3015 mp->mnt_stat.f_mntonname, 3016 mp->mnt_stat.f_fstypename); 3017 if (db_pager_quit) 3018 break; 3019 } 3020 db_printf("\nMore info: show mount <addr>\n"); 3021 return; 3022 } 3023 3024 mp = (struct mount *)addr; 3025 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3026 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3027 3028 buf[0] = '\0'; 3029 mflags = mp->mnt_flag; 3030 #define MNT_FLAG(flag) do { \ 3031 if (mflags & (flag)) { \ 3032 if (buf[0] != '\0') \ 3033 strlcat(buf, ", ", sizeof(buf)); \ 3034 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3035 mflags &= ~(flag); \ 3036 } \ 3037 } while (0) 3038 MNT_FLAG(MNT_RDONLY); 3039 MNT_FLAG(MNT_SYNCHRONOUS); 3040 MNT_FLAG(MNT_NOEXEC); 3041 MNT_FLAG(MNT_NOSUID); 3042 MNT_FLAG(MNT_NFS4ACLS); 3043 MNT_FLAG(MNT_UNION); 3044 MNT_FLAG(MNT_ASYNC); 3045 MNT_FLAG(MNT_SUIDDIR); 3046 MNT_FLAG(MNT_SOFTDEP); 3047 MNT_FLAG(MNT_NOSYMFOLLOW); 3048 MNT_FLAG(MNT_GJOURNAL); 3049 MNT_FLAG(MNT_MULTILABEL); 3050 MNT_FLAG(MNT_ACLS); 3051 MNT_FLAG(MNT_NOATIME); 3052 MNT_FLAG(MNT_NOCLUSTERR); 3053 MNT_FLAG(MNT_NOCLUSTERW); 3054 MNT_FLAG(MNT_SUJ); 3055 MNT_FLAG(MNT_EXRDONLY); 3056 MNT_FLAG(MNT_EXPORTED); 3057 MNT_FLAG(MNT_DEFEXPORTED); 3058 MNT_FLAG(MNT_EXPORTANON); 3059 MNT_FLAG(MNT_EXKERB); 3060 MNT_FLAG(MNT_EXPUBLIC); 3061 MNT_FLAG(MNT_LOCAL); 3062 MNT_FLAG(MNT_QUOTA); 3063 MNT_FLAG(MNT_ROOTFS); 3064 MNT_FLAG(MNT_USER); 3065 MNT_FLAG(MNT_IGNORE); 3066 MNT_FLAG(MNT_UPDATE); 3067 MNT_FLAG(MNT_DELEXPORT); 3068 MNT_FLAG(MNT_RELOAD); 3069 MNT_FLAG(MNT_FORCE); 3070 MNT_FLAG(MNT_SNAPSHOT); 3071 MNT_FLAG(MNT_BYFSID); 3072 #undef MNT_FLAG 3073 if (mflags != 0) { 3074 if (buf[0] != '\0') 3075 strlcat(buf, ", ", sizeof(buf)); 3076 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3077 "0x%016jx", mflags); 3078 } 3079 db_printf(" mnt_flag = %s\n", buf); 3080 3081 buf[0] = '\0'; 3082 flags = mp->mnt_kern_flag; 3083 #define MNT_KERN_FLAG(flag) do { \ 3084 if (flags & (flag)) { \ 3085 if (buf[0] != '\0') \ 3086 strlcat(buf, ", ", sizeof(buf)); \ 3087 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 3088 flags &= ~(flag); \ 3089 } \ 3090 } while (0) 3091 MNT_KERN_FLAG(MNTK_UNMOUNTF); 3092 MNT_KERN_FLAG(MNTK_ASYNC); 3093 MNT_KERN_FLAG(MNTK_SOFTDEP); 3094 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 3095 MNT_KERN_FLAG(MNTK_DRAINING); 3096 MNT_KERN_FLAG(MNTK_REFEXPIRE); 3097 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 3098 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 3099 MNT_KERN_FLAG(MNTK_NO_IOPF); 3100 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 3101 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 3102 
MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 3103 MNT_KERN_FLAG(MNTK_MARKER); 3104 MNT_KERN_FLAG(MNTK_NOASYNC); 3105 MNT_KERN_FLAG(MNTK_UNMOUNT); 3106 MNT_KERN_FLAG(MNTK_MWAIT); 3107 MNT_KERN_FLAG(MNTK_SUSPEND); 3108 MNT_KERN_FLAG(MNTK_SUSPEND2); 3109 MNT_KERN_FLAG(MNTK_SUSPENDED); 3110 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 3111 MNT_KERN_FLAG(MNTK_NOKNOTE); 3112 #undef MNT_KERN_FLAG 3113 if (flags != 0) { 3114 if (buf[0] != '\0') 3115 strlcat(buf, ", ", sizeof(buf)); 3116 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 3117 "0x%08x", flags); 3118 } 3119 db_printf(" mnt_kern_flag = %s\n", buf); 3120 3121 db_printf(" mnt_opt = "); 3122 opt = TAILQ_FIRST(mp->mnt_opt); 3123 if (opt != NULL) { 3124 db_printf("%s", opt->name); 3125 opt = TAILQ_NEXT(opt, link); 3126 while (opt != NULL) { 3127 db_printf(", %s", opt->name); 3128 opt = TAILQ_NEXT(opt, link); 3129 } 3130 } 3131 db_printf("\n"); 3132 3133 sp = &mp->mnt_stat; 3134 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 3135 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 3136 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 3137 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 3138 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 3139 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 3140 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 3141 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 3142 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 3143 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 3144 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 3145 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 3146 3147 db_printf(" mnt_cred = { uid=%u ruid=%u", 3148 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 3149 if (jailed(mp->mnt_cred)) 3150 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 3151 db_printf(" }\n"); 3152 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 3153 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 3154 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 3155 db_printf(" mnt_activevnodelistsize = %d\n", 3156 mp->mnt_activevnodelistsize); 3157 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 3158 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 3159 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3160 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3161 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3162 db_printf(" mnt_secondary_accwrites = %d\n", 3163 mp->mnt_secondary_accwrites); 3164 db_printf(" mnt_gjprovider = %s\n", 3165 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 3166 3167 db_printf("\n\nList of active vnodes\n"); 3168 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 3169 if (vp->v_type != VMARKER) { 3170 vn_printf(vp, "vnode "); 3171 if (db_pager_quit) 3172 break; 3173 } 3174 } 3175 db_printf("\n\nList of inactive vnodes\n"); 3176 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3177 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 3178 vn_printf(vp, "vnode "); 3179 if (db_pager_quit) 3180 break; 3181 } 3182 } 3183 } 3184 #endif /* DDB */ 3185 3186 /* 3187 * Fill in a struct xvfsconf based on a struct vfsconf. 
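 * One record per configured filesystem is emitted this way by the
 * vfs.conflist sysctl below.  Userland normally gets at it through the
 * getvfsbyname(3) wrapper; a rough consumer sketch (userland code, not
 * part of this file) looks like:
 *
 *      struct xvfsconf xvc;
 *
 *      if (getvfsbyname("ufs", &xvc) == 0)     /* userland sketch */
 *              printf("ufs: typenum %d refcount %d\n",
 *                  xvc.vfc_typenum, xvc.vfc_refcount);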
3188 */ 3189 static int 3190 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 3191 { 3192 struct xvfsconf xvfsp; 3193 3194 bzero(&xvfsp, sizeof(xvfsp)); 3195 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3196 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3197 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3198 xvfsp.vfc_flags = vfsp->vfc_flags; 3199 /* 3200 * These are unused in userland, we keep them 3201 * to not break binary compatibility. 3202 */ 3203 xvfsp.vfc_vfsops = NULL; 3204 xvfsp.vfc_next = NULL; 3205 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3206 } 3207 3208 #ifdef COMPAT_FREEBSD32 3209 struct xvfsconf32 { 3210 uint32_t vfc_vfsops; 3211 char vfc_name[MFSNAMELEN]; 3212 int32_t vfc_typenum; 3213 int32_t vfc_refcount; 3214 int32_t vfc_flags; 3215 uint32_t vfc_next; 3216 }; 3217 3218 static int 3219 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 3220 { 3221 struct xvfsconf32 xvfsp; 3222 3223 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 3224 xvfsp.vfc_typenum = vfsp->vfc_typenum; 3225 xvfsp.vfc_refcount = vfsp->vfc_refcount; 3226 xvfsp.vfc_flags = vfsp->vfc_flags; 3227 xvfsp.vfc_vfsops = 0; 3228 xvfsp.vfc_next = 0; 3229 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3230 } 3231 #endif 3232 3233 /* 3234 * Top level filesystem related information gathering. 3235 */ 3236 static int 3237 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3238 { 3239 struct vfsconf *vfsp; 3240 int error; 3241 3242 error = 0; 3243 vfsconf_slock(); 3244 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3245 #ifdef COMPAT_FREEBSD32 3246 if (req->flags & SCTL_MASK32) 3247 error = vfsconf2x32(req, vfsp); 3248 else 3249 #endif 3250 error = vfsconf2x(req, vfsp); 3251 if (error) 3252 break; 3253 } 3254 vfsconf_sunlock(); 3255 return (error); 3256 } 3257 3258 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 3259 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 3260 "S,xvfsconf", "List of all configured filesystems"); 3261 3262 #ifndef BURN_BRIDGES 3263 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3264 3265 static int 3266 vfs_sysctl(SYSCTL_HANDLER_ARGS) 3267 { 3268 int *name = (int *)arg1 - 1; /* XXX */ 3269 u_int namelen = arg2 + 1; /* XXX */ 3270 struct vfsconf *vfsp; 3271 3272 log(LOG_WARNING, "userland calling deprecated sysctl, " 3273 "please rebuild world\n"); 3274 3275 #if 1 || defined(COMPAT_PRELITE2) 3276 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 3277 if (namelen == 1) 3278 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3279 #endif 3280 3281 switch (name[1]) { 3282 case VFS_MAXTYPENUM: 3283 if (namelen != 2) 3284 return (ENOTDIR); 3285 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3286 case VFS_CONF: 3287 if (namelen != 3) 3288 return (ENOTDIR); /* overloaded */ 3289 vfsconf_slock(); 3290 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3291 if (vfsp->vfc_typenum == name[2]) 3292 break; 3293 } 3294 vfsconf_sunlock(); 3295 if (vfsp == NULL) 3296 return (EOPNOTSUPP); 3297 #ifdef COMPAT_FREEBSD32 3298 if (req->flags & SCTL_MASK32) 3299 return (vfsconf2x32(req, vfsp)); 3300 else 3301 #endif 3302 return (vfsconf2x(req, vfsp)); 3303 } 3304 return (EOPNOTSUPP); 3305 } 3306 3307 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 3308 CTLFLAG_MPSAFE, vfs_sysctl, 3309 "Generic filesystem"); 3310 3311 #if 1 || defined(COMPAT_PRELITE2) 3312 3313 static int 3314 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3315 { 3316 int error; 3317 struct vfsconf *vfsp; 3318 struct ovfsconf ovfs; 3319 3320 vfsconf_slock(); 3321 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3322 bzero(&ovfs, sizeof(ovfs)); 3323 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3324 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3325 ovfs.vfc_index = vfsp->vfc_typenum; 3326 ovfs.vfc_refcount = vfsp->vfc_refcount; 3327 ovfs.vfc_flags = vfsp->vfc_flags; 3328 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3329 if (error != 0) { 3330 vfsconf_sunlock(); 3331 return (error); 3332 } 3333 } 3334 vfsconf_sunlock(); 3335 return (0); 3336 } 3337 3338 #endif /* 1 || COMPAT_PRELITE2 */ 3339 #endif /* !BURN_BRIDGES */ 3340 3341 #define KINFO_VNODESLOP 10 3342 #ifdef notyet 3343 /* 3344 * Dump vnode list (via sysctl). 3345 */ 3346 /* ARGSUSED */ 3347 static int 3348 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3349 { 3350 struct xvnode *xvn; 3351 struct mount *mp; 3352 struct vnode *vp; 3353 int error, len, n; 3354 3355 /* 3356 * Stale numvnodes access is not fatal here. 3357 */ 3358 req->lock = 0; 3359 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3360 if (!req->oldptr) 3361 /* Make an estimate */ 3362 return (SYSCTL_OUT(req, 0, len)); 3363 3364 error = sysctl_wire_old_buffer(req, 0); 3365 if (error != 0) 3366 return (error); 3367 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3368 n = 0; 3369 mtx_lock(&mountlist_mtx); 3370 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3371 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3372 continue; 3373 MNT_ILOCK(mp); 3374 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3375 if (n == len) 3376 break; 3377 vref(vp); 3378 xvn[n].xv_size = sizeof *xvn; 3379 xvn[n].xv_vnode = vp; 3380 xvn[n].xv_id = 0; /* XXX compat */ 3381 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3382 XV_COPY(usecount); 3383 XV_COPY(writecount); 3384 XV_COPY(holdcnt); 3385 XV_COPY(mount); 3386 XV_COPY(numoutput); 3387 XV_COPY(type); 3388 #undef XV_COPY 3389 xvn[n].xv_flag = vp->v_vflag; 3390 3391 switch (vp->v_type) { 3392 case VREG: 3393 case VDIR: 3394 case VLNK: 3395 break; 3396 case VBLK: 3397 case VCHR: 3398 if (vp->v_rdev == NULL) { 3399 vrele(vp); 3400 continue; 3401 } 3402 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3403 break; 3404 case VSOCK: 3405 xvn[n].xv_socket = vp->v_socket; 3406 break; 3407 case VFIFO: 3408 xvn[n].xv_fifo = vp->v_fifoinfo; 3409 break; 3410 case VNON: 3411 case VBAD: 3412 default: 3413 /* shouldn't happen? 
*/ 3414 vrele(vp); 3415 continue; 3416 } 3417 vrele(vp); 3418 ++n; 3419 } 3420 MNT_IUNLOCK(mp); 3421 mtx_lock(&mountlist_mtx); 3422 vfs_unbusy(mp); 3423 if (n == len) 3424 break; 3425 } 3426 mtx_unlock(&mountlist_mtx); 3427 3428 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3429 free(xvn, M_TEMP); 3430 return (error); 3431 } 3432 3433 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 3434 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 3435 ""); 3436 #endif 3437 3438 /* 3439 * Unmount all filesystems. The list is traversed in reverse order 3440 * of mounting to avoid dependencies. 3441 */ 3442 void 3443 vfs_unmountall(void) 3444 { 3445 struct mount *mp; 3446 struct thread *td; 3447 int error; 3448 3449 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 3450 td = curthread; 3451 3452 /* 3453 * Since this only runs when rebooting, it is not interlocked. 3454 */ 3455 while(!TAILQ_EMPTY(&mountlist)) { 3456 mp = TAILQ_LAST(&mountlist, mntlist); 3457 error = dounmount(mp, MNT_FORCE, td); 3458 if (error) { 3459 TAILQ_REMOVE(&mountlist, mp, mnt_list); 3460 /* 3461 * XXX: Due to the way in which we mount the root 3462 * file system off of devfs, devfs will generate a 3463 * "busy" warning when we try to unmount it before 3464 * the root. Don't print a warning as a result in 3465 * order to avoid false positive errors that may 3466 * cause needless upset. 3467 */ 3468 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3469 printf("unmount of %s failed (", 3470 mp->mnt_stat.f_mntonname); 3471 if (error == EBUSY) 3472 printf("BUSY)\n"); 3473 else 3474 printf("%d)\n", error); 3475 } 3476 } else { 3477 /* The unmount has removed mp from the mountlist */ 3478 } 3479 } 3480 } 3481 3482 /* 3483 * perform msync on all vnodes under a mount point 3484 * the mount point must be locked. 3485 */ 3486 void 3487 vfs_msync(struct mount *mp, int flags) 3488 { 3489 struct vnode *vp, *mvp; 3490 struct vm_object *obj; 3491 3492 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 3493 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 3494 obj = vp->v_object; 3495 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 3496 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3497 if (!vget(vp, 3498 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3499 curthread)) { 3500 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3501 vput(vp); 3502 continue; 3503 } 3504 3505 obj = vp->v_object; 3506 if (obj != NULL) { 3507 VM_OBJECT_WLOCK(obj); 3508 vm_object_page_clean(obj, 0, 0, 3509 flags == MNT_WAIT ? 3510 OBJPC_SYNC : OBJPC_NOSYNC); 3511 VM_OBJECT_WUNLOCK(obj); 3512 } 3513 vput(vp); 3514 } 3515 } else 3516 VI_UNLOCK(vp); 3517 } 3518 } 3519 3520 static void 3521 destroy_vpollinfo_free(struct vpollinfo *vi) 3522 { 3523 3524 knlist_destroy(&vi->vpi_selinfo.si_note); 3525 mtx_destroy(&vi->vpi_lock); 3526 uma_zfree(vnodepoll_zone, vi); 3527 } 3528 3529 static void 3530 destroy_vpollinfo(struct vpollinfo *vi) 3531 { 3532 3533 knlist_clear(&vi->vpi_selinfo.si_note, 1); 3534 seldrain(&vi->vpi_selinfo); 3535 destroy_vpollinfo_free(vi); 3536 } 3537 3538 /* 3539 * Initalize per-vnode helper structure to hold poll-related state. 
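 * The allocation is done before taking the vnode interlock because
 * uma_zalloc(..., M_WAITOK) may sleep, so the function uses the classic
 * allocate-then-recheck pattern (this mirrors the code below exactly):
 *
 *      vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
 *      VI_LOCK(vp);
 *      if (vp->v_pollinfo != NULL) {   /* lost the race */
 *              VI_UNLOCK(vp);
 *              destroy_vpollinfo_free(vi);
 *              return;
 *      }
 *      vp->v_pollinfo = vi;
 *      VI_UNLOCK(vp);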
3540 */ 3541 void 3542 v_addpollinfo(struct vnode *vp) 3543 { 3544 struct vpollinfo *vi; 3545 3546 if (vp->v_pollinfo != NULL) 3547 return; 3548 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3549 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3550 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3551 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 3552 VI_LOCK(vp); 3553 if (vp->v_pollinfo != NULL) { 3554 VI_UNLOCK(vp); 3555 destroy_vpollinfo_free(vi); 3556 return; 3557 } 3558 vp->v_pollinfo = vi; 3559 VI_UNLOCK(vp); 3560 } 3561 3562 /* 3563 * Record a process's interest in events which might happen to 3564 * a vnode. Because poll uses the historic select-style interface 3565 * internally, this routine serves as both the ``check for any 3566 * pending events'' and the ``record my interest in future events'' 3567 * functions. (These are done together, while the lock is held, 3568 * to avoid race conditions.) 3569 */ 3570 int 3571 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3572 { 3573 3574 v_addpollinfo(vp); 3575 mtx_lock(&vp->v_pollinfo->vpi_lock); 3576 if (vp->v_pollinfo->vpi_revents & events) { 3577 /* 3578 * This leaves events we are not interested 3579 * in available for the other process which 3580 * which presumably had requested them 3581 * (otherwise they would never have been 3582 * recorded). 3583 */ 3584 events &= vp->v_pollinfo->vpi_revents; 3585 vp->v_pollinfo->vpi_revents &= ~events; 3586 3587 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3588 return (events); 3589 } 3590 vp->v_pollinfo->vpi_events |= events; 3591 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3592 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3593 return (0); 3594 } 3595 3596 /* 3597 * Routine to create and manage a filesystem syncer vnode. 3598 */ 3599 #define sync_close ((int (*)(struct vop_close_args *))nullop) 3600 static int sync_fsync(struct vop_fsync_args *); 3601 static int sync_inactive(struct vop_inactive_args *); 3602 static int sync_reclaim(struct vop_reclaim_args *); 3603 3604 static struct vop_vector sync_vnodeops = { 3605 .vop_bypass = VOP_EOPNOTSUPP, 3606 .vop_close = sync_close, /* close */ 3607 .vop_fsync = sync_fsync, /* fsync */ 3608 .vop_inactive = sync_inactive, /* inactive */ 3609 .vop_reclaim = sync_reclaim, /* reclaim */ 3610 .vop_lock1 = vop_stdlock, /* lock */ 3611 .vop_unlock = vop_stdunlock, /* unlock */ 3612 .vop_islocked = vop_stdislocked, /* islocked */ 3613 }; 3614 3615 /* 3616 * Create a new filesystem syncer vnode for the specified mount point. 3617 */ 3618 void 3619 vfs_allocate_syncvnode(struct mount *mp) 3620 { 3621 struct vnode *vp; 3622 struct bufobj *bo; 3623 static long start, incr, next; 3624 int error; 3625 3626 /* Allocate a new vnode */ 3627 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 3628 if (error != 0) 3629 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 3630 vp->v_type = VNON; 3631 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3632 vp->v_vflag |= VV_FORCEINSMQ; 3633 error = insmntque(vp, mp); 3634 if (error != 0) 3635 panic("vfs_allocate_syncvnode: insmntque() failed"); 3636 vp->v_vflag &= ~VV_FORCEINSMQ; 3637 VOP_UNLOCK(vp, 0); 3638 /* 3639 * Place the vnode onto the syncer worklist. We attempt to 3640 * scatter them about on the list so that they will go off 3641 * at evenly distributed times even if all the filesystems 3642 * are mounted at once. 
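 *
 * Worked example (assuming, for illustration, syncer_maxdelay == 32):
 * start, incr and next all begin at zero, so the first call sets
 * start = 16, incr = 32 and hands out next = 16.  Each later call adds
 * incr and, on overflow, halves both start and incr, yielding the
 * sequence of offsets
 *
 *      16, 8, 24, 4, 12, 20, 28, 2, 6, 10, ...
 *
 * so successive syncer vnodes land progressively between the slots already
 * taken (the value actually passed to vn_syncer_add_to_worklist() is
 * next % syncdelay when syncdelay > 0).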
3643 */ 3644 next += incr; 3645 if (next == 0 || next > syncer_maxdelay) { 3646 start /= 2; 3647 incr /= 2; 3648 if (start == 0) { 3649 start = syncer_maxdelay / 2; 3650 incr = syncer_maxdelay; 3651 } 3652 next = start; 3653 } 3654 bo = &vp->v_bufobj; 3655 BO_LOCK(bo); 3656 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3657 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 3658 mtx_lock(&sync_mtx); 3659 sync_vnode_count++; 3660 if (mp->mnt_syncer == NULL) { 3661 mp->mnt_syncer = vp; 3662 vp = NULL; 3663 } 3664 mtx_unlock(&sync_mtx); 3665 BO_UNLOCK(bo); 3666 if (vp != NULL) { 3667 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3668 vgone(vp); 3669 vput(vp); 3670 } 3671 } 3672 3673 void 3674 vfs_deallocate_syncvnode(struct mount *mp) 3675 { 3676 struct vnode *vp; 3677 3678 mtx_lock(&sync_mtx); 3679 vp = mp->mnt_syncer; 3680 if (vp != NULL) 3681 mp->mnt_syncer = NULL; 3682 mtx_unlock(&sync_mtx); 3683 if (vp != NULL) 3684 vrele(vp); 3685 } 3686 3687 /* 3688 * Do a lazy sync of the filesystem. 3689 */ 3690 static int 3691 sync_fsync(struct vop_fsync_args *ap) 3692 { 3693 struct vnode *syncvp = ap->a_vp; 3694 struct mount *mp = syncvp->v_mount; 3695 int error, save; 3696 struct bufobj *bo; 3697 3698 /* 3699 * We only need to do something if this is a lazy evaluation. 3700 */ 3701 if (ap->a_waitfor != MNT_LAZY) 3702 return (0); 3703 3704 /* 3705 * Move ourselves to the back of the sync list. 3706 */ 3707 bo = &syncvp->v_bufobj; 3708 BO_LOCK(bo); 3709 vn_syncer_add_to_worklist(bo, syncdelay); 3710 BO_UNLOCK(bo); 3711 3712 /* 3713 * Walk the list of vnodes pushing all that are dirty and 3714 * not already on the sync list. 3715 */ 3716 if (vfs_busy(mp, MBF_NOWAIT) != 0) 3717 return (0); 3718 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3719 vfs_unbusy(mp); 3720 return (0); 3721 } 3722 save = curthread_pflags_set(TDP_SYNCIO); 3723 vfs_msync(mp, MNT_NOWAIT); 3724 error = VFS_SYNC(mp, MNT_LAZY); 3725 curthread_pflags_restore(save); 3726 vn_finished_write(mp); 3727 vfs_unbusy(mp); 3728 return (error); 3729 } 3730 3731 /* 3732 * The syncer vnode is no referenced. 3733 */ 3734 static int 3735 sync_inactive(struct vop_inactive_args *ap) 3736 { 3737 3738 vgone(ap->a_vp); 3739 return (0); 3740 } 3741 3742 /* 3743 * The syncer vnode is no longer needed and is being decommissioned. 3744 * 3745 * Modifications to the worklist must be protected by sync_mtx. 
3746 */ 3747 static int 3748 sync_reclaim(struct vop_reclaim_args *ap) 3749 { 3750 struct vnode *vp = ap->a_vp; 3751 struct bufobj *bo; 3752 3753 bo = &vp->v_bufobj; 3754 BO_LOCK(bo); 3755 mtx_lock(&sync_mtx); 3756 if (vp->v_mount->mnt_syncer == vp) 3757 vp->v_mount->mnt_syncer = NULL; 3758 if (bo->bo_flag & BO_ONWORKLST) { 3759 LIST_REMOVE(bo, bo_synclist); 3760 syncer_worklist_len--; 3761 sync_vnode_count--; 3762 bo->bo_flag &= ~BO_ONWORKLST; 3763 } 3764 mtx_unlock(&sync_mtx); 3765 BO_UNLOCK(bo); 3766 3767 return (0); 3768 } 3769 3770 /* 3771 * Check if vnode represents a disk device 3772 */ 3773 int 3774 vn_isdisk(struct vnode *vp, int *errp) 3775 { 3776 int error; 3777 3778 if (vp->v_type != VCHR) { 3779 error = ENOTBLK; 3780 goto out; 3781 } 3782 error = 0; 3783 dev_lock(); 3784 if (vp->v_rdev == NULL) 3785 error = ENXIO; 3786 else if (vp->v_rdev->si_devsw == NULL) 3787 error = ENXIO; 3788 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3789 error = ENOTBLK; 3790 dev_unlock(); 3791 out: 3792 if (errp != NULL) 3793 *errp = error; 3794 return (error == 0); 3795 } 3796 3797 /* 3798 * Common filesystem object access control check routine. Accepts a 3799 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3800 * and optional call-by-reference privused argument allowing vaccess() 3801 * to indicate to the caller whether privilege was used to satisfy the 3802 * request (obsoleted). Returns 0 on success, or an errno on failure. 3803 */ 3804 int 3805 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3806 accmode_t accmode, struct ucred *cred, int *privused) 3807 { 3808 accmode_t dac_granted; 3809 accmode_t priv_granted; 3810 3811 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 3812 ("invalid bit in accmode")); 3813 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 3814 ("VAPPEND without VWRITE")); 3815 3816 /* 3817 * Look for a normal, non-privileged way to access the file/directory 3818 * as requested. If it exists, go with that. 3819 */ 3820 3821 if (privused != NULL) 3822 *privused = 0; 3823 3824 dac_granted = 0; 3825 3826 /* Check the owner. */ 3827 if (cred->cr_uid == file_uid) { 3828 dac_granted |= VADMIN; 3829 if (file_mode & S_IXUSR) 3830 dac_granted |= VEXEC; 3831 if (file_mode & S_IRUSR) 3832 dac_granted |= VREAD; 3833 if (file_mode & S_IWUSR) 3834 dac_granted |= (VWRITE | VAPPEND); 3835 3836 if ((accmode & dac_granted) == accmode) 3837 return (0); 3838 3839 goto privcheck; 3840 } 3841 3842 /* Otherwise, check the groups (first match) */ 3843 if (groupmember(file_gid, cred)) { 3844 if (file_mode & S_IXGRP) 3845 dac_granted |= VEXEC; 3846 if (file_mode & S_IRGRP) 3847 dac_granted |= VREAD; 3848 if (file_mode & S_IWGRP) 3849 dac_granted |= (VWRITE | VAPPEND); 3850 3851 if ((accmode & dac_granted) == accmode) 3852 return (0); 3853 3854 goto privcheck; 3855 } 3856 3857 /* Otherwise, check everyone else. */ 3858 if (file_mode & S_IXOTH) 3859 dac_granted |= VEXEC; 3860 if (file_mode & S_IROTH) 3861 dac_granted |= VREAD; 3862 if (file_mode & S_IWOTH) 3863 dac_granted |= (VWRITE | VAPPEND); 3864 if ((accmode & dac_granted) == accmode) 3865 return (0); 3866 3867 privcheck: 3868 /* 3869 * Build a privilege mask to determine if the set of privileges 3870 * satisfies the requirements when combined with the granted mask 3871 * from above. For each privilege, if the privilege is required, 3872 * bitwise or the request type onto the priv_granted mask. 
3873 */ 3874 priv_granted = 0; 3875 3876 if (type == VDIR) { 3877 /* 3878 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 3879 * requests, instead of PRIV_VFS_EXEC. 3880 */ 3881 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3882 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 3883 priv_granted |= VEXEC; 3884 } else { 3885 /* 3886 * Ensure that at least one execute bit is on. Otherwise, 3887 * a privileged user will always succeed, and we don't want 3888 * this to happen unless the file really is executable. 3889 */ 3890 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3891 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 3892 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 3893 priv_granted |= VEXEC; 3894 } 3895 3896 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 3897 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 3898 priv_granted |= VREAD; 3899 3900 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3901 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 3902 priv_granted |= (VWRITE | VAPPEND); 3903 3904 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3905 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 3906 priv_granted |= VADMIN; 3907 3908 if ((accmode & (priv_granted | dac_granted)) == accmode) { 3909 /* XXX audit: privilege used */ 3910 if (privused != NULL) 3911 *privused = 1; 3912 return (0); 3913 } 3914 3915 return ((accmode & VADMIN) ? EPERM : EACCES); 3916 } 3917 3918 /* 3919 * Credential check based on process requesting service, and per-attribute 3920 * permissions. 3921 */ 3922 int 3923 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 3924 struct thread *td, accmode_t accmode) 3925 { 3926 3927 /* 3928 * Kernel-invoked always succeeds. 3929 */ 3930 if (cred == NOCRED) 3931 return (0); 3932 3933 /* 3934 * Do not allow privileged processes in jail to directly manipulate 3935 * system attributes. 3936 */ 3937 switch (attrnamespace) { 3938 case EXTATTR_NAMESPACE_SYSTEM: 3939 /* Potentially should be: return (EPERM); */ 3940 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 3941 case EXTATTR_NAMESPACE_USER: 3942 return (VOP_ACCESS(vp, accmode, cred, td)); 3943 default: 3944 return (EPERM); 3945 } 3946 } 3947 3948 #ifdef DEBUG_VFS_LOCKS 3949 /* 3950 * This only exists to supress warnings from unlocked specfs accesses. It is 3951 * no longer ok to have an unlocked VFS. 3952 */ 3953 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 3954 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 3955 3956 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 3957 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 3958 "Drop into debugger on lock violation"); 3959 3960 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 3961 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 3962 0, "Check for interlock across VOPs"); 3963 3964 int vfs_badlock_print = 1; /* Print lock violations. */ 3965 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 3966 0, "Print lock violations"); 3967 3968 #ifdef KDB 3969 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. 
*/ 3970 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 3971 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 3972 #endif 3973 3974 static void 3975 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 3976 { 3977 3978 #ifdef KDB 3979 if (vfs_badlock_backtrace) 3980 kdb_backtrace(); 3981 #endif 3982 if (vfs_badlock_print) 3983 printf("%s: %p %s\n", str, (void *)vp, msg); 3984 if (vfs_badlock_ddb) 3985 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 3986 } 3987 3988 void 3989 assert_vi_locked(struct vnode *vp, const char *str) 3990 { 3991 3992 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 3993 vfs_badlock("interlock is not locked but should be", str, vp); 3994 } 3995 3996 void 3997 assert_vi_unlocked(struct vnode *vp, const char *str) 3998 { 3999 4000 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 4001 vfs_badlock("interlock is locked but should not be", str, vp); 4002 } 4003 4004 void 4005 assert_vop_locked(struct vnode *vp, const char *str) 4006 { 4007 int locked; 4008 4009 if (!IGNORE_LOCK(vp)) { 4010 locked = VOP_ISLOCKED(vp); 4011 if (locked == 0 || locked == LK_EXCLOTHER) 4012 vfs_badlock("is not locked but should be", str, vp); 4013 } 4014 } 4015 4016 void 4017 assert_vop_unlocked(struct vnode *vp, const char *str) 4018 { 4019 4020 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 4021 vfs_badlock("is locked but should not be", str, vp); 4022 } 4023 4024 void 4025 assert_vop_elocked(struct vnode *vp, const char *str) 4026 { 4027 4028 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 4029 vfs_badlock("is not exclusive locked but should be", str, vp); 4030 } 4031 4032 #if 0 4033 void 4034 assert_vop_elocked_other(struct vnode *vp, const char *str) 4035 { 4036 4037 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 4038 vfs_badlock("is not exclusive locked by another thread", 4039 str, vp); 4040 } 4041 4042 void 4043 assert_vop_slocked(struct vnode *vp, const char *str) 4044 { 4045 4046 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 4047 vfs_badlock("is not locked shared but should be", str, vp); 4048 } 4049 #endif /* 0 */ 4050 #endif /* DEBUG_VFS_LOCKS */ 4051 4052 void 4053 vop_rename_fail(struct vop_rename_args *ap) 4054 { 4055 4056 if (ap->a_tvp != NULL) 4057 vput(ap->a_tvp); 4058 if (ap->a_tdvp == ap->a_tvp) 4059 vrele(ap->a_tdvp); 4060 else 4061 vput(ap->a_tdvp); 4062 vrele(ap->a_fdvp); 4063 vrele(ap->a_fvp); 4064 } 4065 4066 void 4067 vop_rename_pre(void *ap) 4068 { 4069 struct vop_rename_args *a = ap; 4070 4071 #ifdef DEBUG_VFS_LOCKS 4072 if (a->a_tvp) 4073 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 4074 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 4075 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 4076 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 4077 4078 /* Check the source (from). */ 4079 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 4080 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 4081 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 4082 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 4083 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 4084 4085 /* Check the target. 
*/ 4086 if (a->a_tvp) 4087 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 4088 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 4089 #endif 4090 if (a->a_tdvp != a->a_fdvp) 4091 vhold(a->a_fdvp); 4092 if (a->a_tvp != a->a_fvp) 4093 vhold(a->a_fvp); 4094 vhold(a->a_tdvp); 4095 if (a->a_tvp) 4096 vhold(a->a_tvp); 4097 } 4098 4099 void 4100 vop_strategy_pre(void *ap) 4101 { 4102 #ifdef DEBUG_VFS_LOCKS 4103 struct vop_strategy_args *a; 4104 struct buf *bp; 4105 4106 a = ap; 4107 bp = a->a_bp; 4108 4109 /* 4110 * Cluster ops lock their component buffers but not the IO container. 4111 */ 4112 if ((bp->b_flags & B_CLUSTER) != 0) 4113 return; 4114 4115 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 4116 if (vfs_badlock_print) 4117 printf( 4118 "VOP_STRATEGY: bp is not locked but should be\n"); 4119 if (vfs_badlock_ddb) 4120 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 4121 } 4122 #endif 4123 } 4124 4125 void 4126 vop_lock_pre(void *ap) 4127 { 4128 #ifdef DEBUG_VFS_LOCKS 4129 struct vop_lock1_args *a = ap; 4130 4131 if ((a->a_flags & LK_INTERLOCK) == 0) 4132 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4133 else 4134 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 4135 #endif 4136 } 4137 4138 void 4139 vop_lock_post(void *ap, int rc) 4140 { 4141 #ifdef DEBUG_VFS_LOCKS 4142 struct vop_lock1_args *a = ap; 4143 4144 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4145 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 4146 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4147 #endif 4148 } 4149 4150 void 4151 vop_unlock_pre(void *ap) 4152 { 4153 #ifdef DEBUG_VFS_LOCKS 4154 struct vop_unlock_args *a = ap; 4155 4156 if (a->a_flags & LK_INTERLOCK) 4157 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4158 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4159 #endif 4160 } 4161 4162 void 4163 vop_unlock_post(void *ap, int rc) 4164 { 4165 #ifdef DEBUG_VFS_LOCKS 4166 struct vop_unlock_args *a = ap; 4167 4168 if (a->a_flags & LK_INTERLOCK) 4169 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4170 #endif 4171 } 4172 4173 void 4174 vop_create_post(void *ap, int rc) 4175 { 4176 struct vop_create_args *a = ap; 4177 4178 if (!rc) 4179 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4180 } 4181 4182 void 4183 vop_deleteextattr_post(void *ap, int rc) 4184 { 4185 struct vop_deleteextattr_args *a = ap; 4186 4187 if (!rc) 4188 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4189 } 4190 4191 void 4192 vop_link_post(void *ap, int rc) 4193 { 4194 struct vop_link_args *a = ap; 4195 4196 if (!rc) { 4197 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4198 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4199 } 4200 } 4201 4202 void 4203 vop_mkdir_post(void *ap, int rc) 4204 { 4205 struct vop_mkdir_args *a = ap; 4206 4207 if (!rc) 4208 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4209 } 4210 4211 void 4212 vop_mknod_post(void *ap, int rc) 4213 { 4214 struct vop_mknod_args *a = ap; 4215 4216 if (!rc) 4217 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4218 } 4219 4220 void 4221 vop_remove_post(void *ap, int rc) 4222 { 4223 struct vop_remove_args *a = ap; 4224 4225 if (!rc) { 4226 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4227 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4228 } 4229 } 4230 4231 void 4232 vop_rename_post(void *ap, int rc) 4233 { 4234 struct vop_rename_args *a = ap; 4235 4236 if (!rc) { 4237 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 4238 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 4239 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4240 if (a->a_tvp) 4241 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4242 } 4243 if (a->a_tdvp != a->a_fdvp) 4244 vdrop(a->a_fdvp); 4245 if (a->a_tvp != 
a->a_fvp) 4246 vdrop(a->a_fvp); 4247 vdrop(a->a_tdvp); 4248 if (a->a_tvp) 4249 vdrop(a->a_tvp); 4250 } 4251 4252 void 4253 vop_rmdir_post(void *ap, int rc) 4254 { 4255 struct vop_rmdir_args *a = ap; 4256 4257 if (!rc) { 4258 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4259 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4260 } 4261 } 4262 4263 void 4264 vop_setattr_post(void *ap, int rc) 4265 { 4266 struct vop_setattr_args *a = ap; 4267 4268 if (!rc) 4269 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4270 } 4271 4272 void 4273 vop_setextattr_post(void *ap, int rc) 4274 { 4275 struct vop_setextattr_args *a = ap; 4276 4277 if (!rc) 4278 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4279 } 4280 4281 void 4282 vop_symlink_post(void *ap, int rc) 4283 { 4284 struct vop_symlink_args *a = ap; 4285 4286 if (!rc) 4287 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4288 } 4289 4290 static struct knlist fs_knlist; 4291 4292 static void 4293 vfs_event_init(void *arg) 4294 { 4295 knlist_init_mtx(&fs_knlist, NULL); 4296 } 4297 /* XXX - correct order? */ 4298 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 4299 4300 void 4301 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 4302 { 4303 4304 KNOTE_UNLOCKED(&fs_knlist, event); 4305 } 4306 4307 static int filt_fsattach(struct knote *kn); 4308 static void filt_fsdetach(struct knote *kn); 4309 static int filt_fsevent(struct knote *kn, long hint); 4310 4311 struct filterops fs_filtops = { 4312 .f_isfd = 0, 4313 .f_attach = filt_fsattach, 4314 .f_detach = filt_fsdetach, 4315 .f_event = filt_fsevent 4316 }; 4317 4318 static int 4319 filt_fsattach(struct knote *kn) 4320 { 4321 4322 kn->kn_flags |= EV_CLEAR; 4323 knlist_add(&fs_knlist, kn, 0); 4324 return (0); 4325 } 4326 4327 static void 4328 filt_fsdetach(struct knote *kn) 4329 { 4330 4331 knlist_remove(&fs_knlist, kn, 0); 4332 } 4333 4334 static int 4335 filt_fsevent(struct knote *kn, long hint) 4336 { 4337 4338 kn->kn_fflags |= hint; 4339 return (kn->kn_fflags != 0); 4340 } 4341 4342 static int 4343 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 4344 { 4345 struct vfsidctl vc; 4346 int error; 4347 struct mount *mp; 4348 4349 error = SYSCTL_IN(req, &vc, sizeof(vc)); 4350 if (error) 4351 return (error); 4352 if (vc.vc_vers != VFS_CTL_VERS1) 4353 return (EINVAL); 4354 mp = vfs_getvfs(&vc.vc_fsid); 4355 if (mp == NULL) 4356 return (ENOENT); 4357 /* ensure that a specific sysctl goes to the right filesystem. */ 4358 if (strcmp(vc.vc_fstypename, "*") != 0 && 4359 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 4360 vfs_rel(mp); 4361 return (EINVAL); 4362 } 4363 VCTLTOREQ(&vc, req); 4364 error = VFS_SYSCTL(mp, vc.vc_op, req); 4365 vfs_rel(mp); 4366 return (error); 4367 } 4368 4369 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR, 4370 NULL, 0, sysctl_vfs_ctl, "", 4371 "Sysctl by fsid"); 4372 4373 /* 4374 * Function to initialize a va_filerev field sensibly. 4375 * XXX: Wouldn't a random number make a lot more sense ?? 
4376 */ 4377 u_quad_t 4378 init_va_filerev(void) 4379 { 4380 struct bintime bt; 4381 4382 getbinuptime(&bt); 4383 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 4384 } 4385 4386 static int filt_vfsread(struct knote *kn, long hint); 4387 static int filt_vfswrite(struct knote *kn, long hint); 4388 static int filt_vfsvnode(struct knote *kn, long hint); 4389 static void filt_vfsdetach(struct knote *kn); 4390 static struct filterops vfsread_filtops = { 4391 .f_isfd = 1, 4392 .f_detach = filt_vfsdetach, 4393 .f_event = filt_vfsread 4394 }; 4395 static struct filterops vfswrite_filtops = { 4396 .f_isfd = 1, 4397 .f_detach = filt_vfsdetach, 4398 .f_event = filt_vfswrite 4399 }; 4400 static struct filterops vfsvnode_filtops = { 4401 .f_isfd = 1, 4402 .f_detach = filt_vfsdetach, 4403 .f_event = filt_vfsvnode 4404 }; 4405 4406 static void 4407 vfs_knllock(void *arg) 4408 { 4409 struct vnode *vp = arg; 4410 4411 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4412 } 4413 4414 static void 4415 vfs_knlunlock(void *arg) 4416 { 4417 struct vnode *vp = arg; 4418 4419 VOP_UNLOCK(vp, 0); 4420 } 4421 4422 static void 4423 vfs_knl_assert_locked(void *arg) 4424 { 4425 #ifdef DEBUG_VFS_LOCKS 4426 struct vnode *vp = arg; 4427 4428 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 4429 #endif 4430 } 4431 4432 static void 4433 vfs_knl_assert_unlocked(void *arg) 4434 { 4435 #ifdef DEBUG_VFS_LOCKS 4436 struct vnode *vp = arg; 4437 4438 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 4439 #endif 4440 } 4441 4442 int 4443 vfs_kqfilter(struct vop_kqfilter_args *ap) 4444 { 4445 struct vnode *vp = ap->a_vp; 4446 struct knote *kn = ap->a_kn; 4447 struct knlist *knl; 4448 4449 switch (kn->kn_filter) { 4450 case EVFILT_READ: 4451 kn->kn_fop = &vfsread_filtops; 4452 break; 4453 case EVFILT_WRITE: 4454 kn->kn_fop = &vfswrite_filtops; 4455 break; 4456 case EVFILT_VNODE: 4457 kn->kn_fop = &vfsvnode_filtops; 4458 break; 4459 default: 4460 return (EINVAL); 4461 } 4462 4463 kn->kn_hook = (caddr_t)vp; 4464 4465 v_addpollinfo(vp); 4466 if (vp->v_pollinfo == NULL) 4467 return (ENOMEM); 4468 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 4469 vhold(vp); 4470 knlist_add(knl, kn, 0); 4471 4472 return (0); 4473 } 4474 4475 /* 4476 * Detach knote from vnode 4477 */ 4478 static void 4479 filt_vfsdetach(struct knote *kn) 4480 { 4481 struct vnode *vp = (struct vnode *)kn->kn_hook; 4482 4483 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 4484 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 4485 vdrop(vp); 4486 } 4487 4488 /*ARGSUSED*/ 4489 static int 4490 filt_vfsread(struct knote *kn, long hint) 4491 { 4492 struct vnode *vp = (struct vnode *)kn->kn_hook; 4493 struct vattr va; 4494 int res; 4495 4496 /* 4497 * filesystem is gone, so set the EOF flag and schedule 4498 * the knote for deletion. 4499 */ 4500 if (hint == NOTE_REVOKE) { 4501 VI_LOCK(vp); 4502 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4503 VI_UNLOCK(vp); 4504 return (1); 4505 } 4506 4507 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 4508 return (0); 4509 4510 VI_LOCK(vp); 4511 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 4512 res = (kn->kn_data != 0); 4513 VI_UNLOCK(vp); 4514 return (res); 4515 } 4516 4517 /*ARGSUSED*/ 4518 static int 4519 filt_vfswrite(struct knote *kn, long hint) 4520 { 4521 struct vnode *vp = (struct vnode *)kn->kn_hook; 4522 4523 VI_LOCK(vp); 4524 4525 /* 4526 * filesystem is gone, so set the EOF flag and schedule 4527 * the knote for deletion. 
4528 */ 4529 if (hint == NOTE_REVOKE) 4530 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 4531 4532 kn->kn_data = 0; 4533 VI_UNLOCK(vp); 4534 return (1); 4535 } 4536 4537 static int 4538 filt_vfsvnode(struct knote *kn, long hint) 4539 { 4540 struct vnode *vp = (struct vnode *)kn->kn_hook; 4541 int res; 4542 4543 VI_LOCK(vp); 4544 if (kn->kn_sfflags & hint) 4545 kn->kn_fflags |= hint; 4546 if (hint == NOTE_REVOKE) { 4547 kn->kn_flags |= EV_EOF; 4548 VI_UNLOCK(vp); 4549 return (1); 4550 } 4551 res = (kn->kn_fflags != 0); 4552 VI_UNLOCK(vp); 4553 return (res); 4554 } 4555 4556 int 4557 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 4558 { 4559 int error; 4560 4561 if (dp->d_reclen > ap->a_uio->uio_resid) 4562 return (ENAMETOOLONG); 4563 error = uiomove(dp, dp->d_reclen, ap->a_uio); 4564 if (error) { 4565 if (ap->a_ncookies != NULL) { 4566 if (ap->a_cookies != NULL) 4567 free(ap->a_cookies, M_TEMP); 4568 ap->a_cookies = NULL; 4569 *ap->a_ncookies = 0; 4570 } 4571 return (error); 4572 } 4573 if (ap->a_ncookies == NULL) 4574 return (0); 4575 4576 KASSERT(ap->a_cookies, 4577 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 4578 4579 *ap->a_cookies = realloc(*ap->a_cookies, 4580 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 4581 (*ap->a_cookies)[*ap->a_ncookies] = off; 4582 return (0); 4583 } 4584 4585 /* 4586 * Mark the file's access time for update if the filesystem 4587 * supports VOP_MARKATIME. This functionality is used by execve and 4588 * mmap, so we want to avoid the I/O implied by directly setting 4589 * va_atime for the sake of efficiency. 4590 */ 4591 void 4592 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 4593 { 4594 struct mount *mp; 4595 4596 mp = vp->v_mount; 4597 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 4598 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 4599 (void)VOP_MARKATIME(vp); 4600 } 4601 4602 /* 4603 * The purpose of this routine is to remove granularity from accmode_t, 4604 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 4605 * VADMIN and VAPPEND. 4606 * 4607 * If it returns 0, the caller is supposed to continue with the usual 4608 * access checks using 'accmode' as modified by this routine. If it 4609 * returns a nonzero value, the caller is supposed to return that value 4610 * as errno. 4611 * 4612 * Note that after this routine runs, accmode may be zero. 4613 */ 4614 int 4615 vfs_unixify_accmode(accmode_t *accmode) 4616 { 4617 /* 4618 * There is no way to specify an explicit "deny" rule using 4619 * file mode or POSIX.1e ACLs. 4620 */ 4621 if (*accmode & VEXPLICIT_DENY) { 4622 *accmode = 0; 4623 return (0); 4624 } 4625 4626 /* 4627 * None of these can be translated into usual access bits. 4628 * Also, the common case for NFSv4 ACLs is to not contain 4629 * either of these bits. Caller should check for VWRITE 4630 * on the containing directory instead. 4631 */ 4632 if (*accmode & (VDELETE_CHILD | VDELETE)) 4633 return (EPERM); 4634 4635 if (*accmode & VADMIN_PERMS) { 4636 *accmode &= ~VADMIN_PERMS; 4637 *accmode |= VADMIN; 4638 } 4639 4640 /* 4641 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 4642 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 4643 */ 4644 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 4645 4646 return (0); 4647 } 4648 4649 /* 4650 * These are helper functions for filesystems to traverse all 4651 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 4652 * 4653 * This interface replaces MNT_VNODE_FOREACH.
4654 */ 4655 4656 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 4657 4658 struct vnode * 4659 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 4660 { 4661 struct vnode *vp; 4662 4663 if (should_yield()) 4664 kern_yield(PRI_USER); 4665 MNT_ILOCK(mp); 4666 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4667 vp = TAILQ_NEXT(*mvp, v_nmntvnodes); 4668 while (vp != NULL && (vp->v_type == VMARKER || 4669 (vp->v_iflag & VI_DOOMED) != 0)) 4670 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4671 4672 /* Check if we are done */ 4673 if (vp == NULL) { 4674 __mnt_vnode_markerfree_all(mvp, mp); 4675 /* MNT_IUNLOCK(mp); -- done in above function */ 4676 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 4677 return (NULL); 4678 } 4679 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4680 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4681 VI_LOCK(vp); 4682 MNT_IUNLOCK(mp); 4683 return (vp); 4684 } 4685 4686 struct vnode * 4687 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 4688 { 4689 struct vnode *vp; 4690 4691 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4692 MNT_ILOCK(mp); 4693 MNT_REF(mp); 4694 (*mvp)->v_type = VMARKER; 4695 4696 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 4697 while (vp != NULL && (vp->v_type == VMARKER || 4698 (vp->v_iflag & VI_DOOMED) != 0)) 4699 vp = TAILQ_NEXT(vp, v_nmntvnodes); 4700 4701 /* Check if we are done */ 4702 if (vp == NULL) { 4703 MNT_REL(mp); 4704 MNT_IUNLOCK(mp); 4705 free(*mvp, M_VNODE_MARKER); 4706 *mvp = NULL; 4707 return (NULL); 4708 } 4709 (*mvp)->v_mount = mp; 4710 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 4711 VI_LOCK(vp); 4712 MNT_IUNLOCK(mp); 4713 return (vp); 4714 } 4715 4716 4717 void 4718 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 4719 { 4720 4721 if (*mvp == NULL) { 4722 MNT_IUNLOCK(mp); 4723 return; 4724 } 4725 4726 mtx_assert(MNT_MTX(mp), MA_OWNED); 4727 4728 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4729 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 4730 MNT_REL(mp); 4731 MNT_IUNLOCK(mp); 4732 free(*mvp, M_VNODE_MARKER); 4733 *mvp = NULL; 4734 } 4735 4736 /* 4737 * These are helper functions for filesystems to traverse their 4738 * active vnodes. 
See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 4739 */ 4740 static void 4741 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4742 { 4743 4744 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4745 4746 MNT_ILOCK(mp); 4747 MNT_REL(mp); 4748 MNT_IUNLOCK(mp); 4749 free(*mvp, M_VNODE_MARKER); 4750 *mvp = NULL; 4751 } 4752 4753 static struct vnode * 4754 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4755 { 4756 struct vnode *vp, *nvp; 4757 4758 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 4759 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 4760 restart: 4761 vp = TAILQ_NEXT(*mvp, v_actfreelist); 4762 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4763 while (vp != NULL) { 4764 if (vp->v_type == VMARKER) { 4765 vp = TAILQ_NEXT(vp, v_actfreelist); 4766 continue; 4767 } 4768 if (!VI_TRYLOCK(vp)) { 4769 if (mp_ncpus == 1 || should_yield()) { 4770 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4771 mtx_unlock(&vnode_free_list_mtx); 4772 pause("vnacti", 1); 4773 mtx_lock(&vnode_free_list_mtx); 4774 goto restart; 4775 } 4776 continue; 4777 } 4778 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 4779 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 4780 ("alien vnode on the active list %p %p", vp, mp)); 4781 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0) 4782 break; 4783 nvp = TAILQ_NEXT(vp, v_actfreelist); 4784 VI_UNLOCK(vp); 4785 vp = nvp; 4786 } 4787 4788 /* Check if we are done */ 4789 if (vp == NULL) { 4790 mtx_unlock(&vnode_free_list_mtx); 4791 mnt_vnode_markerfree_active(mvp, mp); 4792 return (NULL); 4793 } 4794 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 4795 mtx_unlock(&vnode_free_list_mtx); 4796 ASSERT_VI_LOCKED(vp, "active iter"); 4797 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 4798 return (vp); 4799 } 4800 4801 struct vnode * 4802 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 4803 { 4804 4805 if (should_yield()) 4806 kern_yield(PRI_USER); 4807 mtx_lock(&vnode_free_list_mtx); 4808 return (mnt_vnode_next_active(mvp, mp)); 4809 } 4810 4811 struct vnode * 4812 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 4813 { 4814 struct vnode *vp; 4815 4816 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 4817 MNT_ILOCK(mp); 4818 MNT_REF(mp); 4819 MNT_IUNLOCK(mp); 4820 (*mvp)->v_type = VMARKER; 4821 (*mvp)->v_mount = mp; 4822 4823 mtx_lock(&vnode_free_list_mtx); 4824 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 4825 if (vp == NULL) { 4826 mtx_unlock(&vnode_free_list_mtx); 4827 mnt_vnode_markerfree_active(mvp, mp); 4828 return (NULL); 4829 } 4830 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 4831 return (mnt_vnode_next_active(mvp, mp)); 4832 } 4833 4834 void 4835 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 4836 { 4837 4838 if (*mvp == NULL) 4839 return; 4840 4841 mtx_lock(&vnode_free_list_mtx); 4842 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 4843 mtx_unlock(&vnode_free_list_mtx); 4844 mnt_vnode_markerfree_active(mvp, mp); 4845 } 4846