/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>
#endif

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	WI_MPSAFEQ	0
#define	WI_GIANTQ	1

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	vbusy(struct vnode *vp);
static void	vinactive(struct vnode *, struct thread *);
static void	v_incr_usecount(struct vnode *);
static void	v_decr_usecount(struct vnode *);
static void	v_decr_useonly(struct vnode *);
static void	v_upgrade_usecount(struct vnode *);
static void	vfree(struct vnode *);
static void	vnlru_free(int);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	destroy_vpollinfo(struct vpollinfo *vi);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased on vdestroy() called on VI_DOOMed
 * vnode.
 */
static unsigned long	numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Free vnode target.  Free vnodes may simply be files which have been stat'd
 * but not read.  This is somewhat common, and a small cache of such files
 * should be kept to avoid recreation costs.
 */
static u_long wantfreevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0,
    "Number of vnodes in the free list");

static int vlru_allow_cache_src;
SYSCTL_INT(_vfs, OID_AUTO, vlru_allow_cache_src, CTLFLAG_RW,
    &vlru_allow_cache_src, 0, "Allow vlru to reclaim source vnode");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
    "Number of calls to reassignbuf");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending[2];
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define	SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define	SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

/*
 * Macros to control when a vnode is freed and recycled.  All require
 * the vnode interlock.
 */
#define	VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
#define	VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
#define	VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)


/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of physical pages to vnodes approaches sixteen to one.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16))
#endif
static void
vntblinit(void *dummy __unused)
{
	int physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to physical pages
	 * is one to four until desiredvnodes exceeds 98,304.  Thereafter, the
	 * marginal ratio of desiredvnodes to physical pages is one to
	 * sixteen.  However, desiredvnodes is limited by the kernel's heap
	 * size.  The memory required by desiredvnodes vnodes and vm objects
	 * may not exceed one seventh of the kernel's heap size.
	 */
	physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4,
	    cnt.v_page_count) / 16;
	virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) +
	    sizeof(struct vnode)));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %d -> %d\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending[WI_MPSAFEQ] = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_workitem_pending[WI_GIANTQ] = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);


/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, the vfs_busy-enforced lock is ordered before the
 * lock of any vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock (/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  VOP_LOOKUP() obtains B while A is held
 *  vfs_busy() obtains a shared lock on F while A and B are held
 *  vput() releases lock on B
 *  vput() releases lock on A
 *  VFS_ROOT() obtains lock on D while shared lock on F is held
 *  vfs_unbusy() releases shared lock on F
 *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    An attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	MNT_ILOCK(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point's fate is decided.  If the thread doing the unmounting
	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
	 * indicating that this mount point has survived the unmount attempt
	 * and vfs_busy should retry.  Otherwise the unmounter thread will set
	 * the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating
	 * that the mount point is about to be really destroyed.  vfs_busy
	 * needs to release its reference on the mount point in this case and
	 * return with ENOENT, telling the caller that the mount it tried to
	 * busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
	mp->mnt_lockref--;
	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
	struct mount *mp;
	int error;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	/*
	 * If the thread is jailed, but this is not a jail-friendly file
	 * system, deny immediately.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
		return (EPERM);

	/*
	 * If the file system was mounted outside the jail of the calling
	 * thread, deny immediately.
	 */
	if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
		return (EPERM);

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;
	done = 0;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count != 0) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)
			break;
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		--count;
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 */
		if (vp->v_usecount ||
		    (!vlru_allow_cache_src &&
		    !LIST_EMPTY(&(vp)->v_cache_src)) ||
		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		MNT_IUNLOCK(mp);
		vholdl(vp);
		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
			vdrop(vp);
			goto next_iter_mntunlocked;
		}
		VI_LOCK(vp);
		/*
		 * v_usecount may have been bumped after VOP_LOCK() dropped
		 * the vnode interlock and before it was locked again.
		 *
		 * It is not necessary to recheck VI_DOOMED because it can
		 * only be set by another thread that holds both the vnode
		 * lock and vnode interlock.  If another thread has the
		 * vnode lock before we get to VOP_LOCK() and obtains the
		 * vnode interlock after VOP_LOCK() drops the vnode
		 * interlock, the other thread will be unable to drop the
		 * vnode lock before our VOP_LOCK() call fails.
		 */
		if (vp->v_usecount ||
		    (!vlru_allow_cache_src &&
		    !LIST_EMPTY(&(vp)->v_cache_src)) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp, LK_INTERLOCK);
			goto next_iter_mntunlocked;
		}
		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
		vgonel(vp);
		VOP_UNLOCK(vp, 0);
		vdropl(vp);
		done++;
next_iter_mntunlocked:
		if (!should_yield())
			goto relock_mnt;
		goto yield;
next_iter:
		if (!should_yield())
			continue;
		MNT_IUNLOCK(mp);
yield:
		kern_yield(PRI_UNCHANGED);
relock_mnt:
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
	return done;
}

/*
 * Attempt to keep the free list at wantfreevnodes length.
 */
static void
vnlru_free(int count)
{
	struct vnode *vp;
	int vfslocked;

	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
	for (; count > 0; count--) {
		vp = TAILQ_FIRST(&vnode_free_list);
		/*
		 * The list can be modified while the free_list_mtx
		 * has been dropped and vp could be NULL here.
		 */
		if (!vp)
			break;
		VNASSERT(vp->v_op != NULL, vp,
		    ("vnlru_free: vnode already reclaimed."));
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		/*
		 * Don't recycle if we can't get the interlock.
		 */
		if (!VI_TRYLOCK(vp)) {
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			continue;
		}
		VNASSERT(VCANRECYCLE(vp), vp,
		    ("vp inconsistent on freelist"));
		freevnodes--;
		vp->v_iflag &= ~VI_FREE;
		vholdl(vp);
		mtx_unlock(&vnode_free_list_mtx);
		VI_UNLOCK(vp);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vtryrecycle(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		/*
		 * If the recycling succeeded, this vdrop will actually free
		 * the vnode.  If not it will simply place it back on
		 * the free list.
		 */
		vdrop(vp);
		mtx_lock(&vnode_free_list_mtx);
	}
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done, vfslocked;
	struct proc *p = vnlruproc;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (freevnodes > wantfreevnodes)
			vnlru_free(freevnodes - wantfreevnodes);
		if (numvnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_free_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			vfslocked = VFS_LOCK_GIANT(mp);
			done += vlrureclaim(mp);
			VFS_UNLOCK_GIANT(vfslocked);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else
			kern_yield(PRI_UNCHANGED);
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

void
vdestroy(struct vnode *vp)
{
	struct bufobj *bo;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	mtx_lock(&vnode_free_list_mtx);
	numvnodes--;
	mtx_unlock(&vnode_free_list_mtx);
	bo = &vp->v_bufobj;
	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
	    ("cleaned vnode still on the free list."));
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_vnode_destroy(vp);
#endif
	if (vp->v_pollinfo != NULL)
		destroy_vpollinfo(vp->v_pollinfo);
#ifdef INVARIANTS
	/* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	mtx_destroy(BO_MTX(bo));
	uma_zfree(vnode_zone, vp);
}

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().
 * This function must be called with the vnode held to prevent the vnode from
 * being returned to the free list midway through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp, 0);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp, LK_INTERLOCK);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if ((vp->v_iflag & VI_DOOMED) == 0)
		vgonel(vp);
	VOP_UNLOCK(vp, LK_INTERLOCK);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp = NULL;
	struct bufobj *bo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
	mtx_lock(&vnode_free_list_mtx);
	/*
	 * Lend our context to reclaim vnodes if they've exceeded the max.
	 */
	if (freevnodes > wantfreevnodes)
		vnlru_free(1);
	/*
	 * Wait for available vnodes.
	 */
	if (numvnodes > desiredvnodes) {
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
			/*
			 * The file system is being suspended; we cannot risk
			 * a deadlock here, so allocate a new vnode anyway.
			 */
			if (freevnodes > wantfreevnodes)
				vnlru_free(freevnodes - wantfreevnodes);
			goto alloc;
		}
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
		    "vlruwk", hz);
#if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
		if (numvnodes > desiredvnodes) {
			mtx_unlock(&vnode_free_list_mtx);
			return (ENFILE);
		}
#endif
	}
alloc:
	numvnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
	/*
	 * Set up locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems
	 * opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
	/*
	 * Initialize bufobj.
	 */
	bo = &vp->v_bufobj;
	bo->__bo_vnode = vp;
	mtx_init(BO_MTX(bo), "bufobj interlock", NULL, MTX_DEF);
	bo->bo_ops = &buf_ops_bio;
	bo->bo_private = vp;
	TAILQ_INIT(&bo->bo_clean.bv_hd);
	TAILQ_INIT(&bo->bo_dirty.bv_hd);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Finalize various vnode identity bits.
	 */
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	v_incr_usecount(vp);
	vp->v_data = NULL;
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
	else if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode()\n");
#endif
	if (mp != NULL) {
		bo->bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	*vpp = vp;
	return (0);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	vp->v_mount = NULL;
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	/* XXX non mp-safe fs may still call insmntque with vnode
	   unlocked */
	if (!VOP_ISLOCKED(vp))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{
	int locked;

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
#ifdef DEBUG_VFS_LOCKS
	if (!VFS_NEEDSGIANT(mp))
		ASSERT_VOP_ELOCKED(vp,
		    "insmntque: mp-safe fs and non-locked vp");
#endif
	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
	    mp->mnt_nvnodelistsize == 0)) {
		locked = VOP_ISLOCKED(vp);
		if (!locked || (locked == LK_EXCLUSIVE &&
		    (vp->v_vflag & VV_FORCEINSMQ) == 0)) {
			MNT_IUNLOCK(mp);
			if (dtr != NULL)
				dtr(vp, dtr_arg);
			return (EBUSY);
		}
	}
	vp->v_mount = mp;
	MNT_REF(mp);
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
	    ("neg mount point vnode list size"));
	mp->mnt_nvnodelistsize++;
	MNT_IUNLOCK(mp);
	return (0);
}

int
insmntque(struct vnode *vp, struct mount *mp)
{

	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
}

/*
 * Flush out and invalidate all buffers associated with a bufobj
 * Called with the underlying object locked.
 */
int
bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
{
	int error;

	BO_LOCK(bo);
	if (flags & V_SAVE) {
		error = bufobj_wwait(bo, slpflag, slptimeo);
		if (error) {
			BO_UNLOCK(bo);
			return (error);
		}
		if (bo->bo_dirty.bv_cnt > 0) {
			BO_UNLOCK(bo);
			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			BO_LOCK(bo);
			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
				panic("vinvalbuf: dirty bufs");
		}
	}
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	do {
		error = flushbuflist(&bo->bo_clean,
		    flags, bo, slpflag, slptimeo);
		if (error == 0 && !(flags & V_CLEANONLY))
			error = flushbuflist(&bo->bo_dirty,
			    flags, bo, slpflag, slptimeo);
		if (error != 0 && error != EAGAIN) {
			BO_UNLOCK(bo);
			return (error);
		}
	} while (error != 0);

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		bufobj_wwait(bo, 0, 0);
		BO_UNLOCK(bo);
		if (bo->bo_object != NULL) {
			VM_OBJECT_LOCK(bo->bo_object);
			vm_object_pip_wait(bo->bo_object, "bovlbx");
			VM_OBJECT_UNLOCK(bo->bo_object);
		}
		BO_LOCK(bo);
	} while (bo->bo_numoutput > 0);
	BO_UNLOCK(bo);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (bo->bo_object != NULL &&
	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
		VM_OBJECT_LOCK(bo->bo_object);
		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
		    OBJPR_CLEANONLY : 0);
		VM_OBJECT_UNLOCK(bo->bo_object);
	}

#ifdef INVARIANTS
	BO_LOCK(bo);
	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("vinvalbuf: flush failed");
	BO_UNLOCK(bo);
#endif
	return (0);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{

	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
}

/*
 * Flush out buffers on the specified list.
 *
 */
static int
flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
    int slptimeo)
{
	struct buf *bp, *nbp;
	int retval, error;
	daddr_t lblkno;
	b_xflags_t xflags;

	ASSERT_BO_LOCKED(bo);

	retval = 0;
	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		lblkno = 0;
		xflags = 0;
		if (nbp != NULL) {
			lblkno = nbp->b_lblkno;
			xflags = nbp->b_xflags &
			    (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
		}
		retval = EAGAIN;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			BO_LOCK(bo);
			return (error != ENOLCK ? error : EAGAIN);
		}
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			return (EAGAIN);
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {
			BO_LOCK(bo);
			bremfree(bp);
			BO_UNLOCK(bo);
			bp->b_flags |= B_ASYNC;
			bwrite(bp);
			BO_LOCK(bo);
			return (EAGAIN);	/* XXX: why not loop ? */
		}
		BO_LOCK(bo);
		bremfree(bp);
		BO_UNLOCK(bo);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		BO_LOCK(bo);
		if (nbp != NULL &&
		    (nbp->b_bufobj != bo ||
		    nbp->b_lblkno != lblkno ||
		    (nbp->b_xflags &
		    (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
			break;			/* nbp invalid */
	}
	return (retval);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
    off_t length, int blksize)
{
	struct buf *bp, *nbp;
	int anyfreed;
	int trunclbn;
	struct bufobj *bo;

	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
	    vp, cred, blksize, (uintmax_t)length);

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	bo = &vp->v_bufobj;
	BO_LOCK(bo);
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo)) == ENOLCK)
				goto restart;

			BO_LOCK(bo);
			bremfree(bp);
			BO_UNLOCK(bo);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;

			BO_LOCK(bo);
			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI))) {
				BO_UNLOCK(bo);
				goto restart;
			}
		}

		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno < trunclbn)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo)) == ENOLCK)
				goto restart;
			BO_LOCK(bo);
			bremfree(bp);
			BO_UNLOCK(bo);
			bp->b_flags |= (B_INVAL | B_RELBUF);
			bp->b_flags &= ~B_ASYNC;
			brelse(bp);
			anyfreed = 1;

			BO_LOCK(bo);
			if (nbp != NULL &&
			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
			    (nbp->b_vp != vp) ||
			    (nbp->b_flags & B_DELWRI) == 0)) {
				BO_UNLOCK(bo);
				goto restart;
			}
		}
	}

	if (length > 0) {
restartsync:
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_MTX(bo)) == ENOLCK) {
				goto restart;
			}
			VNASSERT((bp->b_flags & B_DELWRI), vp,
			    ("buf(%p) on dirty queue without DELWRI", bp));

			BO_LOCK(bo);
			bremfree(bp);
			BO_UNLOCK(bo);
			bawrite(bp);
			BO_LOCK(bo);
			goto restartsync;
		}
	}

	bufobj_wwait(bo, 0, 0);
	BO_UNLOCK(bo);
	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * a vnode.
 *
 * NOTE: We have to deal with the special case of a background bitmap
 * buffer, a situation where two buffers will have the same logical
 * block offset.  We want (1) only the foreground buffer to be accessed
 * in a lookup and (2) must differentiate between the foreground and
 * background buffer in the splay tree algorithm because the splay
 * tree cannot normally handle multiple entities with the same 'index'.
 * We accomplish this by adding differentiating flags to the splay tree's
 * numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}

static void
buf_vlist_remove(struct buf *bp)
{
	struct buf *root;
	struct bufv *bv;

	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	ASSERT_BO_LOCKED(bp->b_bufobj);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
	    (BX_VNDIRTY|BX_VNCLEAN),
	    ("buf_vlist_remove: Buf %p is on two lists", bp));
	if (bp->b_xflags & BX_VNDIRTY)
		bv = &bp->b_bufobj->bo_dirty;
	else
		bv = &bp->b_bufobj->bo_clean;
	if (bp != bv->bv_root) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
		KASSERT(root == bp, ("splay lookup failed in remove"));
	}
	if (bp->b_left == NULL) {
		root = bp->b_right;
	} else {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
		root->b_right = bp->b_right;
	}
	bv->bv_root = root;
	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
	bv->bv_cnt--;
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static void
buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
{
	struct buf *root;
	struct bufv *bv;

	ASSERT_BO_LOCKED(bo);
	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY)
		bv = &bo->bo_dirty;
	else
		bv = &bo->bo_clean;

	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
	if (root == NULL) {
		bp->b_left = NULL;
		bp->b_right = NULL;
		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
	} else if (bp->b_lblkno < root->b_lblkno ||
	    (bp->b_lblkno == root->b_lblkno &&
	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
		bp->b_left = root->b_left;
		bp->b_right = root;
		root->b_left = NULL;
		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
	} else {
		bp->b_right = root->b_right;
		bp->b_left = root;
		root->b_right = NULL;
		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
	}
	bv->bv_cnt++;
	bv->bv_root = bp;
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct bufobj *bo, daddr_t lblkno)
{
	struct buf *bp;

	ASSERT_BO_LOCKED(bo);
	if ((bp = bo->bo_clean.bv_root) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = bo->bo_dirty.bv_root) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = bo->bo_clean.bv_root) != NULL) {
		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	if ((bp = bo->bo_dirty.bv_root) != NULL) {
		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	return (NULL);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(struct vnode *vp, struct buf *bp)
{
	struct bufobj *bo;

	bo = &vp->v_bufobj;
	ASSERT_BO_LOCKED(bo);
	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));

	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
	    ("bgetvp: bp already attached! %p", bp));

	vhold(vp);
	if (VFS_NEEDSGIANT(vp->v_mount) || bo->bo_flag & BO_NEEDSGIANT)
		bp->b_flags |= B_NEEDSGIANT;
	bp->b_vp = vp;
	bp->b_bufobj = bo;
	/*
	 * Insert onto list for new vnode.
	 */
	buf_vlist_add(bp, bo, BX_VNCLEAN);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct bufobj *bo;
	struct vnode *vp;

	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;		/* XXX */
	bo = bp->b_bufobj;
	BO_LOCK(bo);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	else
		panic("brelvp: Buffer %p not on queue.", bp);
	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
		bo->bo_flag &= ~BO_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(bo, bo_synclist);
		syncer_worklist_len--;
		mtx_unlock(&sync_mtx);
	}
	bp->b_flags &= ~B_NEEDSGIANT;
	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	BO_UNLOCK(bo);
	vdrop(vp);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
{
	int queue, slot;

	ASSERT_BO_LOCKED(bo);

	mtx_lock(&sync_mtx);
	if (bo->bo_flag & BO_ONWORKLST)
		LIST_REMOVE(bo, bo_synclist);
	else {
		bo->bo_flag |= BO_ONWORKLST;
		syncer_worklist_len++;
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	queue = VFS_NEEDSGIANT(bo->__bo_vnode->v_mount) ?
	    WI_GIANTQ : WI_MPSAFEQ;
	LIST_INSERT_HEAD(&syncer_workitem_pending[queue][slot], bo,
	    bo_synclist);
	mtx_unlock(&sync_mtx);
}

static int
sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
{
	int error, len;

	mtx_lock(&sync_mtx);
	len = syncer_worklist_len - sync_vnode_count;
	mtx_unlock(&sync_mtx);
	error = SYSCTL_OUT(req, &len, sizeof(len));
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");

static struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);

static int
sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;

	*bo = LIST_FIRST(slp);
	if (*bo == NULL)
		return (0);
	vp = (*bo)->__bo_vnode;	/* XXX */
	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
		return (1);
	/*
	 * We use vhold in case the vnode does not
	 * successfully sync.  vhold prevents the vnode from
	 * going away when we unlock the sync_mtx so that
	 * we can acquire the vnode interlock.
	 */
	vholdl(vp);
	mtx_unlock(&sync_mtx);
	VI_UNLOCK(vp);
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		vdrop(vp);
		mtx_lock(&sync_mtx);
		return (*bo == LIST_FIRST(slp));
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	(void) VOP_FSYNC(vp, MNT_LAZY, td);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	BO_LOCK(*bo);
	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
		/*
		 * Put us back on the worklist.  The worklist
		 * routine will remove us from our current
		 * position and then add us back in at a later
		 * position.
		 */
		vn_syncer_add_to_worklist(*bo, syncdelay);
	}
	BO_UNLOCK(*bo);
	vdrop(vp);
	mtx_lock(&sync_mtx);
	return (0);
}

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *gnext, *next;
	struct synclist *gslp, *slp;
	struct bufobj *bo;
	long starttime;
	struct thread *td = curthread;
	int last_work_seen;
	int net_worklist_len;
	int syncer_final_iter;
	int first_printf;
	int error;

	last_work_seen = 0;
	syncer_final_iter = 0;
	first_printf = 1;
	syncer_state = SYNCER_RUNNING;
	starttime = time_uptime;
	td->td_pflags |= TDP_NORUNNINGBUF;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	mtx_lock(&sync_mtx);
	for (;;) {
		if (syncer_state == SYNCER_FINAL_DELAY &&
		    syncer_final_iter == 0) {
			mtx_unlock(&sync_mtx);
			kproc_suspend_check(td->td_proc);
			mtx_lock(&sync_mtx);
		}
		net_worklist_len = syncer_worklist_len - sync_vnode_count;
		if (syncer_state != SYNCER_RUNNING &&
		    starttime != time_uptime) {
			if (first_printf) {
				printf("\nSyncing disks, vnodes remaining...");
				first_printf = 0;
			}
			printf("%d ", net_worklist_len);
		}
		starttime = time_uptime;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Skip over empty worklist slots when shutting down.
		 */
		do {
			slp = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
			gslp = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
			syncer_delayno += 1;
			if (syncer_delayno == syncer_maxdelay)
				syncer_delayno = 0;
			next = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
			gnext = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
			/*
			 * If the worklist has wrapped since it was
			 * emptied of all but syncer vnodes,
			 * switch to the FINAL_DELAY state and run
			 * for one more second.
			 */
			if (syncer_state == SYNCER_SHUTTING_DOWN &&
			    net_worklist_len == 0 &&
			    last_work_seen == syncer_delayno) {
				syncer_state = SYNCER_FINAL_DELAY;
				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
			}
		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
		    LIST_EMPTY(gslp) && syncer_worklist_len > 0);

		/*
		 * Keep track of the last time there was anything
		 * on the worklist other than syncer vnodes.
		 * Return to the SHUTTING_DOWN state if any
		 * new work appears.
		 */
		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
			last_work_seen = syncer_delayno;
		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
			syncer_state = SYNCER_SHUTTING_DOWN;
		while (!LIST_EMPTY(slp)) {
			error = sync_vnode(slp, &bo, td);
			if (error == 1) {
				LIST_REMOVE(bo, bo_synclist);
				LIST_INSERT_HEAD(next, bo, bo_synclist);
				continue;
			}
#ifdef SW_WATCHDOG
			if (first_printf == 0)
				wdog_kern_pat(WD_LASTVAL);
#endif
		}
		if (!LIST_EMPTY(gslp)) {
			mtx_unlock(&sync_mtx);
			mtx_lock(&Giant);
			mtx_lock(&sync_mtx);
			while (!LIST_EMPTY(gslp)) {
				error = sync_vnode(gslp, &bo, td);
				if (error == 1) {
					LIST_REMOVE(bo, bo_synclist);
					LIST_INSERT_HEAD(gnext, bo,
					    bo_synclist);
					continue;
				}
			}
			mtx_unlock(&Giant);
		}
		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
			syncer_final_iter--;
		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * Just sleep for a short period of time between
		 * iterations when shutting down to allow some I/O
		 * to happen.
		 *
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (syncer_state != SYNCER_RUNNING ||
		    time_uptime == starttime) {
			thread_lock(td);
			sched_prio(td, PPAUSE);
			thread_unlock(td);
		}
		if (syncer_state != SYNCER_RUNNING)
			cv_timedwait(&sync_wakeup, &sync_mtx,
			    hz / SYNCER_SHUTDOWN_SPEEDUP);
		else if (time_uptime == starttime)
			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
	}
}

/*
 * Request the syncer daemon to speed up its work.
1943 * We never push it to speed up more than half of its 1944 * normal turn time, otherwise it could take over the cpu. 1945 */ 1946 int 1947 speedup_syncer(void) 1948 { 1949 int ret = 0; 1950 1951 mtx_lock(&sync_mtx); 1952 if (rushjob < syncdelay / 2) { 1953 rushjob += 1; 1954 stat_rush_requests += 1; 1955 ret = 1; 1956 } 1957 mtx_unlock(&sync_mtx); 1958 cv_broadcast(&sync_wakeup); 1959 return (ret); 1960 } 1961 1962 /* 1963 * Tell the syncer to speed up its work and run though its work 1964 * list several times, then tell it to shut down. 1965 */ 1966 static void 1967 syncer_shutdown(void *arg, int howto) 1968 { 1969 1970 if (howto & RB_NOSYNC) 1971 return; 1972 mtx_lock(&sync_mtx); 1973 syncer_state = SYNCER_SHUTTING_DOWN; 1974 rushjob = 0; 1975 mtx_unlock(&sync_mtx); 1976 cv_broadcast(&sync_wakeup); 1977 kproc_shutdown(arg, howto); 1978 } 1979 1980 /* 1981 * Reassign a buffer from one vnode to another. 1982 * Used to assign file specific control information 1983 * (indirect blocks) to the vnode to which they belong. 1984 */ 1985 void 1986 reassignbuf(struct buf *bp) 1987 { 1988 struct vnode *vp; 1989 struct bufobj *bo; 1990 int delay; 1991 #ifdef INVARIANTS 1992 struct bufv *bv; 1993 #endif 1994 1995 vp = bp->b_vp; 1996 bo = bp->b_bufobj; 1997 ++reassignbufcalls; 1998 1999 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2000 bp, bp->b_vp, bp->b_flags); 2001 /* 2002 * B_PAGING flagged buffers cannot be reassigned because their vp 2003 * is not fully linked in. 2004 */ 2005 if (bp->b_flags & B_PAGING) 2006 panic("cannot reassign paging buffer"); 2007 2008 /* 2009 * Delete from old vnode list, if on one. 2010 */ 2011 BO_LOCK(bo); 2012 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2013 buf_vlist_remove(bp); 2014 else 2015 panic("reassignbuf: Buffer %p not on queue.", bp); 2016 /* 2017 * If dirty, put on list of dirty buffers; otherwise insert onto list 2018 * of clean buffers. 2019 */ 2020 if (bp->b_flags & B_DELWRI) { 2021 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2022 switch (vp->v_type) { 2023 case VDIR: 2024 delay = dirdelay; 2025 break; 2026 case VCHR: 2027 delay = metadelay; 2028 break; 2029 default: 2030 delay = filedelay; 2031 } 2032 vn_syncer_add_to_worklist(bo, delay); 2033 } 2034 buf_vlist_add(bp, bo, BX_VNDIRTY); 2035 } else { 2036 buf_vlist_add(bp, bo, BX_VNCLEAN); 2037 2038 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2039 mtx_lock(&sync_mtx); 2040 LIST_REMOVE(bo, bo_synclist); 2041 syncer_worklist_len--; 2042 mtx_unlock(&sync_mtx); 2043 bo->bo_flag &= ~BO_ONWORKLST; 2044 } 2045 } 2046 #ifdef INVARIANTS 2047 bv = &bo->bo_clean; 2048 bp = TAILQ_FIRST(&bv->bv_hd); 2049 KASSERT(bp == NULL || bp->b_bufobj == bo, 2050 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2051 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2052 KASSERT(bp == NULL || bp->b_bufobj == bo, 2053 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2054 bv = &bo->bo_dirty; 2055 bp = TAILQ_FIRST(&bv->bv_hd); 2056 KASSERT(bp == NULL || bp->b_bufobj == bo, 2057 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2058 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2059 KASSERT(bp == NULL || bp->b_bufobj == bo, 2060 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2061 #endif 2062 BO_UNLOCK(bo); 2063 } 2064 2065 /* 2066 * Increment the use and hold counts on the vnode, taking care to reference 2067 * the driver's usecount if this is a chardev. The vholdl() will remove 2068 * the vnode from the free list if it is presently free. 
Requires the 2069 * vnode interlock and returns with it held. 2070 */ 2071 static void 2072 v_incr_usecount(struct vnode *vp) 2073 { 2074 2075 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2076 vp->v_usecount++; 2077 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2078 dev_lock(); 2079 vp->v_rdev->si_usecount++; 2080 dev_unlock(); 2081 } 2082 vholdl(vp); 2083 } 2084 2085 /* 2086 * Turn a holdcnt into a use+holdcnt such that only one call to 2087 * v_decr_usecount is needed. 2088 */ 2089 static void 2090 v_upgrade_usecount(struct vnode *vp) 2091 { 2092 2093 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2094 vp->v_usecount++; 2095 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2096 dev_lock(); 2097 vp->v_rdev->si_usecount++; 2098 dev_unlock(); 2099 } 2100 } 2101 2102 /* 2103 * Decrement the vnode use and hold count along with the driver's usecount 2104 * if this is a chardev. The vdropl() below releases the vnode interlock 2105 * as it may free the vnode. 2106 */ 2107 static void 2108 v_decr_usecount(struct vnode *vp) 2109 { 2110 2111 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2112 VNASSERT(vp->v_usecount > 0, vp, 2113 ("v_decr_usecount: negative usecount")); 2114 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2115 vp->v_usecount--; 2116 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2117 dev_lock(); 2118 vp->v_rdev->si_usecount--; 2119 dev_unlock(); 2120 } 2121 vdropl(vp); 2122 } 2123 2124 /* 2125 * Decrement only the use count and driver use count. This is intended to 2126 * be paired with a follow on vdropl() to release the remaining hold count. 2127 * In this way we may vgone() a vnode with a 0 usecount without risk of 2128 * having it end up on a free list because the hold count is kept above 0. 2129 */ 2130 static void 2131 v_decr_useonly(struct vnode *vp) 2132 { 2133 2134 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2135 VNASSERT(vp->v_usecount > 0, vp, 2136 ("v_decr_useonly: negative usecount")); 2137 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2138 vp->v_usecount--; 2139 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2140 dev_lock(); 2141 vp->v_rdev->si_usecount--; 2142 dev_unlock(); 2143 } 2144 } 2145 2146 /* 2147 * Grab a particular vnode from the free list, increment its 2148 * reference count and lock it. VI_DOOMED is set if the vnode 2149 * is being destroyed. Only callers who specify LK_RETRY will 2150 * see doomed vnodes. If inactive processing was delayed in 2151 * vput try to do it here. 2152 */ 2153 int 2154 vget(struct vnode *vp, int flags, struct thread *td) 2155 { 2156 int error; 2157 2158 error = 0; 2159 VFS_ASSERT_GIANT(vp->v_mount); 2160 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2161 ("vget: invalid lock operation")); 2162 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2163 2164 if ((flags & LK_INTERLOCK) == 0) 2165 VI_LOCK(vp); 2166 vholdl(vp); 2167 if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) { 2168 vdrop(vp); 2169 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2170 vp); 2171 return (error); 2172 } 2173 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0) 2174 panic("vget: vn_lock failed to return ENOENT\n"); 2175 VI_LOCK(vp); 2176 /* Upgrade our holdcnt to a usecount. */ 2177 v_upgrade_usecount(vp); 2178 /* 2179 * We don't guarantee that any particular close will 2180 * trigger inactive processing so just make a best effort 2181 * here at preventing a reference to a removed file. If 2182 * we don't succeed no harm is done. 
2183 */ 2184 if (vp->v_iflag & VI_OWEINACT) { 2185 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2186 (flags & LK_NOWAIT) == 0) 2187 vinactive(vp, td); 2188 vp->v_iflag &= ~VI_OWEINACT; 2189 } 2190 VI_UNLOCK(vp); 2191 return (0); 2192 } 2193 2194 /* 2195 * Increase the reference count of a vnode. 2196 */ 2197 void 2198 vref(struct vnode *vp) 2199 { 2200 2201 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2202 VI_LOCK(vp); 2203 v_incr_usecount(vp); 2204 VI_UNLOCK(vp); 2205 } 2206 2207 /* 2208 * Return reference count of a vnode. 2209 * 2210 * The results of this call are only guaranteed when some mechanism other 2211 * than the VI lock is used to stop other processes from gaining references 2212 * to the vnode. This may be the case if the caller holds the only reference. 2213 * This is also useful when stale data is acceptable as race conditions may 2214 * be accounted for by some other means. 2215 */ 2216 int 2217 vrefcnt(struct vnode *vp) 2218 { 2219 int usecnt; 2220 2221 VI_LOCK(vp); 2222 usecnt = vp->v_usecount; 2223 VI_UNLOCK(vp); 2224 2225 return (usecnt); 2226 } 2227 2228 #define VPUTX_VRELE 1 2229 #define VPUTX_VPUT 2 2230 #define VPUTX_VUNREF 3 2231 2232 static void 2233 vputx(struct vnode *vp, int func) 2234 { 2235 int error; 2236 2237 KASSERT(vp != NULL, ("vputx: null vp")); 2238 if (func == VPUTX_VUNREF) 2239 ASSERT_VOP_LOCKED(vp, "vunref"); 2240 else if (func == VPUTX_VPUT) 2241 ASSERT_VOP_LOCKED(vp, "vput"); 2242 else 2243 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); 2244 VFS_ASSERT_GIANT(vp->v_mount); 2245 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2246 VI_LOCK(vp); 2247 2248 /* Skip this v_writecount check if we're going to panic below. */ 2249 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp, 2250 ("vputx: missed vn_close")); 2251 error = 0; 2252 2253 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) && 2254 vp->v_usecount == 1)) { 2255 if (func == VPUTX_VPUT) 2256 VOP_UNLOCK(vp, 0); 2257 v_decr_usecount(vp); 2258 return; 2259 } 2260 2261 if (vp->v_usecount != 1) { 2262 vprint("vputx: negative ref count", vp); 2263 panic("vputx: negative ref cnt"); 2264 } 2265 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); 2266 /* 2267 * We want to hold the vnode until the inactive finishes to 2268 * prevent vgone() races. We drop the use count here and the 2269 * hold count below when we're done. 2270 */ 2271 v_decr_useonly(vp); 2272 /* 2273 * We must call VOP_INACTIVE with the node locked. Mark 2274 * as VI_DOINGINACT to avoid recursion. 2275 */ 2276 vp->v_iflag |= VI_OWEINACT; 2277 switch (func) { 2278 case VPUTX_VRELE: 2279 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 2280 VI_LOCK(vp); 2281 break; 2282 case VPUTX_VPUT: 2283 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 2284 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 2285 LK_NOWAIT); 2286 VI_LOCK(vp); 2287 } 2288 break; 2289 case VPUTX_VUNREF: 2290 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 2291 error = EBUSY; 2292 break; 2293 } 2294 if (vp->v_usecount > 0) 2295 vp->v_iflag &= ~VI_OWEINACT; 2296 if (error == 0) { 2297 if (vp->v_iflag & VI_OWEINACT) 2298 vinactive(vp, curthread); 2299 if (func != VPUTX_VUNREF) 2300 VOP_UNLOCK(vp, 0); 2301 } 2302 vdropl(vp); 2303 } 2304 2305 /* 2306 * Vnode put/release. 2307 * If count drops to zero, call inactive routine and return to freelist. 2308 */ 2309 void 2310 vrele(struct vnode *vp) 2311 { 2312 2313 vputx(vp, VPUTX_VRELE); 2314 } 2315 2316 /* 2317 * Release an already locked vnode. 
This gives the same effect as 2318 * unlock+vrele(), but takes less time and avoids releasing and 2319 * re-acquiring the lock (as vrele() acquires the lock internally.) 2320 */ 2321 void 2322 vput(struct vnode *vp) 2323 { 2324 2325 vputx(vp, VPUTX_VPUT); 2326 } 2327 2328 /* 2329 * Release an exclusively locked vnode. Do not unlock the vnode lock. 2330 */ 2331 void 2332 vunref(struct vnode *vp) 2333 { 2334 2335 vputx(vp, VPUTX_VUNREF); 2336 } 2337 2338 /* 2339 * Somebody doesn't want the vnode recycled. 2340 */ 2341 void 2342 vhold(struct vnode *vp) 2343 { 2344 2345 VI_LOCK(vp); 2346 vholdl(vp); 2347 VI_UNLOCK(vp); 2348 } 2349 2350 void 2351 vholdl(struct vnode *vp) 2352 { 2353 2354 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2355 vp->v_holdcnt++; 2356 if (VSHOULDBUSY(vp)) 2357 vbusy(vp); 2358 } 2359 2360 /* 2361 * Note that there is one less holder who cares about this vnode. vdrop() is the 2362 * opposite of vhold(). 2363 */ 2364 void 2365 vdrop(struct vnode *vp) 2366 { 2367 2368 VI_LOCK(vp); 2369 vdropl(vp); 2370 } 2371 2372 /* 2373 * Drop the hold count of the vnode. If this is the last reference to 2374 * the vnode, we will free it if it has been vgone'd; otherwise it is 2375 * placed on the free list. 2376 */ 2377 void 2378 vdropl(struct vnode *vp) 2379 { 2380 2381 ASSERT_VI_LOCKED(vp, "vdropl"); 2382 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2383 if (vp->v_holdcnt <= 0) 2384 panic("vdrop: holdcnt %d", vp->v_holdcnt); 2385 vp->v_holdcnt--; 2386 if (vp->v_holdcnt == 0) { 2387 if (vp->v_iflag & VI_DOOMED) { 2388 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, 2389 vp); 2390 vdestroy(vp); 2391 return; 2392 } else 2393 vfree(vp); 2394 } 2395 VI_UNLOCK(vp); 2396 } 2397 2398 /* 2399 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 2400 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 2401 * OWEINACT tracks whether a vnode missed a call to inactive due to a 2402 * failed lock upgrade. 2403 */ 2404 static void 2405 vinactive(struct vnode *vp, struct thread *td) 2406 { 2407 2408 ASSERT_VOP_ELOCKED(vp, "vinactive"); 2409 ASSERT_VI_LOCKED(vp, "vinactive"); 2410 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 2411 ("vinactive: recursed on VI_DOINGINACT")); 2412 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2413 vp->v_iflag |= VI_DOINGINACT; 2414 vp->v_iflag &= ~VI_OWEINACT; 2415 VI_UNLOCK(vp); 2416 VOP_INACTIVE(vp, td); 2417 VI_LOCK(vp); 2418 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 2419 ("vinactive: lost VI_DOINGINACT")); 2420 vp->v_iflag &= ~VI_DOINGINACT; 2421 } 2422 2423 /* 2424 * Remove any vnodes in the vnode table belonging to mount point mp. 2425 * 2426 * If FORCECLOSE is not specified, there should not be any active ones; 2427 * return an error if any are found (nb: this is a user error, not a 2428 * system error). If FORCECLOSE is specified, detach any active vnodes 2429 * that are found. 2430 * 2431 * If WRITECLOSE is set, only flush out regular file vnodes open for 2432 * writing. 2433 * 2434 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 2435 * 2436 * `rootrefs' specifies the base reference count for the root vnode 2437 * of this filesystem. The root vnode is considered busy if its 2438 * v_usecount exceeds this value. On a successful return, vflush() 2439 * will call vrele() on the root vnode exactly rootrefs times. 2440 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 2441 * be zero.
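 *
 * As an illustrative, hypothetical example, an unmount path that does not
 * cache a reference to its root vnode might call:
 *
 *	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
 *	if (error != 0)
 *		return (error);
 *
 * while a filesystem that keeps one cached root vnode reference would pass
 * rootrefs == 1 so that the cached reference is released when the flush
 * succeeds. (mntflags here stands for the caller's unmount flags.)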
2442 */ 2443 #ifdef DIAGNOSTIC 2444 static int busyprt = 0; /* print out busy vnodes */ 2445 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 2446 #endif 2447 2448 int 2449 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 2450 { 2451 struct vnode *vp, *mvp, *rootvp = NULL; 2452 struct vattr vattr; 2453 int busy = 0, error; 2454 2455 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 2456 rootrefs, flags); 2457 if (rootrefs > 0) { 2458 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 2459 ("vflush: bad args")); 2460 /* 2461 * Get the filesystem root vnode. We can vput() it 2462 * immediately, since with rootrefs > 0, it won't go away. 2463 */ 2464 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 2465 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 2466 __func__, error); 2467 return (error); 2468 } 2469 vput(rootvp); 2470 } 2471 MNT_ILOCK(mp); 2472 loop: 2473 MNT_VNODE_FOREACH(vp, mp, mvp) { 2474 VI_LOCK(vp); 2475 vholdl(vp); 2476 MNT_IUNLOCK(mp); 2477 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 2478 if (error) { 2479 vdrop(vp); 2480 MNT_ILOCK(mp); 2481 MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); 2482 goto loop; 2483 } 2484 /* 2485 * Skip over a vnodes marked VV_SYSTEM. 2486 */ 2487 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 2488 VOP_UNLOCK(vp, 0); 2489 vdrop(vp); 2490 MNT_ILOCK(mp); 2491 continue; 2492 } 2493 /* 2494 * If WRITECLOSE is set, flush out unlinked but still open 2495 * files (even if open only for reading) and regular file 2496 * vnodes open for writing. 2497 */ 2498 if (flags & WRITECLOSE) { 2499 if (vp->v_object != NULL) { 2500 VM_OBJECT_LOCK(vp->v_object); 2501 vm_object_page_clean(vp->v_object, 0, 0, 0); 2502 VM_OBJECT_UNLOCK(vp->v_object); 2503 } 2504 error = VOP_FSYNC(vp, MNT_WAIT, td); 2505 if (error != 0) { 2506 VOP_UNLOCK(vp, 0); 2507 vdrop(vp); 2508 MNT_VNODE_FOREACH_ABORT(mp, mvp); 2509 return (error); 2510 } 2511 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 2512 VI_LOCK(vp); 2513 2514 if ((vp->v_type == VNON || 2515 (error == 0 && vattr.va_nlink > 0)) && 2516 (vp->v_writecount == 0 || vp->v_type != VREG)) { 2517 VOP_UNLOCK(vp, 0); 2518 vdropl(vp); 2519 MNT_ILOCK(mp); 2520 continue; 2521 } 2522 } else 2523 VI_LOCK(vp); 2524 /* 2525 * With v_usecount == 0, all we need to do is clear out the 2526 * vnode data structures and we are done. 2527 * 2528 * If FORCECLOSE is set, forcibly close the vnode. 2529 */ 2530 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 2531 VNASSERT(vp->v_usecount == 0 || 2532 (vp->v_type != VCHR && vp->v_type != VBLK), vp, 2533 ("device VNODE %p is FORCECLOSED", vp)); 2534 vgonel(vp); 2535 } else { 2536 busy++; 2537 #ifdef DIAGNOSTIC 2538 if (busyprt) 2539 vprint("vflush: busy vnode", vp); 2540 #endif 2541 } 2542 VOP_UNLOCK(vp, 0); 2543 vdropl(vp); 2544 MNT_ILOCK(mp); 2545 } 2546 MNT_IUNLOCK(mp); 2547 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 2548 /* 2549 * If just the root vnode is busy, and if its refcount 2550 * is equal to `rootrefs', then go ahead and kill it. 
2551 */ 2552 VI_LOCK(rootvp); 2553 KASSERT(busy > 0, ("vflush: not busy")); 2554 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 2555 ("vflush: usecount %d < rootrefs %d", 2556 rootvp->v_usecount, rootrefs)); 2557 if (busy == 1 && rootvp->v_usecount == rootrefs) { 2558 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 2559 vgone(rootvp); 2560 VOP_UNLOCK(rootvp, 0); 2561 busy = 0; 2562 } else 2563 VI_UNLOCK(rootvp); 2564 } 2565 if (busy) { 2566 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 2567 busy); 2568 return (EBUSY); 2569 } 2570 for (; rootrefs > 0; rootrefs--) 2571 vrele(rootvp); 2572 return (0); 2573 } 2574 2575 /* 2576 * Recycle an unused vnode to the front of the free list. 2577 */ 2578 int 2579 vrecycle(struct vnode *vp, struct thread *td) 2580 { 2581 int recycled; 2582 2583 ASSERT_VOP_ELOCKED(vp, "vrecycle"); 2584 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2585 recycled = 0; 2586 VI_LOCK(vp); 2587 if (vp->v_usecount == 0) { 2588 recycled = 1; 2589 vgonel(vp); 2590 } 2591 VI_UNLOCK(vp); 2592 return (recycled); 2593 } 2594 2595 /* 2596 * Eliminate all activity associated with a vnode 2597 * in preparation for reuse. 2598 */ 2599 void 2600 vgone(struct vnode *vp) 2601 { 2602 VI_LOCK(vp); 2603 vgonel(vp); 2604 VI_UNLOCK(vp); 2605 } 2606 2607 /* 2608 * vgone, with the vp interlock held. 2609 */ 2610 void 2611 vgonel(struct vnode *vp) 2612 { 2613 struct thread *td; 2614 int oweinact; 2615 int active; 2616 struct mount *mp; 2617 2618 ASSERT_VOP_ELOCKED(vp, "vgonel"); 2619 ASSERT_VI_LOCKED(vp, "vgonel"); 2620 VNASSERT(vp->v_holdcnt, vp, 2621 ("vgonel: vp %p has no reference.", vp)); 2622 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2623 td = curthread; 2624 2625 /* 2626 * Don't vgonel if we're already doomed. 2627 */ 2628 if (vp->v_iflag & VI_DOOMED) 2629 return; 2630 vp->v_iflag |= VI_DOOMED; 2631 /* 2632 * Check to see if the vnode is in use. If so, we have to call 2633 * VOP_CLOSE() and VOP_INACTIVE(). 2634 */ 2635 active = vp->v_usecount; 2636 oweinact = (vp->v_iflag & VI_OWEINACT); 2637 VI_UNLOCK(vp); 2638 /* 2639 * Clean out any buffers associated with the vnode. 2640 * If the flush fails, just toss the buffers. 2641 */ 2642 mp = NULL; 2643 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 2644 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 2645 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) 2646 vinvalbuf(vp, 0, 0, 0); 2647 2648 /* 2649 * If purging an active vnode, it must be closed and 2650 * deactivated before being reclaimed. 2651 */ 2652 if (active) 2653 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 2654 if (oweinact || active) { 2655 VI_LOCK(vp); 2656 if ((vp->v_iflag & VI_DOINGINACT) == 0) 2657 vinactive(vp, td); 2658 VI_UNLOCK(vp); 2659 } 2660 if (vp->v_type == VSOCK) 2661 vfs_unp_reclaim(vp); 2662 /* 2663 * Reclaim the vnode. 2664 */ 2665 if (VOP_RECLAIM(vp, td)) 2666 panic("vgone: cannot reclaim"); 2667 if (mp != NULL) 2668 vn_finished_secondary_write(mp); 2669 VNASSERT(vp->v_object == NULL, vp, 2670 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag)); 2671 /* 2672 * Clear the advisory locks and wake up waiting threads. 2673 */ 2674 (void)VOP_ADVLOCKPURGE(vp); 2675 /* 2676 * Delete from old mount point vnode list. 2677 */ 2678 delmntque(vp); 2679 cache_purge(vp); 2680 /* 2681 * Done with purge, reset to the standard lock and invalidate 2682 * the vnode. 
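 * Once v_op points at dead_vnodeops, most further operations on the vnode
 * simply return an error, which is what any caller still holding a stale
 * reference will observe until the vnode is eventually reused.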
2683 */ 2684 VI_LOCK(vp); 2685 vp->v_vnlock = &vp->v_lock; 2686 vp->v_op = &dead_vnodeops; 2687 vp->v_tag = "none"; 2688 vp->v_type = VBAD; 2689 } 2690 2691 /* 2692 * Calculate the total number of references to a special device. 2693 */ 2694 int 2695 vcount(struct vnode *vp) 2696 { 2697 int count; 2698 2699 dev_lock(); 2700 count = vp->v_rdev->si_usecount; 2701 dev_unlock(); 2702 return (count); 2703 } 2704 2705 /* 2706 * Same as above, but using the struct cdev *as argument 2707 */ 2708 int 2709 count_dev(struct cdev *dev) 2710 { 2711 int count; 2712 2713 dev_lock(); 2714 count = dev->si_usecount; 2715 dev_unlock(); 2716 return(count); 2717 } 2718 2719 /* 2720 * Print out a description of a vnode. 2721 */ 2722 static char *typename[] = 2723 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 2724 "VMARKER"}; 2725 2726 void 2727 vn_printf(struct vnode *vp, const char *fmt, ...) 2728 { 2729 va_list ap; 2730 char buf[256], buf2[16]; 2731 u_long flags; 2732 2733 va_start(ap, fmt); 2734 vprintf(fmt, ap); 2735 va_end(ap); 2736 printf("%p: ", (void *)vp); 2737 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]); 2738 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n", 2739 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere); 2740 buf[0] = '\0'; 2741 buf[1] = '\0'; 2742 if (vp->v_vflag & VV_ROOT) 2743 strlcat(buf, "|VV_ROOT", sizeof(buf)); 2744 if (vp->v_vflag & VV_ISTTY) 2745 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 2746 if (vp->v_vflag & VV_NOSYNC) 2747 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 2748 if (vp->v_vflag & VV_CACHEDLABEL) 2749 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 2750 if (vp->v_vflag & VV_TEXT) 2751 strlcat(buf, "|VV_TEXT", sizeof(buf)); 2752 if (vp->v_vflag & VV_COPYONWRITE) 2753 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 2754 if (vp->v_vflag & VV_SYSTEM) 2755 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 2756 if (vp->v_vflag & VV_PROCDEP) 2757 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 2758 if (vp->v_vflag & VV_NOKNOTE) 2759 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 2760 if (vp->v_vflag & VV_DELETED) 2761 strlcat(buf, "|VV_DELETED", sizeof(buf)); 2762 if (vp->v_vflag & VV_MD) 2763 strlcat(buf, "|VV_MD", sizeof(buf)); 2764 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | 2765 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 2766 VV_NOKNOTE | VV_DELETED | VV_MD); 2767 if (flags != 0) { 2768 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 2769 strlcat(buf, buf2, sizeof(buf)); 2770 } 2771 if (vp->v_iflag & VI_MOUNT) 2772 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 2773 if (vp->v_iflag & VI_AGE) 2774 strlcat(buf, "|VI_AGE", sizeof(buf)); 2775 if (vp->v_iflag & VI_DOOMED) 2776 strlcat(buf, "|VI_DOOMED", sizeof(buf)); 2777 if (vp->v_iflag & VI_FREE) 2778 strlcat(buf, "|VI_FREE", sizeof(buf)); 2779 if (vp->v_iflag & VI_DOINGINACT) 2780 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 2781 if (vp->v_iflag & VI_OWEINACT) 2782 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 2783 flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE | 2784 VI_DOINGINACT | VI_OWEINACT); 2785 if (flags != 0) { 2786 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 2787 strlcat(buf, buf2, sizeof(buf)); 2788 } 2789 printf(" flags (%s)\n", buf + 1); 2790 if (mtx_owned(VI_MTX(vp))) 2791 printf(" VI_LOCKed"); 2792 if (vp->v_object != NULL) 2793 printf(" v_object %p ref %d pages %d\n", 2794 vp->v_object, vp->v_object->ref_count, 2795 vp->v_object->resident_page_count); 2796 printf(" "); 2797 
lockmgr_printinfo(vp->v_vnlock); 2798 if (vp->v_data != NULL) 2799 VOP_PRINT(vp); 2800 } 2801 2802 #ifdef DDB 2803 /* 2804 * List all of the locked vnodes in the system. 2805 * Called when debugging the kernel. 2806 */ 2807 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 2808 { 2809 struct mount *mp, *nmp; 2810 struct vnode *vp; 2811 2812 /* 2813 * Note: because this is DDB, we can't obey the locking semantics 2814 * for these structures, which means we could catch an inconsistent 2815 * state and dereference a nasty pointer. Not much to be done 2816 * about that. 2817 */ 2818 db_printf("Locked vnodes\n"); 2819 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 2820 nmp = TAILQ_NEXT(mp, mnt_list); 2821 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 2822 if (vp->v_type != VMARKER && 2823 VOP_ISLOCKED(vp)) 2824 vprint("", vp); 2825 } 2826 nmp = TAILQ_NEXT(mp, mnt_list); 2827 } 2828 } 2829 2830 /* 2831 * Show details about the given vnode. 2832 */ 2833 DB_SHOW_COMMAND(vnode, db_show_vnode) 2834 { 2835 struct vnode *vp; 2836 2837 if (!have_addr) 2838 return; 2839 vp = (struct vnode *)addr; 2840 vn_printf(vp, "vnode "); 2841 } 2842 2843 /* 2844 * Show details about the given mount point. 2845 */ 2846 DB_SHOW_COMMAND(mount, db_show_mount) 2847 { 2848 struct mount *mp; 2849 struct vfsopt *opt; 2850 struct statfs *sp; 2851 struct vnode *vp; 2852 char buf[512]; 2853 uint64_t mflags; 2854 u_int flags; 2855 2856 if (!have_addr) { 2857 /* No address given, print short info about all mount points. */ 2858 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 2859 db_printf("%p %s on %s (%s)\n", mp, 2860 mp->mnt_stat.f_mntfromname, 2861 mp->mnt_stat.f_mntonname, 2862 mp->mnt_stat.f_fstypename); 2863 if (db_pager_quit) 2864 break; 2865 } 2866 db_printf("\nMore info: show mount <addr>\n"); 2867 return; 2868 } 2869 2870 mp = (struct mount *)addr; 2871 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 2872 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 2873 2874 buf[0] = '\0'; 2875 mflags = mp->mnt_flag; 2876 #define MNT_FLAG(flag) do { \ 2877 if (mflags & (flag)) { \ 2878 if (buf[0] != '\0') \ 2879 strlcat(buf, ", ", sizeof(buf)); \ 2880 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 2881 mflags &= ~(flag); \ 2882 } \ 2883 } while (0) 2884 MNT_FLAG(MNT_RDONLY); 2885 MNT_FLAG(MNT_SYNCHRONOUS); 2886 MNT_FLAG(MNT_NOEXEC); 2887 MNT_FLAG(MNT_NOSUID); 2888 MNT_FLAG(MNT_UNION); 2889 MNT_FLAG(MNT_ASYNC); 2890 MNT_FLAG(MNT_SUIDDIR); 2891 MNT_FLAG(MNT_SOFTDEP); 2892 MNT_FLAG(MNT_SUJ); 2893 MNT_FLAG(MNT_NOSYMFOLLOW); 2894 MNT_FLAG(MNT_GJOURNAL); 2895 MNT_FLAG(MNT_MULTILABEL); 2896 MNT_FLAG(MNT_ACLS); 2897 MNT_FLAG(MNT_NOATIME); 2898 MNT_FLAG(MNT_NOCLUSTERR); 2899 MNT_FLAG(MNT_NOCLUSTERW); 2900 MNT_FLAG(MNT_NFS4ACLS); 2901 MNT_FLAG(MNT_EXRDONLY); 2902 MNT_FLAG(MNT_EXPORTED); 2903 MNT_FLAG(MNT_DEFEXPORTED); 2904 MNT_FLAG(MNT_EXPORTANON); 2905 MNT_FLAG(MNT_EXKERB); 2906 MNT_FLAG(MNT_EXPUBLIC); 2907 MNT_FLAG(MNT_LOCAL); 2908 MNT_FLAG(MNT_QUOTA); 2909 MNT_FLAG(MNT_ROOTFS); 2910 MNT_FLAG(MNT_USER); 2911 MNT_FLAG(MNT_IGNORE); 2912 MNT_FLAG(MNT_UPDATE); 2913 MNT_FLAG(MNT_DELEXPORT); 2914 MNT_FLAG(MNT_RELOAD); 2915 MNT_FLAG(MNT_FORCE); 2916 MNT_FLAG(MNT_SNAPSHOT); 2917 MNT_FLAG(MNT_BYFSID); 2918 #undef MNT_FLAG 2919 if (mflags != 0) { 2920 if (buf[0] != '\0') 2921 strlcat(buf, ", ", sizeof(buf)); 2922 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 2923 "0x%016jx", mflags); 2924 } 2925 db_printf(" mnt_flag = %s\n", buf); 2926 2927 buf[0] = '\0'; 2928 flags = mp->mnt_kern_flag; 2929 #define 
MNT_KERN_FLAG(flag) do { \ 2930 if (flags & (flag)) { \ 2931 if (buf[0] != '\0') \ 2932 strlcat(buf, ", ", sizeof(buf)); \ 2933 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 2934 flags &= ~(flag); \ 2935 } \ 2936 } while (0) 2937 MNT_KERN_FLAG(MNTK_UNMOUNTF); 2938 MNT_KERN_FLAG(MNTK_ASYNC); 2939 MNT_KERN_FLAG(MNTK_SOFTDEP); 2940 MNT_KERN_FLAG(MNTK_NOINSMNTQ); 2941 MNT_KERN_FLAG(MNTK_DRAINING); 2942 MNT_KERN_FLAG(MNTK_REFEXPIRE); 2943 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 2944 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 2945 MNT_KERN_FLAG(MNTK_NOASYNC); 2946 MNT_KERN_FLAG(MNTK_UNMOUNT); 2947 MNT_KERN_FLAG(MNTK_MWAIT); 2948 MNT_KERN_FLAG(MNTK_SUSPEND); 2949 MNT_KERN_FLAG(MNTK_SUSPEND2); 2950 MNT_KERN_FLAG(MNTK_SUSPENDED); 2951 MNT_KERN_FLAG(MNTK_MPSAFE); 2952 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 2953 MNT_KERN_FLAG(MNTK_NOKNOTE); 2954 #undef MNT_KERN_FLAG 2955 if (flags != 0) { 2956 if (buf[0] != '\0') 2957 strlcat(buf, ", ", sizeof(buf)); 2958 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 2959 "0x%08x", flags); 2960 } 2961 db_printf(" mnt_kern_flag = %s\n", buf); 2962 2963 db_printf(" mnt_opt = "); 2964 opt = TAILQ_FIRST(mp->mnt_opt); 2965 if (opt != NULL) { 2966 db_printf("%s", opt->name); 2967 opt = TAILQ_NEXT(opt, link); 2968 while (opt != NULL) { 2969 db_printf(", %s", opt->name); 2970 opt = TAILQ_NEXT(opt, link); 2971 } 2972 } 2973 db_printf("\n"); 2974 2975 sp = &mp->mnt_stat; 2976 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 2977 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 2978 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 2979 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 2980 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 2981 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 2982 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 2983 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 2984 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 2985 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 2986 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 2987 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 2988 2989 db_printf(" mnt_cred = { uid=%u ruid=%u", 2990 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 2991 if (jailed(mp->mnt_cred)) 2992 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 2993 db_printf(" }\n"); 2994 db_printf(" mnt_ref = %d\n", mp->mnt_ref); 2995 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 2996 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 2997 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount); 2998 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 2999 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 3000 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 3001 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 3002 db_printf(" mnt_secondary_accwrites = %d\n", 3003 mp->mnt_secondary_accwrites); 3004 db_printf(" mnt_gjprovider = %s\n", 3005 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 3006 db_printf("\n"); 3007 3008 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3009 if (vp->v_type != VMARKER) { 3010 vn_printf(vp, "vnode "); 3011 if (db_pager_quit) 3012 break; 3013 } 3014 } 3015 } 3016 #endif /* DDB */ 3017 3018 /* 3019 * Fill in a struct xvfsconf based on a struct vfsconf. 
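 * The resulting records are what userland readers such as getvfsbyname(3)
 * are expected to consume through the vfs.conflist sysctl defined below.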
3020 */ 3021 static void 3022 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp) 3023 { 3024 3025 strcpy(xvfsp->vfc_name, vfsp->vfc_name); 3026 xvfsp->vfc_typenum = vfsp->vfc_typenum; 3027 xvfsp->vfc_refcount = vfsp->vfc_refcount; 3028 xvfsp->vfc_flags = vfsp->vfc_flags; 3029 /* 3030 * These are unused in userland, we keep them 3031 * to not break binary compatibility. 3032 */ 3033 xvfsp->vfc_vfsops = NULL; 3034 xvfsp->vfc_next = NULL; 3035 } 3036 3037 /* 3038 * Top level filesystem related information gathering. 3039 */ 3040 static int 3041 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 3042 { 3043 struct vfsconf *vfsp; 3044 struct xvfsconf xvfsp; 3045 int error; 3046 3047 error = 0; 3048 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3049 bzero(&xvfsp, sizeof(xvfsp)); 3050 vfsconf2x(vfsp, &xvfsp); 3051 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp); 3052 if (error) 3053 break; 3054 } 3055 return (error); 3056 } 3057 3058 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD, 3059 NULL, 0, sysctl_vfs_conflist, 3060 "S,xvfsconf", "List of all configured filesystems"); 3061 3062 #ifndef BURN_BRIDGES 3063 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 3064 3065 static int 3066 vfs_sysctl(SYSCTL_HANDLER_ARGS) 3067 { 3068 int *name = (int *)arg1 - 1; /* XXX */ 3069 u_int namelen = arg2 + 1; /* XXX */ 3070 struct vfsconf *vfsp; 3071 struct xvfsconf xvfsp; 3072 3073 log(LOG_WARNING, "userland calling deprecated sysctl, " 3074 "please rebuild world\n"); 3075 3076 #if 1 || defined(COMPAT_PRELITE2) 3077 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */ 3078 if (namelen == 1) 3079 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 3080 #endif 3081 3082 switch (name[1]) { 3083 case VFS_MAXTYPENUM: 3084 if (namelen != 2) 3085 return (ENOTDIR); 3086 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 3087 case VFS_CONF: 3088 if (namelen != 3) 3089 return (ENOTDIR); /* overloaded */ 3090 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) 3091 if (vfsp->vfc_typenum == name[2]) 3092 break; 3093 if (vfsp == NULL) 3094 return (EOPNOTSUPP); 3095 bzero(&xvfsp, sizeof(xvfsp)); 3096 vfsconf2x(vfsp, &xvfsp); 3097 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 3098 } 3099 return (EOPNOTSUPP); 3100 } 3101 3102 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, 3103 vfs_sysctl, "Generic filesystem"); 3104 3105 #if 1 || defined(COMPAT_PRELITE2) 3106 3107 static int 3108 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 3109 { 3110 int error; 3111 struct vfsconf *vfsp; 3112 struct ovfsconf ovfs; 3113 3114 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 3115 bzero(&ovfs, sizeof(ovfs)); 3116 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 3117 strcpy(ovfs.vfc_name, vfsp->vfc_name); 3118 ovfs.vfc_index = vfsp->vfc_typenum; 3119 ovfs.vfc_refcount = vfsp->vfc_refcount; 3120 ovfs.vfc_flags = vfsp->vfc_flags; 3121 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 3122 if (error) 3123 return error; 3124 } 3125 return 0; 3126 } 3127 3128 #endif /* 1 || COMPAT_PRELITE2 */ 3129 #endif /* !BURN_BRIDGES */ 3130 3131 #define KINFO_VNODESLOP 10 3132 #ifdef notyet 3133 /* 3134 * Dump vnode list (via sysctl). 3135 */ 3136 /* ARGSUSED */ 3137 static int 3138 sysctl_vnode(SYSCTL_HANDLER_ARGS) 3139 { 3140 struct xvnode *xvn; 3141 struct mount *mp; 3142 struct vnode *vp; 3143 int error, len, n; 3144 3145 /* 3146 * Stale numvnodes access is not fatal here. 
3147 */ 3148 req->lock = 0; 3149 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 3150 if (!req->oldptr) 3151 /* Make an estimate */ 3152 return (SYSCTL_OUT(req, 0, len)); 3153 3154 error = sysctl_wire_old_buffer(req, 0); 3155 if (error != 0) 3156 return (error); 3157 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 3158 n = 0; 3159 mtx_lock(&mountlist_mtx); 3160 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3161 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 3162 continue; 3163 MNT_ILOCK(mp); 3164 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3165 if (n == len) 3166 break; 3167 vref(vp); 3168 xvn[n].xv_size = sizeof *xvn; 3169 xvn[n].xv_vnode = vp; 3170 xvn[n].xv_id = 0; /* XXX compat */ 3171 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 3172 XV_COPY(usecount); 3173 XV_COPY(writecount); 3174 XV_COPY(holdcnt); 3175 XV_COPY(mount); 3176 XV_COPY(numoutput); 3177 XV_COPY(type); 3178 #undef XV_COPY 3179 xvn[n].xv_flag = vp->v_vflag; 3180 3181 switch (vp->v_type) { 3182 case VREG: 3183 case VDIR: 3184 case VLNK: 3185 break; 3186 case VBLK: 3187 case VCHR: 3188 if (vp->v_rdev == NULL) { 3189 vrele(vp); 3190 continue; 3191 } 3192 xvn[n].xv_dev = dev2udev(vp->v_rdev); 3193 break; 3194 case VSOCK: 3195 xvn[n].xv_socket = vp->v_socket; 3196 break; 3197 case VFIFO: 3198 xvn[n].xv_fifo = vp->v_fifoinfo; 3199 break; 3200 case VNON: 3201 case VBAD: 3202 default: 3203 /* shouldn't happen? */ 3204 vrele(vp); 3205 continue; 3206 } 3207 vrele(vp); 3208 ++n; 3209 } 3210 MNT_IUNLOCK(mp); 3211 mtx_lock(&mountlist_mtx); 3212 vfs_unbusy(mp); 3213 if (n == len) 3214 break; 3215 } 3216 mtx_unlock(&mountlist_mtx); 3217 3218 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 3219 free(xvn, M_TEMP); 3220 return (error); 3221 } 3222 3223 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD, 3224 0, 0, sysctl_vnode, "S,xvnode", ""); 3225 #endif 3226 3227 /* 3228 * Unmount all filesystems. The list is traversed in reverse order 3229 * of mounting to avoid dependencies. 3230 */ 3231 void 3232 vfs_unmountall(void) 3233 { 3234 struct mount *mp; 3235 struct thread *td; 3236 int error; 3237 3238 KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread")); 3239 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 3240 td = curthread; 3241 3242 /* 3243 * Since this only runs when rebooting, it is not interlocked. 3244 */ 3245 while(!TAILQ_EMPTY(&mountlist)) { 3246 mp = TAILQ_LAST(&mountlist, mntlist); 3247 error = dounmount(mp, MNT_FORCE, td); 3248 if (error) { 3249 TAILQ_REMOVE(&mountlist, mp, mnt_list); 3250 /* 3251 * XXX: Due to the way in which we mount the root 3252 * file system off of devfs, devfs will generate a 3253 * "busy" warning when we try to unmount it before 3254 * the root. Don't print a warning as a result in 3255 * order to avoid false positive errors that may 3256 * cause needless upset. 3257 */ 3258 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) { 3259 printf("unmount of %s failed (", 3260 mp->mnt_stat.f_mntonname); 3261 if (error == EBUSY) 3262 printf("BUSY)\n"); 3263 else 3264 printf("%d)\n", error); 3265 } 3266 } else { 3267 /* The unmount has removed mp from the mountlist */ 3268 } 3269 } 3270 } 3271 3272 /* 3273 * perform msync on all vnodes under a mount point 3274 * the mount point must be locked. 
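 * This is how dirty memory-mapped pages find their way back to their
 * files: the syncer's sync_fsync() below calls it with MNT_NOWAIT on each
 * lazy sync pass, while callers that need everything on disk pass
 * MNT_WAIT, which cleans the pages synchronously instead of merely
 * starting the writes.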
3275 */ 3276 void 3277 vfs_msync(struct mount *mp, int flags) 3278 { 3279 struct vnode *vp, *mvp; 3280 struct vm_object *obj; 3281 3282 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 3283 MNT_ILOCK(mp); 3284 MNT_VNODE_FOREACH(vp, mp, mvp) { 3285 VI_LOCK(vp); 3286 obj = vp->v_object; 3287 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 && 3288 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) { 3289 MNT_IUNLOCK(mp); 3290 if (!vget(vp, 3291 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, 3292 curthread)) { 3293 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */ 3294 vput(vp); 3295 MNT_ILOCK(mp); 3296 continue; 3297 } 3298 3299 obj = vp->v_object; 3300 if (obj != NULL) { 3301 VM_OBJECT_LOCK(obj); 3302 vm_object_page_clean(obj, 0, 0, 3303 flags == MNT_WAIT ? 3304 OBJPC_SYNC : OBJPC_NOSYNC); 3305 VM_OBJECT_UNLOCK(obj); 3306 } 3307 vput(vp); 3308 } 3309 MNT_ILOCK(mp); 3310 } else 3311 VI_UNLOCK(vp); 3312 } 3313 MNT_IUNLOCK(mp); 3314 } 3315 3316 /* 3317 * Mark a vnode as free, putting it up for recycling. 3318 */ 3319 static void 3320 vfree(struct vnode *vp) 3321 { 3322 3323 ASSERT_VI_LOCKED(vp, "vfree"); 3324 mtx_lock(&vnode_free_list_mtx); 3325 VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed.")); 3326 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free")); 3327 VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't")); 3328 VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp, 3329 ("vfree: Freeing doomed vnode")); 3330 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3331 if (vp->v_iflag & VI_AGE) { 3332 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); 3333 } else { 3334 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); 3335 } 3336 freevnodes++; 3337 vp->v_iflag &= ~VI_AGE; 3338 vp->v_iflag |= VI_FREE; 3339 mtx_unlock(&vnode_free_list_mtx); 3340 } 3341 3342 /* 3343 * Opposite of vfree() - mark a vnode as in use. 3344 */ 3345 static void 3346 vbusy(struct vnode *vp) 3347 { 3348 ASSERT_VI_LOCKED(vp, "vbusy"); 3349 VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free")); 3350 VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed.")); 3351 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3352 3353 mtx_lock(&vnode_free_list_mtx); 3354 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); 3355 freevnodes--; 3356 vp->v_iflag &= ~(VI_FREE|VI_AGE); 3357 mtx_unlock(&vnode_free_list_mtx); 3358 } 3359 3360 static void 3361 destroy_vpollinfo(struct vpollinfo *vi) 3362 { 3363 seldrain(&vi->vpi_selinfo); 3364 knlist_destroy(&vi->vpi_selinfo.si_note); 3365 mtx_destroy(&vi->vpi_lock); 3366 uma_zfree(vnodepoll_zone, vi); 3367 } 3368 3369 /* 3370 * Initalize per-vnode helper structure to hold poll-related state. 3371 */ 3372 void 3373 v_addpollinfo(struct vnode *vp) 3374 { 3375 struct vpollinfo *vi; 3376 3377 if (vp->v_pollinfo != NULL) 3378 return; 3379 vi = uma_zalloc(vnodepoll_zone, M_WAITOK); 3380 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 3381 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 3382 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 3383 VI_LOCK(vp); 3384 if (vp->v_pollinfo != NULL) { 3385 VI_UNLOCK(vp); 3386 destroy_vpollinfo(vi); 3387 return; 3388 } 3389 vp->v_pollinfo = vi; 3390 VI_UNLOCK(vp); 3391 } 3392 3393 /* 3394 * Record a process's interest in events which might happen to 3395 * a vnode. Because poll uses the historic select-style interface 3396 * internally, this routine serves as both the ``check for any 3397 * pending events'' and the ``record my interest in future events'' 3398 * functions. 
(These are done together, while the lock is held, 3399 * to avoid race conditions.) 3400 */ 3401 int 3402 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 3403 { 3404 3405 v_addpollinfo(vp); 3406 mtx_lock(&vp->v_pollinfo->vpi_lock); 3407 if (vp->v_pollinfo->vpi_revents & events) { 3408 /* 3409 * This leaves events we are not interested 3410 * in available for the other process 3411 * which presumably had requested them 3412 * (otherwise they would never have been 3413 * recorded). 3414 */ 3415 events &= vp->v_pollinfo->vpi_revents; 3416 vp->v_pollinfo->vpi_revents &= ~events; 3417 3418 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3419 return (events); 3420 } 3421 vp->v_pollinfo->vpi_events |= events; 3422 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 3423 mtx_unlock(&vp->v_pollinfo->vpi_lock); 3424 return (0); 3425 } 3426 3427 /* 3428 * Routine to create and manage a filesystem syncer vnode. 3429 */ 3430 #define sync_close ((int (*)(struct vop_close_args *))nullop) 3431 static int sync_fsync(struct vop_fsync_args *); 3432 static int sync_inactive(struct vop_inactive_args *); 3433 static int sync_reclaim(struct vop_reclaim_args *); 3434 3435 static struct vop_vector sync_vnodeops = { 3436 .vop_bypass = VOP_EOPNOTSUPP, 3437 .vop_close = sync_close, /* close */ 3438 .vop_fsync = sync_fsync, /* fsync */ 3439 .vop_inactive = sync_inactive, /* inactive */ 3440 .vop_reclaim = sync_reclaim, /* reclaim */ 3441 .vop_lock1 = vop_stdlock, /* lock */ 3442 .vop_unlock = vop_stdunlock, /* unlock */ 3443 .vop_islocked = vop_stdislocked, /* islocked */ 3444 }; 3445 3446 /* 3447 * Create a new filesystem syncer vnode for the specified mount point. 3448 */ 3449 void 3450 vfs_allocate_syncvnode(struct mount *mp) 3451 { 3452 struct vnode *vp; 3453 struct bufobj *bo; 3454 static long start, incr, next; 3455 int error; 3456 3457 /* Allocate a new vnode */ 3458 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 3459 if (error != 0) 3460 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 3461 vp->v_type = VNON; 3462 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3463 vp->v_vflag |= VV_FORCEINSMQ; 3464 error = insmntque(vp, mp); 3465 if (error != 0) 3466 panic("vfs_allocate_syncvnode: insmntque() failed"); 3467 vp->v_vflag &= ~VV_FORCEINSMQ; 3468 VOP_UNLOCK(vp, 0); 3469 /* 3470 * Place the vnode onto the syncer worklist. We attempt to 3471 * scatter them about on the list so that they will go off 3472 * at evenly distributed times even if all the filesystems 3473 * are mounted at once. 3474 */ 3475 next += incr; 3476 if (next == 0 || next > syncer_maxdelay) { 3477 start /= 2; 3478 incr /= 2; 3479 if (start == 0) { 3480 start = syncer_maxdelay / 2; 3481 incr = syncer_maxdelay; 3482 } 3483 next = start; 3484 } 3485 bo = &vp->v_bufobj; 3486 BO_LOCK(bo); 3487 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 3488 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
*/ 3489 mtx_lock(&sync_mtx); 3490 sync_vnode_count++; 3491 if (mp->mnt_syncer == NULL) { 3492 mp->mnt_syncer = vp; 3493 vp = NULL; 3494 } 3495 mtx_unlock(&sync_mtx); 3496 BO_UNLOCK(bo); 3497 if (vp != NULL) { 3498 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 3499 vgone(vp); 3500 vput(vp); 3501 } 3502 } 3503 3504 void 3505 vfs_deallocate_syncvnode(struct mount *mp) 3506 { 3507 struct vnode *vp; 3508 3509 mtx_lock(&sync_mtx); 3510 vp = mp->mnt_syncer; 3511 if (vp != NULL) 3512 mp->mnt_syncer = NULL; 3513 mtx_unlock(&sync_mtx); 3514 if (vp != NULL) 3515 vrele(vp); 3516 } 3517 3518 /* 3519 * Do a lazy sync of the filesystem. 3520 */ 3521 static int 3522 sync_fsync(struct vop_fsync_args *ap) 3523 { 3524 struct vnode *syncvp = ap->a_vp; 3525 struct mount *mp = syncvp->v_mount; 3526 int error, save; 3527 struct bufobj *bo; 3528 3529 /* 3530 * We only need to do something if this is a lazy evaluation. 3531 */ 3532 if (ap->a_waitfor != MNT_LAZY) 3533 return (0); 3534 3535 /* 3536 * Move ourselves to the back of the sync list. 3537 */ 3538 bo = &syncvp->v_bufobj; 3539 BO_LOCK(bo); 3540 vn_syncer_add_to_worklist(bo, syncdelay); 3541 BO_UNLOCK(bo); 3542 3543 /* 3544 * Walk the list of vnodes pushing all that are dirty and 3545 * not already on the sync list. 3546 */ 3547 mtx_lock(&mountlist_mtx); 3548 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) != 0) { 3549 mtx_unlock(&mountlist_mtx); 3550 return (0); 3551 } 3552 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 3553 vfs_unbusy(mp); 3554 return (0); 3555 } 3556 save = curthread_pflags_set(TDP_SYNCIO); 3557 vfs_msync(mp, MNT_NOWAIT); 3558 error = VFS_SYNC(mp, MNT_LAZY); 3559 curthread_pflags_restore(save); 3560 vn_finished_write(mp); 3561 vfs_unbusy(mp); 3562 return (error); 3563 } 3564 3565 /* 3566 * The syncer vnode is no referenced. 3567 */ 3568 static int 3569 sync_inactive(struct vop_inactive_args *ap) 3570 { 3571 3572 vgone(ap->a_vp); 3573 return (0); 3574 } 3575 3576 /* 3577 * The syncer vnode is no longer needed and is being decommissioned. 3578 * 3579 * Modifications to the worklist must be protected by sync_mtx. 3580 */ 3581 static int 3582 sync_reclaim(struct vop_reclaim_args *ap) 3583 { 3584 struct vnode *vp = ap->a_vp; 3585 struct bufobj *bo; 3586 3587 bo = &vp->v_bufobj; 3588 BO_LOCK(bo); 3589 mtx_lock(&sync_mtx); 3590 if (vp->v_mount->mnt_syncer == vp) 3591 vp->v_mount->mnt_syncer = NULL; 3592 if (bo->bo_flag & BO_ONWORKLST) { 3593 LIST_REMOVE(bo, bo_synclist); 3594 syncer_worklist_len--; 3595 sync_vnode_count--; 3596 bo->bo_flag &= ~BO_ONWORKLST; 3597 } 3598 mtx_unlock(&sync_mtx); 3599 BO_UNLOCK(bo); 3600 3601 return (0); 3602 } 3603 3604 /* 3605 * Check if vnode represents a disk device 3606 */ 3607 int 3608 vn_isdisk(struct vnode *vp, int *errp) 3609 { 3610 int error; 3611 3612 error = 0; 3613 dev_lock(); 3614 if (vp->v_type != VCHR) 3615 error = ENOTBLK; 3616 else if (vp->v_rdev == NULL) 3617 error = ENXIO; 3618 else if (vp->v_rdev->si_devsw == NULL) 3619 error = ENXIO; 3620 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 3621 error = ENOTBLK; 3622 dev_unlock(); 3623 if (errp != NULL) 3624 *errp = error; 3625 return (error == 0); 3626 } 3627 3628 /* 3629 * Common filesystem object access control check routine. Accepts a 3630 * vnode's type, "mode", uid and gid, requested access mode, credentials, 3631 * and optional call-by-reference privused argument allowing vaccess() 3632 * to indicate to the caller whether privilege was used to satisfy the 3633 * request (obsoleted). Returns 0 on success, or an errno on failure. 
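 *
 * A worked example: for a regular file with mode 0640, owner uid 100 and
 * group gid 200, a VREAD request from a credential with cr_uid == 100
 * takes the owner branch below, collects VADMIN, VREAD, VWRITE and VAPPEND
 * into dac_granted, and returns 0 without consulting any privilege; the
 * same request from an unrelated, unprivileged credential falls through to
 * the "other" bits, gains nothing, and fails with EACCES.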
3634 */ 3635 int 3636 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 3637 accmode_t accmode, struct ucred *cred, int *privused) 3638 { 3639 accmode_t dac_granted; 3640 accmode_t priv_granted; 3641 3642 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 3643 ("invalid bit in accmode")); 3644 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 3645 ("VAPPEND without VWRITE")); 3646 3647 /* 3648 * Look for a normal, non-privileged way to access the file/directory 3649 * as requested. If it exists, go with that. 3650 */ 3651 3652 if (privused != NULL) 3653 *privused = 0; 3654 3655 dac_granted = 0; 3656 3657 /* Check the owner. */ 3658 if (cred->cr_uid == file_uid) { 3659 dac_granted |= VADMIN; 3660 if (file_mode & S_IXUSR) 3661 dac_granted |= VEXEC; 3662 if (file_mode & S_IRUSR) 3663 dac_granted |= VREAD; 3664 if (file_mode & S_IWUSR) 3665 dac_granted |= (VWRITE | VAPPEND); 3666 3667 if ((accmode & dac_granted) == accmode) 3668 return (0); 3669 3670 goto privcheck; 3671 } 3672 3673 /* Otherwise, check the groups (first match) */ 3674 if (groupmember(file_gid, cred)) { 3675 if (file_mode & S_IXGRP) 3676 dac_granted |= VEXEC; 3677 if (file_mode & S_IRGRP) 3678 dac_granted |= VREAD; 3679 if (file_mode & S_IWGRP) 3680 dac_granted |= (VWRITE | VAPPEND); 3681 3682 if ((accmode & dac_granted) == accmode) 3683 return (0); 3684 3685 goto privcheck; 3686 } 3687 3688 /* Otherwise, check everyone else. */ 3689 if (file_mode & S_IXOTH) 3690 dac_granted |= VEXEC; 3691 if (file_mode & S_IROTH) 3692 dac_granted |= VREAD; 3693 if (file_mode & S_IWOTH) 3694 dac_granted |= (VWRITE | VAPPEND); 3695 if ((accmode & dac_granted) == accmode) 3696 return (0); 3697 3698 privcheck: 3699 /* 3700 * Build a privilege mask to determine if the set of privileges 3701 * satisfies the requirements when combined with the granted mask 3702 * from above. For each privilege, if the privilege is required, 3703 * bitwise or the request type onto the priv_granted mask. 3704 */ 3705 priv_granted = 0; 3706 3707 if (type == VDIR) { 3708 /* 3709 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 3710 * requests, instead of PRIV_VFS_EXEC. 3711 */ 3712 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3713 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0)) 3714 priv_granted |= VEXEC; 3715 } else { 3716 /* 3717 * Ensure that at least one execute bit is on. Otherwise, 3718 * a privileged user will always succeed, and we don't want 3719 * this to happen unless the file really is executable. 3720 */ 3721 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 3722 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 3723 !priv_check_cred(cred, PRIV_VFS_EXEC, 0)) 3724 priv_granted |= VEXEC; 3725 } 3726 3727 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 3728 !priv_check_cred(cred, PRIV_VFS_READ, 0)) 3729 priv_granted |= VREAD; 3730 3731 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 3732 !priv_check_cred(cred, PRIV_VFS_WRITE, 0)) 3733 priv_granted |= (VWRITE | VAPPEND); 3734 3735 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 3736 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0)) 3737 priv_granted |= VADMIN; 3738 3739 if ((accmode & (priv_granted | dac_granted)) == accmode) { 3740 /* XXX audit: privilege used */ 3741 if (privused != NULL) 3742 *privused = 1; 3743 return (0); 3744 } 3745 3746 return ((accmode & VADMIN) ? EPERM : EACCES); 3747 } 3748 3749 /* 3750 * Credential check based on process requesting service, and per-attribute 3751 * permissions. 
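 * For example, a request against EXTATTR_NAMESPACE_SYSTEM made on behalf
 * of a user process succeeds only if the credential passes the
 * PRIV_VFS_EXTATTR_SYSTEM check (which jailed processes do not), while
 * EXTATTR_NAMESPACE_USER requests are simply handed to VOP_ACCESS() like
 * any other file permission check.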
3752 */ 3753 int 3754 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 3755 struct thread *td, accmode_t accmode) 3756 { 3757 3758 /* 3759 * Kernel-invoked always succeeds. 3760 */ 3761 if (cred == NOCRED) 3762 return (0); 3763 3764 /* 3765 * Do not allow privileged processes in jail to directly manipulate 3766 * system attributes. 3767 */ 3768 switch (attrnamespace) { 3769 case EXTATTR_NAMESPACE_SYSTEM: 3770 /* Potentially should be: return (EPERM); */ 3771 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0)); 3772 case EXTATTR_NAMESPACE_USER: 3773 return (VOP_ACCESS(vp, accmode, cred, td)); 3774 default: 3775 return (EPERM); 3776 } 3777 } 3778 3779 #ifdef DEBUG_VFS_LOCKS 3780 /* 3781 * This only exists to supress warnings from unlocked specfs accesses. It is 3782 * no longer ok to have an unlocked VFS. 3783 */ 3784 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 3785 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 3786 3787 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 3788 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 3789 "Drop into debugger on lock violation"); 3790 3791 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 3792 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 3793 0, "Check for interlock across VOPs"); 3794 3795 int vfs_badlock_print = 1; /* Print lock violations. */ 3796 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 3797 0, "Print lock violations"); 3798 3799 #ifdef KDB 3800 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 3801 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 3802 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 3803 #endif 3804 3805 static void 3806 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 3807 { 3808 3809 #ifdef KDB 3810 if (vfs_badlock_backtrace) 3811 kdb_backtrace(); 3812 #endif 3813 if (vfs_badlock_print) 3814 printf("%s: %p %s\n", str, (void *)vp, msg); 3815 if (vfs_badlock_ddb) 3816 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 3817 } 3818 3819 void 3820 assert_vi_locked(struct vnode *vp, const char *str) 3821 { 3822 3823 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 3824 vfs_badlock("interlock is not locked but should be", str, vp); 3825 } 3826 3827 void 3828 assert_vi_unlocked(struct vnode *vp, const char *str) 3829 { 3830 3831 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 3832 vfs_badlock("interlock is locked but should not be", str, vp); 3833 } 3834 3835 void 3836 assert_vop_locked(struct vnode *vp, const char *str) 3837 { 3838 3839 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0) 3840 vfs_badlock("is not locked but should be", str, vp); 3841 } 3842 3843 void 3844 assert_vop_unlocked(struct vnode *vp, const char *str) 3845 { 3846 3847 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 3848 vfs_badlock("is locked but should not be", str, vp); 3849 } 3850 3851 void 3852 assert_vop_elocked(struct vnode *vp, const char *str) 3853 { 3854 3855 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 3856 vfs_badlock("is not exclusive locked but should be", str, vp); 3857 } 3858 3859 #if 0 3860 void 3861 assert_vop_elocked_other(struct vnode *vp, const char *str) 3862 { 3863 3864 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER) 3865 vfs_badlock("is not exclusive locked by another thread", 3866 str, vp); 3867 } 3868 3869 void 3870 assert_vop_slocked(struct vnode *vp, const char 
*str) 3871 { 3872 3873 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED) 3874 vfs_badlock("is not locked shared but should be", str, vp); 3875 } 3876 #endif /* 0 */ 3877 #endif /* DEBUG_VFS_LOCKS */ 3878 3879 void 3880 vop_rename_fail(struct vop_rename_args *ap) 3881 { 3882 3883 if (ap->a_tvp != NULL) 3884 vput(ap->a_tvp); 3885 if (ap->a_tdvp == ap->a_tvp) 3886 vrele(ap->a_tdvp); 3887 else 3888 vput(ap->a_tdvp); 3889 vrele(ap->a_fdvp); 3890 vrele(ap->a_fvp); 3891 } 3892 3893 void 3894 vop_rename_pre(void *ap) 3895 { 3896 struct vop_rename_args *a = ap; 3897 3898 #ifdef DEBUG_VFS_LOCKS 3899 if (a->a_tvp) 3900 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 3901 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 3902 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 3903 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 3904 3905 /* Check the source (from). */ 3906 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 3907 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 3908 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 3909 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 3910 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 3911 3912 /* Check the target. */ 3913 if (a->a_tvp) 3914 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 3915 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 3916 #endif 3917 if (a->a_tdvp != a->a_fdvp) 3918 vhold(a->a_fdvp); 3919 if (a->a_tvp != a->a_fvp) 3920 vhold(a->a_fvp); 3921 vhold(a->a_tdvp); 3922 if (a->a_tvp) 3923 vhold(a->a_tvp); 3924 } 3925 3926 void 3927 vop_strategy_pre(void *ap) 3928 { 3929 #ifdef DEBUG_VFS_LOCKS 3930 struct vop_strategy_args *a; 3931 struct buf *bp; 3932 3933 a = ap; 3934 bp = a->a_bp; 3935 3936 /* 3937 * Cluster ops lock their component buffers but not the IO container. 
3938 */ 3939 if ((bp->b_flags & B_CLUSTER) != 0) 3940 return; 3941 3942 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 3943 if (vfs_badlock_print) 3944 printf( 3945 "VOP_STRATEGY: bp is not locked but should be\n"); 3946 if (vfs_badlock_ddb) 3947 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 3948 } 3949 #endif 3950 } 3951 3952 void 3953 vop_lookup_pre(void *ap) 3954 { 3955 #ifdef DEBUG_VFS_LOCKS 3956 struct vop_lookup_args *a; 3957 struct vnode *dvp; 3958 3959 a = ap; 3960 dvp = a->a_dvp; 3961 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP"); 3962 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 3963 #endif 3964 } 3965 3966 void 3967 vop_lookup_post(void *ap, int rc) 3968 { 3969 #ifdef DEBUG_VFS_LOCKS 3970 struct vop_lookup_args *a; 3971 struct vnode *dvp; 3972 struct vnode *vp; 3973 3974 a = ap; 3975 dvp = a->a_dvp; 3976 vp = *(a->a_vpp); 3977 3978 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP"); 3979 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 3980 3981 if (!rc) 3982 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)"); 3983 #endif 3984 } 3985 3986 void 3987 vop_lock_pre(void *ap) 3988 { 3989 #ifdef DEBUG_VFS_LOCKS 3990 struct vop_lock1_args *a = ap; 3991 3992 if ((a->a_flags & LK_INTERLOCK) == 0) 3993 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 3994 else 3995 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 3996 #endif 3997 } 3998 3999 void 4000 vop_lock_post(void *ap, int rc) 4001 { 4002 #ifdef DEBUG_VFS_LOCKS 4003 struct vop_lock1_args *a = ap; 4004 4005 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 4006 if (rc == 0) 4007 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 4008 #endif 4009 } 4010 4011 void 4012 vop_unlock_pre(void *ap) 4013 { 4014 #ifdef DEBUG_VFS_LOCKS 4015 struct vop_unlock_args *a = ap; 4016 4017 if (a->a_flags & LK_INTERLOCK) 4018 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK"); 4019 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 4020 #endif 4021 } 4022 4023 void 4024 vop_unlock_post(void *ap, int rc) 4025 { 4026 #ifdef DEBUG_VFS_LOCKS 4027 struct vop_unlock_args *a = ap; 4028 4029 if (a->a_flags & LK_INTERLOCK) 4030 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK"); 4031 #endif 4032 } 4033 4034 void 4035 vop_create_post(void *ap, int rc) 4036 { 4037 struct vop_create_args *a = ap; 4038 4039 if (!rc) 4040 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4041 } 4042 4043 void 4044 vop_deleteextattr_post(void *ap, int rc) 4045 { 4046 struct vop_deleteextattr_args *a = ap; 4047 4048 if (!rc) 4049 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 4050 } 4051 4052 void 4053 vop_link_post(void *ap, int rc) 4054 { 4055 struct vop_link_args *a = ap; 4056 4057 if (!rc) { 4058 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 4059 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 4060 } 4061 } 4062 4063 void 4064 vop_mkdir_post(void *ap, int rc) 4065 { 4066 struct vop_mkdir_args *a = ap; 4067 4068 if (!rc) 4069 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 4070 } 4071 4072 void 4073 vop_mknod_post(void *ap, int rc) 4074 { 4075 struct vop_mknod_args *a = ap; 4076 4077 if (!rc) 4078 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4079 } 4080 4081 void 4082 vop_remove_post(void *ap, int rc) 4083 { 4084 struct vop_remove_args *a = ap; 4085 4086 if (!rc) { 4087 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 4088 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 4089 } 4090 } 4091 4092 void 4093 vop_rename_post(void *ap, int rc) 4094 { 4095 struct vop_rename_args *a = ap; 4096 4097 if (!rc) { 4098 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE); 4099 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE); 4100 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 4101 if (a->a_tvp) 4102 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 4103 } 4104 if (a->a_tdvp != 
void
vop_rename_post(void *ap, int rc)
{
	struct vop_rename_args *a = ap;

	if (!rc) {
		VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
		VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
		if (a->a_tvp)
			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
	}
	if (a->a_tdvp != a->a_fdvp)
		vdrop(a->a_fdvp);
	if (a->a_tvp != a->a_fvp)
		vdrop(a->a_fvp);
	vdrop(a->a_tdvp);
	if (a->a_tvp)
		vdrop(a->a_tvp);
}

void
vop_rmdir_post(void *ap, int rc)
{
	struct vop_rmdir_args *a = ap;

	if (!rc) {
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
	}
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
}

void
vop_setextattr_post(void *ap, int rc)
{
	struct vop_setextattr_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
}

static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= hint;
	return (kn->kn_fflags != 0);
}

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");

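/*
 * Illustrative sketch only (kept under "#if 0", not compiled): the vfs.ctl
 * handler above forwards the request to the filesystem via VFS_SYSCTL(),
 * which lands in that filesystem's vfs_sysctl method.  The "example_*"
 * names below are hypothetical; only the method signature and the
 * VFS_CTL_* constants come from <sys/mount.h>.
 */
#if 0
static int example_timeo;

static int
example_sysctl(struct mount *mp, fsctl_t op, struct sysctl_req *req)
{

	switch (op) {
	case VFS_CTL_TIMEO:
		/* Export a filesystem-private timeout value to the caller. */
		return (SYSCTL_OUT(req, &example_timeo,
		    sizeof(example_timeo)));
	default:
		return (ENOTSUP);
	}
}
#endif /* 0 */
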
/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int filt_vfsread(struct knote *kn, long hint);
static int filt_vfswrite(struct knote *kn, long hint);
static int filt_vfsvnode(struct knote *kn, long hint);
static void filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp, 0);
}

static void
vfs_knl_assert_locked(void *arg)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
#endif
}

static void
vfs_knl_assert_unlocked(void *arg)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	knlist_add(knl, kn, 0);

	return (0);
}

/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct vattr va;
	int res;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		VI_LOCK(vp);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		VI_UNLOCK(vp);
		return (1);
	}

	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
		return (0);

	VI_LOCK(vp);
	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
	res = (kn->kn_data != 0);
	VI_UNLOCK(vp);
	return (res);
}

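/*
 * Illustrative userland sketch only (kept under "#if 0", never compiled
 * as part of the kernel): the read filter above is reached by registering
 * EVFILT_READ on a vnode-backed descriptor through kevent(2), after which
 * kn_data reports the bytes between the file offset and EOF.  The function
 * name and path are hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <fcntl.h>

static int
example_watch_readable(const char *path)
{
	struct kevent kev;
	int fd, kq;

	kq = kqueue();
	fd = open(path, O_RDONLY);
	if (kq == -1 || fd == -1)
		return (-1);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}
#endif /* 0 */
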
/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL)
				free(ap->a_cookies, M_TEMP);
			ap->a_cookies = NULL;
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	*ap->a_ncookies += 1;
	return (0);
}

/*
 * Mark for update the access time of the file if the filesystem
 * supports VOP_MARKATIME.  This functionality is used by execve and
 * mmap, so we want to avoid the I/O implied by directly setting
 * va_atime for the sake of efficiency.
 */
void
vfs_mark_atime(struct vnode *vp, struct ucred *cred)
{
	struct mount *mp;

	mp = vp->v_mount;
	VFS_ASSERT_GIANT(mp);
	ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
	if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
		(void)VOP_MARKATIME(vp);
}

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine.  If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits.  Caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}
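
/*
 * Illustrative sketch only (kept under "#if 0", not compiled): one way a
 * VOP_ACCESSX handler can use vfs_unixify_accmode() before falling back
 * to a plain VOP_ACCESS() check, in the spirit of the comment above.
 * The "example_accessx" name is hypothetical.
 */
#if 0
static int
example_accessx(struct vop_accessx_args *ap)
{
	accmode_t accmode;
	int error;

	accmode = ap->a_accmode;
	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);
	if (accmode == 0)
		return (0);
	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
#endif /* 0 */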