/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD$
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias(struct vnode *vp, dev_t nvp_rdev);
static void	insmntque(struct vnode *vp, struct mount *mp);
static void	vclean(struct vnode *vp, int flags, struct thread *td);
static void	vlruvp(struct vnode *vp);
static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
		    int slpflag, int slptimeo, int *errorp);

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long	numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer free vnodes than
 * this, getnewvnode() will return a newly allocated vnode.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

#ifdef ENABLE_VFS_IOOPT
/* See NOTES for a description of this setting. */
int vfs_ioopt;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata on mounted
 * block devices is delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so they are
 * delayed only about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * process).  The syncer_delayno variable indicates the next queue that
 * is to be processed.  Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
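
/*
 * Illustrative arithmetic (a sketch, not compiled): with the default
 * SYNCER_MAXDELAY of 32, hashinit() yields syncer_mask == 31.  A file
 * vnode dirtied while syncer_delayno == 20 is queued with filedelay
 * (30), landing in slot (20 + 30) & 31 == 18; the round-robin sweep,
 * advancing one slot per second, wraps past slot 31 and reaches slot
 * 18 thirty seconds later.
 */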

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

/* Hook for calling soft updates */
int (*softdep_process_worklist_hook)(struct mount *);

#ifdef DEBUG_VFS_LOCKS
/* Print lock violations */
int vfs_badlock_print = 1;
/* Panic on violation */
int vfs_badlock_panic = 1;

void
vop_rename_pre(void *ap)
{
	struct vop_rename_args *a = ap;

	/* Check the source (from) */
	if (a->a_tdvp != a->a_fdvp)
		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
	if (a->a_tvp != a->a_fvp)
		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");

	/* Check the target */
	if (a->a_tvp)
		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");

	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
}

void
vop_strategy_pre(void *ap)
{
	struct vop_strategy_args *a = ap;
	struct buf *bp;

	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
259 */ 260 if ((bp->b_flags & B_CLUSTER) != 0) 261 return; 262 263 if (BUF_REFCNT(bp) < 1) { 264 if (vfs_badlock_print) 265 printf("VOP_STRATEGY: bp is not locked but should be.\n"); 266 if (vfs_badlock_panic) 267 Debugger("Lock violation.\n"); 268 } 269 } 270 271 void 272 vop_lookup_pre(void *ap) 273 { 274 struct vop_lookup_args *a = ap; 275 struct vnode *dvp; 276 277 dvp = a->a_dvp; 278 279 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP"); 280 } 281 282 void 283 vop_lookup_post(void *ap, int rc) 284 { 285 struct vop_lookup_args *a = ap; 286 struct componentname *cnp; 287 struct vnode *dvp; 288 struct vnode *vp; 289 int flags; 290 291 dvp = a->a_dvp; 292 cnp = a->a_cnp; 293 vp = *(a->a_vpp); 294 flags = cnp->cn_flags; 295 296 297 /* 298 * If this is the last path component for this lookup and LOCPARENT 299 * is set, OR if there is an error the directory has to be locked. 300 */ 301 if ((flags & LOCKPARENT) && (flags & ISLASTCN)) 302 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)"); 303 else if (rc != 0) 304 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)"); 305 else if (dvp != vp) 306 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)"); 307 308 if (flags & PDIRUNLOCK) 309 ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)"); 310 311 if (rc == 0) 312 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (vpp)"); 313 } 314 315 #endif /* DEBUG_VFS_LOCKS */ 316 317 void 318 v_addpollinfo(struct vnode *vp) 319 { 320 vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK); 321 mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 322 } 323 324 /* 325 * Initialize the vnode management data structures. 326 */ 327 static void 328 vntblinit(void *dummy __unused) 329 { 330 331 desiredvnodes = maxproc + cnt.v_page_count / 4; 332 minvnodes = desiredvnodes / 4; 333 mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF); 334 mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF); 335 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 336 mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF); 337 TAILQ_INIT(&vnode_free_list); 338 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 339 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 340 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 341 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 342 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 343 /* 344 * Initialize the filesystem syncer. 345 */ 346 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 347 &syncer_mask); 348 syncer_maxdelay = syncer_mask + 1; 349 } 350 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL) 351 352 353 /* 354 * Mark a mount point as busy. Used to synchronize access and to delay 355 * unmounting. Interlock is not released on failure. 356 */ 357 int 358 vfs_busy(mp, flags, interlkp, td) 359 struct mount *mp; 360 int flags; 361 struct mtx *interlkp; 362 struct thread *td; 363 { 364 int lkflags; 365 366 if (mp->mnt_kern_flag & MNTK_UNMOUNT) { 367 if (flags & LK_NOWAIT) 368 return (ENOENT); 369 mp->mnt_kern_flag |= MNTK_MWAIT; 370 /* 371 * Since all busy locks are shared except the exclusive 372 * lock granted when unmounting, the only place that a 373 * wakeup needs to be done is at the release of the 374 * exclusive lock at the end of dounmount. 

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
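
/*
 * Worked example (a sketch of the bit layout above, not compiled): for
 * a filesystem with vfc_typenum 5 and mntid_base 0x1234, the minor
 * number passed to makeudev() is (5 << 24) | ((0x1234 & 0xFF00) << 8) |
 * (0x1234 & 0xFF) == 0x05120034.  The two halves of mntid_base land in
 * disjoint byte positions, which is what keeps val[0] unique mod 2^16
 * for a while even though only the low byte varies quickly.
 */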

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
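
/*
 * Typical use (a sketch; the exact call sites live in the individual
 * filesystems): a caller builds a setattr request by nulling the
 * structure and filling in only the fields it means to change, since
 * VNOVAL marks a field as "not requested":
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;			(truncate to zero length)
 *	error = VOP_SETATTR(vp, &va, cred, td);
 */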

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp, int count)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VMIGHTFREE(vp) &&		/* critical path opt */
		    (vp->v_object == NULL ||
		    vp->v_object->resident_page_count < trigger) &&
		    mtx_trylock(&vp->v_interlock)
		) {
			mtx_unlock(&mntvnode_mtx);
			if (VMIGHTFREE(vp)) {
				vgonel(vp, curthread);
				done++;
			} else {
				mtx_unlock(&vp->v_interlock);
			}
			mtx_lock(&mntvnode_mtx);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return done;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int s;
	int done;
	struct proc *p = vnlruproc;
	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kthread_suspend_check(p);
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			tsleep(vnlruproc, PVFS, "vlruwt", 0);
			continue;
		}
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp, 10);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)


/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	int s;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = NULL;
	struct mount *vnmp;
	vm_object_t object;

	s = splbio();
	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruproc);
	}

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not reached a
	 * good minimum for good LRU performance.
	 */

	mtx_lock(&vnode_free_list_mtx);

	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
		int count;

		for (count = 0; count < freevnodes; count++) {
			vp = TAILQ_FIRST(&vnode_free_list);
			if (vp == NULL || vp->v_usecount)
				panic("getnewvnode: free vnode isn't");
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);

			/* Don't recycle if we can't get the interlock */
			if (!mtx_trylock(&vp->v_interlock)) {
				vp = NULL;
				continue;
			}

			/* We should be able to immediately acquire this */
			if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0)
				continue;
			/*
			 * Don't recycle if we still have cached pages.
			 */
			if (VOP_GETVOBJECT(vp, &object) == 0 &&
			    (object->resident_page_count ||
			    object->ref_count)) {
				TAILQ_INSERT_TAIL(&vnode_free_list, vp,
				    v_freelist);
				VOP_UNLOCK(vp, 0, td);
				vp = NULL;
				continue;
			}
			if (LIST_FIRST(&vp->v_cache_src)) {
				/*
				 * note: nameileafonly sysctl is temporary,
				 * for debugging only, and will eventually be
				 * removed.
				 */
				if (nameileafonly > 0) {
					/*
					 * Do not reuse namei-cached directory
					 * vnodes that have cached
					 * subdirectories.
					 */
					if (cache_leaf_test(vp) < 0) {
						VOP_UNLOCK(vp, 0, td);
						TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
						vp = NULL;
						continue;
					}
				} else if (nameileafonly < 0 ||
					    vmiodirenable == 0) {
					/*
					 * Do not reuse namei-cached directory
					 * vnodes if nameileafonly is -1 or
					 * if VMIO backing for directories is
					 * turned off (otherwise we reuse them
					 * too quickly).
					 */
					VOP_UNLOCK(vp, 0, td);
					TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
					vp = NULL;
					continue;
				}
			}
			/*
			 * Skip over it if its filesystem is being suspended.
			 */
			if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
				break;
			VOP_UNLOCK(vp, 0, td);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
		}
	}
	if (vp) {
		vp->v_flag |= VDOOMED;
		vp->v_flag &= ~VFREE;
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);
		cache_purge(vp);
		if (vp->v_type != VBAD) {
			VOP_UNLOCK(vp, 0, td);
			vgone(vp);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
		vn_finished_write(vnmp);

#ifdef INVARIANTS
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		if (vp->v_pollinfo) {
			mtx_destroy(&vp->v_pollinfo->vpi_lock);
			uma_zfree(vnodepoll_zone, vp->v_pollinfo);
		}
		vp->v_pollinfo = NULL;
		vp->v_flag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
	} else {
		mtx_unlock(&vnode_free_list_mtx);
		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
		vp->v_dd = vp;
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
		numvnodes++;
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	lockinit(&vp->v_lock, PVFS, "vnlock", VLKTIMEOUT, LK_NOPAUSE);
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;

	splx(s);

#if 0
	vnodeallocs++;
	if (vnodeallocs % vnoderecycleperiod == 0 &&
	    freevnodes < vnoderecycleminfreevn &&
	    vnoderecyclemintotalvn < numvnodes) {
		/* Recycle vnodes. */
		cache_purgeleafdirs(vnoderecyclenumber);
	}
#endif

	return (0);
}
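
/*
 * Sketch of a typical caller (modeled on an ffs_vget()-style routine;
 * the tag, vnodeop vector, and private data shown are illustrative):
 * the filesystem allocates a vnode, then attaches its own state:
 *
 *	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;	(filesystem-private inode pointer)
 */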

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		mtx_unlock(&mntvnode_mtx);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup(&vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int slpflag, slptimeo;
{
	struct buf *blist;
	int s, error;
	vm_object_t object;

	GIANT_REQUIRED;

	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep(&vp->v_numoutput,
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
				return (error);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	for (error = 0;;) {
		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		break;
	}
	if (error) {
		splx(s);
		return (error);
	}

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		while (vp->v_numoutput > 0) {
			vp->v_flag |= VBWAIT;
			tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
		}
		if (VOP_GETVOBJECT(vp, &object) == 0) {
			while (object->paging_in_progress)
				vm_object_pip_sleep(object, "vnvlbx");
		}
	} while (vp->v_numoutput > 0);

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	mtx_lock(&vp->v_interlock);
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		vm_object_page_remove(object, 0, 0,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	mtx_unlock(&vp->v_interlock);

	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
	    !TAILQ_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	return (0);
}

/*
 * Flush out buffers on the specified list.
 */
static int
flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
	struct buf *blist;
	int flags;
	struct vnode *vp;
	int slpflag, slptimeo;
	int *errorp;
{
	struct buf *bp, *nbp;
	int found, error;

	for (found = 0, bp = blist; bp; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))
			continue;
		found += 1;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			error = BUF_TIMELOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL,
			    "flushbuf", slpflag, slptimeo);
			if (error != ENOLCK)
				*errorp = error;
			return (found);
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.  Note that vfs_bio_awrite expects
		 * buffers to reside on a queue, while BUF_WRITE and
		 * brelse do not.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
		    (flags & V_SAVE)) {

			if (bp->b_vp == vp) {
				if (bp->b_flags & B_CLUSTEROK) {
					BUF_UNLOCK(bp);
					vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					bp->b_flags |= B_ASYNC;
					BUF_WRITE(bp);
				}
			} else {
				bremfree(bp);
				(void) BUF_WRITE(bp);
			}
			return (found);
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
	}
	return (found);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, td, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct thread *td;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
restart:
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					bp->b_flags |= (B_INVAL | B_RELBUF);
					bp->b_flags &= ~B_ASYNC;
					brelse(bp);
					anyfreed = 1;
				}
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
					goto restart;
				} else {
					bremfree(bp);
					if (bp->b_vp == vp) {
						bp->b_flags |= B_ASYNC;
					} else {
						bp->b_flags &= ~B_ASYNC;
					}
					BUF_WRITE(bp);
				}
				goto restartsync;
			}

		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
	}

	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * a vnode.
 *
 * NOTE: We have to deal with the special case of a background bitmap
 * buffer, a situation where two buffers will have the same logical
 * block offset.  We want (1) only the foreground buffer to be accessed
 * in a lookup and (2) must differentiate between the foreground and
 * background buffer in the splay tree algorithm because the splay
 * tree cannot normally handle multiple entities with the same 'index'.
 * We accomplish this by adding differentiating flags to the splay tree's
 * numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}

static
void
buf_vlist_remove(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	struct buf *root;

	if (bp->b_xflags & BX_VNDIRTY) {
		if (bp != vp->v_dirtyblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_dirtyblkroot = root;
		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
	} else {
		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
		if (bp != vp->v_cleanblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
			KASSERT(root == bp, ("splay lookup failed during clean remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_cleanblkroot = root;
		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
	}
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static
void
buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
{
	struct buf *root;

	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_dirtyblkroot = bp;
	} else {
		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_cleanblkroot = bp;
	}
}

#ifndef USE_BUFHASH

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t lblkno)
{
	struct buf *bp;

	GIANT_REQUIRED;

	bp = vp->v_cleanblkroot = buf_splay(lblkno, 0, vp->v_cleanblkroot);
	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return(bp);
	bp = vp->v_dirtyblkroot = buf_splay(lblkno, 0, vp->v_dirtyblkroot);
	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return(bp);
	return(NULL);
}

#endif
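
/*
 * Sketch of the intended consumer (a getblk()-style routine; the
 * surrounding buffer locking is elided): check for an existing buffer
 * before allocating a fresh one:
 *
 *	if ((bp = gbincore(vp, lblkno)) != NULL) {
 *		(buffer is cached; lock it and reuse it)
 *	} else {
 *		(allocate a new buffer and attach it via bgetvp())
 *	}
 */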

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("bgetvp: bp already attached! %p", bp));

	vhold(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	buf_vlist_add(bp, vp, BX_VNCLEAN);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	bp->b_vp = (struct vnode *) 0;
	vdrop(vp);
	if (bp->b_object)
		bp->b_object = NULL;
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	splx(s);
}

struct proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	int s;
	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(td->td_proc);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) == 0 &&
			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
				VOP_UNLOCK(vp, 0, td);
				vn_finished_write(mp);
			}
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: v_tag VT_VFS vps can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL))
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
		if (softdep_process_worklist_hook != NULL)
			(*softdep_process_worklist_hook)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.
		 * Currently rushjob is used by the soft update code to
		 * speed up the filesystem syncer process when the incore
		 * state is getting so far ahead of the disk that the kernel
		 * memory pool is being threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 * XXXKSE only one update?
 */
int
speedup_syncer()
{

	mtx_lock_spin(&sched_lock);
	if (FIRST_THREAD_IN_PROC(updateproc)->td_wchan == &lbolt) /* XXXKSE */
		setrunnable(FIRST_THREAD_IN_PROC(updateproc));
	mtx_unlock_spin(&sched_lock);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return(0);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	int delay;
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
		buf_vlist_remove(bp);
		if (bp->b_vp != newvp) {
			vdrop(bp->b_vp);
			bp->b_vp = NULL;	/* for clarification */
		}
	}
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		if ((newvp->v_flag & VONWORKLST) == 0) {
			switch (newvp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
				if (newvp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(newvp, delay);
		}
		buf_vlist_add(bp, newvp, BX_VNDIRTY);
	} else {
		buf_vlist_add(bp, newvp, BX_VNCLEAN);

		if ((newvp->v_flag & VONWORKLST) &&
		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
			newvp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(newvp, v_synclist);
		}
	}
	if (bp->b_vp != newvp) {
		bp->b_vp = newvp;
		vhold(bp->b_vp);
	}
	splx(s);
}

/*
 * Create a vnode for a device.
 * Used for mounting the root filesystem.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	if (vfinddev(dev, VCHR, vpp))
		return (0);
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	addalias(vp, dev);
	*vpp = vp;
	return (0);
}

/*
 * Add vnode to the alias list hung off the dev_t.
 *
 * The reason for this gunk is that multiple vnodes can reference
 * the same physical device, so checking vp->v_usecount to see
 * how many users there are is inadequate; the v_usecount for
 * the vnodes need to be accumulated.  vcount() does that.
 */
struct vnode *
addaliasu(nvp, nvp_rdev)
	struct vnode *nvp;
	udev_t nvp_rdev;
{
	struct vnode *ovp;
	vop_t **ops;
	dev_t dev;

	if (nvp->v_type == VBLK)
		return (nvp);
	if (nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	dev = udev2dev(nvp_rdev, 0);
	/*
	 * Check to see if we have a bdevvp vnode with no associated
	 * filesystem.  If so, we want to associate the filesystem of
	 * the newly created vnode with the bdevvp vnode and discard
	 * the newly created vnode rather than leaving the bdevvp
	 * vnode lying around with no associated filesystem.
	 */
	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
		addalias(nvp, dev);
		return (nvp);
	}
	/*
	 * Discard unneeded vnode, but save its node specific data.
	 * Note that if there is a lock, it is carried over in the
	 * node specific data to the replacement vnode.
	 */
	vref(ovp);
	ovp->v_data = nvp->v_data;
	ovp->v_tag = nvp->v_tag;
	nvp->v_data = NULL;
	lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg,
	    nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK);
	if (nvp->v_vnlock)
		ovp->v_vnlock = &ovp->v_lock;
	ops = ovp->v_op;
	ovp->v_op = nvp->v_op;
	if (VOP_ISLOCKED(nvp, curthread)) {
		VOP_UNLOCK(nvp, 0, curthread);
		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
	}
	nvp->v_op = ops;
	insmntque(ovp, nvp->v_mount);
	vrele(nvp);
	vgone(nvp);
	return (ovp);
}

/* This is a local helper function that does the same as addaliasu, but
 * for a dev_t instead of a udev_t.
 */
static void
addalias(nvp, dev)
	struct vnode *nvp;
	dev_t dev;
{

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
	mtx_lock(&spechash_mtx);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	mtx_unlock(&spechash_mtx);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it.  The vnode lock bit is set if the
 * vnode is being eliminated in vgone.  The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new filesystem type).
 */
int
vget(vp, flags, td)
	register struct vnode *vp;
	int flags;
	struct thread *td;
{
	int error;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		mtx_lock(&vp->v_interlock);
	if (vp->v_flag & VXLOCK) {
		if (vp->v_vxproc == curthread) {
#if 0
			/* this can now occur in normal operation */
			log(LOG_INFO, "VXLOCK interlock avoided\n");
#endif
		} else {
			vp->v_flag |= VXWANT;
			msleep(vp, &vp->v_interlock, PINOD | PDROP, "vget", 0);
			return (ENOENT);
		}
	}

	vp->v_usecount++;

	if (VSHOULDBUSY(vp))
		vbusy(vp);
	if (flags & LK_TYPE_MASK) {
		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
			/*
			 * must expand vrele here because we do not want
			 * to call VOP_INACTIVE if the reference count
			 * drops back to zero since it was never really
			 * active.  We must remove it from the free list
			 * before sleeping so that multiple processes do
			 * not try to recycle it.
			 */
			mtx_lock(&vp->v_interlock);
			vp->v_usecount--;
			if (VSHOULDFREE(vp))
				vfree(vp);
			else
				vlruvp(vp);
			mtx_unlock(&vp->v_interlock);
		}
		return (error);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Increase the reference count of a vnode.
 */
void
vref(struct vnode *vp)
{
	mtx_lock(&vp->v_interlock);
	vp->v_usecount++;
	mtx_unlock(&vp->v_interlock);
}
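
/*
 * Sketch of a typical vget() consumer (e.g. a filesystem hash lookup;
 * the retry label is illustrative): take a reference and a lock in one
 * step, and redo the lookup if the vnode was reclaimed underneath us:
 *
 *	if (vget(vp, LK_EXCLUSIVE, td) != 0)
 *		goto retry;	(vnode was being cleaned; look it up again)
 */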

/*
 * Vnode put/release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	KASSERT(vp != NULL, ("vrele: null vp"));

	mtx_lock(&vp->v_interlock);

	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vrele: missed vn_close"));

	if (vp->v_usecount > 1) {

		vp->v_usecount--;
		mtx_unlock(&vp->v_interlock);

		return;
	}

	if (vp->v_usecount == 1) {
		vp->v_usecount--;
		/*
		 * We must call VOP_INACTIVE with the node locked.
		 * If we are doing a vput, the node is already locked,
		 * but, in the case of vrele, we must explicitly lock
		 * the vnode before calling VOP_INACTIVE.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0)
			VOP_INACTIVE(vp, td);
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
		mtx_unlock(&vp->v_interlock);
#endif
		panic("vrele: negative ref cnt");
	}
}

/*
 * Release an already locked vnode.  This gives the same effects as
 * unlock+vrele(), but takes less time and avoids releasing and
 * re-acquiring the lock (as vrele() acquires the lock internally.)
 */
void
vput(vp)
	struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	GIANT_REQUIRED;

	KASSERT(vp != NULL, ("vput: null vp"));
	mtx_lock(&vp->v_interlock);
	/* Skip this v_writecount check if we're going to panic below. */
	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
	    ("vput: missed vn_close"));

	if (vp->v_usecount > 1) {
		vp->v_usecount--;
		VOP_UNLOCK(vp, LK_INTERLOCK, td);
		return;
	}

	if (vp->v_usecount == 1) {
		vp->v_usecount--;
		/*
		 * We must call VOP_INACTIVE with the node locked.
		 * If we are doing a vput, the node is already locked,
		 * so we just need to release the vnode mutex.
		 */
		mtx_unlock(&vp->v_interlock);
		VOP_INACTIVE(vp, td);
		if (VSHOULDFREE(vp))
			vfree(vp);
		else
			vlruvp(vp);

	} else {
#ifdef DIAGNOSTIC
		vprint("vput: negative ref count", vp);
#endif
		panic("vput: negative ref cnt");
	}
}
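
/*
 * The two release idioms side by side (a sketch): vput() is shorthand
 * for dropping the lock and the reference together on a locked vnode:
 *
 *	VOP_UNLOCK(vp, 0, td);
 *	vrele(vp);
 *
 * is equivalent to, but slower than:
 *
 *	vput(vp);
 */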
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active vnodes;
 * an error is returned if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_usecount exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, rootrefs, flags)
	struct mount *mp;
	int rootrefs;
	int flags;
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp, *nvp, *rootvp = NULL;
	struct vattr vattr;
	int busy = 0, error;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
			return (error);
		vput(rootvp);

	}
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&vp->v_interlock);
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			mtx_unlock(&vp->v_interlock);
			mtx_lock(&mntvnode_mtx);
			continue;
		}
		/*
		 * If WRITECLOSE is set, flush out unlinked but still open
		 * files (even if open only for reading) and regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_type == VNON ||
		    (VOP_GETATTR(vp, &vattr, td->td_ucred, td) == 0 &&
		    vattr.va_nlink > 0)) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			mtx_unlock(&vp->v_interlock);
			mtx_lock(&mntvnode_mtx);
			continue;
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgonel(vp, td);
			mtx_lock(&mntvnode_mtx);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VCHR) {
				vgonel(vp, td);
			} else {
				vclean(vp, 0, td);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			mtx_lock(&mntvnode_mtx);
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		mtx_unlock(&vp->v_interlock);
		mtx_lock(&mntvnode_mtx);
		busy++;
	}
	mtx_unlock(&mntvnode_mtx);
	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		mtx_lock(&rootvp->v_interlock);
		KASSERT(busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
		if (busy == 1 && rootvp->v_usecount == rootrefs) {
			vgonel(rootvp, td);
			busy = 0;
		} else
			mtx_unlock(&rootvp->v_interlock);
	}
	if (busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
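/*
 * Illustrative sketch, compiled out: how an unmount path might drive
 * vflush().  A filesystem that keeps one cached reference to its root
 * vnode passes rootrefs == 1 so that its own reference does not count
 * as "busy".  The filesystem name and surrounding logic are
 * hypothetical.
 */
#if 0
static int
examplefs_unmount(struct mount *mp, int mntflags, struct thread *td)
{
	int error, flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* One reference on the root vnode is ours; don't call it busy. */
	error = vflush(mp, 1, flags);
	if (error != 0)
		return (error);
	/* ... release filesystem-private data here ... */
	return (0);
}
#endif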
/*
 * This moves a now (likely recyclable) vnode to the end of the
 * mountlist. XXX However, it is temporarily disabled until we
 * can clean up ffs_sync() and friends, which have loop restart
 * conditions that this code causes to operate in O(N^2) time.
 */
static void
vlruvp(struct vnode *vp)
{
#if 0
	struct mount *mp;

	if ((mp = vp->v_mount) != NULL) {
		mtx_lock(&mntvnode_mtx);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		mtx_unlock(&mntvnode_mtx);
	}
#endif
}

/*
 * Disassociate the underlying filesystem from a vnode.
 */
static void
vclean(vp, flags, td)
	struct vnode *vp;
	int flags;
	struct thread *td;
{
	int active;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		vp->v_usecount++;

	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	vp->v_vxproc = curthread;
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);

	/*
	 * Clean out any buffers associated with the vnode.
	 * If the flush fails, just toss the buffers.
	 */
	if (flags & DOCLOSE) {
		if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL)
			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
	}

	VOP_DESTROYVOBJECT(vp);

	/*
	 * Any other processes trying to obtain this lock must first
	 * wait for VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp, 0, td);

	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
			panic("vclean: cannot relock.");
		VOP_INACTIVE(vp, td);
	}

	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, td))
		panic("vclean: cannot reclaim");

	if (active) {
		/*
		 * Inline copy of vrele() since VOP_INACTIVE
		 * has already been called.
		 */
		mtx_lock(&vp->v_interlock);
		if (--vp->v_usecount <= 0) {
#ifdef DIAGNOSTIC
			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
				vprint("vclean: bad ref count", vp);
				panic("vclean: ref cnt");
			}
#endif
			vfree(vp);
		}
		mtx_unlock(&vp->v_interlock);
	}

	cache_purge(vp);
	vp->v_vnlock = NULL;
	lockdestroy(&vp->v_lock);

	if (VSHOULDFREE(vp))
		vfree(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	if (vp->v_pollinfo != NULL)
		vn_pollgone(vp);
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	vp->v_vxproc = NULL;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup(vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	dev_t dev;

	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));

	vp = ap->a_vp;
	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep(vp, &vp->v_interlock, PINOD | PDROP,
		    "vop_revokeall", 0);
		return (0);
	}
	dev = vp->v_rdev;
	for (;;) {
		mtx_lock(&spechash_mtx);
		vq = SLIST_FIRST(&dev->si_hlist);
		mtx_unlock(&spechash_mtx);
		if (!vq)
			break;
		vgone(vq);
	}
	return (0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vp, inter_lkp, td)
	struct vnode *vp;
	struct mtx *inter_lkp;
	struct thread *td;
{

	mtx_lock(&vp->v_interlock);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			mtx_unlock(inter_lkp);
		}
		vgonel(vp, td);
		return (1);
	}
	mtx_unlock(&vp->v_interlock);
	return (0);
}
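/*
 * Illustrative sketch, compiled out: vrecycle() is typically called
 * from a filesystem's VOP_INACTIVE routine once it knows the file has
 * been fully removed, so the vnode can be reused immediately instead
 * of aging on the free list.  The inode type and its i_nlink field are
 * hypothetical.
 */
#if 0
static int
examplefs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct example_inode *ip = vp->v_data;	/* hypothetical type */

	VOP_UNLOCK(vp, 0, ap->a_td);
	/* If the file has been deleted, recycle the vnode right away. */
	if (ip->i_nlink <= 0)
		vrecycle(vp, NULL, ap->a_td);
	return (0);
}
#endif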
/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	struct thread *td = curthread;	/* XXX */

	mtx_lock(&vp->v_interlock);
	vgonel(vp, td);
}

/*
 * vgone, with the vp interlock held.
 */
void
vgonel(vp, td)
	struct vnode *vp;
	struct thread *td;
{
	int s;

	/*
	 * If a vgone (or vclean) is already in progress,
	 * wait until it is done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		msleep(vp, &vp->v_interlock, PINOD | PDROP, "vgone", 0);
		return;
	}

	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE, td);
	mtx_lock(&vp->v_interlock);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		insmntque(vp, (struct mount *)0);
	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.
	 */
	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
		mtx_lock(&spechash_mtx);
		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
		freedev(vp->v_rdev);
		mtx_unlock(&spechash_mtx);
		vp->v_rdev = NULL;
	}

	/*
	 * If it is on the freelist and not already at the head,
	 * move it to the head of the list. The test of the
	 * VDOOMED flag and the reference count of zero is because
	 * it will be removed from the free list by getnewvnode,
	 * but will not have its reference count incremented until
	 * after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to
	 * close the previous instance of the underlying object.
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		mtx_lock(&vnode_free_list_mtx);
		if (vp->v_flag & VFREE)
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		mtx_unlock(&vnode_free_list_mtx);
		splx(s);
	}

	vp->v_type = VBAD;
	mtx_unlock(&vp->v_interlock);
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	struct vnode *vp;

	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			mtx_unlock(&spechash_mtx);
			return (1);
		}
	}
	mtx_unlock(&spechash_mtx);
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	struct vnode *vp;
{
	struct vnode *vq;
	int count;

	count = 0;
	mtx_lock(&spechash_mtx);
	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
		count += vq->v_usecount;
	mtx_unlock(&spechash_mtx);
	return (count);
}

/*
 * Same as above, but using the dev_t as the argument.
 */
int
count_dev(dev)
	dev_t dev;
{
	struct vnode *vp;

	vp = SLIST_FIRST(&dev->si_hlist);
	if (vp == NULL)
		return (0);
	return (vcount(vp));
}
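/*
 * Illustrative sketch, compiled out: vcount() is the usual way for a
 * device-close path to decide whether a close is the last one, since
 * several aliased vnodes may reference the same device.  The function
 * below is hypothetical.
 */
#if 0
static int
example_spec_close(struct vnode *vp, struct thread *td)
{
	/*
	 * If other aliases of this device are still open, this is
	 * not the last close; don't shut the device down yet.
	 */
	if (vcount(vp) > 1)
		return (0);
	/* ... invoke the driver's last-close handling here ... */
	return (0);
}
#endif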
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
    {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	struct vnode *vp;
{
	char buf[96];

	if (label != NULL)
		printf("%s: %p: ", label, (void *)vp);
	else
		printf("%p: ", (void *)vp);
	printf("type %s, usecount %d, writecount %d, refcount %d,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VDOOMED)
		strcat(buf, "|VDOOMED");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
#include <ddb/ddb.h>
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
{
	struct thread *td = curthread;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *vp;

	printf("Locked vnodes\n");
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		mtx_lock(&mntvnode_mtx);
		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
			if (VOP_ISLOCKED(vp, NULL))
				vprint((char *)0, vp);
		}
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);
}
#endif

/*
 * Top level filesystem related information gathering.
 */
static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

	/* XXX the below code does not compile; vfs_sysctl does not exist. */
#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[0])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, td));
	}
#endif
	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
			if (vfsp->vfc_typenum == name[2])
				break;
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
    "Generic filesystem");

#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vfsconf *vfsp;
	struct ovfsconf ovfs;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
		strcpy(ovfs.vfc_name, vfsp->vfc_name);
		ovfs.vfc_index = vfsp->vfc_typenum;
		ovfs.vfc_refcount = vfsp->vfc_refcount;
		ovfs.vfc_flags = vfsp->vfc_flags;
		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
		if (error)
			return error;
	}
	return 0;
}

#endif /* 1 || COMPAT_PRELITE2 */
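/*
 * Illustrative sketch: the handler above answers old-style MIB queries
 * such as {CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM}.  A userland program
 * could query it roughly as follows; this fragment is compiled out
 * since this file is kernel code, and the program around the sysctl(3)
 * call is hypothetical.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	int mib[3], maxtypenum;
	size_t len;

	mib[0] = CTL_VFS;
	mib[1] = VFS_GENERIC;
	mib[2] = VFS_MAXTYPENUM;
	len = sizeof(maxtypenum);
	if (sysctl(mib, 3, &maxtypenum, &len, NULL, 0) == -1)
		return (1);
	printf("max filesystem type number: %d\n", maxtypenum);
	return (0);
}
#endif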
#if COMPILING_LINT
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;	/* XXX */
	struct mount *mp, *nmp;
	struct vnode *nvp, *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	sysctl_wire_old_buffer(req, 0);
	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		mtx_lock(&mntvnode_mtx);
again:
		for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		     vp != NULL;
		     vp = nvp) {
			/*
			 * Check that the vp is still associated with
			 * this filesystem. RACE: could have been
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp)
				goto again;
			nvp = TAILQ_NEXT(vp, v_nmntvnodes);
			mtx_unlock(&mntvnode_mtx);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
			mtx_lock(&mntvnode_mtx);
		}
		mtx_unlock(&mntvnode_mtx);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, td);
	}
	mtx_unlock(&mountlist_mtx);

	return (0);
}

/*
 * XXX
 * Exporting the vnode list on large systems causes them to crash.
 * Exporting the vnode list on medium systems causes sysctl to coredump.
 */
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_vnode, "S,vnode", "");
#endif

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	struct vnode *vp;
{

	if (vp->v_rdev->si_mountpoint != NULL)
		return (EBUSY);
	return (0);
}

/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
void
vfs_unmountall()
{
	struct mount *mp;
	struct thread *td;
	int error;

	if (curthread != NULL)
		td = curthread;
	else
		td = FIRST_THREAD_IN_PROC(initproc);	/* XXX XXX proc0? */
	/*
	 * Since this only runs when rebooting, it is not interlocked.
	 */
	while (!TAILQ_EMPTY(&mountlist)) {
		mp = TAILQ_LAST(&mountlist, mntlist);
		error = dounmount(mp, MNT_FORCE, td);
		if (error) {
			TAILQ_REMOVE(&mountlist, mp, mnt_list);
			printf("unmount of %s failed (",
			    mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		} else {
			/* The unmount has removed mp from the mountlist */
		}
	}
}
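/*
 * Illustrative sketch, compiled out: vfs_mountedon() above is the
 * guard a mount path uses to refuse mounting the same device twice.
 * The function below is hypothetical.
 */
#if 0
static int
example_check_devvp(struct vnode *devvp)
{
	int error;

	/* Refuse the mount if a filesystem already uses this device. */
	error = vfs_mountedon(devvp);
	if (error != 0)
		return (error);		/* EBUSY */
	return (0);
}
#endif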
/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
	struct vm_object *obj;
	int tries;

	GIANT_REQUIRED;

	tries = 5;
	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp) {
			if (--tries > 0)
				goto loop;
			break;
		}
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);

		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
			continue;

		if (vp->v_flag & VNOSYNC)	/* unlinked, skip it */
			continue;

		if ((vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
			mtx_unlock(&mntvnode_mtx);
			if (!vget(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curthread)) {
				if (VOP_GETVOBJECT(vp, &obj) == 0) {
					vm_object_page_clean(obj, 0, 0,
					    flags == MNT_WAIT ?
					    OBJPC_SYNC : OBJPC_NOSYNC);
				}
				vput(vp);
			}
			mtx_lock(&mntvnode_mtx);
			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
				if (--tries > 0)
					goto loop;
				break;
			}
		}
	}
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Create the VM object needed for VMIO and mmap support. This
 * is done for all VREG files in the system. Some filesystems might
 * take advantage of the additional metadata buffering capability of
 * the VMIO code by making the device node be VMIO mode also.
 *
 * vp must be locked when vfs_object_create is called.
 */
int
vfs_object_create(vp, td, cred)
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
{
	GIANT_REQUIRED;
	return (VOP_CREATEVOBJECT(vp, cred, td));
}

/*
 * Mark a vnode as free, putting it up for recycling.
 */
void
vfree(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
	splx(s);
}

/*
 * Opposite of vfree() - mark a vnode as in use.
 */
void
vbusy(vp)
	struct vnode *vp;
{
	int s;

	s = splbio();
	mtx_lock(&vnode_free_list_mtx);
	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	mtx_unlock(&vnode_free_list_mtx);
	vp->v_flag &= ~(VFREE|VAGE);
	splx(s);
}

/*
 * Record a process's interest in events which might happen to
 * a vnode. Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions. (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(vp, td, events)
	struct vnode *vp;
	struct thread *td;
	short events;
{

	if (vp->v_pollinfo == NULL)
		v_addpollinfo(vp);
	mtx_lock(&vp->v_pollinfo->vpi_lock);
	if (vp->v_pollinfo->vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo->vpi_revents;
		vp->v_pollinfo->vpi_revents &= ~events;

		mtx_unlock(&vp->v_pollinfo->vpi_lock);
		return events;
	}
	vp->v_pollinfo->vpi_events |= events;
	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
	mtx_unlock(&vp->v_pollinfo->vpi_lock);
	return 0;
}
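/*
 * Illustrative sketch, compiled out: a filesystem's VOP_POLL
 * implementation typically returns any already-pending events and
 * otherwise records the caller's interest via vn_pollrecord().  The
 * function below is hypothetical.
 */
#if 0
static int
examplefs_poll(struct vop_poll_args *ap)
{
	/*
	 * Let the common code either report pending revents or
	 * selrecord() this thread for a later vn_pollevent().
	 */
	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}
#endif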
/*
 * Note the occurrence of an event. If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{

	if (vp->v_pollinfo == NULL)
		v_addpollinfo(vp);
	mtx_lock(&vp->v_pollinfo->vpi_lock);
	if (vp->v_pollinfo->vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened. This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested. However, it does
		 * mean that only one event can be noticed at
		 * a time. (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo->vpi_revents |= events;
		selwakeup(&vp->v_pollinfo->vpi_selinfo);
	}
	mtx_unlock(&vp->v_pollinfo->vpi_lock);
}

/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(vp)
	struct vnode *vp;
{

	mtx_lock(&vp->v_pollinfo->vpi_lock);
	VN_KNOTE(vp, NOTE_REVOKE);
	if (vp->v_pollinfo->vpi_events) {
		vp->v_pollinfo->vpi_events = 0;
		selwakeup(&vp->v_pollinfo->vpi_selinfo);
	}
	mtx_unlock(&vp->v_pollinfo->vpi_lock);
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close ((int (*)(struct vop_close_args *))nullop)
static int	sync_fsync(struct vop_fsync_args *);
static int	sync_inactive(struct vop_inactive_args *);
static int	sync_reclaim(struct vop_reclaim_args *);
static int	sync_print(struct vop_print_args *);

static vop_t **sync_vnodeop_p;
static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
	{ NULL, NULL }
};
static struct vnodeopv_desc sync_vnodeop_opv_desc =
	{ &sync_vnodeop_p, sync_vnodeop_entries };

VNODEOP_SET(sync_vnodeop_opv_desc);
/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
	mp->mnt_syncer = vp;
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 */
static int
sync_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	struct thread *td = ap->a_td;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	mtx_lock(&mountlist_mtx);
	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
		mtx_unlock(&mountlist_mtx);
		return (0);
	}
	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
		vfs_unbusy(mp, td);
		return (0);
	}
	asyncflag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	vfs_msync(mp, MNT_NOWAIT);
	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
	if (asyncflag)
		mp->mnt_flag |= MNT_ASYNC;
	vn_finished_write(mp);
	vfs_unbusy(mp, td);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	vgone(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 *
 * Modifications to the worklist must be protected at splbio().
 */
static int
sync_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int s;

	s = splbio();
	vp->v_mount->mnt_syncer = NULL;
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vp->v_flag &= ~VONWORKLST;
	}
	splx(s);

	return (0);
}
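/*
 * Illustrative sketch, compiled out: mount code typically allocates
 * the per-mount syncer vnode via vfs_allocate_syncvnode() above once
 * the mount has otherwise succeeded, and only for writable mounts.
 * The surrounding function is hypothetical.
 */
#if 0
static void
example_finish_mount(struct mount *mp)
{
	/* Give writable mounts a syncer vnode on the worklist. */
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
		(void) vfs_allocate_syncvnode(mp);
}
#endif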
/*
 * Print out a syncer vnode.
 */
static int
sync_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}

/*
 * Extract the dev_t from a VCHR vnode.
 */
dev_t
vn_todev(vp)
	struct vnode *vp;
{

	if (vp->v_type != VCHR)
		return (NODEV);
	return (vp->v_rdev);
}

/*
 * Check if a vnode represents a disk device.
 */
int
vn_isdisk(vp, errp)
	struct vnode *vp;
	int *errp;
{
	struct cdevsw *cdevsw;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (vp->v_rdev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	cdevsw = devsw(vp->v_rdev);
	if (cdevsw == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (!(cdevsw->d_flags & D_DISK)) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}

/*
 * Free data allocated by namei(); see namei(9) for details.
 */
void
NDFREE(ndp, flags)
	struct nameidata *ndp;
	const uint flags;
{

	if (!(flags & NDF_NO_FREE_PNBUF) &&
	    (ndp->ni_cnd.cn_flags & HASBUF)) {
		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
		ndp->ni_cnd.cn_flags &= ~HASBUF;
	}
	if (!(flags & NDF_NO_DVP_UNLOCK) &&
	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
	    ndp->ni_dvp != ndp->ni_vp)
		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
	if (!(flags & NDF_NO_DVP_RELE) &&
	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
		vrele(ndp->ni_dvp);
		ndp->ni_dvp = NULL;
	}
	if (!(flags & NDF_NO_VP_UNLOCK) &&
	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
	if (!(flags & NDF_NO_VP_RELE) &&
	    ndp->ni_vp) {
		vrele(ndp->ni_vp);
		ndp->ni_vp = NULL;
	}
	if (!(flags & NDF_NO_STARTDIR_RELE) &&
	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
		vrele(ndp->ni_startdir);
		ndp->ni_startdir = NULL;
	}
}
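/*
 * Illustrative sketch, compiled out: a typical namei()/NDFREE()
 * pairing.  The lookup leaves state (path buffer, referenced and
 * locked vnodes) in the nameidata, and NDFREE() releases whatever the
 * caller does not keep.  The surrounding function is hypothetical.
 */
#if 0
static int
example_lookup(const char *path, struct thread *td, struct vnode **vpp)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	/* Keep the vnode, but free the pathname buffer. */
	NDFREE(&nd, NDF_ONLY_PNBUF);
	*vpp = nd.ni_vp;	/* returned locked and referenced */
	return (0);
}
#endif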
/*
 * Common filesystem object access control check routine. Accepts a
 * vnode's type, "mode", uid and gid, requested access mode, credentials,
 * and optional call-by-reference privused argument allowing vaccess()
 * to indicate to the caller whether privilege was used to satisfy the
 * request. Returns 0 on success, or an errno on failure.
 */
int
vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
	enum vtype type;
	mode_t file_mode;
	uid_t file_uid;
	gid_t file_gid;
	mode_t acc_mode;
	struct ucred *cred;
	int *privused;
{
	mode_t dac_granted;
#ifdef CAPABILITIES
	mode_t cap_granted;
#endif

	/*
	 * Look for a normal, non-privileged way to access the file/directory
	 * as requested. If it exists, go with that.
	 */

	if (privused != NULL)
		*privused = 0;

	dac_granted = 0;

	/* Check the owner. */
	if (cred->cr_uid == file_uid) {
		dac_granted |= VADMIN;
		if (file_mode & S_IXUSR)
			dac_granted |= VEXEC;
		if (file_mode & S_IRUSR)
			dac_granted |= VREAD;
		if (file_mode & S_IWUSR)
			dac_granted |= (VWRITE | VAPPEND);

		if ((acc_mode & dac_granted) == acc_mode)
			return (0);

		goto privcheck;
	}

	/* Otherwise, check the groups (first match) */
	if (groupmember(file_gid, cred)) {
		if (file_mode & S_IXGRP)
			dac_granted |= VEXEC;
		if (file_mode & S_IRGRP)
			dac_granted |= VREAD;
		if (file_mode & S_IWGRP)
			dac_granted |= (VWRITE | VAPPEND);

		if ((acc_mode & dac_granted) == acc_mode)
			return (0);

		goto privcheck;
	}

	/* Otherwise, check everyone else. */
	if (file_mode & S_IXOTH)
		dac_granted |= VEXEC;
	if (file_mode & S_IROTH)
		dac_granted |= VREAD;
	if (file_mode & S_IWOTH)
		dac_granted |= (VWRITE | VAPPEND);
	if ((acc_mode & dac_granted) == acc_mode)
		return (0);

privcheck:
	if (!suser_cred(cred, PRISON_ROOT)) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}

#ifdef CAPABILITIES
	/*
	 * Build a capability mask to determine if the set of capabilities
	 * satisfies the requirements when combined with the granted mask
	 * from above.
	 * For each capability, if the capability is required, bitwise
	 * or the request type onto the cap_granted mask.
	 */
	cap_granted = 0;

	if (type == VDIR) {
		/*
		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
		 */
		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
			cap_granted |= VEXEC;
	} else {
		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
			cap_granted |= VEXEC;
	}

	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
		cap_granted |= VREAD;

	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
		cap_granted |= (VWRITE | VAPPEND);

	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
		cap_granted |= VADMIN;

	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
		/* XXX audit: privilege used */
		if (privused != NULL)
			*privused = 1;
		return (0);
	}
#endif

	return ((acc_mode & VADMIN) ? EPERM : EACCES);
}
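/*
 * Illustrative sketch, compiled out: a filesystem's VOP_ACCESS routine
 * usually reduces to a single vaccess() call once it has the on-disk
 * ownership and mode in hand.  The inode type and its i_mode, i_uid,
 * and i_gid fields are hypothetical.
 */
#if 0
static int
examplefs_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct example_inode *ip = vp->v_data;	/* hypothetical type */

	/* Defer the uid/gid/mode policy decision to the common code. */
	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
	    ap->a_mode, ap->a_cred, NULL));
}
#endif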