vfs_subr.c: diff between revisions 0359a12eadcd66a9298e283fc5f3c90a9393322a (old) and 59d4932531aa4e97f68b2d42290a68cae6b17375 (new). Removed lines are prefixed with -, added lines with +.
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.

--- 324 unchanged lines hidden ---

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);


/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
-vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
-    struct thread *td)
+vfs_busy(struct mount *mp, int flags, struct mtx *interlkp)
{
	int lkflags;

	MNT_ILOCK(mp);
	MNT_REF(mp);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			MNT_REL(mp);

--- 23 unchanged lines hidden ---

	panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
-vfs_unbusy(struct mount *mp, struct thread *td)
+vfs_unbusy(struct mount *mp)
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL);
	vfs_rel(mp);
}

/*
 * Lookup a mount point by filesystem identifier.

--- 177 unchanged lines hidden ---
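
The two hunks above change only the prototypes: vfs_busy() and
vfs_unbusy() lose their struct thread argument (the kernel can always
derive it from curthread). A minimal caller sketch under the new
signatures, mirroring the vnlru_proc() loop later in this diff; the
helper visit_all_mounts() and its callback are hypothetical:

static void
visit_all_mounts(void (*visit)(struct mount *))
{
	struct mount *mp, *nmp;

	mtx_lock(&mountlist_mtx);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		/*
		 * Per the comment above vfs_busy(), the interlock is
		 * still held on failure; on success it has been
		 * dropped, so retake it before reading the next link.
		 */
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx)) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			continue;
		}
		visit(mp);
		mtx_lock(&mountlist_mtx);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}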

 * underlying files, or the vnode may be in active use. It is not
 * desirable to reuse such vnodes. These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
-	struct thread *td;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point, don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;
	done = 0;
-	td = curthread;
	vn_start_write(NULL, &mp, V_WAIT);
	MNT_ILOCK(mp);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count != 0) {
		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
		while (vp != NULL && vp->v_type == VMARKER)
			vp = TAILQ_NEXT(vp, v_nmntvnodes);
		if (vp == NULL)

--- 118 unchanged lines hidden ---
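
For concreteness, the trigger computation above with invented numbers
(illustration only, not taken from the source):

	/*
	 * cnt.v_page_count = 262144   (1 GB of RAM in 4 KB pages)
	 * desiredvnodes    = 70000    (kern.maxvnodes)
	 *
	 * trigger = 262144 * 2 / 70000 = 7   (integer division)
	 *
	 * Per the comment above, vnodes caching more resident pages
	 * than this are too valuable to recycle just to free a vnode.
	 */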

static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int done;
	struct proc *p = vnlruproc;
-	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	mtx_lock(&Giant);

	for (;;) {
		kproc_suspend_check(p);

--- 7 unchanged lines hidden ---

			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			int vfsunlocked;
-			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
+			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			if (!VFS_NEEDSGIANT(mp)) {
				mtx_unlock(&Giant);
				vfsunlocked = 1;
			} else
				vfsunlocked = 0;
			done += vlrureclaim(mp);
			if (vfsunlocked)
				mtx_lock(&Giant);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
-			vfs_unbusy(mp, td);
+			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
			EVENTHANDLER_INVOKE(vfs_lowvnodes, desiredvnodes / 10);
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");

--- 2206 unchanged lines hidden ---
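
A sketch of how a subsystem might consume the vfs_lowvnodes event
invoked above; the handler, mycache_trim(), and the SYSINIT hook are
hypothetical, and the sketch assumes the event passes the number of
vnodes the system would like freed, as the desiredvnodes / 10 argument
suggests:

static void
mycache_lowvnodes(void *arg __unused, int target)
{
	/* Release up to 'target' cached, otherwise-idle vnodes. */
	mycache_trim(target);
}

static void
mycache_vfs_init(void *arg __unused)
{
	EVENTHANDLER_REGISTER(vfs_lowvnodes, mycache_lowvnodes, NULL,
	    EVENTHANDLER_PRI_ANY);
}
SYSINIT(mycache_lowvnodes, SI_SUB_VFS, SI_ORDER_ANY, mycache_vfs_init,
    NULL);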

/*
 * Dump vnode list (via sysctl).
 */
/* ARGSUSED */
static int
sysctl_vnode(SYSCTL_HANDLER_ARGS)
{
	struct xvnode *xvn;
-	struct thread *td = req->td;
	struct mount *mp;
	struct vnode *vp;
	int error, len, n;

	/*
	 * Stale numvnodes access is not fatal here.
	 */
	req->lock = 0;

--- 4 unchanged lines hidden ---


	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
	n = 0;
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
-		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
+		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx))
			continue;
		MNT_ILOCK(mp);
		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
			if (n == len)
				break;
			vref(vp);
			xvn[n].xv_size = sizeof *xvn;
			xvn[n].xv_vnode = vp;

--- 34 unchanged lines hidden ---

				vrele(vp);
				continue;
			}
			vrele(vp);
			++n;
		}
		MNT_IUNLOCK(mp);
		mtx_lock(&mountlist_mtx);
-		vfs_unbusy(mp, td);
+		vfs_unbusy(mp);
		if (n == len)
			break;
	}
	mtx_unlock(&mountlist_mtx);

	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
	free(xvn, M_TEMP);
	return (error);

--- 260 unchanged lines hidden ---
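
For reference, a userland consumer of the vnode dump above; this sketch
assumes the handler is attached at kern.vnode (KERN_VNODE), as in stock
FreeBSD of this vintage, and ignores the race in which the needed
length grows between the two calls:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_KERN, KERN_VNODE };
	size_t len;
	void *buf;

	/* First call sizes the buffer, second call fills it. */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		err(1, "sysctl(kern.vnode) size probe");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		err(1, "sysctl(kern.vnode)");
	printf("vnode dump: %zu bytes of struct xvnode records\n", len);
	free(buf);
	return (0);
}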

/*
 * Do a lazy sync of the filesystem.
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
-	struct thread *td = ap->a_td;
	int error;
	struct bufobj *bo;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

--- 6 unchanged lines hidden ---

	vn_syncer_add_to_worklist(bo, syncdelay);
	BO_UNLOCK(bo);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	mtx_lock(&mountlist_mtx);
-	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
+	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx) != 0) {
		mtx_unlock(&mountlist_mtx);
		return (0);
	}
	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
-		vfs_unbusy(mp, td);
+		vfs_unbusy(mp);
		return (0);
	}
	MNT_ILOCK(mp);
	mp->mnt_noasync++;
	mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);
	vfs_msync(mp, MNT_NOWAIT);
-	error = VFS_SYNC(mp, MNT_LAZY, td);
+	error = VFS_SYNC(mp, MNT_LAZY, ap->a_td);
	MNT_ILOCK(mp);
	mp->mnt_noasync--;
	if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
		mp->mnt_kern_flag |= MNTK_ASYNC;
	MNT_IUNLOCK(mp);
	vn_finished_write(mp);
-	vfs_unbusy(mp, td);
+	vfs_unbusy(mp);
	return (error);
}

/*
 * The syncer vnode is no longer referenced.
 */
static int
sync_inactive(struct vop_inactive_args *ap)

--- 802 unchanged lines hidden ---
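
In sum, the one interface change running through every hunk in this
diff, with prototypes reconstructed from the visible lines:

/* Before (0359a12e): callers had to pass a thread pointer. */
int	vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
	    struct thread *td);
void	vfs_unbusy(struct mount *mp, struct thread *td);

/* After (59d49325): the thread argument is gone; curthread is used
 * implicitly wherever a thread is needed. */
int	vfs_busy(struct mount *mp, int flags, struct mtx *interlkp);
void	vfs_unbusy(struct mount *mp);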