vfs_syscalls.c, diff between revision 91921bd5976bc40a97f48b9a019b381b77ab6f85 (old) and revision 0429e37ade0f33fd1159e425bfa5a5d531b3e8a1 (new):
Lines 1-8 and 326-342 (line numbers identical in both revisions):

```diff
 /*
  * Copyright (c) 1989, 1993
  *      The Regents of the University of California.  All rights reserved.
  * (c) UNIX System Laboratories, Inc.
  * All or some portions of this file are derived from material licensed
  * to the University of California by American Telephone and Telegraph
  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
  * the permission of UNIX System Laboratories, Inc.
--- 317 unchanged lines hidden ---
          */
         cache_purge(vp);
         if (!error) {
                 simple_lock(&vp->v_interlock);
                 vp->v_flag &= ~VMOUNT;
                 vp->v_mountedhere = mp;
                 simple_unlock(&vp->v_interlock);
                 simple_lock(&mountlist_slock);
-                CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
+                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
                 simple_unlock(&mountlist_slock);
                 checkdirs(vp);
                 VOP_UNLOCK(vp, 0, p);
                 if ((mp->mnt_flag & MNT_RDONLY) == 0)
                         error = vfs_allocate_syncvnode(mp);
                 vfs_unbusy(mp, p);
                 if ((error = VFS_START(mp, 0, p)) != 0)
                         vrele(vp);
--- 146 unchanged lines hidden ---
```
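The only functional change in this hunk is the list macro: the kernel's global mountlist is being converted from a CIRCLEQ to a TAILQ, both from <sys/queue.h>, and TAILQ_INSERT_TAIL takes the same head/element/field arguments as CIRCLEQ_INSERT_TAIL, so the call site changes by one word. Below is a rough userland sketch of that idiom; the struct mount_stub, mnt_name, and examplelist names are hypothetical stand-ins, not the kernel's real structures.

```c
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct mount; only the list linkage matters here. */
struct mount_stub {
        const char              *mnt_name;
        TAILQ_ENTRY(mount_stub)  mnt_list;     /* was CIRCLEQ_ENTRY() before the conversion */
};

/* List head; the pre-conversion declaration would have used CIRCLEQ_HEAD(). */
TAILQ_HEAD(mntlist, mount_stub) examplelist = TAILQ_HEAD_INITIALIZER(examplelist);

int
main(void)
{
        struct mount_stub *mp;

        if ((mp = malloc(sizeof(*mp))) == NULL)
                return (1);
        mp->mnt_name = "/";
        /* Same three arguments as CIRCLEQ_INSERT_TAIL: head, element, link field. */
        TAILQ_INSERT_TAIL(&examplelist, mp, mnt_list);
        printf("first mount: %s\n", TAILQ_FIRST(&examplelist)->mnt_name);
        return (0);
}
```

Tail insertion stays O(1) in both families, since the TAILQ head keeps a pointer to the last element's link field.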
Lines 489-505 (line numbers identical in both revisions):

```diff
                 mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
                 mp->mnt_flag |= async_flag;
                 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
                     &mountlist_slock, p);
                 if (mp->mnt_kern_flag & MNTK_MWAIT)
                         wakeup((caddr_t)mp);
                 return (error);
         }
-        CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
+        TAILQ_REMOVE(&mountlist, mp, mnt_list);
         if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
                 coveredvp->v_mountedhere = (struct mount *)0;
                 vrele(coveredvp);
         }
         mp->mnt_vfc->vfc_refcount--;
         if (!LIST_EMPTY(&mp->mnt_vnodelist))
                 panic("unmount: dangling vnode");
         lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
--- 22 unchanged lines hidden ---
```
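The unmount path gets the matching one-word edit: CIRCLEQ_REMOVE becomes TAILQ_REMOVE, again with an unchanged (head, element, field) argument list. A minimal sketch, reusing the hypothetical mount_stub and mntlist declarations (and the <stdlib.h> include) from the example above:

```c
/*
 * Unlink one entry and free it.  TAILQ_REMOVE takes the same arguments as
 * CIRCLEQ_REMOVE did, and both unlink in constant time because every entry
 * carries a link back toward its predecessor.
 */
static void
example_remove(struct mntlist *head, struct mount_stub *mp)
{
        TAILQ_REMOVE(head, mp, mnt_list);
        free(mp);
}
```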
Old lines 528-559, new lines 528-558:

```diff
 sync(p, uap)
         struct proc *p;
         struct sync_args *uap;
 {
         register struct mount *mp, *nmp;
         int asyncflag;
 
         simple_lock(&mountlist_slock);
-        mp = CIRCLEQ_FIRST(&mountlist);
-        for (; mp != (void *)&mountlist; mp = nmp) {
+        for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
                 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
-                        nmp = CIRCLEQ_NEXT(mp, mnt_list);
+                        nmp = TAILQ_NEXT(mp, mnt_list);
                         continue;
                 }
                 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
                         asyncflag = mp->mnt_flag & MNT_ASYNC;
                         mp->mnt_flag &= ~MNT_ASYNC;
                         vfs_msync(mp, MNT_NOWAIT);
                         VFS_SYNC(mp, MNT_NOWAIT,
                             ((p != NULL) ? p->p_ucred : NOCRED), p);
                         mp->mnt_flag |= asyncflag;
                 }
                 simple_lock(&mountlist_slock);
-                nmp = CIRCLEQ_NEXT(mp, mnt_list);
+                nmp = TAILQ_NEXT(mp, mnt_list);
                 vfs_unbusy(mp, p);
         }
         simple_unlock(&mountlist_slock);
 #if 0
 /*
  * XXX don't call vfs_bufstats() yet because that routine
  * was not imported in the Lite2 merge.
  */
--- 153 unchanged lines hidden ---
```
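The traversal loops are where the conversion actually changes shape. A CIRCLEQ is circular, so the old sync() loop had to detect wrap-around with the type-punned test mp != (void *)&mountlist, split across a separate initialization statement; a TAILQ is NULL-terminated, so the whole thing collapses into one ordinary for statement that stops at NULL. In both versions the successor is captured into nmp while mountlist_slock is held (and, on the success path, before vfs_unbusy() releases the current mount), so the walk can continue even though the per-mount work is done with the list lock dropped. Below is a self-contained userland sketch of that loop shape, again using hypothetical mount_stub/mntlist names and leaving out the kernel's locking.

```c
#include <sys/queue.h>

#include <stdio.h>

struct mount_stub {
        const char              *mnt_name;
        TAILQ_ENTRY(mount_stub)  mnt_list;
};

TAILQ_HEAD(mntlist, mount_stub);

/*
 * Simplified version of the converted loop shape: keep the successor in a
 * separate cursor (nmp) so iteration does not depend on mp staying linked
 * while it is being processed.
 */
static void
walk(struct mntlist *head)
{
        struct mount_stub *mp, *nmp;

        for (mp = TAILQ_FIRST(head); mp != NULL; mp = nmp) {
                nmp = TAILQ_NEXT(mp, mnt_list);
                printf("%s\n", mp->mnt_name);
        }
}

int
main(void)
{
        struct mntlist head = TAILQ_HEAD_INITIALIZER(head);
        struct mount_stub a, b;

        a.mnt_name = "/";
        b.mnt_name = "/usr";
        TAILQ_INSERT_TAIL(&head, &a, mnt_list);
        TAILQ_INSERT_TAIL(&head, &b, mnt_list);
        walk(&head);
        return (0);
}
```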
Old lines 713-760, new lines 712-758:

```diff
         register struct statfs *sp;
         caddr_t sfsp;
         long count, maxcount, error;
 
         maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
         sfsp = (caddr_t)SCARG(uap, buf);
         count = 0;
         simple_lock(&mountlist_slock);
-        mp = CIRCLEQ_FIRST(&mountlist);
-        for (; mp != (void *)&mountlist; mp = nmp) {
+        for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
                 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
-                        nmp = CIRCLEQ_NEXT(mp, mnt_list);
+                        nmp = TAILQ_NEXT(mp, mnt_list);
                         continue;
                 }
                 if (sfsp && count < maxcount) {
                         sp = &mp->mnt_stat;
                         /*
                          * If MNT_NOWAIT or MNT_LAZY is specified, do not
                          * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
                          * overrides MNT_WAIT.
                          */
                         if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
                             (SCARG(uap, flags) & MNT_WAIT)) &&
                             (error = VFS_STATFS(mp, sp, p))) {
                                 simple_lock(&mountlist_slock);
-                                nmp = CIRCLEQ_NEXT(mp, mnt_list);
+                                nmp = TAILQ_NEXT(mp, mnt_list);
                                 vfs_unbusy(mp, p);
                                 continue;
                         }
                         sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
                         error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
                         if (error) {
                                 vfs_unbusy(mp, p);
                                 return (error);
                         }
                         sfsp += sizeof(*sp);
                 }
                 count++;
                 simple_lock(&mountlist_slock);
-                nmp = CIRCLEQ_NEXT(mp, mnt_list);
+                nmp = TAILQ_NEXT(mp, mnt_list);
                 vfs_unbusy(mp, p);
         }
         simple_unlock(&mountlist_slock);
         if (sfsp && count > maxcount)
                 p->p_retval[0] = maxcount;
         else
                 p->p_retval[0] = count;
         return (0);
--- 2580 unchanged lines hidden ---
```
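The same two-line-to-one-line loop conversion is repeated here in the statfs-gathering loop (the bufsize/maxcount handling suggests this is the getfsstat() path). For read-only walks that never unlink anything mid-loop, <sys/queue.h> also offers TAILQ_FOREACH, which hides the explicit first/next bookkeeping; whether that macro was already available at the time of this diff is not something the diff shows, so treat the following as a sketch against a current queue.h, reusing the hypothetical mount_stub/mntlist declarations from the earlier examples.

```c
/*
 * Read-only walk: nothing is removed mid-iteration, so TAILQ_FOREACH's
 * implicit cursor is enough and no separate nmp is needed.
 */
static long
count_mounts(struct mntlist *head)
{
        struct mount_stub *mp;
        long count;

        count = 0;
        TAILQ_FOREACH(mp, head, mnt_list)
                count++;
        return (count);
}
```

Later versions of queue.h also grew TAILQ_FOREACH_SAFE, which takes the spare cursor as an explicit argument and is the usual modern way to write the removal-tolerant loops shown in the diff.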