Lines Matching full:mp

134 static void mount_devctl_event(const char *type, struct mount *mp, bool donew);
153 struct mount *mp; in mount_init() local
155 mp = (struct mount *)mem; in mount_init()
156 mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF); in mount_init()
157 mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF); in mount_init()
158 lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0); in mount_init()
159 lockinit(&mp->mnt_renamelock, PVFS, "rename", 0, 0); in mount_init()
160 mp->mnt_pcpu = uma_zalloc_pcpu(pcpu_zone_16, M_WAITOK | M_ZERO); in mount_init()
161 mp->mnt_ref = 0; in mount_init()
162 mp->mnt_vfs_ops = 1; in mount_init()
163 mp->mnt_rootvnode = NULL; in mount_init()
170 struct mount *mp; in mount_fini() local
172 mp = (struct mount *)mem; in mount_fini()
173 uma_zfree_pcpu(pcpu_zone_16, mp->mnt_pcpu); in mount_fini()
174 lockdestroy(&mp->mnt_renamelock); in mount_fini()
175 lockdestroy(&mp->mnt_explock); in mount_fini()
176 mtx_destroy(&mp->mnt_listmtx); in mount_fini()
177 mtx_destroy(&mp->mnt_mtx); in mount_fini()
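
mount_init() and mount_fini() above are shaped as UMA zone init/fini callbacks for struct mount. A minimal sketch of how such a zone would be created follows; the zone name, the vfs_mountzone_sysinit() hook, and the flags are illustrative assumptions, not taken from this listing:

    static uma_zone_t mount_zone;

    static void
    vfs_mountzone_sysinit(void *dummy __unused)
    {
            /* No ctor/dtor: mount_init()/mount_fini() set up the embedded locks. */
            mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount),
                NULL, NULL, mount_init, mount_fini, UMA_ALIGN_CACHE,
                UMA_ZONE_NOFREE);
    }
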
496 struct mount *mp; in vfs_ref_from_vp() local
499 mp = atomic_load_ptr(&vp->v_mount); in vfs_ref_from_vp()
500 if (__predict_false(mp == NULL)) { in vfs_ref_from_vp()
501 return (mp); in vfs_ref_from_vp()
503 if (vfs_op_thread_enter(mp, mpcpu)) { in vfs_ref_from_vp()
504 if (__predict_true(mp == vp->v_mount)) { in vfs_ref_from_vp()
506 vfs_op_thread_exit(mp, mpcpu); in vfs_ref_from_vp()
508 vfs_op_thread_exit(mp, mpcpu); in vfs_ref_from_vp()
509 mp = NULL; in vfs_ref_from_vp()
512 MNT_ILOCK(mp); in vfs_ref_from_vp()
513 if (mp == vp->v_mount) { in vfs_ref_from_vp()
514 MNT_REF(mp); in vfs_ref_from_vp()
515 MNT_IUNLOCK(mp); in vfs_ref_from_vp()
517 MNT_IUNLOCK(mp); in vfs_ref_from_vp()
518 mp = NULL; in vfs_ref_from_vp()
521 return (mp); in vfs_ref_from_vp()
525 vfs_ref(struct mount *mp) in vfs_ref() argument
529 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); in vfs_ref()
530 if (vfs_op_thread_enter(mp, mpcpu)) { in vfs_ref()
532 vfs_op_thread_exit(mp, mpcpu); in vfs_ref()
536 MNT_ILOCK(mp); in vfs_ref()
537 MNT_REF(mp); in vfs_ref()
538 MNT_IUNLOCK(mp); in vfs_ref()
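
A hedged sketch of the caller-side pattern for vfs_ref_from_vp() and vfs_ref() above: vfs_ref_from_vp() hands back a referenced mount only while vp is still attached to it, and the reference is dropped later with vfs_rel() (shown further down in this listing). The error value is hypothetical:

    struct mount *mp;

    mp = vfs_ref_from_vp(vp);
    if (mp == NULL)
            return (EBADF);         /* hypothetical: vp has lost its mount */
    /* mp stays valid here even if vp is reclaimed concurrently. */
    vfs_rel(mp);
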
545 * caller and stored in per-mount data associated with mp.
557 struct mount *mp; in vfs_register_upper_from_vp() local
559 mp = atomic_load_ptr(&vp->v_mount); in vfs_register_upper_from_vp()
560 if (mp == NULL) in vfs_register_upper_from_vp()
562 MNT_ILOCK(mp); in vfs_register_upper_from_vp()
563 if (mp != vp->v_mount || in vfs_register_upper_from_vp()
564 ((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) { in vfs_register_upper_from_vp()
565 MNT_IUNLOCK(mp); in vfs_register_upper_from_vp()
568 KASSERT(ump != mp, ("upper and lower mounts are identical")); in vfs_register_upper_from_vp()
569 upper->mp = ump; in vfs_register_upper_from_vp()
570 MNT_REF(mp); in vfs_register_upper_from_vp()
571 TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link); in vfs_register_upper_from_vp()
572 MNT_IUNLOCK(mp); in vfs_register_upper_from_vp()
573 return (mp); in vfs_register_upper_from_vp()
578 * notifications from lower mount mp. This registration will
581 * associated with mp.
583 * ump must already be registered as an upper mount of mp
587 vfs_register_for_notification(struct mount *mp, struct mount *ump, in vfs_register_for_notification() argument
590 upper->mp = ump; in vfs_register_for_notification()
591 MNT_ILOCK(mp); in vfs_register_for_notification()
592 TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link); in vfs_register_for_notification()
593 MNT_IUNLOCK(mp); in vfs_register_for_notification()
597 vfs_drain_upper_locked(struct mount *mp) in vfs_drain_upper_locked() argument
599 mtx_assert(MNT_MTX(mp), MA_OWNED); in vfs_drain_upper_locked()
600 while (mp->mnt_upper_pending != 0) { in vfs_drain_upper_locked()
601 mp->mnt_kern_flag |= MNTK_UPPER_WAITER; in vfs_drain_upper_locked()
602 msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0); in vfs_drain_upper_locked()
609 * as an upper mount for mp.
612 vfs_unregister_for_notification(struct mount *mp, in vfs_unregister_for_notification() argument
615 MNT_ILOCK(mp); in vfs_unregister_for_notification()
616 vfs_drain_upper_locked(mp); in vfs_unregister_for_notification()
617 TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link); in vfs_unregister_for_notification()
618 MNT_IUNLOCK(mp); in vfs_unregister_for_notification()
623 * This must be done before mp can be unmounted.
626 vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper) in vfs_unregister_upper() argument
628 MNT_ILOCK(mp); in vfs_unregister_upper()
629 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0, in vfs_unregister_upper()
631 vfs_drain_upper_locked(mp); in vfs_unregister_upper()
632 TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link); in vfs_unregister_upper()
633 if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 && in vfs_unregister_upper()
634 TAILQ_EMPTY(&mp->mnt_uppers)) { in vfs_unregister_upper()
635 mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER; in vfs_unregister_upper()
636 wakeup(&mp->mnt_taskqueue_link); in vfs_unregister_upper()
638 MNT_REL(mp); in vfs_unregister_upper()
639 MNT_IUNLOCK(mp); in vfs_unregister_upper()
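
Taken together, the registration routines above form the API a stacked filesystem (nullfs-style) uses against its lower mount. The sketch below is illustrative only: the xmp private-data layout and the error handling are assumptions, while the function signatures match the ones in the listing:

    /* Mount time: register the new upper mount (mp) on top of the lower vnode. */
    lowermp = vfs_register_upper_from_vp(lowervp, mp, &xmp->upper_node);
    if (lowermp == NULL)
            return (EBUSY);         /* lower fs is unmounting or recursing */
    /* Optionally also ask the lower mount for vnode notifications. */
    vfs_register_for_notification(lowermp, mp, &xmp->notify_node);

    /* Unmount time: undo both, or the lower mount cannot be unmounted. */
    vfs_unregister_for_notification(lowermp, &xmp->notify_node);
    vfs_unregister_upper(lowermp, &xmp->upper_node);
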
643 vfs_rel(struct mount *mp) in vfs_rel() argument
647 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); in vfs_rel()
648 if (vfs_op_thread_enter(mp, mpcpu)) { in vfs_rel()
650 vfs_op_thread_exit(mp, mpcpu); in vfs_rel()
654 MNT_ILOCK(mp); in vfs_rel()
655 MNT_REL(mp); in vfs_rel()
656 MNT_IUNLOCK(mp); in vfs_rel()
666 struct mount *mp; in vfs_mount_alloc() local
668 mp = uma_zalloc(mount_zone, M_WAITOK); in vfs_mount_alloc()
669 bzero(&mp->mnt_startzero, in vfs_mount_alloc()
671 mp->mnt_kern_flag = 0; in vfs_mount_alloc()
672 mp->mnt_flag = 0; in vfs_mount_alloc()
673 mp->mnt_rootvnode = NULL; in vfs_mount_alloc()
674 mp->mnt_vnodecovered = NULL; in vfs_mount_alloc()
675 mp->mnt_op = NULL; in vfs_mount_alloc()
676 mp->mnt_vfc = NULL; in vfs_mount_alloc()
677 TAILQ_INIT(&mp->mnt_nvnodelist); in vfs_mount_alloc()
678 mp->mnt_nvnodelistsize = 0; in vfs_mount_alloc()
679 TAILQ_INIT(&mp->mnt_lazyvnodelist); in vfs_mount_alloc()
680 mp->mnt_lazyvnodelistsize = 0; in vfs_mount_alloc()
681 MPPASS(mp->mnt_ref == 0 && mp->mnt_lockref == 0 && in vfs_mount_alloc()
682 mp->mnt_writeopcount == 0, mp); in vfs_mount_alloc()
683 MPASSERT(mp->mnt_vfs_ops == 1, mp, in vfs_mount_alloc()
684 ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops)); in vfs_mount_alloc()
685 (void) vfs_busy(mp, MBF_NOWAIT); in vfs_mount_alloc()
686 mp->mnt_op = vfsp->vfc_vfsops; in vfs_mount_alloc()
687 mp->mnt_vfc = vfsp; in vfs_mount_alloc()
688 mp->mnt_stat.f_type = vfsp->vfc_typenum; in vfs_mount_alloc()
689 mp->mnt_gen++; in vfs_mount_alloc()
690 strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); in vfs_mount_alloc()
691 mp->mnt_vnodecovered = vp; in vfs_mount_alloc()
692 mp->mnt_cred = crdup(cred); in vfs_mount_alloc()
693 mp->mnt_stat.f_owner = cred->cr_uid; in vfs_mount_alloc()
694 strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN); in vfs_mount_alloc()
695 mp->mnt_iosize_max = DFLTPHYS; in vfs_mount_alloc()
697 mac_mount_init(mp); in vfs_mount_alloc()
698 mac_mount_create(cred, mp); in vfs_mount_alloc()
700 arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0); in vfs_mount_alloc()
701 mp->mnt_upper_pending = 0; in vfs_mount_alloc()
702 TAILQ_INIT(&mp->mnt_uppers); in vfs_mount_alloc()
703 TAILQ_INIT(&mp->mnt_notify); in vfs_mount_alloc()
704 mp->mnt_taskqueue_flags = 0; in vfs_mount_alloc()
705 mp->mnt_unmount_retries = 0; in vfs_mount_alloc()
706 return (mp); in vfs_mount_alloc()
713 vfs_mount_destroy(struct mount *mp) in vfs_mount_destroy() argument
716 MPPASS(mp->mnt_vfs_ops != 0, mp); in vfs_mount_destroy()
718 vfs_assert_mount_counters(mp); in vfs_mount_destroy()
720 MNT_ILOCK(mp); in vfs_mount_destroy()
721 mp->mnt_kern_flag |= MNTK_REFEXPIRE; in vfs_mount_destroy()
722 if (mp->mnt_kern_flag & MNTK_MWAIT) { in vfs_mount_destroy()
723 mp->mnt_kern_flag &= ~MNTK_MWAIT; in vfs_mount_destroy()
724 wakeup(mp); in vfs_mount_destroy()
726 while (mp->mnt_ref) in vfs_mount_destroy()
727 msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0); in vfs_mount_destroy()
728 KASSERT(mp->mnt_ref == 0, in vfs_mount_destroy()
731 MPPASS(mp->mnt_writeopcount == 0, mp); in vfs_mount_destroy()
732 MPPASS(mp->mnt_secondary_writes == 0, mp); in vfs_mount_destroy()
733 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) { in vfs_mount_destroy()
736 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) in vfs_mount_destroy()
740 KASSERT(mp->mnt_upper_pending == 0, ("mnt_upper_pending")); in vfs_mount_destroy()
741 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers")); in vfs_mount_destroy()
742 KASSERT(TAILQ_EMPTY(&mp->mnt_notify), ("mnt_notify")); in vfs_mount_destroy()
743 MPPASS(mp->mnt_nvnodelistsize == 0, mp); in vfs_mount_destroy()
744 MPPASS(mp->mnt_lazyvnodelistsize == 0, mp); in vfs_mount_destroy()
745 MPPASS(mp->mnt_lockref == 0, mp); in vfs_mount_destroy()
746 MNT_IUNLOCK(mp); in vfs_mount_destroy()
748 MPASSERT(mp->mnt_vfs_ops == 1, mp, in vfs_mount_destroy()
749 ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops)); in vfs_mount_destroy()
751 MPASSERT(mp->mnt_rootvnode == NULL, mp, in vfs_mount_destroy()
752 ("mount point still has a root vnode %p", mp->mnt_rootvnode)); in vfs_mount_destroy()
754 if (mp->mnt_vnodecovered != NULL) in vfs_mount_destroy()
755 vrele(mp->mnt_vnodecovered); in vfs_mount_destroy()
757 mac_mount_destroy(mp); in vfs_mount_destroy()
759 if (mp->mnt_opt != NULL) in vfs_mount_destroy()
760 vfs_freeopts(mp->mnt_opt); in vfs_mount_destroy()
761 if (mp->mnt_exjail != NULL) { in vfs_mount_destroy()
762 atomic_subtract_int(&mp->mnt_exjail->cr_prison->pr_exportcnt, in vfs_mount_destroy()
764 crfree(mp->mnt_exjail); in vfs_mount_destroy()
766 if (mp->mnt_export != NULL) { in vfs_mount_destroy()
767 vfs_free_addrlist(mp->mnt_export); in vfs_mount_destroy()
768 free(mp->mnt_export, M_MOUNT); in vfs_mount_destroy()
771 mp->mnt_vfc->vfc_refcount--; in vfs_mount_destroy()
773 crfree(mp->mnt_cred); in vfs_mount_destroy()
774 uma_zfree(mount_zone, mp); in vfs_mount_destroy()
983 * variables will fit in our mp buffers, including the in vfs_donmount()
1122 struct mount *mp; in vfs_domount_first() local
1181 mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred); in vfs_domount_first()
1183 mp->mnt_optnew = *optlist; in vfs_domount_first()
1185 mp->mnt_flag = (fsflags & in vfs_domount_first()
1195 if ((error = VFS_MOUNT(mp)) != 0 || in vfs_domount_first()
1196 (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 || in vfs_domount_first()
1197 (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) { in vfs_domount_first()
1201 rootvp = vfs_cache_root_clear(mp); in vfs_domount_first()
1206 (void)vn_start_write(NULL, &mp, V_WAIT); in vfs_domount_first()
1207 MNT_ILOCK(mp); in vfs_domount_first()
1208 mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF; in vfs_domount_first()
1209 MNT_IUNLOCK(mp); in vfs_domount_first()
1210 VFS_PURGE(mp); in vfs_domount_first()
1211 error = VFS_UNMOUNT(mp, 0); in vfs_domount_first()
1212 vn_finished_write(mp); in vfs_domount_first()
1221 vfs_unbusy(mp); in vfs_domount_first()
1222 mp->mnt_vnodecovered = NULL; in vfs_domount_first()
1225 vfs_mount_destroy(mp); in vfs_domount_first()
1241 if (mp->mnt_opt != NULL) in vfs_domount_first()
1242 vfs_freeopts(mp->mnt_opt); in vfs_domount_first()
1243 mp->mnt_opt = mp->mnt_optnew; in vfs_domount_first()
1249 mp->mnt_optnew = NULL; in vfs_domount_first()
1251 MNT_ILOCK(mp); in vfs_domount_first()
1252 if ((mp->mnt_flag & MNT_ASYNC) != 0 && in vfs_domount_first()
1253 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0) in vfs_domount_first()
1254 mp->mnt_kern_flag |= MNTK_ASYNC; in vfs_domount_first()
1256 mp->mnt_kern_flag &= ~MNTK_ASYNC; in vfs_domount_first()
1257 MNT_IUNLOCK(mp); in vfs_domount_first()
1266 vp->v_mountedhere = mp; in vfs_domount_first()
1284 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); in vfs_domount_first()
1288 EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td); in vfs_domount_first()
1290 mount_devctl_event("MOUNT", mp, false); in vfs_domount_first()
1295 if ((mp->mnt_flag & MNT_RDONLY) == 0) in vfs_domount_first()
1296 vfs_allocate_syncvnode(mp); in vfs_domount_first()
1297 vfs_op_exit(mp); in vfs_domount_first()
1298 vfs_unbusy(mp); in vfs_domount_first()
1318 struct mount *mp; in vfs_domount_update() local
1327 mp = vp->v_mount; in vfs_domount_update()
1343 flag = mp->mnt_flag; in vfs_domount_update()
1361 error = vfs_suser(mp, td); in vfs_domount_update()
1370 if (vfs_busy(mp, MBF_NOWAIT)) { in vfs_domount_update()
1377 vfs_unbusy(mp); in vfs_domount_update()
1386 vfs_op_enter(mp); in vfs_domount_update()
1395 if (fsidcmp(fsid_up, &mp->mnt_stat.f_fsid) != 0) { in vfs_domount_update()
1403 MNT_ILOCK(mp); in vfs_domount_update()
1404 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) { in vfs_domount_update()
1405 MNT_IUNLOCK(mp); in vfs_domount_update()
1421 mp->mnt_flag |= MNT_UPDATE; in vfs_domount_update()
1423 mp->mnt_flag &= ~MNT_UPDATEMASK; in vfs_domount_update()
1424 if ((mp->mnt_flag & MNT_UNION) == 0 && in vfs_domount_update()
1429 mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE | in vfs_domount_update()
1431 if ((mp->mnt_flag & MNT_ASYNC) == 0) in vfs_domount_update()
1432 mp->mnt_kern_flag &= ~MNTK_ASYNC; in vfs_domount_update()
1434 rootvp = vfs_cache_root_clear(mp); in vfs_domount_update()
1435 MNT_IUNLOCK(mp); in vfs_domount_update()
1436 mp->mnt_optnew = *optlist; in vfs_domount_update()
1437 vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt); in vfs_domount_update()
1454 error = VFS_MOUNT(mp); in vfs_domount_update()
1458 if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp, in vfs_domount_update()
1497 export_error = vfs_export(mp, &export, true); in vfs_domount_update()
1519 export_error = vfs_export(mp, &export, true); in vfs_domount_update()
1528 MNT_ILOCK(mp); in vfs_domount_update()
1530 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE | in vfs_domount_update()
1532 mp->mnt_flag |= mnt_union; in vfs_domount_update()
1541 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA); in vfs_domount_update()
1543 if ((mp->mnt_flag & MNT_ASYNC) != 0 && in vfs_domount_update()
1544 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0) in vfs_domount_update()
1545 mp->mnt_kern_flag |= MNTK_ASYNC; in vfs_domount_update()
1547 mp->mnt_kern_flag &= ~MNTK_ASYNC; in vfs_domount_update()
1548 MNT_IUNLOCK(mp); in vfs_domount_update()
1553 mount_devctl_event("REMOUNT", mp, true); in vfs_domount_update()
1554 if (mp->mnt_opt != NULL) in vfs_domount_update()
1555 vfs_freeopts(mp->mnt_opt); in vfs_domount_update()
1556 mp->mnt_opt = mp->mnt_optnew; in vfs_domount_update()
1558 (void)VFS_STATFS(mp, &mp->mnt_stat); in vfs_domount_update()
1563 mp->mnt_optnew = NULL; in vfs_domount_update()
1565 if ((mp->mnt_flag & MNT_RDONLY) == 0) in vfs_domount_update()
1566 vfs_allocate_syncvnode(mp); in vfs_domount_update()
1568 vfs_deallocate_syncvnode(mp); in vfs_domount_update()
1570 vfs_op_exit(mp); in vfs_domount_update()
1576 vfs_unbusy(mp); in vfs_domount_update()
1605 * variables will fit in our mp buffers, including the in vfs_domount()
1730 struct mount *mp; in kern_unmount() local
1757 mp = vfs_getvfs(&fsid); in kern_unmount()
1759 if (mp == NULL) { in kern_unmount()
1783 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) { in kern_unmount()
1784 if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) { in kern_unmount()
1785 vfs_ref(mp); in kern_unmount()
1791 if (mp == NULL) { in kern_unmount()
1805 if (mp->mnt_flag & MNT_ROOTFS) { in kern_unmount()
1806 vfs_rel(mp); in kern_unmount()
1809 error = dounmount(mp, flags, td); in kern_unmount()
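
The kern_unmount() lines above show both lookup paths (by fsid and by mount point path); each ends up holding a reference that dounmount() consumes. A condensed, hedged sketch of the fsid path (the helper name is hypothetical):

    static int
    unmount_by_fsid(fsid_t *fsid, uint64_t flags, struct thread *td)
    {
            struct mount *mp;

            mp = vfs_getvfs(fsid);          /* returns a referenced mount or NULL */
            if (mp == NULL)
                    return (ENOENT);
            /* dounmount() releases the reference on success and failure alike. */
            return (dounmount(mp, flags, td));
    }
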
1821 vfs_check_usecounts(struct mount *mp) in vfs_check_usecounts() argument
1825 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { in vfs_check_usecounts()
1829 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); in vfs_check_usecounts()
1839 dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags) in dounmount_cleanup() argument
1842 mtx_assert(MNT_MTX(mp), MA_OWNED); in dounmount_cleanup()
1843 mp->mnt_kern_flag &= ~mntkflags; in dounmount_cleanup()
1844 if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) { in dounmount_cleanup()
1845 mp->mnt_kern_flag &= ~MNTK_MWAIT; in dounmount_cleanup()
1846 wakeup(mp); in dounmount_cleanup()
1848 vfs_op_exit_locked(mp); in dounmount_cleanup()
1849 MNT_IUNLOCK(mp); in dounmount_cleanup()
1854 vn_finished_write(mp); in dounmount_cleanup()
1855 vfs_rel(mp); in dounmount_cleanup()
1866 vfs_op_enter(struct mount *mp) in vfs_op_enter() argument
1871 MNT_ILOCK(mp); in vfs_op_enter()
1872 mp->mnt_vfs_ops++; in vfs_op_enter()
1873 if (mp->mnt_vfs_ops > 1) { in vfs_op_enter()
1874 MNT_IUNLOCK(mp); in vfs_op_enter()
1877 vfs_op_barrier_wait(mp); in vfs_op_enter()
1879 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_op_enter()
1881 mp->mnt_ref += mpcpu->mntp_ref; in vfs_op_enter()
1884 mp->mnt_lockref += mpcpu->mntp_lockref; in vfs_op_enter()
1887 mp->mnt_writeopcount += mpcpu->mntp_writeopcount; in vfs_op_enter()
1890 MPASSERT(mp->mnt_ref > 0 && mp->mnt_lockref >= 0 && in vfs_op_enter()
1891 mp->mnt_writeopcount >= 0, mp, in vfs_op_enter()
1893 mp->mnt_ref, mp->mnt_lockref, mp->mnt_writeopcount)); in vfs_op_enter()
1894 MNT_IUNLOCK(mp); in vfs_op_enter()
1895 vfs_assert_mount_counters(mp); in vfs_op_enter()
1899 vfs_op_exit_locked(struct mount *mp) in vfs_op_exit_locked() argument
1902 mtx_assert(MNT_MTX(mp), MA_OWNED); in vfs_op_exit_locked()
1904 MPASSERT(mp->mnt_vfs_ops > 0, mp, in vfs_op_exit_locked()
1905 ("invalid vfs_ops count %d", mp->mnt_vfs_ops)); in vfs_op_exit_locked()
1906 MPASSERT(mp->mnt_vfs_ops > 1 || in vfs_op_exit_locked()
1907 (mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_SUSPEND)) == 0, mp, in vfs_op_exit_locked()
1908 ("vfs_ops too low %d in unmount or suspend", mp->mnt_vfs_ops)); in vfs_op_exit_locked()
1909 mp->mnt_vfs_ops--; in vfs_op_exit_locked()
1913 vfs_op_exit(struct mount *mp) in vfs_op_exit() argument
1916 MNT_ILOCK(mp); in vfs_op_exit()
1917 vfs_op_exit_locked(mp); in vfs_op_exit()
1918 MNT_IUNLOCK(mp); in vfs_op_exit()
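
vfs_op_enter() and vfs_op_exit() bracket operations that need exact mount counters: while mnt_vfs_ops is non-zero the per-CPU fast paths stay disabled and the per-CPU contributions have been folded into mnt_ref, mnt_lockref and mnt_writeopcount. A hedged usage sketch:

    vfs_op_enter(mp);
    MNT_ILOCK(mp);
    /* mnt_ref/mnt_lockref/mnt_writeopcount are exact under the lock here. */
    MNT_IUNLOCK(mp);
    vfs_op_exit(mp);
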
1922 struct mount *mp; member
1930 struct mount *mp; in vfs_op_action_func() local
1933 mp = vfsopipi->mp; in vfs_op_action_func()
1935 if (!vfs_op_thread_entered(mp)) in vfs_op_action_func()
1943 struct mount *mp; in vfs_op_wait_func() local
1947 mp = vfsopipi->mp; in vfs_op_wait_func()
1949 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_op_wait_func()
1955 vfs_op_barrier_wait(struct mount *mp) in vfs_op_barrier_wait() argument
1959 vfsopipi.mp = mp; in vfs_op_barrier_wait()
1971 vfs_assert_mount_counters(struct mount *mp) in vfs_assert_mount_counters() argument
1976 if (mp->mnt_vfs_ops == 0) in vfs_assert_mount_counters()
1980 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_assert_mount_counters()
1984 vfs_dump_mount_counters(mp); in vfs_assert_mount_counters()
1989 vfs_dump_mount_counters(struct mount *mp) in vfs_dump_mount_counters() argument
1995 printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops); in vfs_dump_mount_counters()
1998 ref = mp->mnt_ref; in vfs_dump_mount_counters()
2000 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_dump_mount_counters()
2006 lockref = mp->mnt_lockref; in vfs_dump_mount_counters()
2008 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_dump_mount_counters()
2014 writeopcount = mp->mnt_writeopcount; in vfs_dump_mount_counters()
2016 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_dump_mount_counters()
2023 printf("ref %-5d %-5d\n", mp->mnt_ref, ref); in vfs_dump_mount_counters()
2024 printf("lockref %-5d %-5d\n", mp->mnt_lockref, lockref); in vfs_dump_mount_counters()
2025 printf("writeopcount %-5d %-5d\n", mp->mnt_writeopcount, writeopcount); in vfs_dump_mount_counters()
2032 vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which) in vfs_mount_fetch_counter() argument
2039 sum = mp->mnt_ref; in vfs_mount_fetch_counter()
2042 sum = mp->mnt_lockref; in vfs_mount_fetch_counter()
2045 sum = mp->mnt_writeopcount; in vfs_mount_fetch_counter()
2050 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_mount_fetch_counter()
2067 deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue, in deferred_unmount_enqueue() argument
2074 if ((mp->mnt_taskqueue_flags & MNT_DEFERRED) == 0 || requeue) { in deferred_unmount_enqueue()
2075 mp->mnt_taskqueue_flags = flags | MNT_DEFERRED; in deferred_unmount_enqueue()
2076 STAILQ_INSERT_TAIL(&deferred_unmount_list, mp, in deferred_unmount_enqueue()
2098 struct mount *mp, *tmp; in vfs_deferred_unmount() local
2108 STAILQ_FOREACH_SAFE(mp, &local_unmounts, mnt_taskqueue_link, tmp) { in vfs_deferred_unmount()
2109 flags = mp->mnt_taskqueue_flags; in vfs_deferred_unmount()
2112 error = dounmount(mp, flags, curthread); in vfs_deferred_unmount()
2114 MNT_ILOCK(mp); in vfs_deferred_unmount()
2115 unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0); in vfs_deferred_unmount()
2116 MNT_IUNLOCK(mp); in vfs_deferred_unmount()
2123 retries = (mp->mnt_unmount_retries)++; in vfs_deferred_unmount()
2126 deferred_unmount_enqueue(mp, flags, true, in vfs_deferred_unmount()
2132 mp->mnt_stat.f_mntonname, retries, error); in vfs_deferred_unmount()
2134 vfs_rel(mp); in vfs_deferred_unmount()
2144 dounmount(struct mount *mp, uint64_t flags, struct thread *td) in dounmount() argument
2168 if (!deferred_unmount_enqueue(mp, flags, false, 0)) in dounmount()
2169 vfs_rel(mp); in dounmount()
2183 error = vfs_suser(mp, td); in dounmount()
2187 vfs_rel(mp); in dounmount()
2198 MNT_ILOCK(mp); in dounmount()
2206 mp->mnt_kern_flag |= MNTK_RECURSE; in dounmount()
2207 mp->mnt_upper_pending++; in dounmount()
2208 TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) { in dounmount()
2209 retries = upper->mp->mnt_unmount_retries; in dounmount()
2214 MNT_IUNLOCK(mp); in dounmount()
2216 vfs_ref(upper->mp); in dounmount()
2217 if (!deferred_unmount_enqueue(upper->mp, flags, in dounmount()
2219 vfs_rel(upper->mp); in dounmount()
2220 MNT_ILOCK(mp); in dounmount()
2222 mp->mnt_upper_pending--; in dounmount()
2223 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && in dounmount()
2224 mp->mnt_upper_pending == 0) { in dounmount()
2225 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; in dounmount()
2226 wakeup(&mp->mnt_uppers); in dounmount()
2236 while (error == 0 && !TAILQ_EMPTY(&mp->mnt_uppers)) { in dounmount()
2237 mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER; in dounmount()
2238 error = msleep(&mp->mnt_taskqueue_link, in dounmount()
2239 MNT_MTX(mp), PCATCH, "umntqw", 0); in dounmount()
2242 MNT_REL(mp); in dounmount()
2243 MNT_IUNLOCK(mp); in dounmount()
2246 } else if (!TAILQ_EMPTY(&mp->mnt_uppers)) { in dounmount()
2247 MNT_IUNLOCK(mp); in dounmount()
2249 deferred_unmount_enqueue(mp, flags, true, 0); in dounmount()
2252 MNT_IUNLOCK(mp); in dounmount()
2253 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers not empty")); in dounmount()
2258 vfs_ref(mp); in dounmount()
2260 if ((coveredvp = mp->mnt_vnodecovered) != NULL) { in dounmount()
2261 mnt_gen_r = mp->mnt_gen; in dounmount()
2266 * Check for mp being unmounted while waiting for the in dounmount()
2269 if (coveredvp->v_mountedhere != mp || in dounmount()
2273 vfs_rel(mp); in dounmount()
2278 vfs_op_enter(mp); in dounmount()
2280 vn_start_write(NULL, &mp, V_WAIT); in dounmount()
2281 MNT_ILOCK(mp); in dounmount()
2282 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 || in dounmount()
2283 (mp->mnt_flag & MNT_UPDATE) != 0 || in dounmount()
2284 !TAILQ_EMPTY(&mp->mnt_uppers)) { in dounmount()
2285 dounmount_cleanup(mp, coveredvp, 0); in dounmount()
2288 mp->mnt_kern_flag |= MNTK_UNMOUNT; in dounmount()
2289 rootvp = vfs_cache_root_clear(mp); in dounmount()
2293 MNT_IUNLOCK(mp); in dounmount()
2294 error = vfs_check_usecounts(mp); in dounmount()
2295 MNT_ILOCK(mp); in dounmount()
2298 dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT); in dounmount()
2308 mp->mnt_kern_flag |= MNTK_UNMOUNTF; in dounmount()
2309 MNT_IUNLOCK(mp); in dounmount()
2314 VFS_PURGE(mp); in dounmount()
2315 MNT_ILOCK(mp); in dounmount()
2318 if (mp->mnt_lockref) { in dounmount()
2319 mp->mnt_kern_flag |= MNTK_DRAINING; in dounmount()
2320 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS, in dounmount()
2323 MNT_IUNLOCK(mp); in dounmount()
2324 KASSERT(mp->mnt_lockref == 0, in dounmount()
2341 if (mp->mnt_flag & MNT_EXPUBLIC) in dounmount()
2344 vfs_periodic(mp, MNT_WAIT); in dounmount()
2345 MNT_ILOCK(mp); in dounmount()
2346 async_flag = mp->mnt_flag & MNT_ASYNC; in dounmount()
2347 mp->mnt_flag &= ~MNT_ASYNC; in dounmount()
2348 mp->mnt_kern_flag &= ~MNTK_ASYNC; in dounmount()
2349 MNT_IUNLOCK(mp); in dounmount()
2350 vfs_deallocate_syncvnode(mp); in dounmount()
2351 error = VFS_UNMOUNT(mp, flags); in dounmount()
2352 vn_finished_write(mp); in dounmount()
2353 vfs_rel(mp); in dounmount()
2361 MNT_ILOCK(mp); in dounmount()
2362 if ((mp->mnt_flag & MNT_RDONLY) == 0) { in dounmount()
2363 MNT_IUNLOCK(mp); in dounmount()
2364 vfs_allocate_syncvnode(mp); in dounmount()
2365 MNT_ILOCK(mp); in dounmount()
2367 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF); in dounmount()
2368 mp->mnt_flag |= async_flag; in dounmount()
2369 if ((mp->mnt_flag & MNT_ASYNC) != 0 && in dounmount()
2370 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0) in dounmount()
2371 mp->mnt_kern_flag |= MNTK_ASYNC; in dounmount()
2372 if (mp->mnt_kern_flag & MNTK_MWAIT) { in dounmount()
2373 mp->mnt_kern_flag &= ~MNTK_MWAIT; in dounmount()
2374 wakeup(mp); in dounmount()
2376 vfs_op_exit_locked(mp); in dounmount()
2377 MNT_IUNLOCK(mp); in dounmount()
2391 TAILQ_REMOVE(&mountlist, mp, mnt_list); in dounmount()
2393 EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td); in dounmount()
2403 mount_devctl_event("UNMOUNT", mp, false); in dounmount()
2409 if (rootvnode != NULL && mp == rootvnode->v_mount) { in dounmount()
2413 if (mp == rootdevmp) in dounmount()
2416 vfs_rel(mp); in dounmount()
2417 vfs_mount_destroy(mp); in dounmount()
2425 vfs_mount_error(struct mount *mp, const char *fmt, ...) in vfs_mount_error() argument
2427 struct vfsoptlist *moptlist = mp->mnt_optnew; in vfs_mount_error()
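
vfs_mount_error() is the printf-style hook a filesystem calls to hand a human-readable message back through the mount options (mnt_optnew, typically the "errmsg" option). A hedged example; the check and the variable names are hypothetical:

    /* In a filesystem's VFS_MOUNT implementation: */
    if (bsize > MAXBSIZE) {
            vfs_mount_error(mp, "unsupported block size %d", bsize);
            return (EINVAL);
    }
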
2755 __vfs_statfs(struct mount *mp, struct statfs *sbp) in __vfs_statfs() argument
2761 if (sbp != &mp->mnt_stat) in __vfs_statfs()
2762 memcpy(sbp, &mp->mnt_stat, sizeof(*sbp)); in __vfs_statfs()
2769 sbp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; in __vfs_statfs()
2770 sbp->f_nvnodelistsize = mp->mnt_nvnodelistsize; in __vfs_statfs()
2772 return (mp->mnt_op->vfs_statfs(mp, sbp)); in __vfs_statfs()
2776 vfs_mountedfrom(struct mount *mp, const char *from) in vfs_mountedfrom() argument
2779 bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname); in vfs_mountedfrom()
2780 strlcpy(mp->mnt_stat.f_mntfromname, from, in vfs_mountedfrom()
2781 sizeof mp->mnt_stat.f_mntfromname); in vfs_mountedfrom()
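
vfs_mountedfrom() just records the "mounted from" name in f_mntfromname; filesystems typically call it near the end of a successful mount. A one-line hedged example with a made-up device path:

    vfs_mountedfrom(mp, "/dev/ada0p2");     /* hypothetical device path */
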
2969 mount_devctl_event(const char *type, struct mount *mp, bool donew) in mount_devctl_event() argument
2974 struct statfs *sfp = &mp->mnt_stat; in mount_devctl_event()
2993 if ((mp->mnt_flag & fp->o_opt) != 0) { in mount_devctl_event()
3024 vfs_remount_ro(struct mount *mp) in vfs_remount_ro() argument
3031 vfs_op_enter(mp); in vfs_remount_ro()
3032 KASSERT(mp->mnt_lockref > 0, in vfs_remount_ro()
3033 ("vfs_remount_ro: mp %p is not busied", mp)); in vfs_remount_ro()
3034 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0, in vfs_remount_ro()
3035 ("vfs_remount_ro: mp %p is being unmounted (and busy?)", mp)); in vfs_remount_ro()
3038 vp_covered = mp->mnt_vnodecovered; in vfs_remount_ro()
3041 vfs_op_exit(mp); in vfs_remount_ro()
3048 vfs_op_exit(mp); in vfs_remount_ro()
3055 MNT_ILOCK(mp); in vfs_remount_ro()
3056 if ((mp->mnt_flag & MNT_RDONLY) != 0) { in vfs_remount_ro()
3057 MNT_IUNLOCK(mp); in vfs_remount_ro()
3061 mp->mnt_flag |= MNT_UPDATE | MNT_FORCE | MNT_RDONLY; in vfs_remount_ro()
3062 rootvp = vfs_cache_root_clear(mp); in vfs_remount_ro()
3063 MNT_IUNLOCK(mp); in vfs_remount_ro()
3071 vfs_mergeopts(opts, mp->mnt_opt); in vfs_remount_ro()
3072 mp->mnt_optnew = opts; in vfs_remount_ro()
3074 error = VFS_MOUNT(mp); in vfs_remount_ro()
3077 MNT_ILOCK(mp); in vfs_remount_ro()
3078 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE); in vfs_remount_ro()
3079 MNT_IUNLOCK(mp); in vfs_remount_ro()
3080 vfs_deallocate_syncvnode(mp); in vfs_remount_ro()
3081 if (mp->mnt_opt != NULL) in vfs_remount_ro()
3082 vfs_freeopts(mp->mnt_opt); in vfs_remount_ro()
3083 mp->mnt_opt = mp->mnt_optnew; in vfs_remount_ro()
3085 MNT_ILOCK(mp); in vfs_remount_ro()
3086 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE | MNT_RDONLY); in vfs_remount_ro()
3087 MNT_IUNLOCK(mp); in vfs_remount_ro()
3088 vfs_freeopts(mp->mnt_optnew); in vfs_remount_ro()
3090 mp->mnt_optnew = NULL; in vfs_remount_ro()
3093 vfs_op_exit(mp); in vfs_remount_ro()
3118 struct mount *mp; in suspend_all_fs() local
3122 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) { in suspend_all_fs()
3123 error = vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT); in suspend_all_fs()
3126 if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) != MNT_LOCAL || in suspend_all_fs()
3127 (mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { in suspend_all_fs()
3129 vfs_unbusy(mp); in suspend_all_fs()
3132 error = vfs_write_suspend(mp, 0); in suspend_all_fs()
3134 MNT_ILOCK(mp); in suspend_all_fs()
3135 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0); in suspend_all_fs()
3136 mp->mnt_kern_flag |= MNTK_SUSPEND_ALL; in suspend_all_fs()
3137 MNT_IUNLOCK(mp); in suspend_all_fs()
3141 mp->mnt_stat.f_mntonname, error); in suspend_all_fs()
3143 vfs_unbusy(mp); in suspend_all_fs()
3187 struct mount *mp; in resume_all_fs() local
3190 TAILQ_FOREACH(mp, &mountlist, mnt_list) { in resume_all_fs()
3191 if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0) in resume_all_fs()
3194 MNT_ILOCK(mp); in resume_all_fs()
3195 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) != 0); in resume_all_fs()
3196 mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL; in resume_all_fs()
3197 MNT_IUNLOCK(mp); in resume_all_fs()
3198 vfs_write_resume(mp, 0); in resume_all_fs()
3200 vfs_unbusy(mp); in resume_all_fs()
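
suspend_all_fs() and resume_all_fs() work as a pair: the first write-suspends every local, writable filesystem and tags it with MNTK_SUSPEND_ALL, and the second resumes exactly the mounts carrying that flag. A hedged sketch of the bracket; the caller context is an assumption and not part of this listing:

    suspend_all_fs();
    /* ... work that must not race with filesystem writes ... */
    resume_all_fs();
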