Lines matching the full-text query full:mp. The hits are from FreeBSD's sys/kern/vfs_mount.c; each entry gives the source line number, the matching line fragment, and the enclosing function (with "local", "argument", or "member" marking how mp is used on that line).

135 static void mount_devctl_event(const char *type, struct mount *mp, bool donew);
154 struct mount *mp; in mount_init() local
156 mp = (struct mount *)mem; in mount_init()
157 mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF); in mount_init()
158 mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF); in mount_init()
159 lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0); in mount_init()
160 mp->mnt_pcpu = uma_zalloc_pcpu(pcpu_zone_16, M_WAITOK | M_ZERO); in mount_init()
161 mp->mnt_ref = 0; in mount_init()
162 mp->mnt_vfs_ops = 1; in mount_init()
163 mp->mnt_rootvnode = NULL; in mount_init()
170 struct mount *mp; in mount_fini() local
172 mp = (struct mount *)mem; in mount_fini()
173 uma_zfree_pcpu(pcpu_zone_16, mp->mnt_pcpu); in mount_fini()
174 lockdestroy(&mp->mnt_explock); in mount_fini()
175 mtx_destroy(&mp->mnt_listmtx); in mount_fini()
176 mtx_destroy(&mp->mnt_mtx); in mount_fini()
495 struct mount *mp; in vfs_ref_from_vp() local
498 mp = atomic_load_ptr(&vp->v_mount); in vfs_ref_from_vp()
499 if (__predict_false(mp == NULL)) { in vfs_ref_from_vp()
500 return (mp); in vfs_ref_from_vp()
502 if (vfs_op_thread_enter(mp, mpcpu)) { in vfs_ref_from_vp()
503 if (__predict_true(mp == vp->v_mount)) { in vfs_ref_from_vp()
505 vfs_op_thread_exit(mp, mpcpu); in vfs_ref_from_vp()
507 vfs_op_thread_exit(mp, mpcpu); in vfs_ref_from_vp()
508 mp = NULL; in vfs_ref_from_vp()
511 MNT_ILOCK(mp); in vfs_ref_from_vp()
512 if (mp == vp->v_mount) { in vfs_ref_from_vp()
513 MNT_REF(mp); in vfs_ref_from_vp()
514 MNT_IUNLOCK(mp); in vfs_ref_from_vp()
516 MNT_IUNLOCK(mp); in vfs_ref_from_vp()
517 mp = NULL; in vfs_ref_from_vp()
520 return (mp); in vfs_ref_from_vp()
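
The vfs_ref_from_vp() hits above show the lockless fast path (vfs_op_thread_enter()) with a fallback to MNT_ILOCK(); the function returns NULL if the vnode's mount is changing or already gone. A minimal caller-side sketch, assuming kernel context; the helper name and error value are placeholders, and the reference must be paired with vfs_rel(), whose hits appear further down:

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/*
 * Hypothetical helper: pin the mount a vnode currently belongs to
 * across some operation.  vfs_ref_from_vp() takes the reference;
 * vfs_rel() drops it.
 */
static int
do_work_on_mount(struct vnode *vp)
{
	struct mount *mp;

	mp = vfs_ref_from_vp(vp);
	if (mp == NULL)
		return (ENOENT);	/* mount went away; placeholder error */
	/* ... operate on mp; it cannot be destroyed while referenced ... */
	vfs_rel(mp);
	return (0);
}
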
524 vfs_ref(struct mount *mp) in vfs_ref() argument
528 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); in vfs_ref()
529 if (vfs_op_thread_enter(mp, mpcpu)) { in vfs_ref()
531 vfs_op_thread_exit(mp, mpcpu); in vfs_ref()
535 MNT_ILOCK(mp); in vfs_ref()
536 MNT_REF(mp); in vfs_ref()
537 MNT_IUNLOCK(mp); in vfs_ref()
544 * caller and stored in per-mount data associated with mp.
556 struct mount *mp; in vfs_register_upper_from_vp() local
558 mp = atomic_load_ptr(&vp->v_mount); in vfs_register_upper_from_vp()
559 if (mp == NULL) in vfs_register_upper_from_vp()
561 MNT_ILOCK(mp); in vfs_register_upper_from_vp()
562 if (mp != vp->v_mount || in vfs_register_upper_from_vp()
563 ((mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_RECURSE)) != 0)) { in vfs_register_upper_from_vp()
564 MNT_IUNLOCK(mp); in vfs_register_upper_from_vp()
567 KASSERT(ump != mp, ("upper and lower mounts are identical")); in vfs_register_upper_from_vp()
568 upper->mp = ump; in vfs_register_upper_from_vp()
569 MNT_REF(mp); in vfs_register_upper_from_vp()
570 TAILQ_INSERT_TAIL(&mp->mnt_uppers, upper, mnt_upper_link); in vfs_register_upper_from_vp()
571 MNT_IUNLOCK(mp); in vfs_register_upper_from_vp()
572 return (mp); in vfs_register_upper_from_vp()
577 * notifications from lower mount mp. This registration will
580 * associated with mp.
582 * ump must already be registered as an upper mount of mp
586 vfs_register_for_notification(struct mount *mp, struct mount *ump, in vfs_register_for_notification() argument
589 upper->mp = ump; in vfs_register_for_notification()
590 MNT_ILOCK(mp); in vfs_register_for_notification()
591 TAILQ_INSERT_TAIL(&mp->mnt_notify, upper, mnt_upper_link); in vfs_register_for_notification()
592 MNT_IUNLOCK(mp); in vfs_register_for_notification()
596 vfs_drain_upper_locked(struct mount *mp) in vfs_drain_upper_locked() argument
598 mtx_assert(MNT_MTX(mp), MA_OWNED); in vfs_drain_upper_locked()
599 while (mp->mnt_upper_pending != 0) { in vfs_drain_upper_locked()
600 mp->mnt_kern_flag |= MNTK_UPPER_WAITER; in vfs_drain_upper_locked()
601 msleep(&mp->mnt_uppers, MNT_MTX(mp), 0, "mntupw", 0); in vfs_drain_upper_locked()
608 * as an upper mount for mp.
611 vfs_unregister_for_notification(struct mount *mp, in vfs_unregister_for_notification() argument
614 MNT_ILOCK(mp); in vfs_unregister_for_notification()
615 vfs_drain_upper_locked(mp); in vfs_unregister_for_notification()
616 TAILQ_REMOVE(&mp->mnt_notify, upper, mnt_upper_link); in vfs_unregister_for_notification()
617 MNT_IUNLOCK(mp); in vfs_unregister_for_notification()
622 * This must be done before mp can be unmounted.
625 vfs_unregister_upper(struct mount *mp, struct mount_upper_node *upper) in vfs_unregister_upper() argument
627 MNT_ILOCK(mp); in vfs_unregister_upper()
628 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0, in vfs_unregister_upper()
630 vfs_drain_upper_locked(mp); in vfs_unregister_upper()
631 TAILQ_REMOVE(&mp->mnt_uppers, upper, mnt_upper_link); in vfs_unregister_upper()
632 if ((mp->mnt_kern_flag & MNTK_TASKQUEUE_WAITER) != 0 && in vfs_unregister_upper()
633 TAILQ_EMPTY(&mp->mnt_uppers)) { in vfs_unregister_upper()
634 mp->mnt_kern_flag &= ~MNTK_TASKQUEUE_WAITER; in vfs_unregister_upper()
635 wakeup(&mp->mnt_taskqueue_link); in vfs_unregister_upper()
637 MNT_REL(mp); in vfs_unregister_upper()
638 MNT_IUNLOCK(mp); in vfs_unregister_upper()
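
The vfs_register_upper_from_vp(), vfs_register_for_notification(), and vfs_unregister_*() hits are the hooks a stacked filesystem (nullfs/unionfs style) uses to link its mount to the lower mount, so the lower mount cannot finish unmounting underneath it and can propagate notifications upward. A hedged lifecycle sketch; "struct stackfs_mount" and its field names are invented for illustration, and only the vfs_* calls come from the listing:

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

/* Hypothetical per-mount data for an illustrative stacked filesystem. */
struct stackfs_mount {
	struct mount		*lowermp;	/* lower mount, once registered */
	struct mount_upper_node	 upper_node;	/* linkage on the lower mnt_uppers list */
};

static int
stackfs_register_lower(struct mount *mp, struct vnode *lowerrootvp,
    struct stackfs_mount *sfs)
{
	/*
	 * Register mp as an upper of the mount lowerrootvp belongs to.
	 * Returns NULL if that mount is already unmounting (MNTK_UNMOUNT)
	 * or recursing through its uppers, in which case nothing was
	 * registered.
	 */
	sfs->lowermp = vfs_register_upper_from_vp(lowerrootvp, mp,
	    &sfs->upper_node);
	return (sfs->lowermp != NULL);
}

static void
stackfs_unregister_lower(struct stackfs_mount *sfs)
{
	/* Required before the lower mount is allowed to unmount. */
	if (sfs->lowermp != NULL) {
		vfs_unregister_upper(sfs->lowermp, &sfs->upper_node);
		sfs->lowermp = NULL;
	}
}
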
642 vfs_rel(struct mount *mp) in vfs_rel() argument
646 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); in vfs_rel()
647 if (vfs_op_thread_enter(mp, mpcpu)) { in vfs_rel()
649 vfs_op_thread_exit(mp, mpcpu); in vfs_rel()
653 MNT_ILOCK(mp); in vfs_rel()
654 MNT_REL(mp); in vfs_rel()
655 MNT_IUNLOCK(mp); in vfs_rel()
665 struct mount *mp; in vfs_mount_alloc() local
667 mp = uma_zalloc(mount_zone, M_WAITOK); in vfs_mount_alloc()
668 bzero(&mp->mnt_startzero, in vfs_mount_alloc()
670 mp->mnt_kern_flag = 0; in vfs_mount_alloc()
671 mp->mnt_flag = 0; in vfs_mount_alloc()
672 mp->mnt_rootvnode = NULL; in vfs_mount_alloc()
673 mp->mnt_vnodecovered = NULL; in vfs_mount_alloc()
674 mp->mnt_op = NULL; in vfs_mount_alloc()
675 mp->mnt_vfc = NULL; in vfs_mount_alloc()
676 TAILQ_INIT(&mp->mnt_nvnodelist); in vfs_mount_alloc()
677 mp->mnt_nvnodelistsize = 0; in vfs_mount_alloc()
678 TAILQ_INIT(&mp->mnt_lazyvnodelist); in vfs_mount_alloc()
679 mp->mnt_lazyvnodelistsize = 0; in vfs_mount_alloc()
680 MPPASS(mp->mnt_ref == 0 && mp->mnt_lockref == 0 && in vfs_mount_alloc()
681 mp->mnt_writeopcount == 0, mp); in vfs_mount_alloc()
682 MPASSERT(mp->mnt_vfs_ops == 1, mp, in vfs_mount_alloc()
683 ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops)); in vfs_mount_alloc()
684 (void) vfs_busy(mp, MBF_NOWAIT); in vfs_mount_alloc()
686 mp->mnt_op = vfsp->vfc_vfsops; in vfs_mount_alloc()
687 mp->mnt_vfc = vfsp; in vfs_mount_alloc()
688 mp->mnt_stat.f_type = vfsp->vfc_typenum; in vfs_mount_alloc()
689 mp->mnt_gen++; in vfs_mount_alloc()
690 strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); in vfs_mount_alloc()
691 mp->mnt_vnodecovered = vp; in vfs_mount_alloc()
692 mp->mnt_cred = crdup(cred); in vfs_mount_alloc()
693 mp->mnt_stat.f_owner = cred->cr_uid; in vfs_mount_alloc()
694 strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN); in vfs_mount_alloc()
695 mp->mnt_iosize_max = DFLTPHYS; in vfs_mount_alloc()
697 mac_mount_init(mp); in vfs_mount_alloc()
698 mac_mount_create(cred, mp); in vfs_mount_alloc()
700 arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0); in vfs_mount_alloc()
701 mp->mnt_upper_pending = 0; in vfs_mount_alloc()
702 TAILQ_INIT(&mp->mnt_uppers); in vfs_mount_alloc()
703 TAILQ_INIT(&mp->mnt_notify); in vfs_mount_alloc()
704 mp->mnt_taskqueue_flags = 0; in vfs_mount_alloc()
705 mp->mnt_unmount_retries = 0; in vfs_mount_alloc()
706 return (mp); in vfs_mount_alloc()
713 vfs_mount_destroy(struct mount *mp) in vfs_mount_destroy() argument
716 MPPASS(mp->mnt_vfs_ops != 0, mp); in vfs_mount_destroy()
718 vfs_assert_mount_counters(mp); in vfs_mount_destroy()
720 MNT_ILOCK(mp); in vfs_mount_destroy()
721 mp->mnt_kern_flag |= MNTK_REFEXPIRE; in vfs_mount_destroy()
722 if (mp->mnt_kern_flag & MNTK_MWAIT) { in vfs_mount_destroy()
723 mp->mnt_kern_flag &= ~MNTK_MWAIT; in vfs_mount_destroy()
724 wakeup(mp); in vfs_mount_destroy()
726 while (mp->mnt_ref) in vfs_mount_destroy()
727 msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0); in vfs_mount_destroy()
728 KASSERT(mp->mnt_ref == 0, in vfs_mount_destroy()
731 MPPASS(mp->mnt_writeopcount == 0, mp); in vfs_mount_destroy()
732 MPPASS(mp->mnt_secondary_writes == 0, mp); in vfs_mount_destroy()
733 atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1); in vfs_mount_destroy()
734 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) { in vfs_mount_destroy()
737 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) in vfs_mount_destroy()
741 KASSERT(mp->mnt_upper_pending == 0, ("mnt_upper_pending")); in vfs_mount_destroy()
742 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers")); in vfs_mount_destroy()
743 KASSERT(TAILQ_EMPTY(&mp->mnt_notify), ("mnt_notify")); in vfs_mount_destroy()
744 MPPASS(mp->mnt_nvnodelistsize == 0, mp); in vfs_mount_destroy()
745 MPPASS(mp->mnt_lazyvnodelistsize == 0, mp); in vfs_mount_destroy()
746 MPPASS(mp->mnt_lockref == 0, mp); in vfs_mount_destroy()
747 MNT_IUNLOCK(mp); in vfs_mount_destroy()
749 MPASSERT(mp->mnt_vfs_ops == 1, mp, in vfs_mount_destroy()
750 ("vfs_ops should be 1 but %d found", mp->mnt_vfs_ops)); in vfs_mount_destroy()
752 MPASSERT(mp->mnt_rootvnode == NULL, mp, in vfs_mount_destroy()
753 ("mount point still has a root vnode %p", mp->mnt_rootvnode)); in vfs_mount_destroy()
755 if (mp->mnt_vnodecovered != NULL) in vfs_mount_destroy()
756 vrele(mp->mnt_vnodecovered); in vfs_mount_destroy()
758 mac_mount_destroy(mp); in vfs_mount_destroy()
760 if (mp->mnt_opt != NULL) in vfs_mount_destroy()
761 vfs_freeopts(mp->mnt_opt); in vfs_mount_destroy()
762 if (mp->mnt_exjail != NULL) { in vfs_mount_destroy()
763 atomic_subtract_int(&mp->mnt_exjail->cr_prison->pr_exportcnt, in vfs_mount_destroy()
765 crfree(mp->mnt_exjail); in vfs_mount_destroy()
767 if (mp->mnt_export != NULL) { in vfs_mount_destroy()
768 vfs_free_addrlist(mp->mnt_export); in vfs_mount_destroy()
769 free(mp->mnt_export, M_MOUNT); in vfs_mount_destroy()
771 crfree(mp->mnt_cred); in vfs_mount_destroy()
772 uma_zfree(mount_zone, mp); in vfs_mount_destroy()
981 * variables will fit in our mp buffers, including the in vfs_donmount()
1120 struct mount *mp; in vfs_domount_first() local
1177 mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred); in vfs_domount_first()
1179 mp->mnt_optnew = *optlist; in vfs_domount_first()
1181 mp->mnt_flag = (fsflags & in vfs_domount_first()
1191 if ((error = VFS_MOUNT(mp)) != 0 || in vfs_domount_first()
1192 (error1 = VFS_STATFS(mp, &mp->mnt_stat)) != 0 || in vfs_domount_first()
1193 (error1 = VFS_ROOT(mp, LK_EXCLUSIVE, &newdp)) != 0) { in vfs_domount_first()
1197 rootvp = vfs_cache_root_clear(mp); in vfs_domount_first()
1202 (void)vn_start_write(NULL, &mp, V_WAIT); in vfs_domount_first()
1203 MNT_ILOCK(mp); in vfs_domount_first()
1204 mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_UNMOUNTF; in vfs_domount_first()
1205 MNT_IUNLOCK(mp); in vfs_domount_first()
1206 VFS_PURGE(mp); in vfs_domount_first()
1207 error = VFS_UNMOUNT(mp, 0); in vfs_domount_first()
1208 vn_finished_write(mp); in vfs_domount_first()
1217 vfs_unbusy(mp); in vfs_domount_first()
1218 mp->mnt_vnodecovered = NULL; in vfs_domount_first()
1221 vfs_mount_destroy(mp); in vfs_domount_first()
1237 if (mp->mnt_opt != NULL) in vfs_domount_first()
1238 vfs_freeopts(mp->mnt_opt); in vfs_domount_first()
1239 mp->mnt_opt = mp->mnt_optnew; in vfs_domount_first()
1245 mp->mnt_optnew = NULL; in vfs_domount_first()
1247 MNT_ILOCK(mp); in vfs_domount_first()
1248 if ((mp->mnt_flag & MNT_ASYNC) != 0 && in vfs_domount_first()
1249 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0) in vfs_domount_first()
1250 mp->mnt_kern_flag |= MNTK_ASYNC; in vfs_domount_first()
1252 mp->mnt_kern_flag &= ~MNTK_ASYNC; in vfs_domount_first()
1253 MNT_IUNLOCK(mp); in vfs_domount_first()
1262 vp->v_mountedhere = mp; in vfs_domount_first()
1280 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); in vfs_domount_first()
1284 EVENTHANDLER_DIRECT_INVOKE(vfs_mounted, mp, newdp, td); in vfs_domount_first()
1286 mount_devctl_event("MOUNT", mp, false); in vfs_domount_first()
1291 if ((mp->mnt_flag & MNT_RDONLY) == 0) in vfs_domount_first()
1292 vfs_allocate_syncvnode(mp); in vfs_domount_first()
1293 vfs_op_exit(mp); in vfs_domount_first()
1294 vfs_unbusy(mp); in vfs_domount_first()
1314 struct mount *mp; in vfs_domount_update() local
1323 mp = vp->v_mount; in vfs_domount_update()
1339 flag = mp->mnt_flag; in vfs_domount_update()
1357 error = vfs_suser(mp, td); in vfs_domount_update()
1366 if (vfs_busy(mp, MBF_NOWAIT)) { in vfs_domount_update()
1373 vfs_unbusy(mp); in vfs_domount_update()
1382 vfs_op_enter(mp); in vfs_domount_update()
1391 if (fsidcmp(fsid_up, &mp->mnt_stat.f_fsid) != 0) { in vfs_domount_update()
1399 MNT_ILOCK(mp); in vfs_domount_update()
1400 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) { in vfs_domount_update()
1401 MNT_IUNLOCK(mp); in vfs_domount_update()
1417 mp->mnt_flag |= MNT_UPDATE; in vfs_domount_update()
1419 mp->mnt_flag &= ~MNT_UPDATEMASK; in vfs_domount_update()
1420 if ((mp->mnt_flag & MNT_UNION) == 0 && in vfs_domount_update()
1425 mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE | in vfs_domount_update()
1427 if ((mp->mnt_flag & MNT_ASYNC) == 0) in vfs_domount_update()
1428 mp->mnt_kern_flag &= ~MNTK_ASYNC; in vfs_domount_update()
1430 rootvp = vfs_cache_root_clear(mp); in vfs_domount_update()
1431 MNT_IUNLOCK(mp); in vfs_domount_update()
1432 mp->mnt_optnew = *optlist; in vfs_domount_update()
1433 vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt); in vfs_domount_update()
1450 error = VFS_MOUNT(mp); in vfs_domount_update()
1454 if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp, in vfs_domount_update()
1493 export_error = vfs_export(mp, &export, true); in vfs_domount_update()
1515 export_error = vfs_export(mp, &export, true); in vfs_domount_update()
1524 MNT_ILOCK(mp); in vfs_domount_update()
1526 mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE | in vfs_domount_update()
1528 mp->mnt_flag |= mnt_union; in vfs_domount_update()
1537 mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA); in vfs_domount_update()
1539 if ((mp->mnt_flag & MNT_ASYNC) != 0 && in vfs_domount_update()
1540 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0) in vfs_domount_update()
1541 mp->mnt_kern_flag |= MNTK_ASYNC; in vfs_domount_update()
1543 mp->mnt_kern_flag &= ~MNTK_ASYNC; in vfs_domount_update()
1544 MNT_IUNLOCK(mp); in vfs_domount_update()
1549 mount_devctl_event("REMOUNT", mp, true); in vfs_domount_update()
1550 if (mp->mnt_opt != NULL) in vfs_domount_update()
1551 vfs_freeopts(mp->mnt_opt); in vfs_domount_update()
1552 mp->mnt_opt = mp->mnt_optnew; in vfs_domount_update()
1554 (void)VFS_STATFS(mp, &mp->mnt_stat); in vfs_domount_update()
1559 mp->mnt_optnew = NULL; in vfs_domount_update()
1561 if ((mp->mnt_flag & MNT_RDONLY) == 0) in vfs_domount_update()
1562 vfs_allocate_syncvnode(mp); in vfs_domount_update()
1564 vfs_deallocate_syncvnode(mp); in vfs_domount_update()
1566 vfs_op_exit(mp); in vfs_domount_update()
1572 vfs_unbusy(mp); in vfs_domount_update()
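
The vfs_domount_update() hits cover the MNT_UPDATE (remount) path: the existing mount is busied, vfs_op_enter() is taken, the old flag word is saved and merged with the new options, VFS_MOUNT() is re-run, and any "export" option is handled through vfs_export(). From userland this path is reached via nmount(2) with MNT_UPDATE set. A hedged sketch follows; the filesystem type, path, and option list are only examples and error handling is trimmed:

#include <sys/param.h>
#include <sys/uio.h>
#include <sys/mount.h>

/*
 * Hypothetical userland helper: remount an existing filesystem
 * read-only via nmount(2).  Option names and values travel as
 * NUL-terminated strings; iov_len includes the terminating NUL.
 */
static int
remount_ro(void)
{
	struct iovec iov[6];

	iov[0].iov_base = __DECONST(char *, "fstype");
	iov[0].iov_len = sizeof("fstype");
	iov[1].iov_base = __DECONST(char *, "ufs");
	iov[1].iov_len = sizeof("ufs");
	iov[2].iov_base = __DECONST(char *, "fspath");
	iov[2].iov_len = sizeof("fspath");
	iov[3].iov_base = __DECONST(char *, "/mnt");
	iov[3].iov_len = sizeof("/mnt");
	/* Boolean option: name paired with a NULL, zero-length value. */
	iov[4].iov_base = __DECONST(char *, "ro");
	iov[4].iov_len = sizeof("ro");
	iov[5].iov_base = NULL;
	iov[5].iov_len = 0;

	/* MNT_UPDATE selects the vfs_domount_update() path in the kernel. */
	return (nmount(iov, nitems(iov), MNT_UPDATE));
}
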
1601 * variables will fit in our mp buffers, including the in vfs_domount()
1726 struct mount *mp; in kern_unmount() local
1753 mp = vfs_getvfs(&fsid); in kern_unmount()
1755 if (mp == NULL) { in kern_unmount()
1779 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) { in kern_unmount()
1780 if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) { in kern_unmount()
1781 vfs_ref(mp); in kern_unmount()
1787 if (mp == NULL) { in kern_unmount()
1801 if (mp->mnt_flag & MNT_ROOTFS) { in kern_unmount()
1802 vfs_rel(mp); in kern_unmount()
1805 error = dounmount(mp, flags, td); in kern_unmount()
1817 vfs_check_usecounts(struct mount *mp) in vfs_check_usecounts() argument
1821 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { in vfs_check_usecounts()
1825 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); in vfs_check_usecounts()
1835 dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags) in dounmount_cleanup() argument
1838 mtx_assert(MNT_MTX(mp), MA_OWNED); in dounmount_cleanup()
1839 mp->mnt_kern_flag &= ~mntkflags; in dounmount_cleanup()
1840 if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) { in dounmount_cleanup()
1841 mp->mnt_kern_flag &= ~MNTK_MWAIT; in dounmount_cleanup()
1842 wakeup(mp); in dounmount_cleanup()
1844 vfs_op_exit_locked(mp); in dounmount_cleanup()
1845 MNT_IUNLOCK(mp); in dounmount_cleanup()
1850 vn_finished_write(mp); in dounmount_cleanup()
1851 vfs_rel(mp); in dounmount_cleanup()
1862 vfs_op_enter(struct mount *mp) in vfs_op_enter() argument
1867 MNT_ILOCK(mp); in vfs_op_enter()
1868 mp->mnt_vfs_ops++; in vfs_op_enter()
1869 if (mp->mnt_vfs_ops > 1) { in vfs_op_enter()
1870 MNT_IUNLOCK(mp); in vfs_op_enter()
1873 vfs_op_barrier_wait(mp); in vfs_op_enter()
1875 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_op_enter()
1877 mp->mnt_ref += mpcpu->mntp_ref; in vfs_op_enter()
1880 mp->mnt_lockref += mpcpu->mntp_lockref; in vfs_op_enter()
1883 mp->mnt_writeopcount += mpcpu->mntp_writeopcount; in vfs_op_enter()
1886 MPASSERT(mp->mnt_ref > 0 && mp->mnt_lockref >= 0 && in vfs_op_enter()
1887 mp->mnt_writeopcount >= 0, mp, in vfs_op_enter()
1889 mp->mnt_ref, mp->mnt_lockref, mp->mnt_writeopcount)); in vfs_op_enter()
1890 MNT_IUNLOCK(mp); in vfs_op_enter()
1891 vfs_assert_mount_counters(mp); in vfs_op_enter()
1895 vfs_op_exit_locked(struct mount *mp) in vfs_op_exit_locked() argument
1898 mtx_assert(MNT_MTX(mp), MA_OWNED); in vfs_op_exit_locked()
1900 MPASSERT(mp->mnt_vfs_ops > 0, mp, in vfs_op_exit_locked()
1901 ("invalid vfs_ops count %d", mp->mnt_vfs_ops)); in vfs_op_exit_locked()
1902 MPASSERT(mp->mnt_vfs_ops > 1 || in vfs_op_exit_locked()
1903 (mp->mnt_kern_flag & (MNTK_UNMOUNT | MNTK_SUSPEND)) == 0, mp, in vfs_op_exit_locked()
1904 ("vfs_ops too low %d in unmount or suspend", mp->mnt_vfs_ops)); in vfs_op_exit_locked()
1905 mp->mnt_vfs_ops--; in vfs_op_exit_locked()
1909 vfs_op_exit(struct mount *mp) in vfs_op_exit() argument
1912 MNT_ILOCK(mp); in vfs_op_exit()
1913 vfs_op_exit_locked(mp); in vfs_op_exit()
1914 MNT_IUNLOCK(mp); in vfs_op_exit()
1918 struct mount *mp; member
1926 struct mount *mp; in vfs_op_action_func() local
1929 mp = vfsopipi->mp; in vfs_op_action_func()
1931 if (!vfs_op_thread_entered(mp)) in vfs_op_action_func()
1939 struct mount *mp; in vfs_op_wait_func() local
1943 mp = vfsopipi->mp; in vfs_op_wait_func()
1945 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_op_wait_func()
1951 vfs_op_barrier_wait(struct mount *mp) in vfs_op_barrier_wait() argument
1955 vfsopipi.mp = mp; in vfs_op_barrier_wait()
1967 vfs_assert_mount_counters(struct mount *mp) in vfs_assert_mount_counters() argument
1972 if (mp->mnt_vfs_ops == 0) in vfs_assert_mount_counters()
1976 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_assert_mount_counters()
1980 vfs_dump_mount_counters(mp); in vfs_assert_mount_counters()
1985 vfs_dump_mount_counters(struct mount *mp) in vfs_dump_mount_counters() argument
1991 printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops); in vfs_dump_mount_counters()
1994 ref = mp->mnt_ref; in vfs_dump_mount_counters()
1996 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_dump_mount_counters()
2002 lockref = mp->mnt_lockref; in vfs_dump_mount_counters()
2004 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_dump_mount_counters()
2010 writeopcount = mp->mnt_writeopcount; in vfs_dump_mount_counters()
2012 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_dump_mount_counters()
2019 printf("ref %-5d %-5d\n", mp->mnt_ref, ref); in vfs_dump_mount_counters()
2020 printf("lockref %-5d %-5d\n", mp->mnt_lockref, lockref); in vfs_dump_mount_counters()
2021 printf("writeopcount %-5d %-5d\n", mp->mnt_writeopcount, writeopcount); in vfs_dump_mount_counters()
2028 vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which) in vfs_mount_fetch_counter() argument
2035 sum = mp->mnt_ref; in vfs_mount_fetch_counter()
2038 sum = mp->mnt_lockref; in vfs_mount_fetch_counter()
2041 sum = mp->mnt_writeopcount; in vfs_mount_fetch_counter()
2046 mpcpu = vfs_mount_pcpu_remote(mp, cpu); in vfs_mount_fetch_counter()
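
vfs_mount_fetch_counter() folds the central mnt_ref / mnt_lockref / mnt_writeopcount fields together with the per-CPU deltas that accumulate while mnt_vfs_ops is zero. A hedged sketch of a diagnostic reader; the helper is hypothetical and the MNT_COUNT_* constants are assumed to be the enum mount_counter values from sys/mount.h:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>

/* Hypothetical debug helper: print the effective mount counters. */
static void
report_mount_counters(struct mount *mp)
{
	printf("%s: ref %d lockref %d writeopcount %d\n",
	    mp->mnt_stat.f_mntonname,
	    vfs_mount_fetch_counter(mp, MNT_COUNT_REF),
	    vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF),
	    vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT));
}
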
2063 deferred_unmount_enqueue(struct mount *mp, uint64_t flags, bool requeue, in deferred_unmount_enqueue() argument
2070 if ((mp->mnt_taskqueue_flags & MNT_DEFERRED) == 0 || requeue) { in deferred_unmount_enqueue()
2071 mp->mnt_taskqueue_flags = flags | MNT_DEFERRED; in deferred_unmount_enqueue()
2072 STAILQ_INSERT_TAIL(&deferred_unmount_list, mp, in deferred_unmount_enqueue()
2094 struct mount *mp, *tmp; in vfs_deferred_unmount() local
2104 STAILQ_FOREACH_SAFE(mp, &local_unmounts, mnt_taskqueue_link, tmp) { in vfs_deferred_unmount()
2105 flags = mp->mnt_taskqueue_flags; in vfs_deferred_unmount()
2108 error = dounmount(mp, flags, curthread); in vfs_deferred_unmount()
2110 MNT_ILOCK(mp); in vfs_deferred_unmount()
2111 unmounted = ((mp->mnt_kern_flag & MNTK_REFEXPIRE) != 0); in vfs_deferred_unmount()
2112 MNT_IUNLOCK(mp); in vfs_deferred_unmount()
2119 retries = (mp->mnt_unmount_retries)++; in vfs_deferred_unmount()
2122 deferred_unmount_enqueue(mp, flags, true, in vfs_deferred_unmount()
2128 mp->mnt_stat.f_mntonname, retries, error); in vfs_deferred_unmount()
2130 vfs_rel(mp); in vfs_deferred_unmount()
2140 dounmount(struct mount *mp, uint64_t flags, struct thread *td) in dounmount() argument
2164 if (!deferred_unmount_enqueue(mp, flags, false, 0)) in dounmount()
2165 vfs_rel(mp); in dounmount()
2179 error = vfs_suser(mp, td); in dounmount()
2183 vfs_rel(mp); in dounmount()
2194 MNT_ILOCK(mp); in dounmount()
2202 mp->mnt_kern_flag |= MNTK_RECURSE; in dounmount()
2203 mp->mnt_upper_pending++; in dounmount()
2204 TAILQ_FOREACH(upper, &mp->mnt_uppers, mnt_upper_link) { in dounmount()
2205 retries = upper->mp->mnt_unmount_retries; in dounmount()
2210 MNT_IUNLOCK(mp); in dounmount()
2212 vfs_ref(upper->mp); in dounmount()
2213 if (!deferred_unmount_enqueue(upper->mp, flags, in dounmount()
2215 vfs_rel(upper->mp); in dounmount()
2216 MNT_ILOCK(mp); in dounmount()
2218 mp->mnt_upper_pending--; in dounmount()
2219 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && in dounmount()
2220 mp->mnt_upper_pending == 0) { in dounmount()
2221 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; in dounmount()
2222 wakeup(&mp->mnt_uppers); in dounmount()
2232 while (error == 0 && !TAILQ_EMPTY(&mp->mnt_uppers)) { in dounmount()
2233 mp->mnt_kern_flag |= MNTK_TASKQUEUE_WAITER; in dounmount()
2234 error = msleep(&mp->mnt_taskqueue_link, in dounmount()
2235 MNT_MTX(mp), PCATCH, "umntqw", 0); in dounmount()
2238 MNT_REL(mp); in dounmount()
2239 MNT_IUNLOCK(mp); in dounmount()
2242 } else if (!TAILQ_EMPTY(&mp->mnt_uppers)) { in dounmount()
2243 MNT_IUNLOCK(mp); in dounmount()
2245 deferred_unmount_enqueue(mp, flags, true, 0); in dounmount()
2248 MNT_IUNLOCK(mp); in dounmount()
2249 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers not empty")); in dounmount()
2254 vfs_ref(mp); in dounmount()
2256 if ((coveredvp = mp->mnt_vnodecovered) != NULL) { in dounmount()
2257 mnt_gen_r = mp->mnt_gen; in dounmount()
2262 * Check for mp being unmounted while waiting for the in dounmount()
2265 if (coveredvp->v_mountedhere != mp || in dounmount()
2269 vfs_rel(mp); in dounmount()
2274 vfs_op_enter(mp); in dounmount()
2276 vn_start_write(NULL, &mp, V_WAIT); in dounmount()
2277 MNT_ILOCK(mp); in dounmount()
2278 if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 || in dounmount()
2279 (mp->mnt_flag & MNT_UPDATE) != 0 || in dounmount()
2280 !TAILQ_EMPTY(&mp->mnt_uppers)) { in dounmount()
2281 dounmount_cleanup(mp, coveredvp, 0); in dounmount()
2284 mp->mnt_kern_flag |= MNTK_UNMOUNT; in dounmount()
2285 rootvp = vfs_cache_root_clear(mp); in dounmount()
2289 MNT_IUNLOCK(mp); in dounmount()
2290 error = vfs_check_usecounts(mp); in dounmount()
2291 MNT_ILOCK(mp); in dounmount()
2294 dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT); in dounmount()
2304 mp->mnt_kern_flag |= MNTK_UNMOUNTF; in dounmount()
2305 MNT_IUNLOCK(mp); in dounmount()
2310 VFS_PURGE(mp); in dounmount()
2311 MNT_ILOCK(mp); in dounmount()
2314 if (mp->mnt_lockref) { in dounmount()
2315 mp->mnt_kern_flag |= MNTK_DRAINING; in dounmount()
2316 error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS, in dounmount()
2319 MNT_IUNLOCK(mp); in dounmount()
2320 KASSERT(mp->mnt_lockref == 0, in dounmount()
2337 if (mp->mnt_flag & MNT_EXPUBLIC) in dounmount()
2340 vfs_periodic(mp, MNT_WAIT); in dounmount()
2341 MNT_ILOCK(mp); in dounmount()
2342 async_flag = mp->mnt_flag & MNT_ASYNC; in dounmount()
2343 mp->mnt_flag &= ~MNT_ASYNC; in dounmount()
2344 mp->mnt_kern_flag &= ~MNTK_ASYNC; in dounmount()
2345 MNT_IUNLOCK(mp); in dounmount()
2346 vfs_deallocate_syncvnode(mp); in dounmount()
2347 error = VFS_UNMOUNT(mp, flags); in dounmount()
2348 vn_finished_write(mp); in dounmount()
2349 vfs_rel(mp); in dounmount()
2357 MNT_ILOCK(mp); in dounmount()
2358 if ((mp->mnt_flag & MNT_RDONLY) == 0) { in dounmount()
2359 MNT_IUNLOCK(mp); in dounmount()
2360 vfs_allocate_syncvnode(mp); in dounmount()
2361 MNT_ILOCK(mp); in dounmount()
2363 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF); in dounmount()
2364 mp->mnt_flag |= async_flag; in dounmount()
2365 if ((mp->mnt_flag & MNT_ASYNC) != 0 && in dounmount()
2366 (mp->mnt_kern_flag & MNTK_NOASYNC) == 0) in dounmount()
2367 mp->mnt_kern_flag |= MNTK_ASYNC; in dounmount()
2368 if (mp->mnt_kern_flag & MNTK_MWAIT) { in dounmount()
2369 mp->mnt_kern_flag &= ~MNTK_MWAIT; in dounmount()
2370 wakeup(mp); in dounmount()
2372 vfs_op_exit_locked(mp); in dounmount()
2373 MNT_IUNLOCK(mp); in dounmount()
2387 TAILQ_REMOVE(&mountlist, mp, mnt_list); in dounmount()
2389 EVENTHANDLER_DIRECT_INVOKE(vfs_unmounted, mp, td); in dounmount()
2399 mount_devctl_event("UNMOUNT", mp, false); in dounmount()
2405 if (rootvnode != NULL && mp == rootvnode->v_mount) { in dounmount()
2409 if (mp == rootdevmp) in dounmount()
2412 vfs_rel(mp); in dounmount()
2413 vfs_mount_destroy(mp); in dounmount()
2421 vfs_mount_error(struct mount *mp, const char *fmt, ...) in vfs_mount_error() argument
2423 struct vfsoptlist *moptlist = mp->mnt_optnew; in vfs_mount_error()
2751 __vfs_statfs(struct mount *mp, struct statfs *sbp) in __vfs_statfs() argument
2757 if (sbp != &mp->mnt_stat) in __vfs_statfs()
2758 memcpy(sbp, &mp->mnt_stat, sizeof(*sbp)); in __vfs_statfs()
2765 sbp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; in __vfs_statfs()
2766 sbp->f_nvnodelistsize = mp->mnt_nvnodelistsize; in __vfs_statfs()
2768 return (mp->mnt_op->vfs_statfs(mp, sbp)); in __vfs_statfs()
2772 vfs_mountedfrom(struct mount *mp, const char *from) in vfs_mountedfrom() argument
2775 bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname); in vfs_mountedfrom()
2776 strlcpy(mp->mnt_stat.f_mntfromname, from, in vfs_mountedfrom()
2777 sizeof mp->mnt_stat.f_mntfromname); in vfs_mountedfrom()
2965 mount_devctl_event(const char *type, struct mount *mp, bool donew) in mount_devctl_event() argument
2970 struct statfs *sfp = &mp->mnt_stat; in mount_devctl_event()
2989 if ((mp->mnt_flag & fp->o_opt) != 0) { in mount_devctl_event()
3020 vfs_remount_ro(struct mount *mp) in vfs_remount_ro() argument
3027 vfs_op_enter(mp); in vfs_remount_ro()
3028 KASSERT(mp->mnt_lockref > 0, in vfs_remount_ro()
3029 ("vfs_remount_ro: mp %p is not busied", mp)); in vfs_remount_ro()
3030 KASSERT((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0, in vfs_remount_ro()
3031 ("vfs_remount_ro: mp %p is being unmounted (and busy?)", mp)); in vfs_remount_ro()
3034 vp_covered = mp->mnt_vnodecovered; in vfs_remount_ro()
3037 vfs_op_exit(mp); in vfs_remount_ro()
3044 vfs_op_exit(mp); in vfs_remount_ro()
3051 MNT_ILOCK(mp); in vfs_remount_ro()
3052 if ((mp->mnt_flag & MNT_RDONLY) != 0) { in vfs_remount_ro()
3053 MNT_IUNLOCK(mp); in vfs_remount_ro()
3057 mp->mnt_flag |= MNT_UPDATE | MNT_FORCE | MNT_RDONLY; in vfs_remount_ro()
3058 rootvp = vfs_cache_root_clear(mp); in vfs_remount_ro()
3059 MNT_IUNLOCK(mp); in vfs_remount_ro()
3067 vfs_mergeopts(opts, mp->mnt_opt); in vfs_remount_ro()
3068 mp->mnt_optnew = opts; in vfs_remount_ro()
3070 error = VFS_MOUNT(mp); in vfs_remount_ro()
3073 MNT_ILOCK(mp); in vfs_remount_ro()
3074 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE); in vfs_remount_ro()
3075 MNT_IUNLOCK(mp); in vfs_remount_ro()
3076 vfs_deallocate_syncvnode(mp); in vfs_remount_ro()
3077 if (mp->mnt_opt != NULL) in vfs_remount_ro()
3078 vfs_freeopts(mp->mnt_opt); in vfs_remount_ro()
3079 mp->mnt_opt = mp->mnt_optnew; in vfs_remount_ro()
3081 MNT_ILOCK(mp); in vfs_remount_ro()
3082 mp->mnt_flag &= ~(MNT_UPDATE | MNT_FORCE | MNT_RDONLY); in vfs_remount_ro()
3083 MNT_IUNLOCK(mp); in vfs_remount_ro()
3084 vfs_freeopts(mp->mnt_optnew); in vfs_remount_ro()
3086 mp->mnt_optnew = NULL; in vfs_remount_ro()
3089 vfs_op_exit(mp); in vfs_remount_ro()
3114 struct mount *mp; in suspend_all_fs() local
3118 TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) { in suspend_all_fs()
3119 error = vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT); in suspend_all_fs()
3122 if ((mp->mnt_flag & (MNT_RDONLY | MNT_LOCAL)) != MNT_LOCAL || in suspend_all_fs()
3123 (mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { in suspend_all_fs()
3125 vfs_unbusy(mp); in suspend_all_fs()
3128 error = vfs_write_suspend(mp, 0); in suspend_all_fs()
3130 MNT_ILOCK(mp); in suspend_all_fs()
3131 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0); in suspend_all_fs()
3132 mp->mnt_kern_flag |= MNTK_SUSPEND_ALL; in suspend_all_fs()
3133 MNT_IUNLOCK(mp); in suspend_all_fs()
3137 mp->mnt_stat.f_mntonname, error); in suspend_all_fs()
3139 vfs_unbusy(mp); in suspend_all_fs()
3183 struct mount *mp; in resume_all_fs() local
3186 TAILQ_FOREACH(mp, &mountlist, mnt_list) { in resume_all_fs()
3187 if ((mp->mnt_kern_flag & MNTK_SUSPEND_ALL) == 0) in resume_all_fs()
3190 MNT_ILOCK(mp); in resume_all_fs()
3191 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) != 0); in resume_all_fs()
3192 mp->mnt_kern_flag &= ~MNTK_SUSPEND_ALL; in resume_all_fs()
3193 MNT_IUNLOCK(mp); in resume_all_fs()
3194 vfs_write_resume(mp, 0); in resume_all_fs()
3196 vfs_unbusy(mp); in resume_all_fs()
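
suspend_all_fs() and resume_all_fs() show the canonical mountlist traversal: take mountlist_mtx, busy each mount with MBF_MNTLSTLOCK | MBF_NOWAIT (which drops the list lock on success and leaves it held on failure), and re-take the list lock before vfs_unbusy() and the next iteration. A condensed sketch of that pattern, with the per-mount work left as a placeholder:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mount.h>

/* Hypothetical helper: visit every mounted filesystem safely. */
static void
walk_mounts(void)
{
	struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (vfs_busy(mp, MBF_MNTLSTLOCK | MBF_NOWAIT) != 0)
			continue;	/* busy failed; list lock still held */
		/* ... inspect or operate on the busied mount here ... */
		mtx_lock(&mountlist_mtx);	/* re-take before unbusy */
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);
}

Holding the busy reference keeps the mount on mountlist and prevents it from completing an unmount, which is why the list lock can be dropped while the per-mount work runs.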