Lines matching full:sb in fs/super.c

42 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);
53 static inline void __super_lock(struct super_block *sb, bool excl) in __super_lock() argument
56 down_write(&sb->s_umount); in __super_lock()
58 down_read(&sb->s_umount); in __super_lock()
61 static inline void super_unlock(struct super_block *sb, bool excl) in super_unlock() argument
64 up_write(&sb->s_umount); in super_unlock()
66 up_read(&sb->s_umount); in super_unlock()
69 static inline void __super_lock_excl(struct super_block *sb) in __super_lock_excl() argument
71 __super_lock(sb, true); in __super_lock_excl()
74 static inline void super_unlock_excl(struct super_block *sb) in super_unlock_excl() argument
76 super_unlock(sb, true); in super_unlock_excl()
79 static inline void super_unlock_shared(struct super_block *sb) in super_unlock_shared() argument
81 super_unlock(sb, false); in super_unlock_shared()
84 static bool super_flags(const struct super_block *sb, unsigned int flags) in super_flags() argument
90 return smp_load_acquire(&sb->s_flags) & flags; in super_flags()
95 * @sb: superblock to wait for
103 * The caller must have acquired a temporary reference on @sb->s_count.
109 static __must_check bool super_lock(struct super_block *sb, bool excl) in super_lock() argument
111 lockdep_assert_not_held(&sb->s_umount); in super_lock()
114 wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING)); in super_lock()
117 if (super_flags(sb, SB_DYING)) in super_lock()
120 __super_lock(sb, excl); in super_lock()
124 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to in super_lock()
127 if (sb->s_flags & SB_DYING) { in super_lock()
128 super_unlock(sb, excl); in super_lock()
132 WARN_ON_ONCE(!(sb->s_flags & SB_BORN)); in super_lock()
136 /* wait and try to acquire read-side of @sb->s_umount */
137 static inline bool super_lock_shared(struct super_block *sb) in super_lock_shared() argument
139 return super_lock(sb, false); in super_lock_shared()
142 /* wait and try to acquire write-side of @sb->s_umount */
143 static inline bool super_lock_excl(struct super_block *sb) in super_lock_excl() argument
145 return super_lock(sb, true); in super_lock_excl()
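super_lock_shared()/super_lock_excl() only return true once the superblock is SB_BORN and not SB_DYING, and on false no lock is held. A minimal caller sketch of the pattern (the same shape is visible in iterate_supers() further down):

        if (!super_lock_shared(sb))
                return;                 /* superblock is dying, s_umount not held */
        /* ... inspect the superblock under a shared hold of sb->s_umount ... */
        super_unlock_shared(sb);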
150 static void super_wake(struct super_block *sb, unsigned int flag) in super_wake() argument
160 smp_store_release(&sb->s_flags, sb->s_flags | flag); in super_wake()
167 wake_up_var(&sb->s_flags); in super_wake()
171 * One thing we have to be careful of with a per-sb shrinker is that we don't
180 struct super_block *sb; in super_cache_scan() local
187 sb = shrink->private_data; in super_cache_scan()
196 if (!super_trylock_shared(sb)) in super_cache_scan()
199 if (sb->s_op->nr_cached_objects) in super_cache_scan()
200 fs_objects = sb->s_op->nr_cached_objects(sb, sc); in super_cache_scan()
202 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc); in super_cache_scan()
203 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc); in super_cache_scan()
221 freed = prune_dcache_sb(sb, sc); in super_cache_scan()
223 freed += prune_icache_sb(sb, sc); in super_cache_scan()
227 freed += sb->s_op->free_cached_objects(sb, sc); in super_cache_scan()
230 super_unlock_shared(sb); in super_cache_scan()
237 struct super_block *sb; in super_cache_count() local
240 sb = shrink->private_data; in super_cache_count()
256 if (!(sb->s_flags & SB_BORN)) in super_cache_count()
260 if (sb->s_op && sb->s_op->nr_cached_objects) in super_cache_count()
261 total_objects = sb->s_op->nr_cached_objects(sb, sc); in super_cache_count()
263 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc); in super_cache_count()
264 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc); in super_cache_count()
332 * When it cannot find a suitable sb, it allocates a new in alloc_super()
379 "sb-%s", type->name); in alloc_super()
417 * @sb: superblock in question
422 void put_super(struct super_block *sb) in put_super() argument
425 __put_super(sb); in put_super()
429 static void kill_super_notify(struct super_block *sb) in kill_super_notify() argument
431 lockdep_assert_not_held(&sb->s_umount); in kill_super_notify()
434 if (sb->s_flags & SB_DEAD) in kill_super_notify()
445 hlist_del_init(&sb->s_instances); in kill_super_notify()
450 * We don't need @sb->s_umount here as every concurrent caller in kill_super_notify()
454 super_wake(sb, SB_DEAD); in kill_super_notify()
514 * @sb: superblock to acquire
519 * sb->kill() and be marked as SB_DEAD.
524 static bool grab_super(struct super_block *sb) in grab_super() argument
528 sb->s_count++; in grab_super()
530 locked = super_lock_excl(sb); in grab_super()
532 if (atomic_inc_not_zero(&sb->s_active)) { in grab_super()
533 put_super(sb); in grab_super()
536 super_unlock_excl(sb); in grab_super()
538 wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD)); in grab_super()
539 put_super(sb); in grab_super()
545 * @sb: reference we are trying to grab
560 bool super_trylock_shared(struct super_block *sb) in super_trylock_shared() argument
562 if (down_read_trylock(&sb->s_umount)) { in super_trylock_shared()
563 if (!(sb->s_flags & SB_DYING) && sb->s_root && in super_trylock_shared()
564 (sb->s_flags & SB_BORN)) in super_trylock_shared()
566 super_unlock_shared(sb); in super_trylock_shared()
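grab_super() takes an active reference plus the exclusive lock (or waits for SB_DEAD and fails), while super_trylock_shared() is the opportunistic variant meant for reclaim. A minimal sketch of the trylock pattern, modelled on super_cache_scan() above:

        if (!super_trylock_shared(sb))
                return SHRINK_STOP;     /* never block reclaim on sb->s_umount */
        /* ... prune per-sb dentry/inode caches ... */
        super_unlock_shared(sb);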
574 * @sb: superblock to retire
588 void retire_super(struct super_block *sb) in retire_super() argument
590 WARN_ON(!sb->s_bdev); in retire_super()
591 __super_lock_excl(sb); in retire_super()
592 if (sb->s_iflags & SB_I_PERSB_BDI) { in retire_super()
593 bdi_unregister(sb->s_bdi); in retire_super()
594 sb->s_iflags &= ~SB_I_PERSB_BDI; in retire_super()
596 sb->s_iflags |= SB_I_RETIRED; in retire_super()
597 super_unlock_excl(sb); in retire_super()
603 * @sb: superblock to kill
615 void generic_shutdown_super(struct super_block *sb) in generic_shutdown_super() argument
617 const struct super_operations *sop = sb->s_op; in generic_shutdown_super()
619 if (sb->s_root) { in generic_shutdown_super()
620 shrink_dcache_for_umount(sb); in generic_shutdown_super()
621 sync_filesystem(sb); in generic_shutdown_super()
622 sb->s_flags &= ~SB_ACTIVE; in generic_shutdown_super()
624 cgroup_writeback_umount(sb); in generic_shutdown_super()
627 evict_inodes(sb); in generic_shutdown_super()
633 fsnotify_sb_delete(sb); in generic_shutdown_super()
634 security_sb_delete(sb); in generic_shutdown_super()
636 if (sb->s_dio_done_wq) { in generic_shutdown_super()
637 destroy_workqueue(sb->s_dio_done_wq); in generic_shutdown_super()
638 sb->s_dio_done_wq = NULL; in generic_shutdown_super()
642 sop->put_super(sb); in generic_shutdown_super()
648 fscrypt_destroy_keyring(sb); in generic_shutdown_super()
650 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), in generic_shutdown_super()
652 sb->s_id, sb->s_type->name)) { in generic_shutdown_super()
660 spin_lock(&sb->s_inode_list_lock); in generic_shutdown_super()
661 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { in generic_shutdown_super()
666 spin_unlock(&sb->s_inode_list_lock); in generic_shutdown_super()
676 * sget{_fc}() until we passed sb->kill_sb(). in generic_shutdown_super()
678 super_wake(sb, SB_DYING); in generic_shutdown_super()
679 super_unlock_excl(sb); in generic_shutdown_super()
680 if (sb->s_bdi != &noop_backing_dev_info) { in generic_shutdown_super()
681 if (sb->s_iflags & SB_I_PERSB_BDI) in generic_shutdown_super()
682 bdi_unregister(sb->s_bdi); in generic_shutdown_super()
683 bdi_put(sb->s_bdi); in generic_shutdown_super()
684 sb->s_bdi = &noop_backing_dev_info; in generic_shutdown_super()
875 void drop_super(struct super_block *sb) in drop_super() argument
877 super_unlock_shared(sb); in drop_super()
878 put_super(sb); in drop_super()
883 void drop_super_exclusive(struct super_block *sb) in drop_super_exclusive() argument
885 super_unlock_excl(sb); in drop_super_exclusive()
886 put_super(sb); in drop_super_exclusive()
892 struct super_block *sb, *p = NULL; in __iterate_supers() local
895 list_for_each_entry(sb, &super_blocks, s_list) { in __iterate_supers()
896 if (super_flags(sb, SB_DYING)) in __iterate_supers()
898 sb->s_count++; in __iterate_supers()
901 f(sb); in __iterate_supers()
906 p = sb; in __iterate_supers()
922 struct super_block *sb, *p = NULL; in iterate_supers() local
925 list_for_each_entry(sb, &super_blocks, s_list) { in iterate_supers()
928 sb->s_count++; in iterate_supers()
931 locked = super_lock_shared(sb); in iterate_supers()
933 if (sb->s_root) in iterate_supers()
934 f(sb, arg); in iterate_supers()
935 super_unlock_shared(sb); in iterate_supers()
941 p = sb; in iterate_supers()
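iterate_supers() bumps s_count and holds s_umount shared around each callback, skipping superblocks without a root. A small callback sketch (sync_one_sb is a hypothetical name, modelled on the callers in fs/sync.c):

static void sync_one_sb(struct super_block *sb, void *arg)
{
        if (!sb_rdonly(sb))
                sync_filesystem(sb);
}

A caller would then invoke iterate_supers(sync_one_sb, NULL) from process context.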
960 struct super_block *sb, *p = NULL; in iterate_supers_type() local
963 hlist_for_each_entry(sb, &type->fs_supers, s_instances) { in iterate_supers_type()
966 sb->s_count++; in iterate_supers_type()
969 locked = super_lock_shared(sb); in iterate_supers_type()
971 if (sb->s_root) in iterate_supers_type()
972 f(sb, arg); in iterate_supers_type()
973 super_unlock_shared(sb); in iterate_supers_type()
979 p = sb; in iterate_supers_type()
990 struct super_block *sb; in user_get_super() local
993 list_for_each_entry(sb, &super_blocks, s_list) { in user_get_super()
994 if (sb->s_dev == dev) { in user_get_super()
997 sb->s_count++; in user_get_super()
1000 locked = super_lock(sb, excl); in user_get_super()
1002 if (sb->s_root) in user_get_super()
1003 return sb; in user_get_super()
1004 super_unlock(sb, excl); in user_get_super()
1008 __put_super(sb); in user_get_super()
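user_get_super() returns with an elevated s_count and s_umount held (shared or exclusive, per @excl), so it pairs with drop_super()/drop_super_exclusive() above. A minimal sketch:

        struct super_block *sb = user_get_super(dev, false);

        if (sb) {
                /* ... query the filesystem under the shared lock ... */
                drop_super(sb);
        }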
1024 struct super_block *sb = fc->root->d_sb; in reconfigure_super() local
1032 if (sb->s_writers.frozen != SB_UNFROZEN) in reconfigure_super()
1035 retval = security_sb_remount(sb, fc->security); in reconfigure_super()
1041 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev && in reconfigure_super()
1042 bdev_read_only(sb->s_bdev)) in reconfigure_super()
1045 remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb); in reconfigure_super()
1046 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb); in reconfigure_super()
1050 if (!hlist_empty(&sb->s_pins)) { in reconfigure_super()
1051 super_unlock_excl(sb); in reconfigure_super()
1052 group_pin_kill(&sb->s_pins); in reconfigure_super()
1053 __super_lock_excl(sb); in reconfigure_super()
1054 if (!sb->s_root) in reconfigure_super()
1056 if (sb->s_writers.frozen != SB_UNFROZEN) in reconfigure_super()
1058 remount_ro = !sb_rdonly(sb); in reconfigure_super()
1061 shrink_dcache_sb(sb); in reconfigure_super()
1063 /* If we are reconfiguring to RDONLY and current sb is read/write, in reconfigure_super()
1068 sb_start_ro_state_change(sb); in reconfigure_super()
1070 retval = sb_prepare_remount_readonly(sb); in reconfigure_super()
1079 sb_start_ro_state_change(sb); in reconfigure_super()
1089 sb->s_type->name, retval); in reconfigure_super()
1093 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) | in reconfigure_super()
1095 sb_end_ro_state_change(sb); in reconfigure_super()
1105 if (remount_ro && sb->s_bdev) in reconfigure_super()
1106 invalidate_bdev(sb->s_bdev); in reconfigure_super()
1110 sb_end_ro_state_change(sb); in reconfigure_super()
1114 static void do_emergency_remount_callback(struct super_block *sb) in do_emergency_remount_callback() argument
1116 bool locked = super_lock_excl(sb); in do_emergency_remount_callback()
1118 if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) { in do_emergency_remount_callback()
1121 fc = fs_context_for_reconfigure(sb->s_root, in do_emergency_remount_callback()
1130 super_unlock_excl(sb); in do_emergency_remount_callback()
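A hedged sketch of how such a read-only reconfigure is driven through the fs_context API, consistent with the fs_context_for_reconfigure(sb->s_root, ...) call shown above (the exact error handling in the lines not listed is an assumption):

                fc = fs_context_for_reconfigure(sb->s_root,
                                                SB_RDONLY | SB_FORCE, SB_RDONLY);
                if (!IS_ERR(fc)) {
                        if (parse_monolithic_mount_data(fc, NULL) == 0)
                                (void)reconfigure_super(fc);
                        put_fs_context(fc);
                }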
1151 static void do_thaw_all_callback(struct super_block *sb) in do_thaw_all_callback() argument
1153 bool locked = super_lock_excl(sb); in do_thaw_all_callback()
1155 if (locked && sb->s_root) { in do_thaw_all_callback()
1157 while (sb->s_bdev && !bdev_thaw(sb->s_bdev)) in do_thaw_all_callback()
1158 pr_warn("Emergency Thaw on %pg\n", sb->s_bdev); in do_thaw_all_callback()
1159 thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE); in do_thaw_all_callback()
1163 super_unlock_excl(sb); in do_thaw_all_callback()
1234 void kill_anon_super(struct super_block *sb) in kill_anon_super() argument
1236 dev_t dev = sb->s_dev; in kill_anon_super()
1237 generic_shutdown_super(sb); in kill_anon_super()
1238 kill_super_notify(sb); in kill_anon_super()
1243 void kill_litter_super(struct super_block *sb) in kill_litter_super() argument
1245 if (sb->s_root) in kill_litter_super()
1246 d_genocide(sb->s_root); in kill_litter_super()
1247 kill_anon_super(sb); in kill_litter_super()
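kill_anon_super() and kill_litter_super() are the stock ->kill_sb helpers for filesystems without a backing block device. A sketch of a hypothetical filesystem wrapping one to free its private data (the myfs_* names are illustrative, not from the source):

static void myfs_kill_sb(struct super_block *sb)
{
        struct myfs_fs_info *fsi = sb->s_fs_info;       /* hypothetical private data */

        kill_litter_super(sb); /* d_genocide() + generic_shutdown_super() + SB_DEAD notify */
        kfree(fsi);
}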
1251 int set_anon_super_fc(struct super_block *sb, struct fs_context *fc) in set_anon_super_fc() argument
1253 return set_anon_super(sb, NULL); in set_anon_super_fc()
1257 static int test_keyed_super(struct super_block *sb, struct fs_context *fc) in test_keyed_super() argument
1259 return sb->s_fs_info == fc->s_fs_info; in test_keyed_super()
1269 int (*fill_super)(struct super_block *sb, in vfs_get_super() argument
1272 struct super_block *sb; in vfs_get_super() local
1275 sb = sget_fc(fc, test, set_anon_super_fc); in vfs_get_super()
1276 if (IS_ERR(sb)) in vfs_get_super()
1277 return PTR_ERR(sb); in vfs_get_super()
1279 if (!sb->s_root) { in vfs_get_super()
1280 err = fill_super(sb, fc); in vfs_get_super()
1284 sb->s_flags |= SB_ACTIVE; in vfs_get_super()
1287 fc->root = dget(sb->s_root); in vfs_get_super()
1291 deactivate_locked_super(sb); in vfs_get_super()
1296 int (*fill_super)(struct super_block *sb, in get_tree_nodev() argument
1304 int (*fill_super)(struct super_block *sb, in get_tree_single() argument
1312 int (*fill_super)(struct super_block *sb, in get_tree_keyed() argument
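get_tree_nodev()/get_tree_single()/get_tree_keyed() all funnel into vfs_get_super() above, differing only in how sget_fc() keys the superblock. A minimal sketch of how a filesystem's ->get_tree plugs in (myfs_* names are hypothetical):

static int myfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* set sb->s_op, allocate the root inode, sb->s_root = d_make_root(...), ... */
        return 0;
}

static int myfs_get_tree(struct fs_context *fc)
{
        return get_tree_nodev(fc, myfs_fill_super);
}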
1376 struct super_block *sb = bdev->bd_holder; in bdev_super_lock() local
1380 lockdep_assert_not_held(&sb->s_umount); in bdev_super_lock()
1383 /* Make sure sb doesn't go away from under us */ in bdev_super_lock()
1385 sb->s_count++; in bdev_super_lock()
1390 locked = super_lock(sb, excl); in bdev_super_lock()
1396 put_super(sb); in bdev_super_lock()
1401 if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) { in bdev_super_lock()
1402 super_unlock(sb, excl); in bdev_super_lock()
1406 return sb; in bdev_super_lock()
1411 struct super_block *sb; in fs_bdev_mark_dead() local
1413 sb = bdev_super_lock(bdev, false); in fs_bdev_mark_dead()
1414 if (!sb) in fs_bdev_mark_dead()
1418 sync_filesystem(sb); in fs_bdev_mark_dead()
1419 shrink_dcache_sb(sb); in fs_bdev_mark_dead()
1420 invalidate_inodes(sb); in fs_bdev_mark_dead()
1421 if (sb->s_op->shutdown) in fs_bdev_mark_dead()
1422 sb->s_op->shutdown(sb); in fs_bdev_mark_dead()
1424 super_unlock_shared(sb); in fs_bdev_mark_dead()
1429 struct super_block *sb; in fs_bdev_sync() local
1431 sb = bdev_super_lock(bdev, false); in fs_bdev_sync()
1432 if (!sb) in fs_bdev_sync()
1435 sync_filesystem(sb); in fs_bdev_sync()
1436 super_unlock_shared(sb); in fs_bdev_sync()
1442 struct super_block *sb; in get_bdev_super() local
1444 sb = bdev_super_lock(bdev, true); in get_bdev_super()
1445 if (sb) { in get_bdev_super()
1446 active = atomic_inc_not_zero(&sb->s_active); in get_bdev_super()
1447 super_unlock_excl(sb); in get_bdev_super()
1451 return sb; in get_bdev_super()
1471 struct super_block *sb; in fs_bdev_freeze() local
1476 sb = get_bdev_super(bdev); in fs_bdev_freeze()
1477 if (!sb) in fs_bdev_freeze()
1480 if (sb->s_op->freeze_super) in fs_bdev_freeze()
1481 error = sb->s_op->freeze_super(sb, in fs_bdev_freeze()
1484 error = freeze_super(sb, in fs_bdev_freeze()
1488 deactivate_super(sb); in fs_bdev_freeze()
1511 struct super_block *sb; in fs_bdev_thaw() local
1525 sb = get_bdev_super(bdev); in fs_bdev_thaw()
1526 if (!sb) in fs_bdev_thaw()
1529 if (sb->s_op->thaw_super) in fs_bdev_thaw()
1530 error = sb->s_op->thaw_super(sb, in fs_bdev_thaw()
1533 error = thaw_super(sb, in fs_bdev_thaw()
1535 deactivate_super(sb); in fs_bdev_thaw()
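fs_bdev_mark_dead(), fs_bdev_sync(), fs_bdev_freeze() and fs_bdev_thaw() are block-device holder callbacks; a sketch of the fs_holder_ops table they are presumably collected in (that table is passed by name to bdev_file_open_by_dev() in setup_bdev_super() below):

const struct blk_holder_ops fs_holder_ops = {
        .mark_dead      = fs_bdev_mark_dead,
        .sync           = fs_bdev_sync,
        .freeze         = fs_bdev_freeze,
        .thaw           = fs_bdev_thaw,
};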
1547 int setup_bdev_super(struct super_block *sb, int sb_flags, in setup_bdev_super() argument
1554 bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops); in setup_bdev_super()
1583 sb->s_bdev_file = bdev_file; in setup_bdev_super()
1584 sb->s_bdev = bdev; in setup_bdev_super()
1585 sb->s_bdi = bdi_get(bdev->bd_disk->bdi); in setup_bdev_super()
1587 sb->s_iflags |= SB_I_STABLE_WRITES; in setup_bdev_super()
1590 snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev); in setup_bdev_super()
1591 shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name, in setup_bdev_super()
1592 sb->s_id); in setup_bdev_super()
1593 sb_set_blocksize(sb, block_size(bdev)); in setup_bdev_super()
1605 int (*fill_super)(struct super_block *sb, in get_tree_bdev_flags() argument
1706 void kill_block_super(struct super_block *sb) in kill_block_super() argument
1708 struct block_device *bdev = sb->s_bdev; in kill_block_super()
1710 generic_shutdown_super(sb); in kill_block_super()
1713 bdev_fput(sb->s_bdev_file); in kill_block_super()
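For block-backed filesystems the usual pairing is get_tree_bdev()/get_tree_bdev_flags() at mount time and kill_block_super() at teardown. A sketch of a hypothetical file_system_type wiring (myfs_* names are illustrative):

static struct file_system_type myfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "myfs",
        .init_fs_context        = myfs_init_fs_context,        /* its ->get_tree would call get_tree_bdev() */
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};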
1805 struct super_block *sb; in vfs_get_tree() local
1827 sb = fc->root->d_sb; in vfs_get_tree()
1828 WARN_ON(!sb->s_bdi); in vfs_get_tree()
1837 super_wake(sb, SB_BORN); in vfs_get_tree()
1839 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL); in vfs_get_tree()
1851 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to " in vfs_get_tree()
1852 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes); in vfs_get_tree()
1862 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...) in super_setup_bdi_name() argument
1879 WARN_ON(sb->s_bdi != &noop_backing_dev_info); in super_setup_bdi_name()
1880 sb->s_bdi = bdi; in super_setup_bdi_name()
1881 sb->s_iflags |= SB_I_PERSB_BDI; in super_setup_bdi_name()
1891 int super_setup_bdi(struct super_block *sb) in super_setup_bdi() argument
1895 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name, in super_setup_bdi()
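super_setup_bdi_name()/super_setup_bdi() give a filesystem its own backing_dev_info and set SB_I_PERSB_BDI so it is unregistered again in retire_super()/generic_shutdown_super() above. A minimal fill_super-time sketch (format string and readahead tweak are illustrative):

        int err;

        err = super_setup_bdi_name(sb, "myfs-%s", sb->s_id);
        if (err)
                return err;
        sb->s_bdi->ra_pages = 0;        /* hypothetical: this fs does its own readahead */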
1902 * @sb: the super for which we wait
1908 static void sb_wait_write(struct super_block *sb, int level) in sb_wait_write() argument
1910 percpu_down_write(sb->s_writers.rw_sem + level-1); in sb_wait_write()
1917 static void lockdep_sb_freeze_release(struct super_block *sb) in lockdep_sb_freeze_release() argument
1922 percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_); in lockdep_sb_freeze_release()
1926 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1928 static void lockdep_sb_freeze_acquire(struct super_block *sb) in lockdep_sb_freeze_acquire() argument
1933 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_); in lockdep_sb_freeze_acquire()
1936 static void sb_freeze_unlock(struct super_block *sb, int level) in sb_freeze_unlock() argument
1939 percpu_up_write(sb->s_writers.rw_sem + level); in sb_freeze_unlock()
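sb_wait_write()/sb_freeze_unlock() take and drop the write side of the per-level percpu rwsems in sb->s_writers; the read side is what ordinary writers hold, e.g.:

        sb_start_write(sb);     /* blocks while freeze_super() holds SB_FREEZE_WRITE */
        /* ... modify the filesystem ... */
        sb_end_write(sb);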
1942 static int wait_for_partially_frozen(struct super_block *sb) in wait_for_partially_frozen() argument
1947 unsigned short old = sb->s_writers.frozen; in wait_for_partially_frozen()
1949 up_write(&sb->s_umount); in wait_for_partially_frozen()
1950 ret = wait_var_event_killable(&sb->s_writers.frozen, in wait_for_partially_frozen()
1951 sb->s_writers.frozen != old); in wait_for_partially_frozen()
1952 down_write(&sb->s_umount); in wait_for_partially_frozen()
1954 sb->s_writers.frozen != SB_UNFROZEN && in wait_for_partially_frozen()
1955 sb->s_writers.frozen != SB_FREEZE_COMPLETE); in wait_for_partially_frozen()
1963 static inline int freeze_inc(struct super_block *sb, enum freeze_holder who) in freeze_inc() argument
1969 ++sb->s_writers.freeze_kcount; in freeze_inc()
1971 ++sb->s_writers.freeze_ucount; in freeze_inc()
1972 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; in freeze_inc()
1975 static inline int freeze_dec(struct super_block *sb, enum freeze_holder who) in freeze_dec() argument
1980 if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount) in freeze_dec()
1981 --sb->s_writers.freeze_kcount; in freeze_dec()
1982 if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount) in freeze_dec()
1983 --sb->s_writers.freeze_ucount; in freeze_dec()
1984 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; in freeze_dec()
1987 static inline bool may_freeze(struct super_block *sb, enum freeze_holder who) in may_freeze() argument
1994 sb->s_writers.freeze_kcount == 0; in may_freeze()
1997 sb->s_writers.freeze_ucount == 0; in may_freeze()
2003 * @sb: the super to lock
2028 * During this function, sb->s_writers.frozen goes through these values:
2051 * sb->s_writers.frozen is protected by sb->s_umount.
2056 int freeze_super(struct super_block *sb, enum freeze_holder who) in freeze_super() argument
2060 if (!super_lock_excl(sb)) { in freeze_super()
2064 atomic_inc(&sb->s_active); in freeze_super()
2067 if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) { in freeze_super()
2068 if (may_freeze(sb, who)) in freeze_super()
2069 ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1); in freeze_super()
2073 deactivate_locked_super(sb); in freeze_super()
2077 if (sb->s_writers.frozen != SB_UNFROZEN) { in freeze_super()
2078 ret = wait_for_partially_frozen(sb); in freeze_super()
2080 deactivate_locked_super(sb); in freeze_super()
2087 if (sb_rdonly(sb)) { in freeze_super()
2089 WARN_ON_ONCE(freeze_inc(sb, who) > 1); in freeze_super()
2090 sb->s_writers.frozen = SB_FREEZE_COMPLETE; in freeze_super()
2091 wake_up_var(&sb->s_writers.frozen); in freeze_super()
2092 super_unlock_excl(sb); in freeze_super()
2096 sb->s_writers.frozen = SB_FREEZE_WRITE; in freeze_super()
2098 super_unlock_excl(sb); in freeze_super()
2099 sb_wait_write(sb, SB_FREEZE_WRITE); in freeze_super()
2100 __super_lock_excl(sb); in freeze_super()
2103 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT; in freeze_super()
2104 sb_wait_write(sb, SB_FREEZE_PAGEFAULT); in freeze_super()
2107 ret = sync_filesystem(sb); in freeze_super()
2109 sb->s_writers.frozen = SB_UNFROZEN; in freeze_super()
2110 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT); in freeze_super()
2111 wake_up_var(&sb->s_writers.frozen); in freeze_super()
2112 deactivate_locked_super(sb); in freeze_super()
2117 sb->s_writers.frozen = SB_FREEZE_FS; in freeze_super()
2118 sb_wait_write(sb, SB_FREEZE_FS); in freeze_super()
2120 if (sb->s_op->freeze_fs) { in freeze_super()
2121 ret = sb->s_op->freeze_fs(sb); in freeze_super()
2125 sb->s_writers.frozen = SB_UNFROZEN; in freeze_super()
2126 sb_freeze_unlock(sb, SB_FREEZE_FS); in freeze_super()
2127 wake_up_var(&sb->s_writers.frozen); in freeze_super()
2128 deactivate_locked_super(sb); in freeze_super()
2136 WARN_ON_ONCE(freeze_inc(sb, who) > 1); in freeze_super()
2137 sb->s_writers.frozen = SB_FREEZE_COMPLETE; in freeze_super()
2138 wake_up_var(&sb->s_writers.frozen); in freeze_super()
2139 lockdep_sb_freeze_release(sb); in freeze_super()
2140 super_unlock_excl(sb); in freeze_super()
2151 static int thaw_super_locked(struct super_block *sb, enum freeze_holder who) in thaw_super_locked() argument
2155 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) in thaw_super_locked()
2162 if (freeze_dec(sb, who)) in thaw_super_locked()
2165 if (sb_rdonly(sb)) { in thaw_super_locked()
2166 sb->s_writers.frozen = SB_UNFROZEN; in thaw_super_locked()
2167 wake_up_var(&sb->s_writers.frozen); in thaw_super_locked()
2171 lockdep_sb_freeze_acquire(sb); in thaw_super_locked()
2173 if (sb->s_op->unfreeze_fs) { in thaw_super_locked()
2174 error = sb->s_op->unfreeze_fs(sb); in thaw_super_locked()
2177 freeze_inc(sb, who); in thaw_super_locked()
2178 lockdep_sb_freeze_release(sb); in thaw_super_locked()
2183 sb->s_writers.frozen = SB_UNFROZEN; in thaw_super_locked()
2184 wake_up_var(&sb->s_writers.frozen); in thaw_super_locked()
2185 sb_freeze_unlock(sb, SB_FREEZE_FS); in thaw_super_locked()
2187 deactivate_locked_super(sb); in thaw_super_locked()
2191 super_unlock_excl(sb); in thaw_super_locked()
2197 * @sb: the super to thaw
2212 int thaw_super(struct super_block *sb, enum freeze_holder who) in thaw_super() argument
2214 if (!super_lock_excl(sb)) { in thaw_super()
2218 return thaw_super_locked(sb, who); in thaw_super()
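freeze_super()/thaw_super() take an active reference, walk the freeze levels (SB_FREEZE_WRITE, SB_FREEZE_PAGEFAULT, SB_FREEZE_FS) and account the owner through freeze_kcount/freeze_ucount. A minimal in-kernel usage sketch:

        int err;

        err = freeze_super(sb, FREEZE_HOLDER_KERNEL);
        if (err)
                return err;

        /* ... the filesystem is now quiescent ... */

        return thaw_super(sb, FREEZE_HOLDER_KERNEL);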
2228 int sb_init_dio_done_wq(struct super_block *sb) in sb_init_dio_done_wq() argument
2233 sb->s_id); in sb_init_dio_done_wq()
2239 old = cmpxchg(&sb->s_dio_done_wq, NULL, wq); in sb_init_dio_done_wq()
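sb_init_dio_done_wq() lazily allocates the per-sb direct-I/O completion workqueue and installs it with cmpxchg() so a racing loser frees its copy. A caller-side sketch of the lazy-init check:

        /* before deferring a dio completion to the workqueue */
        if (!sb->s_dio_done_wq) {
                int err = sb_init_dio_done_wq(sb);

                if (err)
                        return err;
        }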