// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static inline bool wait_born(struct super_block *sb)
{
	unsigned int flags;

	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see SB_BORN or SB_DYING after we're woken.
	 */
	flags = smp_load_acquire(&sb->s_flags);
	return flags & (SB_BORN | SB_DYING);
}
/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: This returns true if SB_BORN was set, false if SB_DYING was
 *         set. The function acquires s_umount and returns with it held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

relock:
	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING)
		return false;

	/* Has called ->get_tree() successfully. */
	if (sb->s_flags & SB_BORN)
		return true;

	super_unlock(sb, excl);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, wait_born(sb));

	/*
	 * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
	 * Just reacquire @sb->s_umount for the caller.
	 */
	goto relock;
}

/* wait and acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by the user
	 * seeing SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}
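/*
 * Illustrative sketch (not part of the original file): the intended
 * calling convention for super_lock(). A walker takes a temporary
 * @s_count reference under sb_lock, then waits for the superblock to
 * become born or dying. The function name example_walk_super() is
 * hypothetical.
 */
#if 0
static void example_walk_super(struct super_block *sb)
{
	bool born;

	spin_lock(&sb_lock);
	sb->s_count++;			/* temporary reference */
	spin_unlock(&sb_lock);

	born = super_lock_shared(sb);	/* sleeps until SB_BORN or SB_DYING */
	if (born && sb->s_root) {
		/* superblock is fully set up; safe to inspect it here */
	}
	super_unlock_shared(sb);
	put_super(sb);			/* drop the temporary reference */
}
#endif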
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long fs_objects = 0;
	long total_objects;
	long freed = 0;
	long dentries;
	long inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block. alloc_super()
 *	returns a pointer to the new superblock or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}
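/*
 * Illustrative sketch (not part of the original file): the two reference
 * counts at play here. @s_count keeps the struct super_block memory alive,
 * while @s_active keeps the filesystem itself alive. example_pin_fs() is a
 * hypothetical helper showing, in simplified form (without taking s_umount),
 * how a temporary reference is traded for an active one; grab_super() below
 * is the real, locked version of this pattern.
 */
#if 0
static bool example_pin_fs(struct super_block *sb)
{
	spin_lock(&sb_lock);
	sb->s_count++;			/* memory won't go away */
	spin_unlock(&sb_lock);

	if (atomic_inc_not_zero(&sb->s_active)) {
		/* fs is pinned; release later with deactivate_super() */
		put_super(sb);		/* temporary ref no longer needed */
		return true;
	}
	put_super(sb);			/* fs was already going away */
	return false;
}
#endif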
/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there's no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left. In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller. If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference. grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference. grab_super()
 *	is called with sb_lock held and drops it. Returns 1 in case of
 *	success, 0 if we failed (the superblock was already dead or
 *	dying when grab_super() was called). Note that this is only
 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
 *	of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	bool born;

	s->s_count++;
	spin_unlock(&sb_lock);
	born = super_lock_excl(s);
	if (born && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	super_unlock_excl(s);
	put_super(s);
	return 0;
}

static inline bool wait_dead(struct super_block *sb)
{
	unsigned int flags;

	/*
	 * Pairs with memory barrier in super_wake() and ensures
	 * that we see SB_DEAD after we're woken.
	 */
	flags = smp_load_acquire(&sb->s_flags);
	return flags & SB_DEAD;
}

/**
 * grab_super_dead - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super_dead(struct super_block *sb)
{
	sb->s_count++;
	if (grab_super(sb)) {
		put_super(sb);
		lockdep_assert_held(&sb->s_umount);
		return true;
	}
	wait_var_event(&sb->s_flags, wait_dead(sb));
	lockdep_assert_not_held(&sb->s_umount);
	put_super(sb);
	return false;
}

/*
 *	super_trylock_shared - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown. This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and
 *	filesystem already got into shutdown, and returns true with the s_umount
 *	lock held in read mode in case of success. On successful return,
 *	the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read(). There's a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}
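/*
 * Illustrative sketch (not part of the original file): the trylock pattern
 * used by reclaim-style callers such as super_cache_scan() above. Because
 * no ->s_count reference is taken, this is only safe where the caller
 * already knows the superblock cannot be freed out from under it.
 * example_reclaim_from_sb() is a hypothetical function name.
 */
#if 0
static long example_reclaim_from_sb(struct super_block *sb)
{
	long freed = 0;

	if (!super_trylock_shared(sb))
		return 0;	/* fs is shutting down or lock is contended */

	/* fs is guaranteed alive until we drop s_umount */
	/* ... scan and free per-sb cached objects here ... */

	super_unlock_shared(sb);
	return freed;
}
#endif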
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown. Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects. Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}
/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super_dead(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
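/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * ->get_tree() might drive sget_fc() with its own match/setup callbacks.
 * All example_* names and the example_fs_info structure are hypothetical.
 */
#if 0
struct example_fs_info { dev_t key; };

static int example_test(struct super_block *sb, struct fs_context *fc)
{
	/* match on whatever identifies "the same" filesystem instance */
	struct example_fs_info *info = fc->s_fs_info;

	return ((struct example_fs_info *)sb->s_fs_info)->key == info->key;
}

static int example_set(struct super_block *sb, struct fs_context *fc)
{
	/* sget_fc() already moved fc->s_fs_info into sb->s_fs_info */
	return 0;
}

static int example_get_tree(struct fs_context *fc)
{
	struct super_block *sb = sget_fc(fc, example_test, example_set);

	if (IS_ERR(sb))
		return PTR_ERR(sb);
	if (!sb->s_root) {
		/* new superblock: fill it in (must create sb->s_root),
		 * then mark it usable */
		/* ... example_fill_super(sb, fc) ... */
		sb->s_flags |= SB_ACTIVE;
	}
	fc->root = dget(sb->s_root);
	return 0;
}
#endif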
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super_dead(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		/* Pairs with memory barrier in super_wake(). */
		if (smp_load_acquire(&sb->s_flags) & SB_DYING)
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool born;

		sb->s_count++;
		spin_unlock(&sb_lock);

		born = super_lock_shared(sb);
		if (born && sb->s_root)
			f(sb, arg);
		super_unlock_shared(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool born;

		sb->s_count++;
		spin_unlock(&sb_lock);

		born = super_lock_shared(sb);
		if (born && sb->s_root)
			f(sb, arg);
		super_unlock_shared(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				return NULL;
			super_unlock_excl(sb);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			bool born;

			sb->s_count++;
			spin_unlock(&sb_lock);
			/* still alive? */
			born = super_lock(sb, excl);
			if (born && sb->s_root)
				return sb;
			super_unlock(sb, excl);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			break;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
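/*
 * Illustrative sketch (not part of the original file): a minimal
 * iterate_supers() callback. The iterator holds s_umount shared around
 * each call, so the callback may inspect the superblock but must not
 * take s_umount itself. The example_* names are hypothetical.
 */
#if 0
static void example_count_rw(struct super_block *sb, void *arg)
{
	int *nr_rw = arg;

	if (!sb_rdonly(sb))
		(*nr_rw)++;
}

static int example_nr_rw_supers(void)
{
	int nr_rw = 0;

	iterate_supers(example_count_rw, &nr_rw);
	return nr_rw;
}
#endif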
/**
 *	reconfigure_super	-	asks filesystem to change superblock parameters
 *	@fc: The superblock and configuration
 *
 *	Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (e.g. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}
static void do_emergency_remount_callback(struct super_block *sb)
{
	bool born = super_lock_excl(sb);

	if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	super_unlock_excl(sb);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	bool born = super_lock_excl(sb);

	if (born && sb->s_root) {
		if (IS_ENABLED(CONFIG_BLOCK))
			while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
				pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
		thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
	} else {
		super_unlock_excl(sb);
	}
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);
/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context. Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}
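/*
 * Illustrative sketch (not part of the original file): a minimal virtual
 * filesystem wiring get_tree_nodev() above into its fs_context operations.
 * All example_* names are hypothetical.
 */
#if 0
static int example_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* create the root inode/dentry here; sb->s_dev was already
	 * assigned by set_anon_super_fc() */
	return -ENOMEM;		/* placeholder */
}

static int example_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, example_fill_super);
}

static const struct fs_context_operations example_context_ops = {
	.get_tree	= example_get_tree,
};

static int example_init_fs_context(struct fs_context *fc)
{
	fc->ops = &example_context_ops;
	return 0;
}

static struct file_system_type example_fs_type = {
	.name			= "example",
	.init_fs_context	= example_init_fs_context,
	.kill_sb		= kill_anon_super,
};
#endif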
/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock held and releases it.
 */
static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool born;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);
	mutex_unlock(&bdev->bd_holder_lock);

	born = super_lock_shared(sb);
	if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock_shared(sb);
		put_super(sb);
		return NULL;
	}
	/*
	 * The superblock is active and we hold s_umount, we can drop our
	 * temporary reference now.
	 */
	put_super(sb);
	return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock_shared(bdev);
	if (!sb)
		return;

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	invalidate_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock_shared(bdev);
	if (!sb)
		return;
	sync_filesystem(sb);
	super_unlock_shared(sb);
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead		= fs_bdev_mark_dead,
	.sync			= fs_bdev_sync,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
int setup_bdev_super(struct super_block *sb, int sb_flags,
		struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct bdev_handle *bdev_handle;
	struct block_device *bdev;

	bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_handle)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_handle);
	}
	bdev = bdev_handle->bdev;

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_release(bdev_handle);
		return -EACCES;
	}

	/*
	 * Until SB_BORN flag is set, there can be no active superblock
	 * references and thus no filesystem freezing. get_active_super() will
	 * just loop waiting for SB_BORN so even freeze_bdev() cannot proceed.
	 *
	 * It is enough to check bdev was not frozen before we set s_bdev.
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_release(bdev_handle);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_handle = bdev_handle;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);
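/*
 * Illustrative sketch (not part of the original file): a block-device
 * filesystem typically only has to point its ->get_tree() at
 * get_tree_bdev() below and use kill_block_super() for teardown.
 * All example_* names are hypothetical.
 */
#if 0
static int example_bdev_fill_super(struct super_block *sb,
				   struct fs_context *fc)
{
	/* sb->s_bdev is already open here via setup_bdev_super();
	 * read the on-disk superblock, set s_op, create s_root, etc. */
	return -EINVAL;		/* placeholder */
}

static int example_bdev_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, example_bdev_fill_super);
}

static struct file_system_type example_bdev_fs_type = {
	.name		= "example_bdev",
	.fs_flags	= FS_REQUIRES_DEV,
	.kill_sb	= kill_block_super,
	/* ->init_fs_context would install an ops table whose
	 * ->get_tree is example_bdev_get_tree */
};
#endif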
/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}

	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		/*
		 * We drop s_umount here because we need to open the bdev and
		 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
		 * bdev_mark_dead()). It is safe because we have active sb
		 * reference and SB_BORN is not set yet.
		 */
		super_unlock_excl(s);
		error = setup_bdev_super(s, fc->sb_flags, fc);
		__super_lock_excl(s);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);

static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;
	dev_t dev;

	error = lookup_bdev(dev_name, &dev);
	if (error)
		return ERR_PTR(error);

	flags |= SB_NOSEC;
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
		}
	} else {
		/*
		 * We drop s_umount here because we need to open the bdev and
		 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
		 * bdev_mark_dead()). It is safe because we have active sb
		 * reference and SB_BORN is not set yet.
		 */
		super_unlock_excl(s);
		error = setup_bdev_super(s, flags, NULL);
		__super_lock_excl(s);
		if (!error)
			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}

		s->s_flags |= SB_ACTIVE;
	}

	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_release(sb->s_bdev_handle);
	}
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
int reconfigure_single(struct super_block *s,
		       int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting. The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is the
	 * superblock structure contents that we just set up, not the SB_BORN
	 * flag.
	 */
	super_wake(sb, SB_BORN);

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);
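/*
 * Illustrative sketch (not part of the original file): the in-kernel mount
 * lifecycle that leads into vfs_get_tree(). fs_context_for_mount(),
 * vfs_parse_fs_string() and put_fs_context() are the real fs_context
 * helpers; the "example" type and option value are hypothetical.
 */
#if 0
static int example_kernel_mount(struct file_system_type *type)
{
	struct fs_context *fc;
	int err;

	fc = fs_context_for_mount(type, 0);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	err = vfs_parse_fs_string(fc, "source", "none", 4);
	if (!err)
		err = vfs_get_tree(fc);	/* sets fc->root on success */
	/* fc->root would now be handed to vfs_create_mount() or similar */
	put_fs_context(fc);
	return err;
}
#endif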
/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					       sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
		 sb->s_writers.frozen != SB_UNFROZEN &&
		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}
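/*
 * Illustrative sketch (not part of the original file): a kernel-side
 * freeze/thaw cycle around freeze_super() below. thaw_super() is the
 * public counterpart declared in linux/fs.h; example_snapshot() is a
 * hypothetical function name.
 */
#if 0
static int example_snapshot(struct super_block *sb)
{
	int err;

	err = freeze_super(sb, FREEZE_HOLDER_KERNEL);
	if (err)
		return err;

	/* the fs is now consistent on disk and write-protected */
	/* ... take the snapshot here ... */

	return thaw_super(sb, FREEZE_HOLDER_KERNEL);
}
#endif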

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					      sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
		 sb->s_writers.frozen != SB_UNFROZEN &&
		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}
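
/*
 * Illustration (not part of super.c): the write side taken by
 * sb_wait_write() pairs with the read side that ordinary write paths take
 * via sb_start_write() and friends:
 *
 *	sb_start_write(sb);	acquires rw_sem[SB_FREEZE_WRITE - 1] shared
 *	...modify the filesystem...
 *	sb_end_write(sb);
 *
 * The "level-1" in sb_wait_write() reflects that the freeze levels are
 * 1-based (SB_FREEZE_WRITE == 1) while the rw_sem array is 0-based, which
 * is also why sb_freeze_unlock() starts at level - 1 and counts down.
 */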

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem.  Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen.  The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who)
{
	int ret;

	atomic_inc(&sb->s_active);
	if (!super_lock_excl(sb))
		WARN(1, "Dying superblock while freezing!");

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (sb->s_writers.freeze_holders & who) {
			deactivate_locked_super(sb);
			return -EBUSY;
		}

		WARN_ON(sb->s_writers.freeze_holders == 0);

		/*
		 * Someone else already holds this type of freeze; share the
		 * freeze and assign the active ref to the freeze.
		 */
		sb->s_writers.freeze_holders |= who;
		super_unlock_excl(sb);
		return 0;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (!(sb->s_flags & SB_BORN)) {
		super_unlock_excl(sb);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		sb->s_writers.freeze_holders |= who;
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	if (!super_lock_excl(sb))
		WARN(1, "Dying superblock while freezing!");

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR "VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.freeze_holders |= who;
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
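
/*
 * Example (illustrative sketch, not part of super.c): an in-kernel user,
 * say a hypothetical snapshot driver, would bracket its work with a
 * kernel-held freeze; example_snapshot() is an invented name.
 *
 *	static int example_snapshot(struct super_block *sb)
 *	{
 *		int err;
 *
 *		err = freeze_super(sb, FREEZE_HOLDER_KERNEL);
 *		if (err)
 *			return err;
 *		...the fs is now clean and write-protected...
 *		return thaw_super(sb, FREEZE_HOLDER_KERNEL);
 *	}
 *
 * If userspace has the same filesystem frozen via FIFREEZE
 * (FREEZE_HOLDER_USERSPACE), the two holders share the frozen state and
 * the filesystem only thaws once both have issued their thaw.
 */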

/*
 * Undoes the effect of a successful freeze_super() call.  If the filesystem
 * is frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
{
	int error;

	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (!(sb->s_writers.freeze_holders & who)) {
			super_unlock_excl(sb);
			return -EINVAL;
		}

		/*
		 * Freeze is shared with someone else.  Release our hold and
		 * drop the active ref that freeze_super assigned to the
		 * freezer.
		 */
		if (sb->s_writers.freeze_holders & ~who) {
			sb->s_writers.freeze_holders &= ~who;
			deactivate_locked_super(sb);
			return 0;
		}
	} else {
		super_unlock_excl(sb);
		return -EINVAL;
	}

	if (sb_rdonly(sb)) {
		sb->s_writers.freeze_holders &= ~who;
		sb->s_writers.frozen = SB_UNFROZEN;
		wake_up_var(&sb->s_writers.frozen);
		goto out;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR "VFS: Filesystem thaw failed\n");
			lockdep_sb_freeze_release(sb);
			super_unlock_excl(sb);
			return error;
		}
	}

	sb->s_writers.freeze_holders &= ~who;
	sb->s_writers.frozen = SB_UNFROZEN;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
	deactivate_locked_super(sb);
	return 0;
}

/**
 * thaw_super - unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who)
{
	if (!super_lock_excl(sb))
		WARN(1, "Dying superblock while thawing!");
	return thaw_super_locked(sb, who);
}
EXPORT_SYMBOL(thaw_super);

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us?  Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
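
/*
 * Illustration (not part of super.c): the cmpxchg() in
 * sb_init_dio_done_wq() is the common lockless install-once pattern.
 * With invented names alloc_thing()/free_thing() and a shared pointer
 * "thing", racing initializers may both allocate, but exactly one pointer
 * is published and the loser frees its copy:
 *
 *	new = alloc_thing();
 *	if (!new)
 *		return -ENOMEM;
 *	old = cmpxchg(&thing, NULL, new);	installs new iff thing == NULL
 *	if (old)
 *		free_thing(new);		lost the race; use "old"
 *
 * Either way "thing" is non-NULL on return, so readers never need a lock
 * once they observe it set.
 */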