// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has passed through neither vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set and with
 *         s_umount held. The function returns false if SB_DYING was
 *         set and without s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}
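/*
 * Example (illustrative only, not part of the original file): walkers
 * that find @sb on a list under sb_lock take a temporary reference
 * before calling super_lock(), in the style of grab_super() and
 * __iterate_supers() below:
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *	if (super_lock_shared(sb)) {
 *		... operate on a live, SB_BORN superblock ...
 *		super_unlock_shared(sb);
 *	}
 *	put_super(sb);
 */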
/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by the waiter
	 * that sees SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long fs_objects = 0;
	long total_objects;
	long freed = 0;
	long dentries;
	long inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}
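/*
 * Worked example (illustrative): with sc->nr_to_scan = 128 and counts
 * of dentries = 600, inodes = 300 and fs_objects = 100 (total 1000),
 * the mult_frac() calls above yield scan targets of 76, 38 and 12
 * respectively; the "+ 1" added to each nr_to_scan guarantees forward
 * progress even when a share rounds down to zero.
 */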
static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
					     destroy_work);
	fsnotify_sb_free(s);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}
315 */ 316 static struct super_block *alloc_super(struct file_system_type *type, int flags, 317 struct user_namespace *user_ns) 318 { 319 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL); 320 static const struct super_operations default_op; 321 int i; 322 323 if (!s) 324 return NULL; 325 326 INIT_LIST_HEAD(&s->s_mounts); 327 s->s_user_ns = get_user_ns(user_ns); 328 init_rwsem(&s->s_umount); 329 lockdep_set_class(&s->s_umount, &type->s_umount_key); 330 /* 331 * sget() can have s_umount recursion. 332 * 333 * When it cannot find a suitable sb, it allocates a new 334 * one (this one), and tries again to find a suitable old 335 * one. 336 * 337 * In case that succeeds, it will acquire the s_umount 338 * lock of the old one. Since these are clearly distrinct 339 * locks, and this object isn't exposed yet, there's no 340 * risk of deadlocks. 341 * 342 * Annotate this by putting this lock in a different 343 * subclass. 344 */ 345 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); 346 347 if (security_sb_alloc(s)) 348 goto fail; 349 350 for (i = 0; i < SB_FREEZE_LEVELS; i++) { 351 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i], 352 sb_writers_name[i], 353 &type->s_writers_key[i])) 354 goto fail; 355 } 356 s->s_bdi = &noop_backing_dev_info; 357 s->s_flags = flags; 358 if (s->s_user_ns != &init_user_ns) 359 s->s_iflags |= SB_I_NODEV; 360 INIT_HLIST_NODE(&s->s_instances); 361 INIT_HLIST_BL_HEAD(&s->s_roots); 362 mutex_init(&s->s_sync_lock); 363 INIT_LIST_HEAD(&s->s_inodes); 364 spin_lock_init(&s->s_inode_list_lock); 365 INIT_LIST_HEAD(&s->s_inodes_wb); 366 spin_lock_init(&s->s_inode_wblist_lock); 367 368 s->s_count = 1; 369 atomic_set(&s->s_active, 1); 370 mutex_init(&s->s_vfs_rename_mutex); 371 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); 372 init_rwsem(&s->s_dquot.dqio_sem); 373 s->s_maxbytes = MAX_NON_LFS; 374 s->s_op = &default_op; 375 s->s_time_gran = 1000000000; 376 s->s_time_min = TIME64_MIN; 377 s->s_time_max = TIME64_MAX; 378 379 s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, 380 "sb-%s", type->name); 381 if (!s->s_shrink) 382 goto fail; 383 384 s->s_shrink->scan_objects = super_cache_scan; 385 s->s_shrink->count_objects = super_cache_count; 386 s->s_shrink->batch = 1024; 387 s->s_shrink->private_data = s; 388 389 if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink)) 390 goto fail; 391 if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink)) 392 goto fail; 393 return s; 394 395 fail: 396 destroy_unused_super(s); 397 return NULL; 398 } 399 400 /* Superblock refcounting */ 401 402 /* 403 * Drop a superblock's refcount. The caller must hold sb_lock. 404 */ 405 static void __put_super(struct super_block *s) 406 { 407 if (!--s->s_count) { 408 list_del_init(&s->s_list); 409 WARN_ON(s->s_dentry_lru.node); 410 WARN_ON(s->s_inode_lru.node); 411 WARN_ON(!list_empty(&s->s_mounts)); 412 call_rcu(&s->rcu, destroy_super_rcu); 413 } 414 } 415 416 /** 417 * put_super - drop a temporary reference to superblock 418 * @sb: superblock in question 419 * 420 * Drops a temporary reference, frees superblock if there's no 421 * references left. 
422 */ 423 void put_super(struct super_block *sb) 424 { 425 spin_lock(&sb_lock); 426 __put_super(sb); 427 spin_unlock(&sb_lock); 428 } 429 430 static void kill_super_notify(struct super_block *sb) 431 { 432 lockdep_assert_not_held(&sb->s_umount); 433 434 /* already notified earlier */ 435 if (sb->s_flags & SB_DEAD) 436 return; 437 438 /* 439 * Remove it from @fs_supers so it isn't found by new 440 * sget{_fc}() walkers anymore. Any concurrent mounter still 441 * managing to grab a temporary reference is guaranteed to 442 * already see SB_DYING and will wait until we notify them about 443 * SB_DEAD. 444 */ 445 spin_lock(&sb_lock); 446 hlist_del_init(&sb->s_instances); 447 spin_unlock(&sb_lock); 448 449 /* 450 * Let concurrent mounts know that this thing is really dead. 451 * We don't need @sb->s_umount here as every concurrent caller 452 * will see SB_DYING and either discard the superblock or wait 453 * for SB_DEAD. 454 */ 455 super_wake(sb, SB_DEAD); 456 } 457 458 /** 459 * deactivate_locked_super - drop an active reference to superblock 460 * @s: superblock to deactivate 461 * 462 * Drops an active reference to superblock, converting it into a temporary 463 * one if there is no other active references left. In that case we 464 * tell fs driver to shut it down and drop the temporary reference we 465 * had just acquired. 466 * 467 * Caller holds exclusive lock on superblock; that lock is released. 468 */ 469 void deactivate_locked_super(struct super_block *s) 470 { 471 struct file_system_type *fs = s->s_type; 472 if (atomic_dec_and_test(&s->s_active)) { 473 shrinker_free(s->s_shrink); 474 fs->kill_sb(s); 475 476 kill_super_notify(s); 477 478 /* 479 * Since list_lru_destroy() may sleep, we cannot call it from 480 * put_super(), where we hold the sb_lock. Therefore we destroy 481 * the lru lists right now. 482 */ 483 list_lru_destroy(&s->s_dentry_lru); 484 list_lru_destroy(&s->s_inode_lru); 485 486 put_filesystem(fs); 487 put_super(s); 488 } else { 489 super_unlock_excl(s); 490 } 491 } 492 493 EXPORT_SYMBOL(deactivate_locked_super); 494 495 /** 496 * deactivate_super - drop an active reference to superblock 497 * @s: superblock to deactivate 498 * 499 * Variant of deactivate_locked_super(), except that superblock is *not* 500 * locked by caller. If we are going to drop the final active reference, 501 * lock will be acquired prior to that. 502 */ 503 void deactivate_super(struct super_block *s) 504 { 505 if (!atomic_add_unless(&s->s_active, -1, 1)) { 506 __super_lock_excl(s); 507 deactivate_locked_super(s); 508 } 509 } 510 511 EXPORT_SYMBOL(deactivate_super); 512 513 /** 514 * grab_super - acquire an active reference to a superblock 515 * @sb: superblock to acquire 516 * 517 * Acquire a temporary reference on a superblock and try to trade it for 518 * an active reference. This is used in sget{_fc}() to wait for a 519 * superblock to either become SB_BORN or for it to pass through 520 * sb->kill() and be marked as SB_DEAD. 521 * 522 * Return: This returns true if an active reference could be acquired, 523 * false if not. 
524 */ 525 static bool grab_super(struct super_block *sb) 526 { 527 bool locked; 528 529 sb->s_count++; 530 spin_unlock(&sb_lock); 531 locked = super_lock_excl(sb); 532 if (locked) { 533 if (atomic_inc_not_zero(&sb->s_active)) { 534 put_super(sb); 535 return true; 536 } 537 super_unlock_excl(sb); 538 } 539 wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD)); 540 put_super(sb); 541 return false; 542 } 543 544 /* 545 * super_trylock_shared - try to grab ->s_umount shared 546 * @sb: reference we are trying to grab 547 * 548 * Try to prevent fs shutdown. This is used in places where we 549 * cannot take an active reference but we need to ensure that the 550 * filesystem is not shut down while we are working on it. It returns 551 * false if we cannot acquire s_umount or if we lose the race and 552 * filesystem already got into shutdown, and returns true with the s_umount 553 * lock held in read mode in case of success. On successful return, 554 * the caller must drop the s_umount lock when done. 555 * 556 * Note that unlike get_super() et.al. this one does *not* bump ->s_count. 557 * The reason why it's safe is that we are OK with doing trylock instead 558 * of down_read(). There's a couple of places that are OK with that, but 559 * it's very much not a general-purpose interface. 560 */ 561 bool super_trylock_shared(struct super_block *sb) 562 { 563 if (down_read_trylock(&sb->s_umount)) { 564 if (!(sb->s_flags & SB_DYING) && sb->s_root && 565 (sb->s_flags & SB_BORN)) 566 return true; 567 super_unlock_shared(sb); 568 } 569 570 return false; 571 } 572 573 /** 574 * retire_super - prevents superblock from being reused 575 * @sb: superblock to retire 576 * 577 * The function marks superblock to be ignored in superblock test, which 578 * prevents it from being reused for any new mounts. If the superblock has 579 * a private bdi, it also unregisters it, but doesn't reduce the refcount 580 * of the superblock to prevent potential races. The refcount is reduced 581 * by generic_shutdown_super(). The function can not be called 582 * concurrently with generic_shutdown_super(). It is safe to call the 583 * function multiple times, subsequent calls have no effect. 584 * 585 * The marker will affect the re-use only for block-device-based 586 * superblocks. Other superblocks will still get marked if this function 587 * is used, but that will not affect their reusability. 588 */ 589 void retire_super(struct super_block *sb) 590 { 591 WARN_ON(!sb->s_bdev); 592 __super_lock_excl(sb); 593 if (sb->s_iflags & SB_I_PERSB_BDI) { 594 bdi_unregister(sb->s_bdi); 595 sb->s_iflags &= ~SB_I_PERSB_BDI; 596 } 597 sb->s_iflags |= SB_I_RETIRED; 598 super_unlock_excl(sb); 599 } 600 EXPORT_SYMBOL(retire_super); 601 602 /** 603 * generic_shutdown_super - common helper for ->kill_sb() 604 * @sb: superblock to kill 605 * 606 * generic_shutdown_super() does all fs-independent work on superblock 607 * shutdown. Typical ->kill_sb() should pick all fs-specific objects 608 * that need destruction out of superblock, call generic_shutdown_super() 609 * and release aforementioned objects. Note: dentries and inodes _are_ 610 * taken care of and do not need specific handling. 611 * 612 * Upon calling this function, the filesystem may no longer alter or 613 * rearrange the set of dentries belonging to this super_block, nor may it 614 * change the attachments of dentries to inodes. 
615 */ 616 void generic_shutdown_super(struct super_block *sb) 617 { 618 const struct super_operations *sop = sb->s_op; 619 620 if (sb->s_root) { 621 shrink_dcache_for_umount(sb); 622 sync_filesystem(sb); 623 sb->s_flags &= ~SB_ACTIVE; 624 625 cgroup_writeback_umount(sb); 626 627 /* Evict all inodes with zero refcount. */ 628 evict_inodes(sb); 629 630 /* 631 * Clean up and evict any inodes that still have references due 632 * to fsnotify or the security policy. 633 */ 634 fsnotify_sb_delete(sb); 635 security_sb_delete(sb); 636 637 if (sb->s_dio_done_wq) { 638 destroy_workqueue(sb->s_dio_done_wq); 639 sb->s_dio_done_wq = NULL; 640 } 641 642 if (sop->put_super) 643 sop->put_super(sb); 644 645 /* 646 * Now that all potentially-encrypted inodes have been evicted, 647 * the fscrypt keyring can be destroyed. 648 */ 649 fscrypt_destroy_keyring(sb); 650 651 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL, 652 "VFS: Busy inodes after unmount of %s (%s)", 653 sb->s_id, sb->s_type->name)) { 654 /* 655 * Adding a proper bailout path here would be hard, but 656 * we can at least make it more likely that a later 657 * iput_final() or such crashes cleanly. 658 */ 659 struct inode *inode; 660 661 spin_lock(&sb->s_inode_list_lock); 662 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 663 inode->i_op = VFS_PTR_POISON; 664 inode->i_sb = VFS_PTR_POISON; 665 inode->i_mapping = VFS_PTR_POISON; 666 } 667 spin_unlock(&sb->s_inode_list_lock); 668 } 669 } 670 /* 671 * Broadcast to everyone that grabbed a temporary reference to this 672 * superblock before we removed it from @fs_supers that the superblock 673 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now 674 * discard this superblock and treat it as dead. 675 * 676 * We leave the superblock on @fs_supers so it can be found by 677 * sget{_fc}() until we passed sb->kill_sb(). 678 */ 679 super_wake(sb, SB_DYING); 680 super_unlock_excl(sb); 681 if (sb->s_bdi != &noop_backing_dev_info) { 682 if (sb->s_iflags & SB_I_PERSB_BDI) 683 bdi_unregister(sb->s_bdi); 684 bdi_put(sb->s_bdi); 685 sb->s_bdi = &noop_backing_dev_info; 686 } 687 } 688 689 EXPORT_SYMBOL(generic_shutdown_super); 690 691 bool mount_capable(struct fs_context *fc) 692 { 693 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) 694 return capable(CAP_SYS_ADMIN); 695 else 696 return ns_capable(fc->user_ns, CAP_SYS_ADMIN); 697 } 698 699 /** 700 * sget_fc - Find or create a superblock 701 * @fc: Filesystem context. 702 * @test: Comparison callback 703 * @set: Setup callback 704 * 705 * Create a new superblock or find an existing one. 706 * 707 * The @test callback is used to find a matching existing superblock. 708 * Whether or not the requested parameters in @fc are taken into account 709 * is specific to the @test callback that is used. They may even be 710 * completely ignored. 711 * 712 * If an extant superblock is matched, it will be returned unless: 713 * 714 * (1) the namespace the filesystem context @fc and the extant 715 * superblock's namespace differ 716 * 717 * (2) the filesystem context @fc has requested that reusing an extant 718 * superblock is not allowed 719 * 720 * In both cases EBUSY will be returned. 721 * 722 * If no match is made, a new superblock will be allocated and basic 723 * initialisation will be performed (s_type, s_fs_info and s_id will be 724 * set and the @set callback will be invoked), the superblock will be 725 * published and it will be returned in a partially constructed state 726 * with SB_BORN and SB_ACTIVE as yet unset. 
727 * 728 * Return: On success, an extant or newly created superblock is 729 * returned. On failure an error pointer is returned. 730 */ 731 struct super_block *sget_fc(struct fs_context *fc, 732 int (*test)(struct super_block *, struct fs_context *), 733 int (*set)(struct super_block *, struct fs_context *)) 734 { 735 struct super_block *s = NULL; 736 struct super_block *old; 737 struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns; 738 int err; 739 740 /* 741 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is 742 * not set, as the filesystem is likely unprepared to handle it. 743 * This can happen when fsconfig() is called from init_user_ns with 744 * an fs_fd opened in another user namespace. 745 */ 746 if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) { 747 errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed"); 748 return ERR_PTR(-EPERM); 749 } 750 751 retry: 752 spin_lock(&sb_lock); 753 if (test) { 754 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) { 755 if (test(old, fc)) 756 goto share_extant_sb; 757 } 758 } 759 if (!s) { 760 spin_unlock(&sb_lock); 761 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns); 762 if (!s) 763 return ERR_PTR(-ENOMEM); 764 goto retry; 765 } 766 767 s->s_fs_info = fc->s_fs_info; 768 err = set(s, fc); 769 if (err) { 770 s->s_fs_info = NULL; 771 spin_unlock(&sb_lock); 772 destroy_unused_super(s); 773 return ERR_PTR(err); 774 } 775 fc->s_fs_info = NULL; 776 s->s_type = fc->fs_type; 777 s->s_iflags |= fc->s_iflags; 778 strscpy(s->s_id, s->s_type->name, sizeof(s->s_id)); 779 /* 780 * Make the superblock visible on @super_blocks and @fs_supers. 781 * It's in a nascent state and users should wait on SB_BORN or 782 * SB_DYING to be set. 783 */ 784 list_add_tail(&s->s_list, &super_blocks); 785 hlist_add_head(&s->s_instances, &s->s_type->fs_supers); 786 spin_unlock(&sb_lock); 787 get_filesystem(s->s_type); 788 shrinker_register(s->s_shrink); 789 return s; 790 791 share_extant_sb: 792 if (user_ns != old->s_user_ns || fc->exclusive) { 793 spin_unlock(&sb_lock); 794 destroy_unused_super(s); 795 if (fc->exclusive) 796 warnfc(fc, "reusing existing filesystem not allowed"); 797 else 798 warnfc(fc, "reusing existing filesystem in another namespace not allowed"); 799 return ERR_PTR(-EBUSY); 800 } 801 if (!grab_super(old)) 802 goto retry; 803 destroy_unused_super(s); 804 return old; 805 } 806 EXPORT_SYMBOL(sget_fc); 807 808 /** 809 * sget - find or create a superblock 810 * @type: filesystem type superblock should belong to 811 * @test: comparison callback 812 * @set: setup callback 813 * @flags: mount flags 814 * @data: argument to each of them 815 */ 816 struct super_block *sget(struct file_system_type *type, 817 int (*test)(struct super_block *,void *), 818 int (*set)(struct super_block *,void *), 819 int flags, 820 void *data) 821 { 822 struct user_namespace *user_ns = current_user_ns(); 823 struct super_block *s = NULL; 824 struct super_block *old; 825 int err; 826 827 /* We don't yet pass the user namespace of the parent 828 * mount through to here so always use &init_user_ns 829 * until that changes. 
830 */ 831 if (flags & SB_SUBMOUNT) 832 user_ns = &init_user_ns; 833 834 retry: 835 spin_lock(&sb_lock); 836 if (test) { 837 hlist_for_each_entry(old, &type->fs_supers, s_instances) { 838 if (!test(old, data)) 839 continue; 840 if (user_ns != old->s_user_ns) { 841 spin_unlock(&sb_lock); 842 destroy_unused_super(s); 843 return ERR_PTR(-EBUSY); 844 } 845 if (!grab_super(old)) 846 goto retry; 847 destroy_unused_super(s); 848 return old; 849 } 850 } 851 if (!s) { 852 spin_unlock(&sb_lock); 853 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns); 854 if (!s) 855 return ERR_PTR(-ENOMEM); 856 goto retry; 857 } 858 859 err = set(s, data); 860 if (err) { 861 spin_unlock(&sb_lock); 862 destroy_unused_super(s); 863 return ERR_PTR(err); 864 } 865 s->s_type = type; 866 strscpy(s->s_id, type->name, sizeof(s->s_id)); 867 list_add_tail(&s->s_list, &super_blocks); 868 hlist_add_head(&s->s_instances, &type->fs_supers); 869 spin_unlock(&sb_lock); 870 get_filesystem(type); 871 shrinker_register(s->s_shrink); 872 return s; 873 } 874 EXPORT_SYMBOL(sget); 875 876 void drop_super(struct super_block *sb) 877 { 878 super_unlock_shared(sb); 879 put_super(sb); 880 } 881 882 EXPORT_SYMBOL(drop_super); 883 884 void drop_super_exclusive(struct super_block *sb) 885 { 886 super_unlock_excl(sb); 887 put_super(sb); 888 } 889 EXPORT_SYMBOL(drop_super_exclusive); 890 891 enum super_iter_flags_t { 892 SUPER_ITER_EXCL = (1U << 0), 893 SUPER_ITER_UNLOCKED = (1U << 1), 894 SUPER_ITER_REVERSE = (1U << 2), 895 }; 896 897 static inline struct super_block *first_super(enum super_iter_flags_t flags) 898 { 899 if (flags & SUPER_ITER_REVERSE) 900 return list_last_entry(&super_blocks, struct super_block, s_list); 901 return list_first_entry(&super_blocks, struct super_block, s_list); 902 } 903 904 static inline struct super_block *next_super(struct super_block *sb, 905 enum super_iter_flags_t flags) 906 { 907 if (flags & SUPER_ITER_REVERSE) 908 return list_prev_entry(sb, s_list); 909 return list_next_entry(sb, s_list); 910 } 911 912 static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg, 913 enum super_iter_flags_t flags) 914 { 915 struct super_block *sb, *p = NULL; 916 bool excl = flags & SUPER_ITER_EXCL; 917 918 guard(spinlock)(&sb_lock); 919 920 for (sb = first_super(flags); 921 !list_entry_is_head(sb, &super_blocks, s_list); 922 sb = next_super(sb, flags)) { 923 if (super_flags(sb, SB_DYING)) 924 continue; 925 sb->s_count++; 926 spin_unlock(&sb_lock); 927 928 if (flags & SUPER_ITER_UNLOCKED) { 929 f(sb, arg); 930 } else if (super_lock(sb, excl)) { 931 f(sb, arg); 932 super_unlock(sb, excl); 933 } 934 935 spin_lock(&sb_lock); 936 if (p) 937 __put_super(p); 938 p = sb; 939 } 940 if (p) 941 __put_super(p); 942 } 943 944 void iterate_supers(void (*f)(struct super_block *, void *), void *arg) 945 { 946 __iterate_supers(f, arg, 0); 947 } 948 949 /** 950 * iterate_supers_type - call function for superblocks of given type 951 * @type: fs type 952 * @f: function to call 953 * @arg: argument to pass to it 954 * 955 * Scans the superblock list and calls given function, passing it 956 * locked superblock and given argument. 
957 */ 958 void iterate_supers_type(struct file_system_type *type, 959 void (*f)(struct super_block *, void *), void *arg) 960 { 961 struct super_block *sb, *p = NULL; 962 963 spin_lock(&sb_lock); 964 hlist_for_each_entry(sb, &type->fs_supers, s_instances) { 965 bool locked; 966 967 if (super_flags(sb, SB_DYING)) 968 continue; 969 970 sb->s_count++; 971 spin_unlock(&sb_lock); 972 973 locked = super_lock_shared(sb); 974 if (locked) 975 f(sb, arg); 976 977 spin_lock(&sb_lock); 978 if (p) 979 __put_super(p); 980 p = sb; 981 } 982 if (p) 983 __put_super(p); 984 spin_unlock(&sb_lock); 985 } 986 987 EXPORT_SYMBOL(iterate_supers_type); 988 989 struct super_block *user_get_super(dev_t dev, bool excl) 990 { 991 struct super_block *sb; 992 993 spin_lock(&sb_lock); 994 list_for_each_entry(sb, &super_blocks, s_list) { 995 bool locked; 996 997 if (sb->s_dev != dev) 998 continue; 999 1000 sb->s_count++; 1001 spin_unlock(&sb_lock); 1002 1003 locked = super_lock(sb, excl); 1004 if (locked) 1005 return sb; 1006 1007 spin_lock(&sb_lock); 1008 __put_super(sb); 1009 break; 1010 } 1011 spin_unlock(&sb_lock); 1012 return NULL; 1013 } 1014 1015 /** 1016 * reconfigure_super - asks filesystem to change superblock parameters 1017 * @fc: The superblock and configuration 1018 * 1019 * Alters the configuration parameters of a live superblock. 1020 */ 1021 int reconfigure_super(struct fs_context *fc) 1022 { 1023 struct super_block *sb = fc->root->d_sb; 1024 int retval; 1025 bool remount_ro = false; 1026 bool remount_rw = false; 1027 bool force = fc->sb_flags & SB_FORCE; 1028 1029 if (fc->sb_flags_mask & ~MS_RMT_MASK) 1030 return -EINVAL; 1031 if (sb->s_writers.frozen != SB_UNFROZEN) 1032 return -EBUSY; 1033 1034 retval = security_sb_remount(sb, fc->security); 1035 if (retval) 1036 return retval; 1037 1038 if (fc->sb_flags_mask & SB_RDONLY) { 1039 #ifdef CONFIG_BLOCK 1040 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev && 1041 bdev_read_only(sb->s_bdev)) 1042 return -EACCES; 1043 #endif 1044 remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb); 1045 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb); 1046 } 1047 1048 if (remount_ro) { 1049 if (!hlist_empty(&sb->s_pins)) { 1050 super_unlock_excl(sb); 1051 group_pin_kill(&sb->s_pins); 1052 __super_lock_excl(sb); 1053 if (!sb->s_root) 1054 return 0; 1055 if (sb->s_writers.frozen != SB_UNFROZEN) 1056 return -EBUSY; 1057 remount_ro = !sb_rdonly(sb); 1058 } 1059 } 1060 shrink_dcache_sb(sb); 1061 1062 /* If we are reconfiguring to RDONLY and current sb is read/write, 1063 * make sure there are no files open for writing. 1064 */ 1065 if (remount_ro) { 1066 if (force) { 1067 sb_start_ro_state_change(sb); 1068 } else { 1069 retval = sb_prepare_remount_readonly(sb); 1070 if (retval) 1071 return retval; 1072 } 1073 } else if (remount_rw) { 1074 /* 1075 * Protect filesystem's reconfigure code from writes from 1076 * userspace until reconfigure finishes. 1077 */ 1078 sb_start_ro_state_change(sb); 1079 } 1080 1081 if (fc->ops->reconfigure) { 1082 retval = fc->ops->reconfigure(fc); 1083 if (retval) { 1084 if (!force) 1085 goto cancel_readonly; 1086 /* If forced remount, go ahead despite any errors */ 1087 WARN(1, "forced remount of a %s fs returned %i\n", 1088 sb->s_type->name, retval); 1089 } 1090 } 1091 1092 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) | 1093 (fc->sb_flags & fc->sb_flags_mask))); 1094 sb_end_ro_state_change(sb); 1095 1096 /* 1097 * Some filesystems modify their metadata via some other path than the 1098 * bdev buffer cache (eg. 
static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
	if (sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback, NULL,
			 SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
	if (IS_ENABLED(CONFIG_BLOCK))
		while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
			pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
	thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static inline bool get_active_super(struct super_block *sb)
{
	bool active = false;

	if (super_lock_excl(sb)) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	return active;
}

static const char *filesystems_freeze_ptr = "filesystems_freeze";

static void filesystems_freeze_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->freeze_super)
		sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				       filesystems_freeze_ptr);
	else
		freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			     filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_freeze(void)
{
	__iterate_supers(filesystems_freeze_callback, NULL,
			 SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}

static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->thaw_super)
		sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				     filesystems_freeze_ptr);
	else
		thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			   filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_thaw(void)
{
	__iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}
static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context. Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			      GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		   int (*fill_super)(struct super_block *sb,
				     struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		    int (*fill_super)(struct super_block *sb,
				      struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		   int (*fill_super)(struct super_block *sb,
				     struct fs_context *fc),
		   void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
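/*
 * Example (illustrative only): a hypothetical in-memory filesystem
 * would plug one of the helpers above into its context ops:
 *
 *	static int foofs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, foofs_fill_super);
 *	}
 *
 *	static const struct fs_context_operations foofs_context_ops = {
 *		.get_tree	= foofs_get_tree,
 *	};
 */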
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is the holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}
1445 */ 1446 put_super(sb); 1447 1448 if (!locked) 1449 return NULL; 1450 1451 if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) { 1452 super_unlock(sb, excl); 1453 return NULL; 1454 } 1455 1456 return sb; 1457 } 1458 1459 static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise) 1460 { 1461 struct super_block *sb; 1462 1463 sb = bdev_super_lock(bdev, false); 1464 if (!sb) 1465 return; 1466 1467 if (!surprise) 1468 sync_filesystem(sb); 1469 shrink_dcache_sb(sb); 1470 evict_inodes(sb); 1471 if (sb->s_op->shutdown) 1472 sb->s_op->shutdown(sb); 1473 1474 super_unlock_shared(sb); 1475 } 1476 1477 static void fs_bdev_sync(struct block_device *bdev) 1478 { 1479 struct super_block *sb; 1480 1481 sb = bdev_super_lock(bdev, false); 1482 if (!sb) 1483 return; 1484 1485 sync_filesystem(sb); 1486 super_unlock_shared(sb); 1487 } 1488 1489 static struct super_block *get_bdev_super(struct block_device *bdev) 1490 { 1491 bool active = false; 1492 struct super_block *sb; 1493 1494 sb = bdev_super_lock(bdev, true); 1495 if (sb) { 1496 active = atomic_inc_not_zero(&sb->s_active); 1497 super_unlock_excl(sb); 1498 } 1499 if (!active) 1500 return NULL; 1501 return sb; 1502 } 1503 1504 /** 1505 * fs_bdev_freeze - freeze owning filesystem of block device 1506 * @bdev: block device 1507 * 1508 * Freeze the filesystem that owns this block device if it is still 1509 * active. 1510 * 1511 * A filesystem that owns multiple block devices may be frozen from each 1512 * block device and won't be unfrozen until all block devices are 1513 * unfrozen. Each block device can only freeze the filesystem once as we 1514 * nest freezes for block devices in the block layer. 1515 * 1516 * Return: If the freeze was successful zero is returned. If the freeze 1517 * failed a negative error code is returned. 1518 */ 1519 static int fs_bdev_freeze(struct block_device *bdev) 1520 { 1521 struct super_block *sb; 1522 int error = 0; 1523 1524 lockdep_assert_held(&bdev->bd_fsfreeze_mutex); 1525 1526 sb = get_bdev_super(bdev); 1527 if (!sb) 1528 return -EINVAL; 1529 1530 if (sb->s_op->freeze_super) 1531 error = sb->s_op->freeze_super(sb, 1532 FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL); 1533 else 1534 error = freeze_super(sb, 1535 FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL); 1536 if (!error) 1537 error = sync_blockdev(bdev); 1538 deactivate_super(sb); 1539 return error; 1540 } 1541 1542 /** 1543 * fs_bdev_thaw - thaw owning filesystem of block device 1544 * @bdev: block device 1545 * 1546 * Thaw the filesystem that owns this block device. 1547 * 1548 * A filesystem that owns multiple block devices may be frozen from each 1549 * block device and won't be unfrozen until all block devices are 1550 * unfrozen. Each block device can only freeze the filesystem once as we 1551 * nest freezes for block devices in the block layer. 1552 * 1553 * Return: If the thaw was successful zero is returned. If the thaw 1554 * failed a negative error code is returned. If this function 1555 * returns zero it doesn't mean that the filesystem is unfrozen 1556 * as it may have been frozen multiple times (kernel may hold a 1557 * freeze or might be frozen from other block devices). 1558 */ 1559 static int fs_bdev_thaw(struct block_device *bdev) 1560 { 1561 struct super_block *sb; 1562 int error; 1563 1564 lockdep_assert_held(&bdev->bd_fsfreeze_mutex); 1565 1566 /* 1567 * The block device may have been frozen before it was claimed by a 1568 * filesystem. 
int setup_bdev_super(struct super_block *sb, int sb_flags,
		     struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_file)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_file);
	}
	bdev = file_bdev(bdev_file);

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_fput(bdev_file);
		return -EACCES;
	}

	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_fput(bdev_file);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_file = bdev_file;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev_flags - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 * @flags: GET_TREE_BDEV_* flags
 */
int get_tree_bdev_flags(struct fs_context *fc,
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc), unsigned int flags)
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
			errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}
	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);
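/*
 * Example (illustrative only): a block-based filesystem normally calls
 * the wrapper below from its ->get_tree() hook:
 *
 *	static int foofs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, foofs_fill_super);
 *	}
 */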
/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);

static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;
	dev_t dev;

	error = lookup_bdev(dev_name, &dev);
	if (error)
		return ERR_PTR(error);

	flags |= SB_NOSEC;
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
		}
	} else {
		error = setup_bdev_super(s, flags, NULL);
		if (!error)
			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}

		s->s_flags |= SB_ACTIVE;
	}

	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_fput(sb->s_bdev_file);
	}
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
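/*
 * Example (illustrative only): legacy filesystems wire mount_bdev()
 * and kill_block_super() above into struct file_system_type:
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  foofs_fill_super);
 *	}
 *
 *	static struct file_system_type foofs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "foofs",
 *		.mount		= foofs_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */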
1808 */ 1809 error = fc->ops->get_tree(fc); 1810 if (error < 0) 1811 return error; 1812 1813 if (!fc->root) { 1814 pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n", 1815 fc->fs_type->name, error); 1816 /* We don't know what the locking state of the superblock is - 1817 * if there is a superblock. 1818 */ 1819 BUG(); 1820 } 1821 1822 sb = fc->root->d_sb; 1823 WARN_ON(!sb->s_bdi); 1824 1825 /* 1826 * super_wake() contains a memory barrier which also care of 1827 * ordering for super_cache_count(). We place it before setting 1828 * SB_BORN as the data dependency between the two functions is 1829 * the superblock structure contents that we just set up, not 1830 * the SB_BORN flag. 1831 */ 1832 super_wake(sb, SB_BORN); 1833 1834 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL); 1835 if (unlikely(error)) { 1836 fc_drop_locked(fc); 1837 return error; 1838 } 1839 1840 /* 1841 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE 1842 * but s_maxbytes was an unsigned long long for many releases. Throw 1843 * this warning for a little while to try and catch filesystems that 1844 * violate this rule. 1845 */ 1846 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to " 1847 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes); 1848 1849 return 0; 1850 } 1851 EXPORT_SYMBOL(vfs_get_tree); 1852 1853 /* 1854 * Setup private BDI for given superblock. It gets automatically cleaned up 1855 * in generic_shutdown_super(). 1856 */ 1857 int super_setup_bdi_name(struct super_block *sb, char *fmt, ...) 1858 { 1859 struct backing_dev_info *bdi; 1860 int err; 1861 va_list args; 1862 1863 bdi = bdi_alloc(NUMA_NO_NODE); 1864 if (!bdi) 1865 return -ENOMEM; 1866 1867 va_start(args, fmt); 1868 err = bdi_register_va(bdi, fmt, args); 1869 va_end(args); 1870 if (err) { 1871 bdi_put(bdi); 1872 return err; 1873 } 1874 WARN_ON(sb->s_bdi != &noop_backing_dev_info); 1875 sb->s_bdi = bdi; 1876 sb->s_iflags |= SB_I_PERSB_BDI; 1877 1878 return 0; 1879 } 1880 EXPORT_SYMBOL(super_setup_bdi_name); 1881 1882 /* 1883 * Setup private BDI for given superblock. I gets automatically cleaned up 1884 * in generic_shutdown_super(). 1885 */ 1886 int super_setup_bdi(struct super_block *sb) 1887 { 1888 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 1889 1890 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name, 1891 atomic_long_inc_return(&bdi_seq)); 1892 } 1893 EXPORT_SYMBOL(super_setup_bdi); 1894 1895 /** 1896 * sb_wait_write - wait until all writers to given file system finish 1897 * @sb: the super for which we wait 1898 * @level: type of writers we wait for (normal vs page fault) 1899 * 1900 * This function waits until there are no writers of given type to given file 1901 * system. 1902 */ 1903 static void sb_wait_write(struct super_block *sb, int level) 1904 { 1905 percpu_down_write(sb->s_writers.rw_sem + level-1); 1906 } 1907 1908 /* 1909 * We are going to return to userspace and forget about these locks, the 1910 * ownership goes to the caller of thaw_super() which does unlock(). 1911 */ 1912 static void lockdep_sb_freeze_release(struct super_block *sb) 1913 { 1914 int level; 1915 1916 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--) 1917 percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_); 1918 } 1919 1920 /* 1921 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb). 
1922 */ 1923 static void lockdep_sb_freeze_acquire(struct super_block *sb) 1924 { 1925 int level; 1926 1927 for (level = 0; level < SB_FREEZE_LEVELS; ++level) 1928 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_); 1929 } 1930 1931 static void sb_freeze_unlock(struct super_block *sb, int level) 1932 { 1933 for (level--; level >= 0; level--) 1934 percpu_up_write(sb->s_writers.rw_sem + level); 1935 } 1936 1937 static int wait_for_partially_frozen(struct super_block *sb) 1938 { 1939 int ret = 0; 1940 1941 do { 1942 unsigned short old = sb->s_writers.frozen; 1943 1944 up_write(&sb->s_umount); 1945 ret = wait_var_event_killable(&sb->s_writers.frozen, 1946 sb->s_writers.frozen != old); 1947 down_write(&sb->s_umount); 1948 } while (ret == 0 && 1949 sb->s_writers.frozen != SB_UNFROZEN && 1950 sb->s_writers.frozen != SB_FREEZE_COMPLETE); 1951 1952 return ret; 1953 } 1954 1955 #define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE) 1956 #define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL) 1957 1958 static inline int freeze_inc(struct super_block *sb, enum freeze_holder who) 1959 { 1960 WARN_ON_ONCE((who & ~FREEZE_FLAGS)); 1961 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); 1962 1963 if (who & FREEZE_HOLDER_KERNEL) 1964 ++sb->s_writers.freeze_kcount; 1965 if (who & FREEZE_HOLDER_USERSPACE) 1966 ++sb->s_writers.freeze_ucount; 1967 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; 1968 } 1969 1970 static inline int freeze_dec(struct super_block *sb, enum freeze_holder who) 1971 { 1972 WARN_ON_ONCE((who & ~FREEZE_FLAGS)); 1973 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); 1974 1975 if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount) 1976 --sb->s_writers.freeze_kcount; 1977 if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount) 1978 --sb->s_writers.freeze_ucount; 1979 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; 1980 } 1981 1982 static inline bool may_freeze(struct super_block *sb, enum freeze_holder who, 1983 const void *freeze_owner) 1984 { 1985 lockdep_assert_held(&sb->s_umount); 1986 1987 WARN_ON_ONCE((who & ~FREEZE_FLAGS)); 1988 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); 1989 1990 if (who & FREEZE_EXCL) { 1991 if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL))) 1992 return false; 1993 if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL))) 1994 return false; 1995 if (WARN_ON_ONCE(!freeze_owner)) 1996 return false; 1997 /* This freeze already has a specific owner. */ 1998 if (sb->s_writers.freeze_owner) 1999 return false; 2000 /* 2001 * This is already frozen multiple times so we're just 2002 * going to take a reference count and mark the freeze as 2003 * being owned by the caller. 
static inline bool may_freeze(struct super_block *sb, enum freeze_holder who,
			      const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		/* This freeze already has a specific owner. */
		if (sb->s_writers.freeze_owner)
			return false;
		/*
		 * This is already frozen multiple times so we're just
		 * going to take a reference count and mark the freeze as
		 * being owned by the caller.
		 */
		if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount)
			sb->s_writers.freeze_owner = freeze_owner;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL)
		return (who & FREEZE_MAY_NEST) ||
			sb->s_writers.freeze_kcount == 0;
	if (who & FREEZE_HOLDER_USERSPACE)
		return (who & FREEZE_MAY_NEST) ||
			sb->s_writers.freeze_ucount == 0;
	return false;
}

static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who,
				const void *freeze_owner)
{
	lockdep_assert_held(&sb->s_umount);

	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_EXCL) {
		if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL)))
			return false;
		if (WARN_ON_ONCE(!freeze_owner))
			return false;
		if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0))
			return false;
		/* This isn't exclusively frozen. */
		if (!sb->s_writers.freeze_owner)
			return false;
		/* This isn't exclusively frozen by us. */
		if (sb->s_writers.freeze_owner != freeze_owner)
			return false;
		/*
		 * This is still frozen multiple times so we're just
		 * going to drop our reference count and undo our
		 * exclusive freeze.
		 */
		if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1)
			sb->s_writers.freeze_owner = NULL;
		return true;
	}

	if (who & FREEZE_HOLDER_KERNEL) {
		/*
		 * Someone's trying to steal the reference belonging to
		 * @sb->s_writers.freeze_owner.
		 */
		if (sb->s_writers.freeze_kcount == 1 &&
		    sb->s_writers.freeze_owner)
			return false;
		return sb->s_writers.freeze_kcount > 0;
	}

	if (who & FREEZE_HOLDER_USERSPACE)
		return sb->s_writers.freeze_ucount > 0;

	return false;
}
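/*
 * Illustrative sketch only (kept out of the build): the exclusive-freeze
 * rules that may_freeze()/may_unfreeze() enforce. The owner cookie is any
 * stable pointer of the caller's choosing; "cookie" here is made up.
 */
#if 0
static void example_exclusive_freeze(struct super_block *sb, const void *cookie)
{
	/* Records @cookie as the freeze owner. */
	freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, cookie);
	/* A thaw presenting a different owner fails may_unfreeze(). */
	thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, cookie);
}
#endif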
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 * @freeze_owner: owner of the freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs;
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem. Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen. The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * A filesystem may hold multiple devices and thus a filesystem may be
 * frozen through the block layer via multiple block devices. In this
 * case the request is marked as being allowed to nest by passing
 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
 * devices are unfrozen. If multiple freezes are attempted without
 * FREEZE_MAY_NEST -EBUSY will be returned.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner)
{
	int ret;

	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while freezing!");
		return -EINVAL;
	}
	atomic_inc(&sb->s_active);

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (may_freeze(sb, who, freeze_owner))
			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
		else
			ret = -EBUSY;
		/* All freezers share a single active reference. */
		deactivate_locked_super(sb);
		return ret;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
		sb->s_writers.freeze_owner = freeze_owner;
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	__super_lock_excl(sb);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
	sb->s_writers.freeze_owner = freeze_owner;
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
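/*
 * Illustrative sketch only (kept out of the build): roughly how the
 * FIFREEZE/FITHAW ioctl paths drive freeze_super()/thaw_super() on behalf
 * of userspace; the real callers add capability checks and error handling.
 */
#if 0
static int example_fsfreeze(struct super_block *sb, bool freeze)
{
	if (freeze)
		return freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
	return thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
}
#endif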
/*
 * Undoes the effect of a freeze_super_locked call. If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner)
{
	int error = -EINVAL;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
		goto out_unlock;

	if (!may_unfreeze(sb, who, freeze_owner))
		goto out_unlock;

	/*
	 * All freezers share a single active reference.
	 * So just unlock in case there are any left.
	 */
	if (freeze_dec(sb, who))
		goto out_unlock;

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb->s_writers.freeze_owner = NULL;
		wake_up_var(&sb->s_writers.frozen);
		goto out_deactivate;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			pr_err("VFS: Filesystem thaw failed\n");
			freeze_inc(sb, who);
			lockdep_sb_freeze_release(sb);
			goto out_unlock;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb->s_writers.freeze_owner = NULL;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
	deactivate_locked_super(sb);
	return 0;

out_unlock:
	super_unlock_excl(sb);
	return error;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 * @freeze_owner: owner of the freeze
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs;
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * A filesystem may hold multiple devices and thus a filesystem may
 * have been frozen through the block layer via multiple block devices.
 * The filesystem remains frozen until all block devices are unfrozen.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who,
	       const void *freeze_owner)
{
	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while thawing!");
		return -EINVAL;
	}
	return thaw_super_locked(sb, who, freeze_owner);
}
EXPORT_SYMBOL(thaw_super);
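/*
 * Illustrative sketch only (kept out of the build): the kernel and
 * userspace may each hold the same superblock frozen; writes resume only
 * once both holders have thawed, whatever the thaw order.
 */
#if 0
static void example_shared_freeze(struct super_block *sb)
{
	freeze_super(sb, FREEZE_HOLDER_KERNEL, NULL);		/* frozen */
	freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);	/* still frozen */
	thaw_super(sb, FREEZE_HOLDER_KERNEL, NULL);		/* userspace still holds it */
	thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);		/* unfrozen */
}
#endif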
/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
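/*
 * Illustrative sketch only (kept out of the build): the generic
 * "allocate, publish with cmpxchg(), free on loss" pattern used above.
 * "my_obj", "shared_slot" and the alloc/free helpers are invented names.
 */
#if 0
static struct my_obj *shared_slot;

static int example_publish_once(void)
{
	struct my_obj *obj = alloc_my_obj();

	if (!obj)
		return -ENOMEM;
	/* Exactly one racing caller's pointer wins; losers free theirs. */
	if (cmpxchg(&shared_slot, NULL, obj) != NULL)
		free_my_obj(obj);
	/* shared_slot now points to a single shared instance. */
	return 0;
}
#endif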