// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!trylock_super(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	up_read(&sb->s_umount);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * We don't call trylock_super() here as it is a scalability bottleneck,
	 * so we're exposed to partial setup state. The shrinker rwsem does not
	 * protect filesystem operations backing list_lru_shrink_count() or
	 * s_op->nr_cached_objects(). Counts can change between
	 * super_cache_count and super_cache_scan, so we really don't need locks
	 * here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
	 * avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	up_write(&s->s_umount);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	free_prealloced_shrinker(&s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock, or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;
	s->cleancache_poolid = CLEANCACHE_NO_POOL;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
	if (prealloc_shrinker(&s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		fscrypt_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell the fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		unregister_shrinker(&s->s_shrink);
		fs->kill_sb(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 on failure (the superblock was already dead or dying when
 *	grab_super() was called).  Note that this is only called for
 *	superblocks not in rundown mode (== ones still on ->fs_supers
 *	of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 *	trylock_super - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and the
 *	filesystem already got into shutdown, and returns true with the s_umount
 *	lock held in read mode in case of success. On successful return,
 *	the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There are a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool trylock_super(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!hlist_unhashed(&sb->s_instances) &&
		    sb->s_root && (sb->s_flags & SB_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	return false;
}

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* evict all inodes with zero refcount */
		evict_inodes(sb);
		/* only nonzero refcount inodes can have marks */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Find or create a superblock using the parameters stored in the filesystem
 * context and the two callback functions.
 *
 * If an extant superblock is matched, then that will be returned with an
 * elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be set and
 * the set() callback will be invoked), the superblock will be published and it
 * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
 * as yet unset.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	register_shrinker_prepared(&s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);

/**
 *	sget	-	find or create a superblock
 *	@type:	  filesystem type superblock should belong to
 *	@test:	  comparison callback
 *	@set:	  setup callback
 *	@flags:	  mount flags
 *	@data:	  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *, void *),
			int (*set)(struct super_block *, void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker_prepared(&s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	up_write(&sb->s_umount);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
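
/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * the iterate_supers() calling convention.  The names example_count_writable()
 * and example_count_writable_supers() are hypothetical and exist only to show
 * how a callback is used: it runs with s_umount held shared and only sees
 * superblocks that are SB_BORN and still have a root dentry.
 */
#if 0	/* example only, not compiled */
static void example_count_writable(struct super_block *sb, void *arg)
{
	int *count = arg;

	/* sb is pinned and s_umount is held shared while we run */
	if (!sb_rdonly(sb))
		(*count)++;
}

static int example_count_writable_supers(void)
{
	int count = 0;

	iterate_supers(example_count_writable, &count);
	return count;
}
#endif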

/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			if (excl)
				down_write(&sb->s_umount);
			else
				down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			if (excl)
				up_write(&sb->s_umount);
			else
				up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 *	reconfigure_super - asks filesystem to change superblock parameters
 *	@fc: The superblock and configuration
 *
 *	Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif

		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			up_write(&sb->s_umount);
			group_pin_kill(&sb->s_pins);
			down_write(&sb->s_umount);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we remount read-only and then copy the filesystem
	 * from the bdev, we could get stale data, so invalidate it to give a
	 * best effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
	    !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_flags & SB_BORN) {
		emergency_thaw_bdev(sb);
		thaw_super_locked(sb);
	} else {
		up_write(&sb->s_umount);
	}
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

/**
 * vfs_get_super - Get a superblock with a search key set in s_fs_info.
 * @fc: The filesystem context holding the parameters
 * @keying: How to distinguish superblocks
 * @fill_super: Helper to initialise a new superblock
 *
 * Search for a superblock and create a new one if not found.  The search
 * criterion is controlled by @keying.  If the search fails, a new superblock
 * is created and @fill_super() is called to initialise it.
 *
 * @keying can take one of a number of values:
 *
 * (1) vfs_get_single_super - Only one superblock of this type may exist on the
 *     system.  This is typically used for special system filesystems.
 *
 * (2) vfs_get_keyed_super - Multiple superblocks may exist, but they must have
 *     distinct keys (where the key is in s_fs_info).  Searching for the same
 *     key again will turn up the superblock for that key.
 *
 * (3) vfs_get_independent_super - Multiple superblocks may exist and are
 *     unkeyed.  Each call will get a new superblock.
 *
 * A permissions check is made by sget_fc() unless we're getting a superblock
 * for a kernel-internal mount or a submount.
 */
int vfs_get_super(struct fs_context *fc,
		  enum vfs_get_super_keying keying,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	int (*test)(struct super_block *, struct fs_context *);
	struct super_block *sb;
	int err;

	switch (keying) {
	case vfs_get_single_super:
	case vfs_get_single_reconf_super:
		test = test_single_super;
		break;
	case vfs_get_keyed_super:
		test = test_keyed_super;
		break;
	case vfs_get_independent_super:
		test = NULL;
		break;
	default:
		BUG();
	}

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
		fc->root = dget(sb->s_root);
	} else {
		fc->root = dget(sb->s_root);
		if (keying == vfs_get_single_reconf_super) {
			err = reconfigure_super(fc);
			if (err < 0) {
				dput(fc->root);
				fc->root = NULL;
				goto error;
			}
		}
	}

	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}
EXPORT_SYMBOL(vfs_get_super);

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, vfs_get_independent_super, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, vfs_get_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_single_reconf(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single_reconf);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);

#ifdef CONFIG_BLOCK

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);

	if (blk_queue_stable_writes(s->s_bdev->bd_disk->queue))
		s->s_iflags |= SB_I_STABLE_WRITES;
	return 0;
}

static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
	return s->s_bdev == fc->sget_key;
}

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(fc->sb_flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	if (!fc->source)
		return invalf(fc, "No source specified");

	bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
	if (IS_ERR(bdev)) {
		errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev);
	}

	/* Once the superblock is inserted into the list by sget_fc(), s_umount
	 * will protect the lockfs code from trying to start a snapshot while
	 * we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		blkdev_put(bdev, mode);
		return -EBUSY;
	}

	fc->sb_flags |= SB_NOSEC;
	fc->sget_key = bdev;
	s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		blkdev_put(bdev, mode);
		return PTR_ERR(s);
	}

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", bdev);
			deactivate_locked_super(s);
			blkdev_put(bdev, mode);
			return -EBUSY;
		}

		/*
		 * s_umount nests inside open_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * open_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside open_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * open_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int reconfigure_single(struct super_block *s,
			      int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
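
/*
 * Illustrative sketch (not part of the original file): how a simple virtual
 * filesystem might plug mount_nodev() and kill_anon_super() into its
 * file_system_type.  All of the example_fs_* names are hypothetical; a real
 * driver's fill_super() would set sb->s_op, allocate the root inode and
 * assign sb->s_root via d_make_root().
 */
#if 0	/* example only, not compiled */
static int example_fs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* set up s_op, s_magic, the root inode and sb->s_root here */
	return -ENOSYS;
}

static struct dentry *example_fs_mount(struct file_system_type *fs_type,
				       int flags, const char *dev_name,
				       void *data)
{
	/* no backing device: an anonymous dev_t is allocated by set_anon_super() */
	return mount_nodev(fs_type, flags, data, example_fs_fill_super);
}

static struct file_system_type example_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "example_fs",
	.mount		= example_fs_mount,
	.kill_sb	= kill_anon_super,
};
#endif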

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * Write barrier is for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is the
	 * superblock structure contents that we just set up, not the SB_BORN
	 * flag.
	 */
	smp_wmb();
	sb->s_flags |= SB_BORN;

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & SB_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	down_write(&sb->s_umount);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb);
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	lockdep_sb_freeze_release(sb);
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

static int thaw_super_locked(struct super_block *sb)
{
	int error;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		goto out;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			lockdep_sb_freeze_release(sb);
			up_write(&sb->s_umount);
			return error;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb_freeze_unlock(sb);
out:
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);
	return 0;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	down_write(&sb->s_umount);
	return thaw_super_locked(sb);
}
EXPORT_SYMBOL(thaw_super);
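
/*
 * Illustrative sketch (not part of the original file): the freeze_super() /
 * thaw_super() pairing as a caller might use it around a snapshot.  The name
 * example_freeze_for_snapshot() is hypothetical; the FIFREEZE/FITHAW ioctl
 * handlers in fs/ioctl.c follow essentially this pattern.
 */
#if 0	/* example only, not compiled */
static int example_freeze_for_snapshot(struct super_block *sb)
{
	int err;

	err = freeze_super(sb);	/* takes an active reference, syncs, freezes */
	if (err)
		return err;

	/* ... take the snapshot while no writes can reach the filesystem ... */

	return thaw_super(sb);	/* unfreezes and drops the active reference */
}
#endif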