/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/buffer_head.h>		/* for fsync_super() */
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vfs.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 *
 * Allocates and initializes a new &struct super_block.  alloc_super()
 * returns a pointer to the new superblock, or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			kfree(s);
			s = NULL;
			goto out;
		}
		INIT_LIST_HEAD(&s->s_dirty);
		INIT_LIST_HEAD(&s->s_io);
		INIT_LIST_HEAD(&s->s_more_io);
		INIT_LIST_HEAD(&s->s_files);
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem.  For example ext3fs has different
		 * lock ordering than usbfs.
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		down_write(&s->s_umount);
		s->s_count = S_BIAS;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->dq_op = sb_dquot_ops;
		s->s_qcop = sb_quotactl_ops;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
	}
out:
	return s;
}

/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
	security_sb_free(s);
	kfree(s->s_subtype);
	kfree(s);
}

/* Superblock refcounting */

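/*
 * Two kinds of references are used below: sb->s_count is a plain
 * reference count, only ever touched under sb_lock, and a live
 * superblock carries an S_BIAS worth of it (see alloc_super() above);
 * sb->s_active counts active (mount-level) references.  Code walking
 * super_blocks takes temporary references by bumping s_count only;
 * grab_super() turns such a temporary reference into an active one,
 * and deactivate_super() removes the bias and shuts the filesystem
 * down once the last active reference is gone.
 */
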
/*
 * Drop a superblock's refcount.  Returns non-zero if the superblock was
 * destroyed.  The caller must hold sb_lock.
 */
int __put_super(struct super_block *sb)
{
	int ret = 0;

	if (!--sb->s_count) {
		destroy_super(sb);
		ret = 1;
	}
	return ret;
}

/*
 * Drop a superblock's refcount.
 * Returns non-zero if the superblock is about to be destroyed, or has
 * at least already been removed from the super_blocks list; callers
 * looping over the super block list must then restart the walk.
 * The caller must hold sb_lock.
 */
int __put_super_and_need_restart(struct super_block *sb)
{
	/* check for race with generic_shutdown_super() */
	if (list_empty(&sb->s_list)) {
		/* super block is removed, need to restart... */
		__put_super(sb);
		return 1;
	}
	/* can't be the last, since s_list is still in use */
	sb->s_count--;
	BUG_ON(sb->s_count == 0);
	return 0;
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference and frees the superblock if no
 * references are left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to the superblock, acquiring a temporary
 * one if there are no active references left.  In that case we lock
 * the superblock, tell the fs driver to shut it down and drop the
 * temporary reference we had just acquired.
 */
void deactivate_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
		s->s_count -= S_BIAS-1;
		spin_unlock(&sb_lock);
		DQUOT_OFF(s);
		down_write(&s->s_umount);
		fs->kill_sb(s);
		put_filesystem(fs);
		put_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference.  grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference.  grab_super()
 * is called with sb_lock held and drops it.  Returns 1 on success,
 * 0 on failure (the superblock was already dead or dying by the time
 * grab_super() was called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if (s->s_root) {
		spin_lock(&sb_lock);
		if (s->s_count > S_BIAS) {
			atomic_inc(&s->s_active);
			s->s_count--;
			spin_unlock(&sb_lock);
			return 1;
		}
		spin_unlock(&sb_lock);
	}
	up_write(&s->s_umount);
	put_super(s);
	yield();
	return 0;
}

/*
 * Superblock locking.  We really ought to get rid of these two.
 */
void lock_super(struct super_block * sb)
{
	get_fs_excl();
	mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block * sb)
{
	put_fs_excl();
	mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);

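/*
 * Illustrative sketch only (not a function in this file): the canonical
 * way to walk super_blocks while temporarily dropping sb_lock for each
 * entry, as sync_supers(), sync_filesystems() and get_super() below do:
 *
 *	spin_lock(&sb_lock);
 * restart:
 *	list_for_each_entry(sb, &super_blocks, s_list) {
 *		sb->s_count++;
 *		spin_unlock(&sb_lock);
 *		down_read(&sb->s_umount);
 *		... work on sb ...
 *		up_read(&sb->s_umount);
 *		spin_lock(&sb_lock);
 *		if (__put_super_and_need_restart(sb))
 *			goto restart;
 *	}
 *	spin_unlock(&sb_lock);
 */
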
/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.  Requires a second blkdev
 * flush by the caller to complete the operation.
 */
void __fsync_super(struct super_block *sb)
{
	sync_inodes_sb(sb, 0);
	DQUOT_SYNC(sb);
	lock_super(sb);
	if (sb->s_dirt && sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	sync_inodes_sb(sb, 1);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
	__fsync_super(sb);
	return sync_blockdev(sb->s_bdev);
}

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  A typical ->kill_sb() should pick the fs-specific objects
 * that need destruction out of the superblock, call
 * generic_shutdown_super(), and then release those objects.
 * Note: dentries and inodes _are_ taken care of and do not need
 * specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		fsync_super(sb);
		lock_super(sb);
		sb->s_flags &= ~MS_ACTIVE;
		/* bad name - it should be evict_inodes() */
		invalidate_inodes(sb);
		lock_kernel();

		if (sop->write_super && sb->s_dirt)
			sop->write_super(sb);
		if (sop->put_super)
			sop->put_super(sb);

		/* Forget any remaining inodes */
		if (invalidate_inodes(sb)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}

		unlock_kernel();
		unlock_super(sb);
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_list);
	list_del(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

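/*
 * Illustrative sketch only (hypothetical examplefs names, not part of
 * this file): a ->kill_sb() for a filesystem that keeps private state
 * in sb->s_fs_info would typically look like
 *
 *	static void examplefs_kill_sb(struct super_block *sb)
 *	{
 *		struct examplefs_sb_info *sbi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(sbi);
 *	}
 *
 * i.e. dentries and inodes are torn down by the generic helper, and the
 * fs-private objects are released afterwards.
 */
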
/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		list_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s)
				destroy_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	return s;
}

EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

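/*
 * Illustrative sketch only: drop_super() is the counterpart to
 * get_super()/user_get_super() below, which return the superblock with
 * a reference held and s_umount taken for reading.  A typical caller:
 *
 *	struct super_block *sb = get_super(bdev);
 *
 *	if (sb) {
 *		... examine or sync sb here ...
 *		drop_super(sb);
 *	}
 */
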
static inline void write_super(struct super_block *sb)
{
	lock_super(sb);
	if (sb->s_root && sb->s_dirt)
		if (sb->s_op->write_super)
			sb->s_op->write_super(sb);
	unlock_super(sb);
}

/*
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 */
void sync_supers(void)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			write_super(sb);
			up_read(&sb->s_umount);
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
}

/*
 * Call the ->sync_fs super_op against all filesystems which are r/w and
 * which implement it.
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied.  s_need_sync_fs
 * is used only here.  We set it against all filesystems and then clear it as
 * we sync them.  So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync_fs
 * flags again, which will cause process A to resync everything.  Fix that with
 * a local mutex.
 *
 * (Fabian) Avoid sync_fs with clean fs & wait mode 0
 */
void sync_filesystems(int wait)
{
	struct super_block *sb;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);		/* Could be down_interruptible */
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_op->sync_fs)
			continue;
		if (sb->s_flags & MS_RDONLY)
			continue;
		sb->s_need_sync_fs = 1;
	}

restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_need_sync_fs)
			continue;
		sb->s_need_sync_fs = 0;
		if (sb->s_flags & MS_RDONLY)
			continue;	/* hm.  Was remounted r/o meanwhile */
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root && (wait || sb->s_dirt))
			sb->s_op->sync_fs(sb, wait);
		up_read(&sb->s_umount);
		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&mutex);
}

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block * get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);

struct super_block * user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
{
	struct super_block *s;
	struct ustat tmp;
	struct kstatfs sbuf;
	int err = -EINVAL;

	s = user_get_super(new_decode_dev(dev));
	if (s == NULL)
		goto out;
	err = vfs_statfs(s->s_root, &sbuf);
	drop_super(s);
	if (err)
		goto out;

	memset(&tmp, 0, sizeof(struct ustat));
	tmp.f_tfree = sbuf.f_bfree;
	tmp.f_tinode = sbuf.f_ffree;

	err = copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0;
out:
	return err;
}

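/*
 * For reference (illustrative userspace usage, not part of this file):
 * the syscall above backs ustat(2), e.g.
 *
 *	struct ustat u;
 *
 *	if (ustat(makedev(8, 1), &u) == 0)
 *		printf("%ld free blocks, %ld free inodes\n",
 *		       (long)u.f_tfree, (long)u.f_tinode);
 *
 * with the device number being that of a mounted filesystem.
 */
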
/**
 * mark_files_ro
 * @sb: superblock in question
 *
 * All files are marked read-only.  Files pending deletion are not
 * handled, so this should be used in 'force' mode only.
 */

static void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	file_list_lock();
	list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
		if (S_ISREG(f->f_path.dentry->d_inode->i_mode) && file_count(f))
			f->f_mode &= ~FMODE_WRITE;
	}
	file_list_unlock();
}

/**
 * do_remount_sb - asks the filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif
	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	fsync_super(sb);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
	}

	if (sb->s_op->remount_fs) {
		lock_super(sb);
		retval = sb->s_op->remount_fs(sb, &flags, data);
		unlock_super(sb);
		if (retval)
			return retval;
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	return 0;
}

static void do_emergency_remount(unsigned long foo)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
			/*
			 * ->remount_fs needs lock_kernel().
			 *
			 * What lock protects sb->s_flags??
			 */
			lock_kernel();
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
			unlock_kernel();
		}
		drop_super(sb);
		spin_lock(&sb_lock);
	}
	spin_unlock(&sb_lock);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	pdflush_operation(do_emergency_remount, 0);
}

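/*
 * Illustrative sketch only (hypothetical examplefs names, not part of
 * this file): a filesystem hooks into do_remount_sb() above via
 * ->remount_fs, which may veto the change or adjust *flags, e.g.
 *
 *	static int examplefs_remount(struct super_block *sb, int *flags,
 *				     char *data)
 *	{
 *		if (!(*flags & MS_RDONLY) && examplefs_media_is_ro(sb))
 *			return -EROFS;
 *		return 0;
 *	}
 *
 * (examplefs_media_is_ro() being a hypothetical helper.)  On success,
 * do_remount_sb() folds the MS_RMT_MASK bits of *flags back into
 * sb->s_flags.
 */
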
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static struct idr unnamed_dev_idr;
static DEFINE_SPINLOCK(unnamed_dev_lock);	/* protects the above */

int set_anon_super(struct super_block *s, void *data)
{
	int dev;
	int error;

retry:
	if (idr_pre_get(&unnamed_dev_idr, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = idr_get_new(&unnamed_dev_idr, NULL, &dev);
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		idr_remove(&unnamed_dev_idr, dev);
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	s->s_dev = MKDEV(0, dev & MINORMASK);
	return 0;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	int slot = MINOR(sb->s_dev);

	generic_shutdown_super(sb);
	spin_lock(&unnamed_dev_lock);
	idr_remove(&unnamed_dev_idr, slot);
	spin_unlock(&unnamed_dev_lock);
}

EXPORT_SYMBOL(kill_anon_super);

void __init unnamed_dev_init(void)
{
	idr_init(&unnamed_dev_idr);
}

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

int get_sb_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct block_device *bdev;
	struct super_block *s;
	int error = 0;

	bdev = open_bdev_excl(dev_name, flags, fs_type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	down(&bdev->bd_mount_sem);
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	up(&bdev->bd_mount_sem);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			up_write(&s->s_umount);
			deactivate_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		close_bdev_excl(bdev);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			up_write(&s->s_umount);
			deactivate_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
	}

	return simple_set_mnt(mnt, s);

error_s:
	error = PTR_ERR(s);
error_bdev:
	close_bdev_excl(bdev);
error:
	return error;
}

EXPORT_SYMBOL(get_sb_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	close_bdev_excl(bdev);
}

EXPORT_SYMBOL(kill_block_super);
#endif

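/*
 * Illustrative sketch only (hypothetical examplefs names, not part of
 * this file): a block-device based filesystem typically wires the
 * helpers above into its struct file_system_type like this:
 *
 *	static int examplefs_get_sb(struct file_system_type *fs_type,
 *				    int flags, const char *dev_name,
 *				    void *data, struct vfsmount *mnt)
 *	{
 *		return get_sb_bdev(fs_type, flags, dev_name, data,
 *				   examplefs_fill_super, mnt);
 *	}
 *
 *	static struct file_system_type examplefs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "examplefs",
 *		.get_sb		= examplefs_get_sb,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */
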
int get_sb_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(s))
		return PTR_ERR(s);

	s->s_flags = flags;

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		up_write(&s->s_umount);
		deactivate_super(s);
		return error;
	}
	s->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, s);
}

EXPORT_SYMBOL(get_sb_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

int get_sb_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return PTR_ERR(s);
	if (!s->s_root) {
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			up_write(&s->s_umount);
			deactivate_super(s);
			return error;
		}
		s->s_flags |= MS_ACTIVE;
	}
	do_remount_sb(s, flags, data, 0);
	return simple_set_mnt(mnt, s);
}

EXPORT_SYMBOL(get_sb_single);

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct vfsmount *mnt;
	char *secdata = NULL;
	int error;

	if (!type)
		return ERR_PTR(-ENODEV);

	error = -ENOMEM;
	mnt = alloc_vfsmnt(name);
	if (!mnt)
		goto out;

	if (data) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out_mnt;

		error = security_sb_copy_data(type, data, secdata);
		if (error)
			goto out_free_secdata;
	}

	error = type->get_sb(type, flags, name, data, mnt);
	if (error < 0)
		goto out_free_secdata;
	BUG_ON(!mnt->mnt_sb);

	error = security_sb_kern_mount(mnt->mnt_sb, secdata);
	if (error)
		goto out_sb;

	mnt->mnt_mountpoint = mnt->mnt_root;
	mnt->mnt_parent = mnt;
	up_write(&mnt->mnt_sb->s_umount);
	free_secdata(secdata);
	return mnt;
out_sb:
	dput(mnt->mnt_root);
	up_write(&mnt->mnt_sb->s_umount);
	deactivate_super(mnt->mnt_sb);
out_free_secdata:
	free_secdata(secdata);
out_mnt:
	free_vfsmnt(mnt);
out:
	return ERR_PTR(error);
}

EXPORT_SYMBOL_GPL(vfs_kern_mount);

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;
	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);
	put_filesystem(type);
	return mnt;
}

struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	return vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
}

EXPORT_SYMBOL_GPL(kern_mount_data);
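
/*
 * Illustrative sketch only (hypothetical examplefs names, not part of
 * this file): kernel-internal filesystems that are never mounted from
 * userspace pin a mount of themselves at init time via
 * kern_mount_data() (or the kern_mount() wrapper), e.g.
 *
 *	static struct vfsmount *examplefs_mnt;
 *
 *	static int __init examplefs_init(void)
 *	{
 *		int err = register_filesystem(&examplefs_fs_type);
 *
 *		if (err)
 *			return err;
 *		examplefs_mnt = kern_mount_data(&examplefs_fs_type, NULL);
 *		if (IS_ERR(examplefs_mnt)) {
 *			unregister_filesystem(&examplefs_fs_type);
 *			return PTR_ERR(examplefs_mnt);
 *		}
 *		return 0;
 *	}
 */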