/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 *  GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/buffer_head.h>		/* for fsync_super() */
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vfs.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>


void get_filesystem(struct file_system_type *fs);
void put_filesystem(struct file_system_type *fs);
struct file_system_type *get_fs_type(const char *name);

LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

/**
 *	alloc_super	-	create new superblock
 *	@type: filesystem type the superblock should belong to
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock, or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			kfree(s);
			s = NULL;
			goto out;
		}
		INIT_LIST_HEAD(&s->s_dirty);
		INIT_LIST_HEAD(&s->s_io);
		INIT_LIST_HEAD(&s->s_files);
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem. For example ext3fs has different
		 * lock ordering than usbfs:
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		down_write(&s->s_umount);
		s->s_count = S_BIAS;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->dq_op = sb_dquot_ops;
		s->s_qcop = sb_quotactl_ops;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
	}
out:
	return s;
}

/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
	security_sb_free(s);
	kfree(s);
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount.  Returns non-zero if the superblock was
 * destroyed.  The caller must hold sb_lock.
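 *
 * The usual pattern for temporary references (sync_supers() below is the
 * model) is to bump s_count under sb_lock before dropping the lock, and
 * to let __put_super_and_need_restart() decide afterwards whether a list
 * walk has to be restarted.  A sketch of that pattern:
 *
 *	spin_lock(&sb_lock);
 * restart:
 *	list_for_each_entry(sb, &super_blocks, s_list) {
 *		sb->s_count++;
 *		spin_unlock(&sb_lock);
 *		... work on sb with sb_lock dropped ...
 *		spin_lock(&sb_lock);
 *		if (__put_super_and_need_restart(sb))
 *			goto restart;
 *	}
 *	spin_unlock(&sb_lock);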
 */
int __put_super(struct super_block *sb)
{
	int ret = 0;

	if (!--sb->s_count) {
		destroy_super(sb);
		ret = 1;
	}
	return ret;
}

/*
 * Drop a superblock's refcount.
 * Returns non-zero if the superblock is about to be destroyed and has
 * already been removed from the super_blocks list, in which case any
 * loop over the super blocks must be restarted.
 * The caller must hold sb_lock.
 */
int __put_super_and_need_restart(struct super_block *sb)
{
	/* check for race with generic_shutdown_super() */
	if (list_empty(&sb->s_list)) {
		/* super block is removed, need to restart... */
		__put_super(sb);
		return 1;
	}
	/* can't be the last, since s_list is still in use */
	sb->s_count--;
	BUG_ON(sb->s_count == 0);
	return 0;
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees the superblock if no references
 *	are left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to the superblock, acquiring a temporary
 *	one if there are no active references left.  In that case we lock the
 *	superblock, tell the fs driver to shut it down and drop the temporary
 *	reference we just acquired.
 */
void deactivate_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
		s->s_count -= S_BIAS-1;
		spin_unlock(&sb_lock);
		DQUOT_OFF(s);
		down_write(&s->s_umount);
		fs->kill_sb(s);
		put_filesystem(fs);
		put_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	have just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 on success,
 *	0 on failure (the superblock was already dead or dying when
 *	grab_super() was called).
 */
static int grab_super(struct super_block *s)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if (s->s_root) {
		spin_lock(&sb_lock);
		if (s->s_count > S_BIAS) {
			atomic_inc(&s->s_active);
			s->s_count--;
			spin_unlock(&sb_lock);
			return 1;
		}
		spin_unlock(&sb_lock);
	}
	up_write(&s->s_umount);
	put_super(s);
	yield();
	return 0;
}

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  A typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of the superblock, call
 *	generic_shutdown_super() and release the aforementioned objects.
 *	Note: dentries and inodes _are_ taken care of and do not need
 *	specific handling.
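 *
 *	A minimal sketch of such a ->kill_sb() for a hypothetical "foofs"
 *	keeping private state in sb->s_fs_info (kill_block_super() below is
 *	a real-life example of the same shape):
 *
 *		static void foofs_kill_sb(struct super_block *sb)
 *		{
 *			struct foofs_sb_info *sbi = sb->s_fs_info;
 *
 *			generic_shutdown_super(sb);
 *			kfree(sbi);
 *		}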
 */
void generic_shutdown_super(struct super_block *sb)
{
	struct dentry *root = sb->s_root;
	struct super_operations *sop = sb->s_op;

	if (root) {
		sb->s_root = NULL;
		shrink_dcache_parent(root);
		shrink_dcache_sb(sb);
		dput(root);
		fsync_super(sb);
		lock_super(sb);
		sb->s_flags &= ~MS_ACTIVE;
		/* bad name - it should be evict_inodes() */
		invalidate_inodes(sb);
		lock_kernel();

		if (sop->write_super && sb->s_dirt)
			sop->write_super(sb);
		if (sop->put_super)
			sop->put_super(sb);

		/* Forget any remaining inodes */
		if (invalidate_inodes(sb)) {
			printk("VFS: Busy inodes after unmount of %s. "
			       "Self-destruct in 5 seconds.  Have a nice day...\n",
			       sb->s_id);
		}

		unlock_kernel();
		unlock_super(sb);
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_list);
	list_del(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@data:	argument to each of them
 *
 *	Finds a matching superblock among @type's instances using @test and
 *	grabs an active reference to it; failing that, allocates a new
 *	superblock, initializes it with @set and inserts it into the lists.
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct list_head *p;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) list_for_each(p, &type->fs_supers) {
		struct super_block *old;
		old = list_entry(p, struct super_block, s_instances);
		if (!test(old, data))
			continue;
		if (!grab_super(old))
			goto retry;
		if (s)
			destroy_super(s);
		return old;
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	return s;
}

EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

static inline void write_super(struct super_block *sb)
{
	lock_super(sb);
	if (sb->s_root && sb->s_dirt)
		if (sb->s_op->write_super)
			sb->s_op->write_super(sb);
	unlock_super(sb);
}

/*
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 */
void sync_supers(void)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			write_super(sb);
			up_read(&sb->s_umount);
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
}

/*
 * Call the ->sync_fs super_op against all filesystems which are r/w and
 * which implement it.
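 *
 * A conforming ->sync_fs implementation flushes filesystem-private dirty
 * state; with @wait non-zero it must not return before the writeout has
 * completed.  A minimal sketch for a hypothetical "foofs" (the helper is
 * made up for illustration):
 *
 *	static int foofs_sync_fs(struct super_block *sb, int wait)
 *	{
 *		return foofs_flush_private_state(sb, wait);
 *	}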
 *
 * This operation is careful to avoid the livelock which could easily happen
 * if two or more filesystems are being continuously dirtied.  s_need_sync_fs
 * is used only here.  We set it against all filesystems and then clear it as
 * we sync them.  So redirtied filesystems are skipped.
 *
 * But if process A is currently running sync_filesystems and then process B
 * calls sync_filesystems as well, process B will set all the s_need_sync_fs
 * flags again, which will cause process A to resync everything.  Fix that with
 * a local mutex.
 *
 * (Fabian) Avoid sync_fs with clean fs & wait mode 0
 */
void sync_filesystems(int wait)
{
	struct super_block *sb;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);		/* Could be down_interruptible */
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_op->sync_fs)
			continue;
		if (sb->s_flags & MS_RDONLY)
			continue;
		sb->s_need_sync_fs = 1;
	}

restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (!sb->s_need_sync_fs)
			continue;
		sb->s_need_sync_fs = 0;
		if (sb->s_flags & MS_RDONLY)
			continue;	/* hm.  Was remounted r/o meanwhile */
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root && (wait || sb->s_dirt))
			sb->s_op->sync_fs(sb, wait);
		up_read(&sb->s_umount);
		/* restart only when sb is no longer on the list */
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&mutex);
}

/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given.  %NULL is returned if no match is found.
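 *
 *	On success the superblock is returned with an elevated reference
 *	count and s_umount held for reading; release both through
 *	drop_super().  A sketch:
 *
 *		sb = get_super(bdev);
 *		if (sb) {
 *			... inspect sb ...
 *			drop_super(sb);
 *		}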
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* restart only when sb is no longer on the list */
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf)
{
	struct super_block *s;
	struct ustat tmp;
	struct kstatfs sbuf;
	int err = -EINVAL;

	s = user_get_super(new_decode_dev(dev));
	if (s == NULL)
		goto out;
	err = vfs_statfs(s->s_root, &sbuf);
	drop_super(s);
	if (err)
		goto out;

	memset(&tmp, 0, sizeof(struct ustat));
	tmp.f_tfree = sbuf.f_bfree;
	tmp.f_tinode = sbuf.f_ffree;

	err = copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0;
out:
	return err;
}

/**
 *	mark_files_ro - mark all files on a superblock read-only
 *	@sb: superblock in question
 *
 *	All open regular files on @sb are marked read-only.  Files pending
 *	deletion are not taken care of, so this should only be used in
 *	'force' mode.
 */
static void mark_files_ro(struct super_block *sb)
{
	struct file *f;

	file_list_lock();
	list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
		if (S_ISREG(f->f_dentry->d_inode->i_mode) && file_count(f))
			f->f_mode &= ~FMODE_WRITE;
	}
	file_list_unlock();
}

/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force:	whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
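 *
 *	For example, do_emergency_remount() below forces a mounted block
 *	filesystem read-only with:
 *
 *		do_remount_sb(sb, MS_RDONLY, NULL, 1);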
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;

	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	fsync_super(sb);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
	}

	if (sb->s_op->remount_fs) {
		lock_super(sb);
		retval = sb->s_op->remount_fs(sb, &flags, data);
		unlock_super(sb);
		if (retval)
			return retval;
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	return 0;
}

static void do_emergency_remount(unsigned long foo)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
			/*
			 * ->remount_fs needs lock_kernel().
			 *
			 * What lock protects sb->s_flags??
			 */
			lock_kernel();
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
			unlock_kernel();
		}
		drop_super(sb);
		spin_lock(&sb_lock);
	}
	spin_unlock(&sb_lock);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	pdflush_operation(do_emergency_remount, 0);
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static struct idr unnamed_dev_idr;
static DEFINE_SPINLOCK(unnamed_dev_lock);	/* protects the above */

int set_anon_super(struct super_block *s, void *data)
{
	int dev;
	int error;

retry:
	if (idr_pre_get(&unnamed_dev_idr, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = idr_get_new(&unnamed_dev_idr, NULL, &dev);
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU.
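		 * idr_get_new() fails with -EAGAIN when the node that
		 * idr_pre_get() preallocated has been consumed by a
		 * concurrent allocation, so preallocate again and retry.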
		 */
		goto retry;
	else if (error)
		return -EAGAIN;

	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		idr_remove(&unnamed_dev_idr, dev);
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	s->s_dev = MKDEV(0, dev & MINORMASK);
	return 0;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	int slot = MINOR(sb->s_dev);

	generic_shutdown_super(sb);
	spin_lock(&unnamed_dev_lock);
	idr_remove(&unnamed_dev_idr, slot);
	spin_unlock(&unnamed_dev_lock);
}

EXPORT_SYMBOL(kill_anon_super);

void __init unnamed_dev_init(void)
{
	idr_init(&unnamed_dev_idr);
}

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

static void bdev_uevent(struct block_device *bdev, enum kobject_action action)
{
	if (bdev->bd_disk) {
		if (bdev->bd_part)
			kobject_uevent(&bdev->bd_part->kobj, action);
		else
			kobject_uevent(&bdev->bd_disk->kobj, action);
	}
}

int get_sb_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct block_device *bdev;
	struct super_block *s;
	int error = 0;

	bdev = open_bdev_excl(dev_name, flags, fs_type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_mount_mutex);
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	mutex_unlock(&bdev->bd_mount_mutex);
	if (IS_ERR(s))
		goto error_s;
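
	/*
	 * If sget() found an existing superblock for this bdev, s_root is
	 * already set and the device is mounted; the new request must then
	 * agree with the existing mount on read-only vs read-write.
	 * Otherwise we got a fresh superblock and have to fill it ourselves.
	 */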
	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			up_write(&s->s_umount);
			deactivate_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		close_bdev_excl(bdev);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			up_write(&s->s_umount);
			deactivate_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev_uevent(bdev, KOBJ_MOUNT);
	}

	return simple_set_mnt(mnt, s);

error_s:
	error = PTR_ERR(s);
error_bdev:
	close_bdev_excl(bdev);
error:
	return error;
}

EXPORT_SYMBOL(get_sb_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	bdev_uevent(bdev, KOBJ_UMOUNT);
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	close_bdev_excl(bdev);
}

EXPORT_SYMBOL(kill_block_super);

int get_sb_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(s))
		return PTR_ERR(s);

	s->s_flags = flags;

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		up_write(&s->s_umount);
		deactivate_super(s);
		return error;
	}
	s->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, s);
}

EXPORT_SYMBOL(get_sb_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

int get_sb_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int),
	struct vfsmount *mnt)
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return PTR_ERR(s);
	if (!s->s_root) {
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			up_write(&s->s_umount);
			deactivate_super(s);
			return error;
		}
		s->s_flags |= MS_ACTIVE;
	}
	do_remount_sb(s, flags, data, 0);
	return simple_set_mnt(mnt, s);
}

EXPORT_SYMBOL(get_sb_single);

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct vfsmount *mnt;
	char *secdata = NULL;
	int error;

	if (!type)
		return ERR_PTR(-ENODEV);

	error = -ENOMEM;
	mnt = alloc_vfsmnt(name);
	if (!mnt)
		goto out;

	if (data) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out_mnt;

		error = security_sb_copy_data(type, data, secdata);
		if (error)
			goto out_free_secdata;
	}

	error = type->get_sb(type, flags, name, data, mnt);
	if (error < 0)
		goto out_free_secdata;

	error = security_sb_kern_mount(mnt->mnt_sb, secdata);
	if (error)
		goto out_sb;

	mnt->mnt_mountpoint = mnt->mnt_root;
	mnt->mnt_parent = mnt;
	up_write(&mnt->mnt_sb->s_umount);
	free_secdata(secdata);
	return mnt;
out_sb:
	dput(mnt->mnt_root);
	up_write(&mnt->mnt_sb->s_umount);
	deactivate_super(mnt->mnt_sb);
out_free_secdata:
	free_secdata(secdata);
out_mnt:
	free_vfsmnt(mnt);
out:
	return ERR_PTR(error);
}

EXPORT_SYMBOL_GPL(vfs_kern_mount);

struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;
	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	put_filesystem(type);
	return mnt;
}
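
/*
 * do_kern_mount() is the workhorse of the mount(2) path: it resolves the
 * filesystem type by name and hands off to vfs_kern_mount().  A sketch of
 * in-kernel usage (assuming tmpfs is built in):
 *
 *	struct vfsmount *mnt = do_kern_mount("tmpfs", 0, "tmpfs", NULL);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 */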

struct vfsmount *kern_mount(struct file_system_type *type)
{
	return vfs_kern_mount(type, 0, type->name, NULL);
}

EXPORT_SYMBOL(kern_mount);
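
/*
 * Putting the helpers together: a minimal, hypothetical single-instance
 * filesystem would hook into the API above roughly as follows ("foofs"
 * and foofs_fill_super() are illustrative, not part of this file):
 *
 *	static int foofs_fill_super(struct super_block *sb, void *data,
 *				    int silent)
 *	{
 *		... set sb->s_op, allocate the root inode, set sb->s_root ...
 *		return 0;
 *	}
 *
 *	static int foofs_get_sb(struct file_system_type *fs_type, int flags,
 *				const char *dev_name, void *data,
 *				struct vfsmount *mnt)
 *	{
 *		return get_sb_single(fs_type, flags, data, foofs_fill_super,
 *				     mnt);
 *	}
 *
 *	static struct file_system_type foofs_fs_type = {
 *		.owner   = THIS_MODULE,
 *		.name    = "foofs",
 *		.get_sb  = foofs_get_sb,
 *		.kill_sb = kill_litter_super,
 *	};
 */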