/*
 *  linux/fs/ext2/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *  David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/log2.h>
#include <linux/quotaops.h>
#include <asm/uaccess.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
#include "xip.h"

static void ext2_sync_super(struct super_block *sb,
			    struct ext2_super_block *es, int wait);
static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);

void ext2_error (struct super_block * sb, const char * function,
		 const char * fmt, ...)
{
	va_list args;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;

	if (!(sb->s_flags & MS_RDONLY)) {
		spin_lock(&sbi->s_lock);
		sbi->s_mount_state |= EXT2_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
		spin_unlock(&sbi->s_lock);
		ext2_sync_super(sb, es, 1);
	}

	va_start(args, fmt);
	printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	if (test_opt(sb, ERRORS_PANIC))
		panic("EXT2-fs: panic from previous error\n");
	if (test_opt(sb, ERRORS_RO)) {
		ext2_msg(sb, KERN_CRIT,
			"error: remounting filesystem read-only");
		sb->s_flags |= MS_RDONLY;
	}
}

void ext2_msg(struct super_block *sb, const char *prefix,
		const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);
}

/*
 * This must be called with sbi->s_lock held.
 */
void ext2_update_dynamic_rev(struct super_block *sb)
{
	struct ext2_super_block *es = EXT2_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
		return;

	ext2_msg(sb, KERN_WARNING,
		 "warning: updating to rev %d because of "
		 "new feature flag, running e2fsck is recommended",
		 EXT2_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

static void ext2_put_super (struct super_block * sb)
{
	int db_count;
	int i;
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	if (sb->s_dirt)
		ext2_write_super(sb);

	ext2_xattr_put_super(sb);
	if (!(sb->s_flags & MS_RDONLY)) {
		struct ext2_super_block *es = sbi->s_es;

		spin_lock(&sbi->s_lock);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
		spin_unlock(&sbi->s_lock);
		ext2_sync_super(sb, es, 1);
	}
	db_count = sbi->s_gdb_count;
	for (i = 0; i < db_count; i++)
		if (sbi->s_group_desc[i])
			brelse (sbi->s_group_desc[i]);
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	brelse (sbi->s_sbh);
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}

static struct kmem_cache * ext2_inode_cachep;

static struct inode *ext2_alloc_inode(struct super_block *sb)
{
	struct ext2_inode_info *ei;
	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->i_block_alloc_info = NULL;
	ei->vfs_inode.i_version = 1;
	return &ei->vfs_inode;
}

static void ext2_destroy_inode(struct inode *inode)
{
	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
}

static void init_once(void *foo)
{
	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;

	rwlock_init(&ei->i_meta_lock);
#ifdef CONFIG_EXT2_FS_XATTR
	init_rwsem(&ei->xattr_sem);
#endif
	mutex_init(&ei->truncate_mutex);
	inode_init_once(&ei->vfs_inode);
}

static int init_inodecache(void)
{
	ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
					      sizeof(struct ext2_inode_info),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      init_once);
	if (ext2_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(ext2_inode_cachep);
}

static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct super_block *sb = vfs->mnt_sb;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;
	unsigned long def_mount_opts;

	spin_lock(&sbi->s_lock);
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);

	if (sbi->s_sb_block != 1)
		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
	if (test_opt(sb, MINIX_DF))
		seq_puts(seq, ",minixdf");
	if (test_opt(sb, GRPID))
		seq_puts(seq, ",grpid");
	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
		seq_puts(seq, ",nogrpid");
	if (sbi->s_resuid != EXT2_DEF_RESUID ||
	    le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
		seq_printf(seq, ",resuid=%u", sbi->s_resuid);
	}
	if (sbi->s_resgid != EXT2_DEF_RESGID ||
	    le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u", sbi->s_resgid);
	}
	if (test_opt(sb, ERRORS_RO)) {
		int def_errors = le16_to_cpu(es->s_errors);

		if (def_errors == EXT2_ERRORS_PANIC ||
		    def_errors == EXT2_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG))
		seq_puts(seq, ",debug");
	if (test_opt(sb, OLDALLOC))
		seq_puts(seq, ",oldalloc");

#ifdef CONFIG_EXT2_FS_XATTR
	if (test_opt(sb, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif

#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif

	if (test_opt(sb, NOBH))
		seq_puts(seq, ",nobh");

#if defined(CONFIG_QUOTA)
	if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

#if defined(CONFIG_EXT2_FS_XIP)
	if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
		seq_puts(seq, ",xip");
#endif

	if (!test_opt(sb, RESERVATION))
		seq_puts(seq, ",noreservation");

	spin_unlock(&sbi->s_lock);
	return 0;
}

#ifdef CONFIG_QUOTA
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
#endif

static const struct super_operations ext2_sops = {
	.alloc_inode	= ext2_alloc_inode,
	.destroy_inode	= ext2_destroy_inode,
	.write_inode	= ext2_write_inode,
	.evict_inode	= ext2_evict_inode,
	.put_super	= ext2_put_super,
	.write_super	= ext2_write_super,
	.sync_fs	= ext2_sync_fs,
	.statfs		= ext2_statfs,
	.remount_fs	= ext2_remount,
	.show_options	= ext2_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext2_quota_read,
	.quota_write	= ext2_quota_write,
#endif
};

static struct inode *ext2_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 * ext2_read_inode currently does appropriate checks, but
	 * it might be "neater" to call ext2_get_inode first and check
	 * if the inode is valid.....
	 */
	inode = ext2_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext2_nfs_get_inode);
}

static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext2_nfs_get_inode);
}

/* Yes, most of these are left as NULL!!
 * A NULL value implies the default, which works with ext2-like file
 * systems, but can be improved upon.
 * Currently only get_parent is required.
 */
static const struct export_operations ext2_export_ops = {
	.fh_to_dentry = ext2_fh_to_dentry,
	.fh_to_parent = ext2_fh_to_parent,
	.get_parent = ext2_get_parent,
};

static unsigned long get_sb_block(void **data)
{
	unsigned long sb_block;
	char *options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */
	options += 3;
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		printk("EXT2-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;
	return sb_block;
}

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
	Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
	Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
};

static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_nocheck, "check=none"},
	{Opt_nocheck, "nocheck"},
	{Opt_debug, "debug"},
	{Opt_oldalloc, "oldalloc"},
	{Opt_orlov, "orlov"},
	{Opt_nobh, "nobh"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_xip, "xip"},
	{Opt_grpquota, "grpquota"},
	{Opt_ignore, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_reservation, "reservation"},
	{Opt_noreservation, "noreservation"},
	{Opt_err, NULL}
};

static int parse_options(char *options, struct super_block *sb)
{
	char *p;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	int option;

	if (!options)
		return 1;

	while ((p = strsep (&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bsd_df:
			clear_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_minix_df:
			set_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_grpid:
			set_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_nogrpid:
			clear_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_resuid:
			if (match_int(&args[0], &option))
				return 0;
			sbi->s_resuid = option;
			break;
		case Opt_resgid:
			if (match_int(&args[0], &option))
				return 0;
			sbi->s_resgid = option;
			break;
		case Opt_sb:
			/* handled by get_sb_block() instead of here */
			/* *sb_block = match_int(&args[0]); */
			break;
		case Opt_err_panic:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_RO);
			break;
		case Opt_err_cont:
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_CONT);
			break;
		case Opt_nouid32:
			set_opt (sbi->s_mount_opt, NO_UID32);
			break;
		case Opt_nocheck:
			clear_opt (sbi->s_mount_opt, CHECK);
			break;
		case Opt_debug:
			set_opt (sbi->s_mount_opt, DEBUG);
			break;
		case Opt_oldalloc:
			set_opt (sbi->s_mount_opt, OLDALLOC);
			break;
		case Opt_orlov:
			clear_opt (sbi->s_mount_opt, OLDALLOC);
			break;
		case Opt_nobh:
			set_opt (sbi->s_mount_opt, NOBH);
			break;
#ifdef CONFIG_EXT2_FS_XATTR
		case Opt_user_xattr:
			set_opt (sbi->s_mount_opt, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt (sbi->s_mount_opt, XATTR_USER);
			break;
#else
		case Opt_user_xattr:
		case Opt_nouser_xattr:
			ext2_msg(sb, KERN_INFO, "(no)user_xattr options "
				"not supported");
			break;
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
#else
		case Opt_acl:
		case Opt_noacl:
			ext2_msg(sb, KERN_INFO,
				"(no)acl options not supported");
			break;
#endif
		case Opt_xip:
#ifdef CONFIG_EXT2_FS_XIP
			set_opt (sbi->s_mount_opt, XIP);
#else
			ext2_msg(sb, KERN_INFO, "xip option not supported");
#endif
			break;

#if defined(CONFIG_QUOTA)
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi->s_mount_opt, USRQUOTA);
			break;

		case Opt_grpquota:
			set_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
			ext2_msg(sb, KERN_INFO,
				"quota operations not supported");
			break;
#endif

		case Opt_reservation:
			set_opt(sbi->s_mount_opt, RESERVATION);
			ext2_msg(sb, KERN_INFO, "reservations ON");
			break;
		case Opt_noreservation:
			clear_opt(sbi->s_mount_opt, RESERVATION);
			ext2_msg(sb, KERN_INFO, "reservations OFF");
			break;
		case Opt_ignore:
			break;
		default:
			return 0;
		}
	}
	return 1;
}

static int ext2_setup_super (struct super_block * sb,
			     struct ext2_super_block * es,
			     int read_only)
{
	int res = 0;
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
		ext2_msg(sb, KERN_ERR,
			"error: revision level too high, "
			"forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		return res;
	if (!(sbi->s_mount_state & EXT2_VALID_FS))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting unchecked fs, "
			"running e2fsck is recommended");
	else if ((sbi->s_mount_state & EXT2_ERROR_FS))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting fs with errors, "
			"running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext2_msg(sb, KERN_WARNING,
			"warning: maximal mount count reached, "
			"running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext2_msg(sb, KERN_WARNING,
			"warning: checktime reached, "
			"running e2fsck is recommended");
	if (!le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	if (test_opt (sb, DEBUG))
KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, " 605 "bpg=%lu, ipg=%lu, mo=%04lx]", 606 EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize, 607 sbi->s_frag_size, 608 sbi->s_groups_count, 609 EXT2_BLOCKS_PER_GROUP(sb), 610 EXT2_INODES_PER_GROUP(sb), 611 sbi->s_mount_opt); 612 return res; 613 } 614 615 static int ext2_check_descriptors(struct super_block *sb) 616 { 617 int i; 618 struct ext2_sb_info *sbi = EXT2_SB(sb); 619 620 ext2_debug ("Checking group descriptors"); 621 622 for (i = 0; i < sbi->s_groups_count; i++) { 623 struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL); 624 ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i); 625 ext2_fsblk_t last_block; 626 627 if (i == sbi->s_groups_count - 1) 628 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; 629 else 630 last_block = first_block + 631 (EXT2_BLOCKS_PER_GROUP(sb) - 1); 632 633 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || 634 le32_to_cpu(gdp->bg_block_bitmap) > last_block) 635 { 636 ext2_error (sb, "ext2_check_descriptors", 637 "Block bitmap for group %d" 638 " not in group (block %lu)!", 639 i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap)); 640 return 0; 641 } 642 if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block || 643 le32_to_cpu(gdp->bg_inode_bitmap) > last_block) 644 { 645 ext2_error (sb, "ext2_check_descriptors", 646 "Inode bitmap for group %d" 647 " not in group (block %lu)!", 648 i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap)); 649 return 0; 650 } 651 if (le32_to_cpu(gdp->bg_inode_table) < first_block || 652 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 > 653 last_block) 654 { 655 ext2_error (sb, "ext2_check_descriptors", 656 "Inode table for group %d" 657 " not in group (block %lu)!", 658 i, (unsigned long) le32_to_cpu(gdp->bg_inode_table)); 659 return 0; 660 } 661 } 662 return 1; 663 } 664 665 /* 666 * Maximal file size. There is a direct, and {,double-,triple-}indirect 667 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks. 668 * We need to be 1 filesystem block less than the 2^32 sector limit. 
 */
static loff_t ext2_max_size(int bits)
{
	loff_t res = EXT2_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;

	/* This is calculated to be the largest file size for a
	 * dense file, such that the total number of
	 * sectors in the file, including data and all indirect blocks,
	 * does not exceed 2^32 - 1.
	 * __u32 i_blocks represents the total number of
	 * 512-byte blocks of the file.
	 */
	upper_limit = (1LL << 32) - 1;

	/* total blocks in file system block size */
	upper_limit >>= (bits - 9);


	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

static unsigned long descriptor_loc(struct super_block *sb,
				    unsigned long logic_sb_block,
				    int nr)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext2_bg_has_super(sb, bg))
		has_super = 1;

	return ext2_group_first_block_no(sb, bg) + has_super;
}

static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext2_sb_info * sbi;
	struct ext2_super_block * es;
	struct inode *root;
	unsigned long block;
	unsigned long sb_block = get_sb_block(&data);
	unsigned long logic_sb_block;
	unsigned long offset = 0;
	unsigned long def_mount_opts;
	long ret = -EINVAL;
	int blocksize = BLOCK_SIZE;
	int db_count;
	int i, j;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	spin_lock_init(&sbi->s_lock);

	/*
	 * See what the current blocksize for the device is, and
	 * use that as the blocksize.  Otherwise (or if the blocksize
	 * is smaller than the default) use the default.
	 * This is important for devices that have a hardware
	 * sectorsize that is larger than the default.
	 */
	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
	if (!blocksize) {
		ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto failed_sbi;
	}

	/*
	 * If the superblock doesn't start on a hardware sector boundary,
	 * calculate the offset.
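	 *
	 * For example, with a 4 KiB device block size the default sb_block
	 * of 1 (byte offset 1024) falls inside logical block 0 at offset
	 * 1024.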
	 */
	if (blocksize != BLOCK_SIZE) {
		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto failed_sbi;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext2 macro-instructions depend on its value
	 */
	es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT2_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT2_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
	if (def_mount_opts & EXT2_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (def_mount_opts & EXT2_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);

	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options((char *) data, sb))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
		 MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				   EXT2_MOUNT_XIP if not */

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
	    (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext2_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
			"unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}
	if (!(sb->s_flags & MS_RDONLY) &&
	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
			"unsupported optional features (%x)",
			le32_to_cpu(features));
		goto failed_mount;
	}

	blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);

	if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
		if (!silent)
			ext2_msg(sb, KERN_ERR,
				"error: unsupported blocksize for xip");
		goto failed_mount;
	}

	/* If the blocksize doesn't match, re-read the thing.. */
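	/*
	 * The superblock was read using a provisional block size; if
	 * s_log_block_size specifies a different one, switch the block
	 * device over to it and re-read the superblock at the block and
	 * offset recomputed for that size.
	 */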
	if (sb->s_blocksize != blocksize) {
		brelse(bh);

		if (!sb_set_blocksize(sb, blocksize)) {
			ext2_msg(sb, KERN_ERR, "error: blocksize is too small");
			goto failed_sbi;
		}

		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
		offset = (sb_block*BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if(!bh) {
			ext2_msg(sb, KERN_ERR, "error: couldn't read "
				"superblock on 2nd try");
			goto failed_sbi;
		}
		es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
			ext2_msg(sb, KERN_ERR, "error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);

	if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
		    !is_power_of_2(sbi->s_inode_size) ||
		    (sbi->s_inode_size > blocksize)) {
			ext2_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}

	sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
				le32_to_cpu(es->s_log_frag_size);
	if (sbi->s_frag_size == 0)
		goto cantfind_ext2;
	sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;

	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);

	if (EXT2_INODE_SIZE(sb) == 0)
		goto cantfind_ext2;
	sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
		goto cantfind_ext2;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = sb->s_blocksize /
					sizeof (struct ext2_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits =
		ilog2 (EXT2_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits =
		ilog2 (EXT2_DESC_PER_BLOCK(sb));

	if (sb->s_magic != EXT2_SUPER_MAGIC)
		goto cantfind_ext2;

	if (sb->s_blocksize != bh->b_size) {
		if (!silent)
			ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
		goto failed_mount;
	}

	if (sb->s_blocksize != sbi->s_frag_size) {
		ext2_msg(sb, KERN_ERR,
			"error: fragsize %lu != blocksize %lu "
			"(not supported yet)",
			sbi->s_frag_size, sb->s_blocksize);
		goto failed_mount;
	}

	if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
		ext2_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext2;
	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
				le32_to_cpu(es->s_first_data_block) - 1)
					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
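	/*
	 * Each descriptor block holds EXT2_DESC_PER_BLOCK(sb) group
	 * descriptors, so round up to get the number of descriptor
	 * blocks that have to be read.
	 */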
	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
		   EXT2_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount;
	}
	bgl_lock_init(sbi->s_blockgroup_lock);
	sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
	if (!sbi->s_debts) {
		ext2_msg(sb, KERN_ERR, "error: not enough memory");
		goto failed_mount_group_desc;
	}
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			for (j = 0; j < i; j++)
				brelse (sbi->s_group_desc[j]);
			ext2_msg(sb, KERN_ERR,
				"error: unable to read group descriptors");
			goto failed_mount_group_desc;
		}
	}
	if (!ext2_check_descriptors (sb)) {
		ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per filesystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/*
	 * Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler.
	 */
	sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);

	err = percpu_counter_init(&sbi->s_freeblocks_counter,
				ext2_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext2_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext2_count_dirs(sb));
	}
	if (err) {
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
	}
	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext2_sops;
	sb->s_export_op = &ext2_export_ops;
	sb->s_xattr = ext2_xattr_handlers;

#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
#endif

	root = ext2_iget(sb, EXT2_ROOT_INO);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		ext2_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
		ext2_msg(sb, KERN_WARNING,
			"warning: mounting ext3 filesystem as ext2");
	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;
	ext2_write_super(sb);
	return 0;

cantfind_ext2:
	if (!silent)
		ext2_msg(sb, KERN_ERR,
			"error: can't find an ext2 filesystem on dev %s.",
			sb->s_id);
	goto failed_mount;
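
/*
 * Error unwind: each label below releases only what had been set up
 * before the corresponding failure point, in reverse order of
 * allocation (per-cpu counters, then the group descriptor buffer heads,
 * then the descriptor and debt arrays, then the superblock buffer and
 * sbi itself).
 */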
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
failed_mount_group_desc:
	kfree(sbi->s_group_desc);
	kfree(sbi->s_debts);
failed_mount:
	brelse(bh);
failed_sbi:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
	return ret;
}

static void ext2_clear_super_error(struct super_block *sb)
{
	struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;

	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext2_msg(sb, KERN_ERR,
			"previous I/O error to superblock detected\n");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
}

static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
			    int wait)
{
	ext2_clear_super_error(sb);
	spin_lock(&EXT2_SB(sb)->s_lock);
	es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
	es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
	es->s_wtime = cpu_to_le32(get_seconds());
	/* unlock before we do IO */
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
	if (wait)
		sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
	sb->s_dirt = 0;
}

/*
 * In the second extended file system, it is not necessary to
 * write the super block since we use a mapping of the
 * disk super block in a buffer.
 *
 * However, this function is still used to set the fs valid
 * flags to 0.  We need to set this flag to 0 since the fs
 * may have been checked while mounted and e2fsck may have
 * set s_state to EXT2_VALID_FS after some corrections.
 */
static int ext2_sync_fs(struct super_block *sb, int wait)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = EXT2_SB(sb)->s_es;

	spin_lock(&sbi->s_lock);
	if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
		ext2_debug("setting valid to 0\n");
		es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
	}
	spin_unlock(&sbi->s_lock);
	ext2_sync_super(sb, es, wait);
	return 0;
}


void ext2_write_super(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		ext2_sync_fs(sb, 1);
	else
		sb->s_dirt = 0;
}

static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
	struct ext2_sb_info * sbi = EXT2_SB(sb);
	struct ext2_super_block * es;
	unsigned long old_mount_opt = sbi->s_mount_opt;
	struct ext2_mount_options old_opts;
	unsigned long old_sb_flags;
	int err;

	spin_lock(&sbi->s_lock);

	/* Store the old options */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;

	/*
	 * Allow the "check" option to be passed as a remount option.
	 */
	if (!parse_options(data, sb)) {
		err = -EINVAL;
		goto restore_opts;
	}

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				   EXT2_MOUNT_XIP if not */

	if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
		ext2_msg(sb, KERN_WARNING,
			"warning: unsupported blocksize for xip");
		err = -EINVAL;
		goto restore_opts;
	}

	es = sbi->s_es;
	if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
	    (old_mount_opt & EXT2_MOUNT_XIP)) &&
	    invalidate_inodes(sb)) {
		ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
			"xip flag with busy inodes while remounting");
		sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
		sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
	}
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
		spin_unlock(&sbi->s_lock);
		return 0;
	}
	if (*flags & MS_RDONLY) {
		if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
		    !(sbi->s_mount_state & EXT2_VALID_FS)) {
			spin_unlock(&sbi->s_lock);
			return 0;
		}

		/*
		 * OK, we are remounting a valid rw partition rdonly, so set
		 * the rdonly flag and then mark the partition as valid again.
		 */
		es->s_state = cpu_to_le16(sbi->s_mount_state);
		es->s_mtime = cpu_to_le32(get_seconds());
		spin_unlock(&sbi->s_lock);

		err = dquot_suspend(sb, -1);
		if (err < 0) {
			spin_lock(&sbi->s_lock);
			goto restore_opts;
		}

		ext2_sync_super(sb, es, 1);
	} else {
		__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
					~EXT2_FEATURE_RO_COMPAT_SUPP);
		if (ret) {
			ext2_msg(sb, KERN_WARNING,
				"warning: couldn't remount RDWR because of "
				"unsupported optional features (%x).",
				le32_to_cpu(ret));
			err = -EROFS;
			goto restore_opts;
		}
		/*
		 * Mounting a RDONLY partition read-write, so reread and
		 * store the current valid flag.  (It may have been changed
		 * by e2fsck since we originally mounted the partition.)
		 */
		sbi->s_mount_state = le16_to_cpu(es->s_state);
		if (!ext2_setup_super (sb, es, 0))
			sb->s_flags &= ~MS_RDONLY;
		spin_unlock(&sbi->s_lock);

		ext2_write_super(sb);

		dquot_resume(sb, -1);
	}

	return 0;
restore_opts:
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sb->s_flags = old_sb_flags;
	spin_unlock(&sbi->s_lock);
	return err;
}

static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;
	u64 fsid;

	spin_lock(&sbi->s_lock);

	if (test_opt (sb, MINIX_DF))
		sbi->s_overhead_last = 0;
	else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
		unsigned long i, overhead = 0;
		smp_rmb();

		/*
		 * Compute the overhead (FS structures).  This is constant
		 * for a given filesystem unless the number of block groups
		 * changes so we cache the previous value until it does.
		 */

		/*
		 * All of the blocks before first_data_block are
		 * overhead
		 */
		overhead = le32_to_cpu(es->s_first_data_block);

		/*
		 * Add the overhead attributed to the superblock and
		 * block group descriptors.  If the sparse superblocks
		 * feature is turned on, then not all groups have this.
		 */
		for (i = 0; i < sbi->s_groups_count; i++)
			overhead += ext2_bg_has_super(sb, i) +
				ext2_bg_num_gdb(sb, i);

		/*
		 * Every block group has an inode bitmap, a block
		 * bitmap, and an inode table.
		 */
		overhead += (sbi->s_groups_count *
			     (2 + sbi->s_itb_per_group));
		sbi->s_overhead_last = overhead;
		smp_wmb();
		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
	}

	buf->f_type = EXT2_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
	buf->f_bfree = ext2_count_free_blocks(sb);
	es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = ext2_count_free_inodes(sb);
	es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
	buf->f_namelen = EXT2_NAME_LEN;
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
	spin_unlock(&sbi->s_lock);
	return 0;
}

static int ext2_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super, mnt);
}

#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = sb->s_blocksize;
		err = ext2_get_block(inode, blk, &tmp_bh, 0);
		if (err < 0)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t ext2_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		err = ext2_get_block(inode, blk, &tmp_bh, 1);
		if (err < 0)
			goto out;
		if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}

#endif

static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.get_sb		= ext2_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

static int __init init_ext2_fs(void)
{
	int err = init_ext2_xattr();
	if (err)
		return err;
	err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&ext2_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	exit_ext2_xattr();
	return err;
}

static void __exit exit_ext2_fs(void)
{
	unregister_filesystem(&ext2_fs_type);
	destroy_inodecache();
	exit_ext2_xattr();
}

module_init(init_ext2_fs)
module_exit(exit_ext2_fs)
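
/*
 * Usage sketch (illustrative, not part of the build): with this module
 * loaded (built in or via "modprobe ext2"), an ext2 volume is mounted
 * with something like:
 *
 *	mount -t ext2 -o errors=remount-ro,noreservation /dev/sdXN /mnt
 *
 * where /dev/sdXN stands in for the real block device; the option
 * strings accepted are the ones listed in the tokens[] table above.
 */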