// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/parser.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fserror.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
static int ext4_validate_options(struct fs_context *fc);
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb);
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static void ext4_kill_sb(struct super_block *sb);
static const struct fs_parameter_spec
		ext4_param_specs[];

/*
 * Lock ordering
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
 *   -> page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
 *   page lock
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

static const struct fs_context_operations ext4_context_ops = {
	.parse_param	= ext4_parse_param,
	.get_tree	= ext4_get_tree,
	.reconfigure	= ext4_reconfigure,
	.free		= ext4_fc_free,
};


#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext2",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext3",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= ext4_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)


static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
				  bh_end_io_t *end_io, bool simu_fail)
{
	if (simu_fail) {
		clear_buffer_uptodate(bh);
		unlock_buffer(bh);
		return;
	}

	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ | op_flags, bh);
}

void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
			 bh_end_io_t *end_io, bool simu_fail)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io, simu_fail);
}

int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
		 bh_end_io_t *end_io, bool simu_fail)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io, simu_fail);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
	lock_buffer(bh);
	if (!wait) {
		ext4_read_bh_nowait(bh, op_flags, NULL, false);
		return 0;
	}
	return ext4_read_bh(bh, op_flags, NULL, false);
}
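
/*
 * A note on the simu_fail argument taken by the read helpers above: when it
 * is true, __ext4_read_bh() skips the actual submission and simply leaves
 * the buffer !uptodate and unlocked, so the caller observes the same outcome
 * as a failed read. In practice the flag is expected to come from ext4's
 * error-injection support (ext4_simulate_fail()), defined elsewhere.
 */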

/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return).
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block,
					       blk_opf_t op_flags, gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				  blk_opf_t op_flags)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
			~__GFP_FS) | __GFP_MOVABLE;

	return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
}

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
			~__GFP_FS);

	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}

struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb,
					 sector_t block)
{
	gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
			~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL;

	return __ext4_sb_bread_gfp(sb, block, 0, gfp);
}

void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = bdev_getblk(sb->s_bdev, block,
			sb->s_blocksize, GFP_NOWAIT);

	if (likely(bh)) {
		if (trylock_buffer(bh))
			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL, false);
		brelse(bh);
	}
}

static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

__le32 ext4_superblock_csum(struct ext4_super_block *es)
{
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_feature_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(es);
}

ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}
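
/*
 * Note on the accessors above and below: every field that can exceed the
 * old ext2/ext3 width is stored as a _lo/_hi pair in the group descriptor.
 * When the descriptor is at least EXT4_MIN_DESC_SIZE_64BIT bytes the two
 * halves are combined, e.g. (illustrative only):
 *
 *	blk = le32_to_cpu(bg->bg_inode_table_lo) |
 *	      ((ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32);
 *
 * Smaller descriptors have no _hi half, so only the low 32 (or 16) bits
 * are meaningful and the high part reads back as zero.
 */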

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count));
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16));
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
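
/*
 * Superblock timestamps are stored as 40-bit second counts: a 32-bit
 * little-endian "lo" field plus an 8-bit "hi" field, clamped to the range
 * 0..2^40-1 by the helper above. For example, a time64_t value t is stored
 * as lo = lower_32_bits(t), hi = upper_32_bits(t) and read back as
 * ((time64_t)hi << 32) + lo, which is exactly what the macros below expand
 * to for fields such as s_wtime/s_wtime_hi.
 */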
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)

/*
 * The ext4_maybe_update_superblock() function checks and updates the
 * superblock if needed.
 *
 * This function is designed to update the on-disk superblock only under
 * certain conditions to prevent excessive disk writes and unnecessary
 * waking of the disk from sleep. The superblock will be updated if:
 * 1. More than sbi->s_sb_update_sec (default: 1 hour) has passed since the
 *    last superblock update, or
 * 2. More than sbi->s_sb_update_kb (default: 16MB) kilobytes have been
 *    written since the last superblock update.
 *
 * @sb: The superblock
 */
static void ext4_maybe_update_superblock(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	journal_t *journal = sbi->s_journal;
	time64_t now;
	__u64 last_update;
	__u64 lifetime_write_kbytes;
	__u64 diff_size;

	if (ext4_emergency_state(sb) || sb_rdonly(sb) ||
	    !(sb->s_flags & SB_ACTIVE) || !journal ||
	    journal->j_flags & JBD2_UNMOUNT)
		return;

	now = ktime_get_real_seconds();
	last_update = ext4_get_tstamp(es, s_wtime);

	if (likely(now - last_update < sbi->s_sb_update_sec))
		return;

	lifetime_write_kbytes = sbi->s_kbytes_written +
		((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		  sbi->s_sectors_written_start) >> 1);

	/*
	 * Compare the number of kilobytes written since the last superblock
	 * commit with a multiple of 16MB. This determines when the next
	 * superblock commit should occur (i.e. not more often than once per
	 * 16MB if less than that was written within the hour).
	 */
	diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written);

	if (diff_size > sbi->s_sb_update_kb)
		schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
}

static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);
	ext4_maybe_update_superblock(sb);
}

static bool ext4_journalled_writepage_needs_redirty(struct jbd2_inode *jinode,
						    struct folio *folio)
{
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = folio_buffers(folio);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, the dirty bit
		 * needs to be preserved so that the buffer gets write-protected
		 * properly on the running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != jinode->i_transaction ||
			    jh->b_next_transaction)))
			return true;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};
	struct folio *folio = NULL;
	int error;

	/*
	 * writeback_iter() already checks for dirty pages and calls
	 * folio_clear_dirty_for_io(), which is how we want to write-protect
	 * the folios.
	 *
	 * However, we may have to redirty a folio sometimes.
	 */
	while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
		if (ext4_journalled_writepage_needs_redirty(jinode, folio))
			folio_redirty_for_writepage(&wbc, folio);
		folio_unlock(folio);
	}

	return error;
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = ext4_normal_submit_inode_data_buffers(jinode);
	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};

static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}

static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic, in which case we take the easy way out and panic immediately. This
 * is used to deal with unrecoverable failures such as journal IO errors or
 * ENOMEM at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	if (!continue_fs && !ext4_emergency_ro(sb) && journal)
		jbd2_journal_abort(journal, -error);

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to write out
		 * the superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here, so we
		 * defer superblock flushing to a workqueue. We just need to be
		 * careful when the journal is already shutting down. If we get
		 * here in that case, just update the sb directly as the last
		 * transaction won't commit anyway.
		 */
		if (continue_fs && journal &&
		    !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when the system is rebooting. Otherwise
	 * we could panic during 'reboot -f' as the underlying device has
	 * already been disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
		      sb->s_id);
	}

	if (ext4_emergency_ro(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * We don't set SB_RDONLY because that requires the sb->s_umount
	 * semaphore, and setting it without the proper remount procedure
	 * confuses code such as freeze_super(), leading to deadlocks and
	 * other problems.
	 */
	set_bit(EXT4_FLAGS_EMERGENCY_RO, &EXT4_SB(sb)->s_ext4_flags);
}

static void update_super_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
						s_sb_upd_work);
	journal_t *journal = sbi->s_journal;
	handle_t *handle;

	/*
	 * If the journal is still running, we have to write out the
	 * superblock through the journal to avoid collisions with other
	 * journalled sb updates.
	 *
	 * We use jbd2 functions directly here to avoid recursing back into
	 * ext4 error handling code during handling of previous errors.
	 */
	if (!ext4_emergency_state(sbi->s_sb) &&
	    !sb_rdonly(sbi->s_sb) && journal) {
		struct buffer_head *sbh = sbi->s_sbh;
		bool call_notify_err = false;

		handle = jbd2_journal_start(journal, 1);
		if (IS_ERR(handle))
			goto write_directly;
		if (jbd2_journal_get_write_access(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}

		if (sbi->s_add_error_count > 0)
			call_notify_err = true;

		ext4_update_super(sbi->s_sb);
		if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
			ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to "
				 "superblock detected");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}

		if (jbd2_journal_dirty_metadata(handle, sbh)) {
			jbd2_journal_stop(handle);
			goto write_directly;
		}
		jbd2_journal_stop(handle);

		if (call_notify_err)
			ext4_notify_error_sysfs(sbi);

		return;
	}
write_directly:
	/*
	 * Writing through the journal failed. Write the sb directly to get
	 * the error info out and hope for the best.
	 */
	ext4_commit_super(sbi->s_sb);
	ext4_notify_error_sysfs(sbi);
}

#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_emergency_state(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	fserror_report_metadata(sb, error ? -abs(error) : -EFSCORRUPTED,
				GFP_ATOMIC);

	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}
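
/*
 * Note: these reporting helpers are normally reached through the
 * ext4_error(), ext4_error_inode() and ext4_error_file() macros in ext4.h,
 * which supply __func__ and __LINE__ so the log message can name the call
 * site, e.g. (illustrative only):
 *
 *	ext4_error(sb, "bad block bitmap checksum for group %u", group);
 *
 * Output is rate-limited via s_err_ratelimit_state (see
 * ext4_error_ratelimit() above).
 */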

void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_emergency_state(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	fserror_report_file_metadata(inode,
				     error ? -abs(error) : -EFSCORRUPTED,
				     GFP_ATOMIC);

	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}

void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_emergency_state(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	fserror_report_file_metadata(inode, -EFSCORRUPTED, GFP_ATOMIC);

	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}

const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.
 */

void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_emergency_state(sb)))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
	fserror_report_metadata(sb, errno ? -abs(errno) : -EFSCORRUPTED,
				GFP_ATOMIC);

	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}

void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (sb) {
		atomic_inc(&EXT4_SB(sb)->s_msg_count);
		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
				  "EXT4-fs"))
			return;
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	else
		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
	va_end(args);
}

static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}

void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}

void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}

void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_emergency_state(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		if (!bdev_read_only(sb->s_bdev)) {
			save_error_info(sb, EFSCORRUPTED, ino, block, function,
					line);
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		}
		return;
	}
	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read-only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}

void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
				      ext4_group_t group,
				      unsigned int flags)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
	int ret;

	if (!grp || !gdp)
		return;
	if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret)
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
	}

	if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
		ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
					    &grp->bb_state);
		if (!ret && gdp) {
			int count;

			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
	}
}

void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}

static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

static inline void ext4_quotas_off(struct super_block *sb, int type)
{
	BUG_ON(type > EXT4_MAXQUOTAS);

	/* Use our quota_off function to clear inode flags etc. */
	for (type--; type >= 0; type--)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which hold s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
static inline void ext4_quotas_off(struct super_block *sb, int type)
{
}
#endif

static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t block;
	int err;

	block = ext4_count_free_clusters(sbi->s_sb);
	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err)
		ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");

	return err;
}

static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
}

static void ext4_group_desc_free(struct ext4_sb_info *sbi)
{
	struct buffer_head **group_desc;
	int i;

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
}
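
/*
 * Free the two-level s_flex_groups array. The s_flex_groups pointer is
 * published with RCU elsewhere in this file, so it is read here with the
 * same rcu_dereference() discipline as the normal readers, even though no
 * new readers can appear by the time we get here (unmount or a failed
 * mount).
 */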
static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
{
	struct flex_groups **flex_groups;
	int i;

	rcu_read_lock();
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
}

static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int err;

	/*
	 * Unregister sysfs before destroying the jbd2 journal: the
	 * attr_journal_task attribute could otherwise be read via sysfs
	 * while sbi->s_journal->j_task is already NULL.
	 * Also unregister sysfs before flushing sbi->s_sb_upd_work: a user
	 * may read /proc/fs/ext4/xx/mb_groups during umount; if a metadata
	 * read then fails verification, error work is queued and
	 * update_super_work calling start_this_handle may trigger a BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
			 &sb->s_uuid);

	ext4_unregister_li_request(sb);
	ext4_quotas_off(sb, EXT4_MAXQUOTAS);

	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = ext4_journal_destroy(sbi, sbi->s_journal);
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	} else
		flush_work(&sbi->s_sb_upd_work);

	ext4_es_unregister_shrinker(sbi);
	timer_shutdown_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!ext4_emergency_state(sb) && !sb_rdonly(sb)) {
		if (!aborted) {
			ext4_clear_feature_journal_needs_recovery(sb);
			ext4_clear_feature_orphan_present(sb);
			es->s_state = cpu_to_le16(sbi->s_mount_state);
		}
		ext4_commit_super(sb);
	}

	ext4_group_desc_free(sbi);
	ext4_flex_groups_free(sbi);

	WARN_ON_ONCE(!(sbi->s_mount_state & EXT4_ERROR_FS) &&
		     percpu_counter_sum(&sbi->s_dirtyclusters_counter));
	ext4_percpu_param_destroy(sbi);
#ifdef CONFIG_QUOTA
	for (int i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev_file) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * have been hotswapped, and it breaks the `ro-after' testing
		 * code.
		 */
		sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
		invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	ei->i_flags = 0;
	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	spin_lock_init(&ei->i_raw_lock);
	ei->i_prealloc_node = RB_ROOT;
	atomic_set(&ei->i_prealloc_active, 0);
	rwlock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_es_seq = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	spin_lock_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = inode_generic_drop(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (ext4_inode_orphan_tracked(inode)) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): inode tracked as orphan!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
			       EXT4_I(inode), sizeof(struct ext4_inode_info),
			       true);
		dump_stack();
	}

	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ERROR_FS) &&
	    WARN_ON_ONCE(EXT4_I(inode)->i_reserved_data_blocks))
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}

static void ext4_shutdown(struct super_block *sb)
{
	ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
#ifdef CONFIG_FS_ENCRYPTION
	ei->i_crypt_info = NULL;
#endif
#ifdef CONFIG_FS_VERITY
	ei->i_verity_info = NULL;
#endif
}

static int __init init_inodecache(void)
{
	struct kmem_cache_args args = {
		.useroffset = offsetof(struct ext4_inode_info, i_data),
		.usersize = sizeof_field(struct ext4_inode_info, i_data),
		.use_freeptr_offset = true,
		.freeptr_offset = offsetof(struct ext4_inode_info, i_flags),
		.ctor = init_once,
	};

	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
				sizeof(struct ext4_inode_info),
				&args,
				SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT);

	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for the parent directory,
	 * so a generation of 0 means "accept any".
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);

static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.show_options	= ext4_show_options,
	.shutdown	= ext4_shutdown,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};

static const struct export_operations ext4_export_ops = {
	.encode_fh = generic_encode_ino32_fh,
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_acl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};
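
/*
 * The tables below feed the new (fs_context) mount API: each
 * fs_parameter_spec entry in ext4_param_specs[] names a mount option and the
 * Opt_* token it produces, and the constant_table arrays translate the
 * string values of enum-style options. For example, "errors=remount-ro" is
 * matched by the fsparam_enum("errors", ...) entry and looked up in
 * ext4_param_errors[], yielding EXT4_MOUNT_ERRORS_RO for ext4_parse_param()
 * to apply.
 */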

static const struct constant_table ext4_param_errors[] = {
	{"continue",	EXT4_MOUNT_ERRORS_CONT},
	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
	{}
};

static const struct constant_table ext4_param_data[] = {
	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
	{}
};

static const struct constant_table ext4_param_data_err[] = {
	{"abort",	Opt_data_err_abort},
	{"ignore",	Opt_data_err_ignore},
	{}
};

static const struct constant_table ext4_param_jqfmt[] = {
	{"vfsold",	QFMT_VFS_OLD},
	{"vfsv0",	QFMT_VFS_V0},
	{"vfsv1",	QFMT_VFS_V1},
	{}
};

static const struct constant_table ext4_param_dax[] = {
	{"always",	Opt_dax_always},
	{"inode",	Opt_dax_inode},
	{"never",	Opt_dax_never},
	{}
};

/*
 * Mount option specification
 * We don't use fsparam_flag_no because of the way we set the
 * options and the way we show them in _ext4_show_options(). To
 * keep the changes to a minimum, let's keep the negative options
 * separate for now.
 */
static const struct fs_parameter_spec ext4_param_specs[] = {
	fsparam_flag	("bsddf",		Opt_bsd_df),
	fsparam_flag	("minixdf",		Opt_minix_df),
	fsparam_flag	("grpid",		Opt_grpid),
	fsparam_flag	("bsdgroups",		Opt_grpid),
	fsparam_flag	("nogrpid",		Opt_nogrpid),
	fsparam_flag	("sysvgroups",		Opt_nogrpid),
	fsparam_gid	("resgid",		Opt_resgid),
	fsparam_uid	("resuid",		Opt_resuid),
	fsparam_u32	("sb",			Opt_sb),
	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
	fsparam_flag	("nouid32",		Opt_nouid32),
	fsparam_flag	("debug",		Opt_debug),
	fsparam_flag	("oldalloc",		Opt_removed),
	fsparam_flag	("orlov",		Opt_removed),
	fsparam_flag	("user_xattr",		Opt_user_xattr),
	fsparam_flag	("acl",			Opt_acl),
	fsparam_flag	("norecovery",		Opt_noload),
	fsparam_flag	("noload",		Opt_noload),
	fsparam_flag	("bh",			Opt_removed),
	fsparam_flag	("nobh",		Opt_removed),
	fsparam_u32	("commit",		Opt_commit),
	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
	fsparam_u32	("journal_dev",		Opt_journal_dev),
	fsparam_bdev	("journal_path",	Opt_journal_path),
	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
	fsparam_flag	("journal_async_commit",Opt_journal_async_commit),
	fsparam_flag	("abort",		Opt_abort),
	fsparam_enum	("data",		Opt_data, ext4_param_data),
	fsparam_enum	("data_err",		Opt_data_err,
						ext4_param_data_err),
	fsparam_string_empty
			("usrjquota",		Opt_usrjquota),
	fsparam_string_empty
			("grpjquota",		Opt_grpjquota),
	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
	fsparam_flag	("grpquota",		Opt_grpquota),
	fsparam_flag	("quota",		Opt_quota),
	fsparam_flag	("noquota",		Opt_noquota),
("usrquota", Opt_usrquota), 1780 fsparam_flag ("prjquota", Opt_prjquota), 1781 fsparam_flag ("barrier", Opt_barrier), 1782 fsparam_u32 ("barrier", Opt_barrier), 1783 fsparam_flag ("nobarrier", Opt_nobarrier), 1784 fsparam_flag ("i_version", Opt_removed), 1785 fsparam_flag ("dax", Opt_dax), 1786 fsparam_enum ("dax", Opt_dax_type, ext4_param_dax), 1787 fsparam_u32 ("stripe", Opt_stripe), 1788 fsparam_flag ("delalloc", Opt_delalloc), 1789 fsparam_flag ("nodelalloc", Opt_nodelalloc), 1790 fsparam_flag ("warn_on_error", Opt_warn_on_error), 1791 fsparam_flag ("nowarn_on_error", Opt_nowarn_on_error), 1792 fsparam_u32 ("debug_want_extra_isize", 1793 Opt_debug_want_extra_isize), 1794 fsparam_flag ("mblk_io_submit", Opt_removed), 1795 fsparam_flag ("nomblk_io_submit", Opt_removed), 1796 fsparam_flag ("block_validity", Opt_block_validity), 1797 fsparam_flag ("noblock_validity", Opt_noblock_validity), 1798 fsparam_u32 ("inode_readahead_blks", 1799 Opt_inode_readahead_blks), 1800 fsparam_u32 ("journal_ioprio", Opt_journal_ioprio), 1801 fsparam_u32 ("auto_da_alloc", Opt_auto_da_alloc), 1802 fsparam_flag ("auto_da_alloc", Opt_auto_da_alloc), 1803 fsparam_flag ("noauto_da_alloc", Opt_noauto_da_alloc), 1804 fsparam_flag ("dioread_nolock", Opt_dioread_nolock), 1805 fsparam_flag ("nodioread_nolock", Opt_dioread_lock), 1806 fsparam_flag ("dioread_lock", Opt_dioread_lock), 1807 fsparam_flag ("discard", Opt_discard), 1808 fsparam_flag ("nodiscard", Opt_nodiscard), 1809 fsparam_u32 ("init_itable", Opt_init_itable), 1810 fsparam_flag ("init_itable", Opt_init_itable), 1811 fsparam_flag ("noinit_itable", Opt_noinit_itable), 1812 #ifdef CONFIG_EXT4_DEBUG 1813 fsparam_flag ("fc_debug_force", Opt_fc_debug_force), 1814 fsparam_u32 ("fc_debug_max_replay", Opt_fc_debug_max_replay), 1815 #endif 1816 fsparam_u32 ("max_dir_size_kb", Opt_max_dir_size_kb), 1817 fsparam_flag ("test_dummy_encryption", 1818 Opt_test_dummy_encryption), 1819 fsparam_string ("test_dummy_encryption", 1820 Opt_test_dummy_encryption), 1821 fsparam_flag ("inlinecrypt", Opt_inlinecrypt), 1822 fsparam_flag ("nombcache", Opt_nombcache), 1823 fsparam_flag ("no_mbcache", Opt_nombcache), /* for backward compatibility */ 1824 fsparam_flag ("prefetch_block_bitmaps", 1825 Opt_removed), 1826 fsparam_flag ("no_prefetch_block_bitmaps", 1827 Opt_no_prefetch_block_bitmaps), 1828 fsparam_s32 ("mb_optimize_scan", Opt_mb_optimize_scan), 1829 fsparam_string ("check", Opt_removed), /* mount option from ext2/3 */ 1830 fsparam_flag ("nocheck", Opt_removed), /* mount option from ext2/3 */ 1831 fsparam_flag ("reservation", Opt_removed), /* mount option from ext2/3 */ 1832 fsparam_flag ("noreservation", Opt_removed), /* mount option from ext2/3 */ 1833 fsparam_u32 ("journal", Opt_removed), /* mount option from ext2/3 */ 1834 {} 1835 }; 1836 1837 1838 #define MOPT_SET 0x0001 1839 #define MOPT_CLEAR 0x0002 1840 #define MOPT_NOSUPPORT 0x0004 1841 #define MOPT_EXPLICIT 0x0008 1842 #ifdef CONFIG_QUOTA 1843 #define MOPT_Q 0 1844 #define MOPT_QFMT 0x0010 1845 #else 1846 #define MOPT_Q MOPT_NOSUPPORT 1847 #define MOPT_QFMT MOPT_NOSUPPORT 1848 #endif 1849 #define MOPT_NO_EXT2 0x0020 1850 #define MOPT_NO_EXT3 0x0040 1851 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) 1852 #define MOPT_SKIP 0x0080 1853 #define MOPT_2 0x0100 1854 1855 static const struct mount_opts { 1856 int token; 1857 int mount_opt; 1858 int flags; 1859 } ext4_mount_opts[] = { 1860 {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET}, 1861 {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR}, 1862 {Opt_grpid, 
EXT4_MOUNT_GRPID, MOPT_SET}, 1863 {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR}, 1864 {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET}, 1865 {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR}, 1866 {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, 1867 MOPT_EXT4_ONLY | MOPT_SET}, 1868 {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, 1869 MOPT_EXT4_ONLY | MOPT_CLEAR}, 1870 {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, 1871 {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, 1872 {Opt_delalloc, EXT4_MOUNT_DELALLOC, 1873 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1874 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, 1875 MOPT_EXT4_ONLY | MOPT_CLEAR}, 1876 {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, 1877 {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, 1878 {Opt_commit, 0, MOPT_NO_EXT2}, 1879 {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1880 MOPT_EXT4_ONLY | MOPT_CLEAR}, 1881 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1882 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1883 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | 1884 EXT4_MOUNT_JOURNAL_CHECKSUM), 1885 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1886 {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, 1887 {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2}, 1888 {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, 1889 {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, 1890 {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, 1891 {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, 1892 {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, 1893 {Opt_dax_type, 0, MOPT_EXT4_ONLY}, 1894 {Opt_journal_dev, 0, MOPT_NO_EXT2}, 1895 {Opt_journal_path, 0, MOPT_NO_EXT2}, 1896 {Opt_journal_ioprio, 0, MOPT_NO_EXT2}, 1897 {Opt_data, 0, MOPT_NO_EXT2}, 1898 {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, 1899 #ifdef CONFIG_EXT4_FS_POSIX_ACL 1900 {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, 1901 #else 1902 {Opt_acl, 0, MOPT_NOSUPPORT}, 1903 #endif 1904 {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, 1905 {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, 1906 {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, 1907 {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, 1908 MOPT_SET | MOPT_Q}, 1909 {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, 1910 MOPT_SET | MOPT_Q}, 1911 {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA, 1912 MOPT_SET | MOPT_Q}, 1913 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 1914 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), 1915 MOPT_CLEAR | MOPT_Q}, 1916 {Opt_usrjquota, 0, MOPT_Q}, 1917 {Opt_grpjquota, 0, MOPT_Q}, 1918 {Opt_jqfmt, 0, MOPT_QFMT}, 1919 {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, 1920 {Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS, 1921 MOPT_SET}, 1922 #ifdef CONFIG_EXT4_DEBUG 1923 {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, 1924 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY}, 1925 #endif 1926 {Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2}, 1927 {Opt_err, 0, 0} 1928 }; 1929 1930 #if IS_ENABLED(CONFIG_UNICODE) 1931 static const struct ext4_sb_encodings { 1932 __u16 magic; 1933 char *name; 1934 unsigned int version; 1935 } ext4_sb_encoding_map[] = { 1936 {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)}, 1937 }; 1938 1939 static const struct ext4_sb_encodings * 1940 ext4_sb_read_encoding(const struct ext4_super_block *es) 1941 { 1942 __u16 magic = le16_to_cpu(es->s_encoding); 1943 int i; 1944 1945 for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++) 1946 if (magic == 
ext4_sb_encoding_map[i].magic) 1947 return &ext4_sb_encoding_map[i]; 1948 1949 return NULL; 1950 } 1951 #endif 1952 1953 #define EXT4_SPEC_JQUOTA (1 << 0) 1954 #define EXT4_SPEC_JQFMT (1 << 1) 1955 #define EXT4_SPEC_DATAJ (1 << 2) 1956 #define EXT4_SPEC_SB_BLOCK (1 << 3) 1957 #define EXT4_SPEC_JOURNAL_DEV (1 << 4) 1958 #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5) 1959 #define EXT4_SPEC_s_want_extra_isize (1 << 7) 1960 #define EXT4_SPEC_s_max_batch_time (1 << 8) 1961 #define EXT4_SPEC_s_min_batch_time (1 << 9) 1962 #define EXT4_SPEC_s_inode_readahead_blks (1 << 10) 1963 #define EXT4_SPEC_s_li_wait_mult (1 << 11) 1964 #define EXT4_SPEC_s_max_dir_size_kb (1 << 12) 1965 #define EXT4_SPEC_s_stripe (1 << 13) 1966 #define EXT4_SPEC_s_resuid (1 << 14) 1967 #define EXT4_SPEC_s_resgid (1 << 15) 1968 #define EXT4_SPEC_s_commit_interval (1 << 16) 1969 #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17) 1970 #define EXT4_SPEC_s_sb_block (1 << 18) 1971 #define EXT4_SPEC_mb_optimize_scan (1 << 19) 1972 1973 struct ext4_fs_context { 1974 char *s_qf_names[EXT4_MAXQUOTAS]; 1975 struct fscrypt_dummy_policy dummy_enc_policy; 1976 int s_jquota_fmt; /* Format of quota to use */ 1977 #ifdef CONFIG_EXT4_DEBUG 1978 int s_fc_debug_max_replay; 1979 #endif 1980 unsigned short qname_spec; 1981 unsigned long vals_s_flags; /* Bits to set in s_flags */ 1982 unsigned long mask_s_flags; /* Bits changed in s_flags */ 1983 unsigned long journal_devnum; 1984 unsigned long s_commit_interval; 1985 unsigned long s_stripe; 1986 unsigned int s_inode_readahead_blks; 1987 unsigned int s_want_extra_isize; 1988 unsigned int s_li_wait_mult; 1989 unsigned int s_max_dir_size_kb; 1990 unsigned int journal_ioprio; 1991 unsigned int vals_s_mount_opt; 1992 unsigned int mask_s_mount_opt; 1993 unsigned int vals_s_mount_opt2; 1994 unsigned int mask_s_mount_opt2; 1995 unsigned int opt_flags; /* MOPT flags */ 1996 unsigned int spec; 1997 u32 s_max_batch_time; 1998 u32 s_min_batch_time; 1999 kuid_t s_resuid; 2000 kgid_t s_resgid; 2001 ext4_fsblk_t s_sb_block; 2002 }; 2003 2004 static void ext4_fc_free(struct fs_context *fc) 2005 { 2006 struct ext4_fs_context *ctx = fc->fs_private; 2007 int i; 2008 2009 if (!ctx) 2010 return; 2011 2012 for (i = 0; i < EXT4_MAXQUOTAS; i++) 2013 kfree(ctx->s_qf_names[i]); 2014 2015 fscrypt_free_dummy_policy(&ctx->dummy_enc_policy); 2016 kfree(ctx); 2017 } 2018 2019 int ext4_init_fs_context(struct fs_context *fc) 2020 { 2021 struct ext4_fs_context *ctx; 2022 2023 ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 2024 if (!ctx) 2025 return -ENOMEM; 2026 2027 fc->fs_private = ctx; 2028 fc->ops = &ext4_context_ops; 2029 2030 /* i_version is always enabled now */ 2031 fc->sb_flags |= SB_I_VERSION; 2032 2033 return 0; 2034 } 2035 2036 #ifdef CONFIG_QUOTA 2037 /* 2038 * Note the name of the specified quota file. 
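 *
 * The name must not contain a '/', i.e. the journaled quota file is
 * expected to live in the root directory of the filesystem, and naming
 * the same file again for a type that is already set is treated as a
 * no-op (see the checks below).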
2039 */ 2040 static int note_qf_name(struct fs_context *fc, int qtype, 2041 struct fs_parameter *param) 2042 { 2043 struct ext4_fs_context *ctx = fc->fs_private; 2044 char *qname; 2045 2046 if (param->size < 1) { 2047 ext4_msg(NULL, KERN_ERR, "Missing quota name"); 2048 return -EINVAL; 2049 } 2050 if (strchr(param->string, '/')) { 2051 ext4_msg(NULL, KERN_ERR, 2052 "quotafile must be on filesystem root"); 2053 return -EINVAL; 2054 } 2055 if (ctx->s_qf_names[qtype]) { 2056 if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) { 2057 ext4_msg(NULL, KERN_ERR, 2058 "%s quota file already specified", 2059 QTYPE2NAME(qtype)); 2060 return -EINVAL; 2061 } 2062 return 0; 2063 } 2064 2065 qname = kmemdup_nul(param->string, param->size, GFP_KERNEL); 2066 if (!qname) { 2067 ext4_msg(NULL, KERN_ERR, 2068 "Not enough memory for storing quotafile name"); 2069 return -ENOMEM; 2070 } 2071 ctx->s_qf_names[qtype] = qname; 2072 ctx->qname_spec |= 1 << qtype; 2073 ctx->spec |= EXT4_SPEC_JQUOTA; 2074 return 0; 2075 } 2076 2077 /* 2078 * Clear the name of the specified quota file. 2079 */ 2080 static int unnote_qf_name(struct fs_context *fc, int qtype) 2081 { 2082 struct ext4_fs_context *ctx = fc->fs_private; 2083 2084 kfree(ctx->s_qf_names[qtype]); 2085 2086 ctx->s_qf_names[qtype] = NULL; 2087 ctx->qname_spec |= 1 << qtype; 2088 ctx->spec |= EXT4_SPEC_JQUOTA; 2089 return 0; 2090 } 2091 #endif 2092 2093 static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param, 2094 struct ext4_fs_context *ctx) 2095 { 2096 int err; 2097 2098 if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { 2099 ext4_msg(NULL, KERN_WARNING, 2100 "test_dummy_encryption option not supported"); 2101 return -EINVAL; 2102 } 2103 err = fscrypt_parse_test_dummy_encryption(param, 2104 &ctx->dummy_enc_policy); 2105 if (err == -EINVAL) { 2106 ext4_msg(NULL, KERN_WARNING, 2107 "Value of option \"%s\" is unrecognized", param->key); 2108 } else if (err == -EEXIST) { 2109 ext4_msg(NULL, KERN_WARNING, 2110 "Conflicting test_dummy_encryption options"); 2111 return -EINVAL; 2112 } 2113 return err; 2114 } 2115 2116 #define EXT4_SET_CTX(name) \ 2117 static inline __maybe_unused \ 2118 void ctx_set_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 2119 { \ 2120 ctx->mask_s_##name |= flag; \ 2121 ctx->vals_s_##name |= flag; \ 2122 } 2123 2124 #define EXT4_CLEAR_CTX(name) \ 2125 static inline __maybe_unused \ 2126 void ctx_clear_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 2127 { \ 2128 ctx->mask_s_##name |= flag; \ 2129 ctx->vals_s_##name &= ~flag; \ 2130 } 2131 2132 #define EXT4_TEST_CTX(name) \ 2133 static inline unsigned long \ 2134 ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 2135 { \ 2136 return (ctx->vals_s_##name & flag); \ 2137 } 2138 2139 EXT4_SET_CTX(flags); /* set only */ 2140 EXT4_SET_CTX(mount_opt); 2141 EXT4_CLEAR_CTX(mount_opt); 2142 EXT4_TEST_CTX(mount_opt); 2143 EXT4_SET_CTX(mount_opt2); 2144 EXT4_CLEAR_CTX(mount_opt2); 2145 EXT4_TEST_CTX(mount_opt2); 2146 2147 static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) 2148 { 2149 struct ext4_fs_context *ctx = fc->fs_private; 2150 struct fs_parse_result result; 2151 const struct mount_opts *m; 2152 int is_remount; 2153 int token; 2154 2155 token = fs_parse(fc, ext4_param_specs, param, &result); 2156 if (token < 0) 2157 return token; 2158 is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 2159 2160 for (m = ext4_mount_opts; m->token != Opt_err; m++) 2161 if (token == m->token) 2162 break; 2163 2164 ctx->opt_flags |= 
m->flags; 2165 2166 if (m->flags & MOPT_EXPLICIT) { 2167 if (m->mount_opt & EXT4_MOUNT_DELALLOC) { 2168 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC); 2169 } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { 2170 ctx_set_mount_opt2(ctx, 2171 EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM); 2172 } else 2173 return -EINVAL; 2174 } 2175 2176 if (m->flags & MOPT_NOSUPPORT) { 2177 ext4_msg(NULL, KERN_ERR, "%s option not supported", 2178 param->key); 2179 return 0; 2180 } 2181 2182 switch (token) { 2183 #ifdef CONFIG_QUOTA 2184 case Opt_usrjquota: 2185 if (!*param->string) 2186 return unnote_qf_name(fc, USRQUOTA); 2187 else 2188 return note_qf_name(fc, USRQUOTA, param); 2189 case Opt_grpjquota: 2190 if (!*param->string) 2191 return unnote_qf_name(fc, GRPQUOTA); 2192 else 2193 return note_qf_name(fc, GRPQUOTA, param); 2194 #endif 2195 case Opt_sb: 2196 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 2197 ext4_msg(NULL, KERN_WARNING, 2198 "Ignoring %s option on remount", param->key); 2199 } else { 2200 ctx->s_sb_block = result.uint_32; 2201 ctx->spec |= EXT4_SPEC_s_sb_block; 2202 } 2203 return 0; 2204 case Opt_removed: 2205 ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option", 2206 param->key); 2207 return 0; 2208 case Opt_inlinecrypt: 2209 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT 2210 ctx_set_flags(ctx, SB_INLINECRYPT); 2211 #else 2212 ext4_msg(NULL, KERN_ERR, "inline encryption not supported"); 2213 #endif 2214 return 0; 2215 case Opt_errors: 2216 ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK); 2217 ctx_set_mount_opt(ctx, result.uint_32); 2218 return 0; 2219 #ifdef CONFIG_QUOTA 2220 case Opt_jqfmt: 2221 ctx->s_jquota_fmt = result.uint_32; 2222 ctx->spec |= EXT4_SPEC_JQFMT; 2223 return 0; 2224 #endif 2225 case Opt_data: 2226 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2227 ctx_set_mount_opt(ctx, result.uint_32); 2228 ctx->spec |= EXT4_SPEC_DATAJ; 2229 return 0; 2230 case Opt_commit: 2231 if (result.uint_32 == 0) 2232 result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE; 2233 else if (result.uint_32 > INT_MAX / HZ) { 2234 ext4_msg(NULL, KERN_ERR, 2235 "Invalid commit interval %d, " 2236 "must be smaller than %d", 2237 result.uint_32, INT_MAX / HZ); 2238 return -EINVAL; 2239 } 2240 ctx->s_commit_interval = HZ * result.uint_32; 2241 ctx->spec |= EXT4_SPEC_s_commit_interval; 2242 return 0; 2243 case Opt_debug_want_extra_isize: 2244 if ((result.uint_32 & 1) || (result.uint_32 < 4)) { 2245 ext4_msg(NULL, KERN_ERR, 2246 "Invalid want_extra_isize %d", result.uint_32); 2247 return -EINVAL; 2248 } 2249 ctx->s_want_extra_isize = result.uint_32; 2250 ctx->spec |= EXT4_SPEC_s_want_extra_isize; 2251 return 0; 2252 case Opt_max_batch_time: 2253 ctx->s_max_batch_time = result.uint_32; 2254 ctx->spec |= EXT4_SPEC_s_max_batch_time; 2255 return 0; 2256 case Opt_min_batch_time: 2257 ctx->s_min_batch_time = result.uint_32; 2258 ctx->spec |= EXT4_SPEC_s_min_batch_time; 2259 return 0; 2260 case Opt_inode_readahead_blks: 2261 if (result.uint_32 && 2262 (result.uint_32 > (1 << 30) || 2263 !is_power_of_2(result.uint_32))) { 2264 ext4_msg(NULL, KERN_ERR, 2265 "EXT4-fs: inode_readahead_blks must be " 2266 "0 or a power of 2 smaller than 2^31"); 2267 return -EINVAL; 2268 } 2269 ctx->s_inode_readahead_blks = result.uint_32; 2270 ctx->spec |= EXT4_SPEC_s_inode_readahead_blks; 2271 return 0; 2272 case Opt_init_itable: 2273 ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE); 2274 ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 2275 if (param->type == fs_value_is_string) 2276 ctx->s_li_wait_mult = result.uint_32; 2277 
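		/*
		 * "init_itable" given as a bare flag keeps the default wait
		 * multiplier (EXT4_DEF_LI_WAIT_MULT); "init_itable=n" arrives
		 * as a string parameter and overrides it, so the lazy init
		 * thread later waits roughly n times as long as the previous
		 * inode table took to zero (see ext4_run_li_request()).
		 */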
ctx->spec |= EXT4_SPEC_s_li_wait_mult; 2278 return 0; 2279 case Opt_max_dir_size_kb: 2280 ctx->s_max_dir_size_kb = result.uint_32; 2281 ctx->spec |= EXT4_SPEC_s_max_dir_size_kb; 2282 return 0; 2283 #ifdef CONFIG_EXT4_DEBUG 2284 case Opt_fc_debug_max_replay: 2285 ctx->s_fc_debug_max_replay = result.uint_32; 2286 ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay; 2287 return 0; 2288 #endif 2289 case Opt_stripe: 2290 ctx->s_stripe = result.uint_32; 2291 ctx->spec |= EXT4_SPEC_s_stripe; 2292 return 0; 2293 case Opt_resuid: 2294 ctx->s_resuid = result.uid; 2295 ctx->spec |= EXT4_SPEC_s_resuid; 2296 return 0; 2297 case Opt_resgid: 2298 ctx->s_resgid = result.gid; 2299 ctx->spec |= EXT4_SPEC_s_resgid; 2300 return 0; 2301 case Opt_journal_dev: 2302 if (is_remount) { 2303 ext4_msg(NULL, KERN_ERR, 2304 "Cannot specify journal on remount"); 2305 return -EINVAL; 2306 } 2307 ctx->journal_devnum = result.uint_32; 2308 ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2309 return 0; 2310 case Opt_journal_path: 2311 { 2312 struct inode *journal_inode; 2313 struct path path; 2314 int error; 2315 2316 if (is_remount) { 2317 ext4_msg(NULL, KERN_ERR, 2318 "Cannot specify journal on remount"); 2319 return -EINVAL; 2320 } 2321 2322 error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path); 2323 if (error) { 2324 ext4_msg(NULL, KERN_ERR, "error: could not find " 2325 "journal device path"); 2326 return -EINVAL; 2327 } 2328 2329 journal_inode = d_inode(path.dentry); 2330 ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev); 2331 ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2332 path_put(&path); 2333 return 0; 2334 } 2335 case Opt_journal_ioprio: 2336 if (result.uint_32 > 7) { 2337 ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority" 2338 " (must be 0-7)"); 2339 return -EINVAL; 2340 } 2341 ctx->journal_ioprio = 2342 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32); 2343 ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO; 2344 return 0; 2345 case Opt_test_dummy_encryption: 2346 return ext4_parse_test_dummy_encryption(param, ctx); 2347 case Opt_dax: 2348 case Opt_dax_type: 2349 #ifdef CONFIG_FS_DAX 2350 { 2351 int type = (token == Opt_dax) ? 
2352 Opt_dax : result.uint_32; 2353 2354 switch (type) { 2355 case Opt_dax: 2356 case Opt_dax_always: 2357 ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 2358 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 2359 break; 2360 case Opt_dax_never: 2361 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 2362 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 2363 break; 2364 case Opt_dax_inode: 2365 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 2366 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 2367 /* Strictly for printing options */ 2368 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE); 2369 break; 2370 } 2371 return 0; 2372 } 2373 #else 2374 ext4_msg(NULL, KERN_INFO, "dax option not supported"); 2375 return -EINVAL; 2376 #endif 2377 case Opt_data_err: 2378 if (result.uint_32 == Opt_data_err_abort) 2379 ctx_set_mount_opt(ctx, m->mount_opt); 2380 else if (result.uint_32 == Opt_data_err_ignore) 2381 ctx_clear_mount_opt(ctx, m->mount_opt); 2382 return 0; 2383 case Opt_mb_optimize_scan: 2384 if (result.int_32 == 1) { 2385 ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 2386 ctx->spec |= EXT4_SPEC_mb_optimize_scan; 2387 } else if (result.int_32 == 0) { 2388 ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 2389 ctx->spec |= EXT4_SPEC_mb_optimize_scan; 2390 } else { 2391 ext4_msg(NULL, KERN_WARNING, 2392 "mb_optimize_scan should be set to 0 or 1."); 2393 return -EINVAL; 2394 } 2395 return 0; 2396 } 2397 2398 /* 2399 * At this point we should only be getting options requiring MOPT_SET, 2400 * or MOPT_CLEAR. Anything else is a bug 2401 */ 2402 if (m->token == Opt_err) { 2403 ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s", 2404 param->key); 2405 WARN_ON(1); 2406 return -EINVAL; 2407 } 2408 2409 else { 2410 unsigned int set = 0; 2411 2412 if ((param->type == fs_value_is_flag) || 2413 result.uint_32 > 0) 2414 set = 1; 2415 2416 if (m->flags & MOPT_CLEAR) 2417 set = !set; 2418 else if (unlikely(!(m->flags & MOPT_SET))) { 2419 ext4_msg(NULL, KERN_WARNING, 2420 "buggy handling of option %s", 2421 param->key); 2422 WARN_ON(1); 2423 return -EINVAL; 2424 } 2425 if (m->flags & MOPT_2) { 2426 if (set != 0) 2427 ctx_set_mount_opt2(ctx, m->mount_opt); 2428 else 2429 ctx_clear_mount_opt2(ctx, m->mount_opt); 2430 } else { 2431 if (set != 0) 2432 ctx_set_mount_opt(ctx, m->mount_opt); 2433 else 2434 ctx_clear_mount_opt(ctx, m->mount_opt); 2435 } 2436 } 2437 2438 return 0; 2439 } 2440 2441 static int parse_options(struct fs_context *fc, char *options) 2442 { 2443 struct fs_parameter param; 2444 int ret; 2445 char *key; 2446 2447 if (!options) 2448 return 0; 2449 2450 while ((key = strsep(&options, ",")) != NULL) { 2451 if (*key) { 2452 size_t v_len = 0; 2453 char *value = strchr(key, '='); 2454 2455 param.type = fs_value_is_flag; 2456 param.string = NULL; 2457 2458 if (value) { 2459 if (value == key) 2460 continue; 2461 2462 *value++ = 0; 2463 v_len = strlen(value); 2464 param.string = kmemdup_nul(value, v_len, 2465 GFP_KERNEL); 2466 if (!param.string) 2467 return -ENOMEM; 2468 param.type = fs_value_is_string; 2469 } 2470 2471 param.key = key; 2472 param.size = v_len; 2473 2474 ret = ext4_parse_param(fc, ¶m); 2475 kfree(param.string); 2476 if (ret < 0) 2477 return ret; 2478 } 2479 } 2480 2481 ret = ext4_validate_options(fc); 2482 if (ret < 0) 2483 return ret; 2484 2485 return 0; 2486 } 2487 2488 static int parse_apply_sb_mount_options(struct super_block *sb, 2489 struct ext4_fs_context *m_ctx) 2490 { 2491 struct ext4_sb_info *sbi = EXT4_SB(sb); 2492 char s_mount_opts[64]; 2493 struct 
ext4_fs_context *s_ctx = NULL; 2494 struct fs_context *fc = NULL; 2495 int ret = -ENOMEM; 2496 2497 if (!sbi->s_es->s_mount_opts[0]) 2498 return 0; 2499 2500 if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0) 2501 return -E2BIG; 2502 2503 fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); 2504 if (!fc) 2505 return -ENOMEM; 2506 2507 s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 2508 if (!s_ctx) 2509 goto out_free; 2510 2511 fc->fs_private = s_ctx; 2512 fc->s_fs_info = sbi; 2513 2514 ret = parse_options(fc, s_mount_opts); 2515 if (ret < 0) 2516 goto parse_failed; 2517 2518 ret = ext4_check_opt_consistency(fc, sb); 2519 if (ret < 0) { 2520 parse_failed: 2521 ext4_msg(sb, KERN_WARNING, 2522 "failed to parse options in superblock: %s", 2523 s_mount_opts); 2524 ret = 0; 2525 goto out_free; 2526 } 2527 2528 if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV) 2529 m_ctx->journal_devnum = s_ctx->journal_devnum; 2530 if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO) 2531 m_ctx->journal_ioprio = s_ctx->journal_ioprio; 2532 2533 ext4_apply_options(fc, sb); 2534 ret = 0; 2535 2536 out_free: 2537 ext4_fc_free(fc); 2538 kfree(fc); 2539 return ret; 2540 } 2541 2542 static void ext4_apply_quota_options(struct fs_context *fc, 2543 struct super_block *sb) 2544 { 2545 #ifdef CONFIG_QUOTA 2546 bool quota_feature = ext4_has_feature_quota(sb); 2547 struct ext4_fs_context *ctx = fc->fs_private; 2548 struct ext4_sb_info *sbi = EXT4_SB(sb); 2549 char *qname; 2550 int i; 2551 2552 if (quota_feature) 2553 return; 2554 2555 if (ctx->spec & EXT4_SPEC_JQUOTA) { 2556 for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2557 if (!(ctx->qname_spec & (1 << i))) 2558 continue; 2559 2560 qname = ctx->s_qf_names[i]; /* May be NULL */ 2561 if (qname) 2562 set_opt(sb, QUOTA); 2563 ctx->s_qf_names[i] = NULL; 2564 qname = rcu_replace_pointer(sbi->s_qf_names[i], qname, 2565 lockdep_is_held(&sb->s_umount)); 2566 if (qname) 2567 kfree_rcu_mightsleep(qname); 2568 } 2569 } 2570 2571 if (ctx->spec & EXT4_SPEC_JQFMT) 2572 sbi->s_jquota_fmt = ctx->s_jquota_fmt; 2573 #endif 2574 } 2575 2576 /* 2577 * Check quota settings consistency. 2578 */ 2579 static int ext4_check_quota_consistency(struct fs_context *fc, 2580 struct super_block *sb) 2581 { 2582 #ifdef CONFIG_QUOTA 2583 struct ext4_fs_context *ctx = fc->fs_private; 2584 struct ext4_sb_info *sbi = EXT4_SB(sb); 2585 bool quota_feature = ext4_has_feature_quota(sb); 2586 bool quota_loaded = sb_any_quota_loaded(sb); 2587 bool usr_qf_name, grp_qf_name, usrquota, grpquota; 2588 int quota_flags, i; 2589 2590 /* 2591 * We do the test below only for project quotas. 'usrquota' and 2592 * 'grpquota' mount options are allowed even without quota feature 2593 * to support legacy quotas in quota files. 2594 */ 2595 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) && 2596 !ext4_has_feature_project(sb)) { 2597 ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. 
" 2598 "Cannot enable project quota enforcement."); 2599 return -EINVAL; 2600 } 2601 2602 quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 2603 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA; 2604 if (quota_loaded && 2605 ctx->mask_s_mount_opt & quota_flags && 2606 !ctx_test_mount_opt(ctx, quota_flags)) 2607 goto err_quota_change; 2608 2609 if (ctx->spec & EXT4_SPEC_JQUOTA) { 2610 2611 for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2612 if (!(ctx->qname_spec & (1 << i))) 2613 continue; 2614 2615 if (quota_loaded && 2616 !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i]) 2617 goto err_jquota_change; 2618 2619 if (sbi->s_qf_names[i] && ctx->s_qf_names[i] && 2620 strcmp(get_qf_name(sb, sbi, i), 2621 ctx->s_qf_names[i]) != 0) 2622 goto err_jquota_specified; 2623 } 2624 2625 if (quota_feature) { 2626 ext4_msg(NULL, KERN_INFO, 2627 "Journaled quota options ignored when " 2628 "QUOTA feature is enabled"); 2629 return 0; 2630 } 2631 } 2632 2633 if (ctx->spec & EXT4_SPEC_JQFMT) { 2634 if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded) 2635 goto err_jquota_change; 2636 if (quota_feature) { 2637 ext4_msg(NULL, KERN_INFO, "Quota format mount options " 2638 "ignored when QUOTA feature is enabled"); 2639 return 0; 2640 } 2641 } 2642 2643 /* Make sure we don't mix old and new quota format */ 2644 usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) || 2645 ctx->s_qf_names[USRQUOTA]); 2646 grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) || 2647 ctx->s_qf_names[GRPQUOTA]); 2648 2649 usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 2650 test_opt(sb, USRQUOTA)); 2651 2652 grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) || 2653 test_opt(sb, GRPQUOTA)); 2654 2655 if (usr_qf_name) { 2656 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2657 usrquota = false; 2658 } 2659 if (grp_qf_name) { 2660 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2661 grpquota = false; 2662 } 2663 2664 if (usr_qf_name || grp_qf_name) { 2665 if (usrquota || grpquota) { 2666 ext4_msg(NULL, KERN_ERR, "old and new quota " 2667 "format mixing"); 2668 return -EINVAL; 2669 } 2670 2671 if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) { 2672 ext4_msg(NULL, KERN_ERR, "journaled quota format " 2673 "not specified"); 2674 return -EINVAL; 2675 } 2676 } 2677 2678 return 0; 2679 2680 err_quota_change: 2681 ext4_msg(NULL, KERN_ERR, 2682 "Cannot change quota options when quota turned on"); 2683 return -EINVAL; 2684 err_jquota_change: 2685 ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota " 2686 "options when quota turned on"); 2687 return -EINVAL; 2688 err_jquota_specified: 2689 ext4_msg(NULL, KERN_ERR, "%s quota file already specified", 2690 QTYPE2NAME(i)); 2691 return -EINVAL; 2692 #else 2693 return 0; 2694 #endif 2695 } 2696 2697 static int ext4_check_test_dummy_encryption(const struct fs_context *fc, 2698 struct super_block *sb) 2699 { 2700 const struct ext4_fs_context *ctx = fc->fs_private; 2701 const struct ext4_sb_info *sbi = EXT4_SB(sb); 2702 2703 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy)) 2704 return 0; 2705 2706 if (!ext4_has_feature_encrypt(sb)) { 2707 ext4_msg(NULL, KERN_WARNING, 2708 "test_dummy_encryption requires encrypt feature"); 2709 return -EINVAL; 2710 } 2711 /* 2712 * This mount option is just for testing, and it's not worthwhile to 2713 * implement the extra complexity (e.g. RCU protection) that would be 2714 * needed to allow it to be set or changed during remount. We do allow 2715 * it to be specified during remount, but only if there is no change. 
2716 */ 2717 if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 2718 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 2719 &ctx->dummy_enc_policy)) 2720 return 0; 2721 ext4_msg(NULL, KERN_WARNING, 2722 "Can't set or change test_dummy_encryption on remount"); 2723 return -EINVAL; 2724 } 2725 /* Also make sure s_mount_opts didn't contain a conflicting value. */ 2726 if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) { 2727 if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 2728 &ctx->dummy_enc_policy)) 2729 return 0; 2730 ext4_msg(NULL, KERN_WARNING, 2731 "Conflicting test_dummy_encryption options"); 2732 return -EINVAL; 2733 } 2734 return 0; 2735 } 2736 2737 static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx, 2738 struct super_block *sb) 2739 { 2740 if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) || 2741 /* if already set, it was already verified to be the same */ 2742 fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy)) 2743 return; 2744 EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy; 2745 memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy)); 2746 ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); 2747 } 2748 2749 static int ext4_check_opt_consistency(struct fs_context *fc, 2750 struct super_block *sb) 2751 { 2752 struct ext4_fs_context *ctx = fc->fs_private; 2753 struct ext4_sb_info *sbi = fc->s_fs_info; 2754 int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 2755 int err; 2756 2757 if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { 2758 ext4_msg(NULL, KERN_ERR, 2759 "Mount option(s) incompatible with ext2"); 2760 return -EINVAL; 2761 } 2762 if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { 2763 ext4_msg(NULL, KERN_ERR, 2764 "Mount option(s) incompatible with ext3"); 2765 return -EINVAL; 2766 } 2767 2768 if (ctx->s_want_extra_isize > 2769 (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) { 2770 ext4_msg(NULL, KERN_ERR, 2771 "Invalid want_extra_isize %d", 2772 ctx->s_want_extra_isize); 2773 return -EINVAL; 2774 } 2775 2776 err = ext4_check_test_dummy_encryption(fc, sb); 2777 if (err) 2778 return err; 2779 2780 if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) { 2781 if (!sbi->s_journal) { 2782 ext4_msg(NULL, KERN_WARNING, 2783 "Remounting file system with no journal " 2784 "so ignoring journalled data option"); 2785 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2786 } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) != 2787 test_opt(sb, DATA_FLAGS)) { 2788 ext4_msg(NULL, KERN_ERR, "Cannot change data mode " 2789 "on remount"); 2790 return -EINVAL; 2791 } 2792 } 2793 2794 if (is_remount) { 2795 if (!sbi->s_journal && 2796 ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) { 2797 ext4_msg(NULL, KERN_WARNING, 2798 "Remounting fs w/o journal so ignoring data_err option"); 2799 ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT); 2800 } 2801 2802 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 2803 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 2804 ext4_msg(NULL, KERN_ERR, "can't mount with " 2805 "both data=journal and dax"); 2806 return -EINVAL; 2807 } 2808 2809 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 2810 (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 2811 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) { 2812 fail_dax_change_remount: 2813 ext4_msg(NULL, KERN_ERR, "can't change " 2814 "dax mount option while remounting"); 2815 return -EINVAL; 2816 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) && 2817 (!(sbi->s_mount_opt2 
& EXT4_MOUNT2_DAX_NEVER) || 2818 (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) { 2819 goto fail_dax_change_remount; 2820 } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) && 2821 ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 2822 (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 2823 !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) { 2824 goto fail_dax_change_remount; 2825 } 2826 } 2827 2828 return ext4_check_quota_consistency(fc, sb); 2829 } 2830 2831 static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) 2832 { 2833 struct ext4_fs_context *ctx = fc->fs_private; 2834 struct ext4_sb_info *sbi = fc->s_fs_info; 2835 2836 sbi->s_mount_opt &= ~ctx->mask_s_mount_opt; 2837 sbi->s_mount_opt |= ctx->vals_s_mount_opt; 2838 sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2; 2839 sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2; 2840 sb->s_flags &= ~ctx->mask_s_flags; 2841 sb->s_flags |= ctx->vals_s_flags; 2842 2843 #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; }) 2844 APPLY(s_commit_interval); 2845 APPLY(s_stripe); 2846 APPLY(s_max_batch_time); 2847 APPLY(s_min_batch_time); 2848 APPLY(s_want_extra_isize); 2849 APPLY(s_inode_readahead_blks); 2850 APPLY(s_max_dir_size_kb); 2851 APPLY(s_li_wait_mult); 2852 APPLY(s_resgid); 2853 APPLY(s_resuid); 2854 2855 #ifdef CONFIG_EXT4_DEBUG 2856 APPLY(s_fc_debug_max_replay); 2857 #endif 2858 2859 ext4_apply_quota_options(fc, sb); 2860 ext4_apply_test_dummy_encryption(ctx, sb); 2861 } 2862 2863 2864 static int ext4_validate_options(struct fs_context *fc) 2865 { 2866 #ifdef CONFIG_QUOTA 2867 struct ext4_fs_context *ctx = fc->fs_private; 2868 char *usr_qf_name, *grp_qf_name; 2869 2870 usr_qf_name = ctx->s_qf_names[USRQUOTA]; 2871 grp_qf_name = ctx->s_qf_names[GRPQUOTA]; 2872 2873 if (usr_qf_name || grp_qf_name) { 2874 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name) 2875 ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2876 2877 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name) 2878 ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2879 2880 if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 2881 ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) { 2882 ext4_msg(NULL, KERN_ERR, "old and new quota " 2883 "format mixing"); 2884 return -EINVAL; 2885 } 2886 } 2887 #endif 2888 return 1; 2889 } 2890 2891 static inline void ext4_show_quota_options(struct seq_file *seq, 2892 struct super_block *sb) 2893 { 2894 #if defined(CONFIG_QUOTA) 2895 struct ext4_sb_info *sbi = EXT4_SB(sb); 2896 char *usr_qf_name, *grp_qf_name; 2897 2898 if (sbi->s_jquota_fmt) { 2899 char *fmtname = ""; 2900 2901 switch (sbi->s_jquota_fmt) { 2902 case QFMT_VFS_OLD: 2903 fmtname = "vfsold"; 2904 break; 2905 case QFMT_VFS_V0: 2906 fmtname = "vfsv0"; 2907 break; 2908 case QFMT_VFS_V1: 2909 fmtname = "vfsv1"; 2910 break; 2911 } 2912 seq_printf(seq, ",jqfmt=%s", fmtname); 2913 } 2914 2915 rcu_read_lock(); 2916 usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]); 2917 grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]); 2918 if (usr_qf_name) 2919 seq_show_option(seq, "usrjquota", usr_qf_name); 2920 if (grp_qf_name) 2921 seq_show_option(seq, "grpjquota", grp_qf_name); 2922 rcu_read_unlock(); 2923 #endif 2924 } 2925 2926 static const char *token2str(int token) 2927 { 2928 const struct fs_parameter_spec *spec; 2929 2930 for (spec = ext4_param_specs; spec->name != NULL; spec++) 2931 if (spec->opt == token && !spec->type) 2932 break; 2933 return spec->name; 2934 } 2935 2936 /* 2937 * Show an option if 2938 * - it's set to a non-default value 
OR 2939 * - if the per-sb default is different from the global default 2940 */ 2941 static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, 2942 int nodefs) 2943 { 2944 struct ext4_sb_info *sbi = EXT4_SB(sb); 2945 struct ext4_super_block *es = sbi->s_es; 2946 int def_errors; 2947 const struct mount_opts *m; 2948 char sep = nodefs ? '\n' : ','; 2949 2950 #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) 2951 #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) 2952 2953 if (sbi->s_sb_block != 1) 2954 SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); 2955 2956 for (m = ext4_mount_opts; m->token != Opt_err; m++) { 2957 int want_set = m->flags & MOPT_SET; 2958 int opt_2 = m->flags & MOPT_2; 2959 unsigned int mount_opt, def_mount_opt; 2960 2961 if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || 2962 m->flags & MOPT_SKIP) 2963 continue; 2964 2965 if (opt_2) { 2966 mount_opt = sbi->s_mount_opt2; 2967 def_mount_opt = sbi->s_def_mount_opt2; 2968 } else { 2969 mount_opt = sbi->s_mount_opt; 2970 def_mount_opt = sbi->s_def_mount_opt; 2971 } 2972 /* skip if same as the default */ 2973 if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt))) 2974 continue; 2975 /* select Opt_noFoo vs Opt_Foo */ 2976 if ((want_set && 2977 (mount_opt & m->mount_opt) != m->mount_opt) || 2978 (!want_set && (mount_opt & m->mount_opt))) 2979 continue; 2980 SEQ_OPTS_PRINT("%s", token2str(m->token)); 2981 } 2982 2983 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || 2984 ext4_get_resuid(es) != EXT4_DEF_RESUID) 2985 SEQ_OPTS_PRINT("resuid=%u", 2986 from_kuid_munged(&init_user_ns, sbi->s_resuid)); 2987 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || 2988 ext4_get_resgid(es) != EXT4_DEF_RESGID) 2989 SEQ_OPTS_PRINT("resgid=%u", 2990 from_kgid_munged(&init_user_ns, sbi->s_resgid)); 2991 def_errors = nodefs ? 
-1 : le16_to_cpu(es->s_errors); 2992 if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) 2993 SEQ_OPTS_PUTS("errors=remount-ro"); 2994 if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) 2995 SEQ_OPTS_PUTS("errors=continue"); 2996 if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) 2997 SEQ_OPTS_PUTS("errors=panic"); 2998 if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) 2999 SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); 3000 if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) 3001 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 3002 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 3003 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 3004 if (nodefs && sb->s_flags & SB_I_VERSION) 3005 SEQ_OPTS_PUTS("i_version"); 3006 if (nodefs || sbi->s_stripe) 3007 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 3008 if (nodefs || EXT4_MOUNT_DATA_FLAGS & 3009 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 3010 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 3011 SEQ_OPTS_PUTS("data=journal"); 3012 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 3013 SEQ_OPTS_PUTS("data=ordered"); 3014 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 3015 SEQ_OPTS_PUTS("data=writeback"); 3016 } 3017 if (nodefs || 3018 sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) 3019 SEQ_OPTS_PRINT("inode_readahead_blks=%u", 3020 sbi->s_inode_readahead_blks); 3021 3022 if (test_opt(sb, INIT_INODE_TABLE) && (nodefs || 3023 (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) 3024 SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); 3025 if (nodefs || sbi->s_max_dir_size_kb) 3026 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); 3027 if (test_opt(sb, DATA_ERR_ABORT)) 3028 SEQ_OPTS_PUTS("data_err=abort"); 3029 3030 fscrypt_show_test_dummy_encryption(seq, sep, sb); 3031 3032 if (sb->s_flags & SB_INLINECRYPT) 3033 SEQ_OPTS_PUTS("inlinecrypt"); 3034 3035 if (test_opt(sb, DAX_ALWAYS)) { 3036 if (IS_EXT2_SB(sb)) 3037 SEQ_OPTS_PUTS("dax"); 3038 else 3039 SEQ_OPTS_PUTS("dax=always"); 3040 } else if (test_opt2(sb, DAX_NEVER)) { 3041 SEQ_OPTS_PUTS("dax=never"); 3042 } else if (test_opt2(sb, DAX_INODE)) { 3043 SEQ_OPTS_PUTS("dax=inode"); 3044 } 3045 3046 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 3047 !test_opt2(sb, MB_OPTIMIZE_SCAN)) { 3048 SEQ_OPTS_PUTS("mb_optimize_scan=0"); 3049 } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 3050 test_opt2(sb, MB_OPTIMIZE_SCAN)) { 3051 SEQ_OPTS_PUTS("mb_optimize_scan=1"); 3052 } 3053 3054 if (nodefs && !test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) 3055 SEQ_OPTS_PUTS("prefetch_block_bitmaps"); 3056 3057 if (ext4_emergency_ro(sb)) 3058 SEQ_OPTS_PUTS("emergency_ro"); 3059 3060 if (ext4_forced_shutdown(sb)) 3061 SEQ_OPTS_PUTS("shutdown"); 3062 3063 ext4_show_quota_options(seq, sb); 3064 return 0; 3065 } 3066 3067 static int ext4_show_options(struct seq_file *seq, struct dentry *root) 3068 { 3069 return _ext4_show_options(seq, root->d_sb, 0); 3070 } 3071 3072 int ext4_seq_options_show(struct seq_file *seq, void *offset) 3073 { 3074 struct super_block *sb = seq->private; 3075 int rc; 3076 3077 seq_puts(seq, sb_rdonly(sb) ? 
"ro" : "rw"); 3078 rc = _ext4_show_options(seq, sb, 1); 3079 seq_putc(seq, '\n'); 3080 return rc; 3081 } 3082 3083 static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, 3084 int read_only) 3085 { 3086 struct ext4_sb_info *sbi = EXT4_SB(sb); 3087 int err = 0; 3088 3089 if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { 3090 ext4_msg(sb, KERN_ERR, "revision level too high, " 3091 "forcing read-only mode"); 3092 err = -EROFS; 3093 goto done; 3094 } 3095 if (read_only) 3096 goto done; 3097 if (!(sbi->s_mount_state & EXT4_VALID_FS)) 3098 ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " 3099 "running e2fsck is recommended"); 3100 else if (sbi->s_mount_state & EXT4_ERROR_FS) 3101 ext4_msg(sb, KERN_WARNING, 3102 "warning: mounting fs with errors, " 3103 "running e2fsck is recommended"); 3104 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && 3105 le16_to_cpu(es->s_mnt_count) >= 3106 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) 3107 ext4_msg(sb, KERN_WARNING, 3108 "warning: maximal mount count reached, " 3109 "running e2fsck is recommended"); 3110 else if (le32_to_cpu(es->s_checkinterval) && 3111 (ext4_get_tstamp(es, s_lastcheck) + 3112 le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds())) 3113 ext4_msg(sb, KERN_WARNING, 3114 "warning: checktime reached, " 3115 "running e2fsck is recommended"); 3116 if (!sbi->s_journal) 3117 es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 3118 if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) 3119 es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); 3120 le16_add_cpu(&es->s_mnt_count, 1); 3121 ext4_update_tstamp(es, s_mtime); 3122 if (sbi->s_journal) { 3123 ext4_set_feature_journal_needs_recovery(sb); 3124 if (ext4_has_feature_orphan_file(sb)) 3125 ext4_set_feature_orphan_present(sb); 3126 } 3127 3128 err = ext4_commit_super(sb); 3129 done: 3130 if (test_opt(sb, DEBUG)) 3131 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " 3132 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", 3133 sb->s_blocksize, 3134 sbi->s_groups_count, 3135 EXT4_BLOCKS_PER_GROUP(sb), 3136 EXT4_INODES_PER_GROUP(sb), 3137 sbi->s_mount_opt, sbi->s_mount_opt2); 3138 return err; 3139 } 3140 3141 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) 3142 { 3143 struct ext4_sb_info *sbi = EXT4_SB(sb); 3144 struct flex_groups **old_groups, **new_groups; 3145 int size, i, j; 3146 3147 if (!sbi->s_log_groups_per_flex) 3148 return 0; 3149 3150 size = ext4_flex_group(sbi, ngroup - 1) + 1; 3151 if (size <= sbi->s_flex_groups_allocated) 3152 return 0; 3153 3154 new_groups = kvzalloc(roundup_pow_of_two(size * 3155 sizeof(*sbi->s_flex_groups)), GFP_KERNEL); 3156 if (!new_groups) { 3157 ext4_msg(sb, KERN_ERR, 3158 "not enough memory for %d flex group pointers", size); 3159 return -ENOMEM; 3160 } 3161 for (i = sbi->s_flex_groups_allocated; i < size; i++) { 3162 new_groups[i] = kvzalloc(roundup_pow_of_two( 3163 sizeof(struct flex_groups)), 3164 GFP_KERNEL); 3165 if (!new_groups[i]) { 3166 for (j = sbi->s_flex_groups_allocated; j < i; j++) 3167 kvfree(new_groups[j]); 3168 kvfree(new_groups); 3169 ext4_msg(sb, KERN_ERR, 3170 "not enough memory for %d flex groups", size); 3171 return -ENOMEM; 3172 } 3173 } 3174 rcu_read_lock(); 3175 old_groups = rcu_dereference(sbi->s_flex_groups); 3176 if (old_groups) 3177 memcpy(new_groups, old_groups, 3178 (sbi->s_flex_groups_allocated * 3179 sizeof(struct flex_groups *))); 3180 rcu_read_unlock(); 3181 rcu_assign_pointer(sbi->s_flex_groups, new_groups); 3182 sbi->s_flex_groups_allocated = size; 3183 
if (old_groups) 3184 ext4_kvfree_array_rcu(old_groups); 3185 return 0; 3186 } 3187 3188 static int ext4_fill_flex_info(struct super_block *sb) 3189 { 3190 struct ext4_sb_info *sbi = EXT4_SB(sb); 3191 struct ext4_group_desc *gdp = NULL; 3192 struct flex_groups *fg; 3193 ext4_group_t flex_group; 3194 int i, err; 3195 3196 sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; 3197 if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) { 3198 sbi->s_log_groups_per_flex = 0; 3199 return 1; 3200 } 3201 3202 err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); 3203 if (err) 3204 goto failed; 3205 3206 for (i = 0; i < sbi->s_groups_count; i++) { 3207 gdp = ext4_get_group_desc(sb, i, NULL); 3208 3209 flex_group = ext4_flex_group(sbi, i); 3210 fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 3211 atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes); 3212 atomic64_add(ext4_free_group_clusters(sb, gdp), 3213 &fg->free_clusters); 3214 atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs); 3215 } 3216 3217 return 1; 3218 failed: 3219 return 0; 3220 } 3221 3222 static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group, 3223 struct ext4_group_desc *gdp) 3224 { 3225 int offset = offsetof(struct ext4_group_desc, bg_checksum); 3226 __u16 crc = 0; 3227 __le32 le_group = cpu_to_le32(block_group); 3228 struct ext4_sb_info *sbi = EXT4_SB(sb); 3229 3230 if (ext4_has_feature_metadata_csum(sbi->s_sb)) { 3231 /* Use new metadata_csum algorithm */ 3232 __u32 csum32; 3233 __u16 dummy_csum = 0; 3234 3235 csum32 = ext4_chksum(sbi->s_csum_seed, (__u8 *)&le_group, 3236 sizeof(le_group)); 3237 csum32 = ext4_chksum(csum32, (__u8 *)gdp, offset); 3238 csum32 = ext4_chksum(csum32, (__u8 *)&dummy_csum, 3239 sizeof(dummy_csum)); 3240 offset += sizeof(dummy_csum); 3241 if (offset < sbi->s_desc_size) 3242 csum32 = ext4_chksum(csum32, (__u8 *)gdp + offset, 3243 sbi->s_desc_size - offset); 3244 3245 crc = csum32 & 0xFFFF; 3246 goto out; 3247 } 3248 3249 /* old crc16 code */ 3250 if (!ext4_has_feature_gdt_csum(sb)) 3251 return 0; 3252 3253 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 3254 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); 3255 crc = crc16(crc, (__u8 *)gdp, offset); 3256 offset += sizeof(gdp->bg_checksum); /* skip checksum */ 3257 /* for checksum of struct ext4_group_desc do the rest...*/ 3258 if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size) 3259 crc = crc16(crc, (__u8 *)gdp + offset, 3260 sbi->s_desc_size - offset); 3261 3262 out: 3263 return cpu_to_le16(crc); 3264 } 3265 3266 int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, 3267 struct ext4_group_desc *gdp) 3268 { 3269 if (ext4_has_group_desc_csum(sb) && 3270 (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp))) 3271 return 0; 3272 3273 return 1; 3274 } 3275 3276 void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, 3277 struct ext4_group_desc *gdp) 3278 { 3279 if (!ext4_has_group_desc_csum(sb)) 3280 return; 3281 gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp); 3282 } 3283 3284 /* Called at mount-time, super-block is locked */ 3285 static int ext4_check_descriptors(struct super_block *sb, 3286 ext4_fsblk_t sb_block, 3287 ext4_group_t *first_not_zeroed) 3288 { 3289 struct ext4_sb_info *sbi = EXT4_SB(sb); 3290 ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 3291 ext4_fsblk_t last_block; 3292 ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0); 3293 ext4_fsblk_t 
block_bitmap; 3294 ext4_fsblk_t inode_bitmap; 3295 ext4_fsblk_t inode_table; 3296 int flexbg_flag = 0; 3297 ext4_group_t i, grp = sbi->s_groups_count; 3298 3299 if (ext4_has_feature_flex_bg(sb)) 3300 flexbg_flag = 1; 3301 3302 ext4_debug("Checking group descriptors"); 3303 3304 for (i = 0; i < sbi->s_groups_count; i++) { 3305 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); 3306 3307 if (i == sbi->s_groups_count - 1 || flexbg_flag) 3308 last_block = ext4_blocks_count(sbi->s_es) - 1; 3309 else 3310 last_block = first_block + 3311 (EXT4_BLOCKS_PER_GROUP(sb) - 1); 3312 3313 if ((grp == sbi->s_groups_count) && 3314 !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3315 grp = i; 3316 3317 block_bitmap = ext4_block_bitmap(sb, gdp); 3318 if (block_bitmap == sb_block) { 3319 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3320 "Block bitmap for group %u overlaps " 3321 "superblock", i); 3322 if (!sb_rdonly(sb)) 3323 return 0; 3324 } 3325 if (block_bitmap >= sb_block + 1 && 3326 block_bitmap <= last_bg_block) { 3327 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3328 "Block bitmap for group %u overlaps " 3329 "block group descriptors", i); 3330 if (!sb_rdonly(sb)) 3331 return 0; 3332 } 3333 if (block_bitmap < first_block || block_bitmap > last_block) { 3334 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3335 "Block bitmap for group %u not in group " 3336 "(block %llu)!", i, block_bitmap); 3337 return 0; 3338 } 3339 inode_bitmap = ext4_inode_bitmap(sb, gdp); 3340 if (inode_bitmap == sb_block) { 3341 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3342 "Inode bitmap for group %u overlaps " 3343 "superblock", i); 3344 if (!sb_rdonly(sb)) 3345 return 0; 3346 } 3347 if (inode_bitmap >= sb_block + 1 && 3348 inode_bitmap <= last_bg_block) { 3349 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3350 "Inode bitmap for group %u overlaps " 3351 "block group descriptors", i); 3352 if (!sb_rdonly(sb)) 3353 return 0; 3354 } 3355 if (inode_bitmap < first_block || inode_bitmap > last_block) { 3356 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3357 "Inode bitmap for group %u not in group " 3358 "(block %llu)!", i, inode_bitmap); 3359 return 0; 3360 } 3361 inode_table = ext4_inode_table(sb, gdp); 3362 if (inode_table == sb_block) { 3363 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3364 "Inode table for group %u overlaps " 3365 "superblock", i); 3366 if (!sb_rdonly(sb)) 3367 return 0; 3368 } 3369 if (inode_table >= sb_block + 1 && 3370 inode_table <= last_bg_block) { 3371 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3372 "Inode table for group %u overlaps " 3373 "block group descriptors", i); 3374 if (!sb_rdonly(sb)) 3375 return 0; 3376 } 3377 if (inode_table < first_block || 3378 inode_table + sbi->s_itb_per_group - 1 > last_block) { 3379 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3380 "Inode table for group %u not in group " 3381 "(block %llu)!", i, inode_table); 3382 return 0; 3383 } 3384 ext4_lock_group(sb, i); 3385 if (!ext4_group_desc_csum_verify(sb, i, gdp)) { 3386 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3387 "Checksum for group %u failed (%u!=%u)", 3388 i, le16_to_cpu(ext4_group_desc_csum(sb, i, 3389 gdp)), le16_to_cpu(gdp->bg_checksum)); 3390 if (!sb_rdonly(sb)) { 3391 ext4_unlock_group(sb, i); 3392 return 0; 3393 } 3394 } 3395 ext4_unlock_group(sb, i); 3396 if (!flexbg_flag) 3397 first_block += EXT4_BLOCKS_PER_GROUP(sb); 3398 } 3399 if (NULL != first_not_zeroed) 3400 *first_not_zeroed = grp; 3401 return 1; 3402 } 3403 3404 /* 3405 * Maximal extent 
 * format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However, there is another limiting factor. We do store extents in the
 * form of starting block and length, hence the resulting length of the
 * extent covering maximum file size must fit into on-disk format
 * containers as well. Given that the length is always one unit bigger
 * than the largest covered block number (because we count block 0 as
 * well), we have to lower s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));

	if (!has_huge_files) {
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}
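/*
 * A rough worked example (illustration only, assuming 4 KiB blocks, i.e.
 * blkbits == 12): with huge files enabled, res = (2^32 - 1) << 12, i.e.
 * 16 TiB minus one block. Without huge files, i_blocks can only count
 * (2^32 - 1) 512-byte sectors, so upper_limit is ((2^32 - 1) >> 3) blocks,
 * about 2 TiB minus one block once shifted back into bytes, and res is
 * clamped down to that.
 */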
/*
 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	unsigned int ppb = 1 << (bits - 2);

	/*
	 * This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and __u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files) {
		/*
		 * !has_huge_files implies that the inode i_block field
		 * represents total file blocks in 2^32 512-byte sectors ==
		 * size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* Compute how many blocks we can address by block tree */
	res += ppb;
	res += ppb * ppb;
	res += ((loff_t)ppb) * ppb * ppb;
	/* Compute how many metadata blocks are needed */
	meta_blocks = 1;
	meta_blocks += 1 + ppb;
	meta_blocks += 1 + ppb + ppb * ppb;
	/* Does block tree limit file size? */
	if (res + meta_blocks <= upper_limit)
		goto check_lfs;

	res = upper_limit;
	/* How many metadata blocks are needed for addressing upper_limit? */
	upper_limit -= EXT4_NDIR_BLOCKS;
	/* indirect blocks */
	meta_blocks = 1;
	upper_limit -= ppb;
	/* double indirect blocks */
	if (upper_limit < ppb * ppb) {
		meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
		res -= meta_blocks;
		goto check_lfs;
	}
	meta_blocks += 1 + ppb;
	upper_limit -= ppb * ppb;
	/* triple indirect blocks for the rest */
	meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
		DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
	res -= meta_blocks;
check_lfs:
	res <<= bits;
	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
			ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via a mount option, then use the mount option
 * value. If the value specified at mount time is greater than the blocks
 * per group, fall back to the super block value. If the super block value
 * is also greater than the blocks per group, return 0. The allocator
 * needs the stripe size to be less than the blocks per group.
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
		le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
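 *
 * Roughly speaking: unknown incompat features (or the casefold feature
 * without CONFIG_UNICODE) refuse the mount outright, while the read-only
 * feature flag, unknown ro_compat features, bigalloc without extents and
 * quota/project features without kernel quota support only stand in the
 * way of a read-write mount.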
3598 */ 3599 int ext4_feature_set_ok(struct super_block *sb, int readonly) 3600 { 3601 if (ext4_has_unknown_ext4_incompat_features(sb)) { 3602 ext4_msg(sb, KERN_ERR, 3603 "Couldn't mount because of " 3604 "unsupported optional features (%x)", 3605 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & 3606 ~EXT4_FEATURE_INCOMPAT_SUPP)); 3607 return 0; 3608 } 3609 3610 if (!IS_ENABLED(CONFIG_UNICODE) && ext4_has_feature_casefold(sb)) { 3611 ext4_msg(sb, KERN_ERR, 3612 "Filesystem with casefold feature cannot be " 3613 "mounted without CONFIG_UNICODE"); 3614 return 0; 3615 } 3616 3617 if (readonly) 3618 return 1; 3619 3620 if (ext4_has_feature_readonly(sb)) { 3621 ext4_msg(sb, KERN_INFO, "filesystem is read-only"); 3622 sb->s_flags |= SB_RDONLY; 3623 return 1; 3624 } 3625 3626 /* Check that feature set is OK for a read-write mount */ 3627 if (ext4_has_unknown_ext4_ro_compat_features(sb)) { 3628 ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " 3629 "unsupported optional features (%x)", 3630 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & 3631 ~EXT4_FEATURE_RO_COMPAT_SUPP)); 3632 return 0; 3633 } 3634 if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) { 3635 ext4_msg(sb, KERN_ERR, 3636 "Can't support bigalloc feature without " 3637 "extents feature\n"); 3638 return 0; 3639 } 3640 3641 #if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2) 3642 if (!readonly && (ext4_has_feature_quota(sb) || 3643 ext4_has_feature_project(sb))) { 3644 ext4_msg(sb, KERN_ERR, 3645 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2"); 3646 return 0; 3647 } 3648 #endif /* CONFIG_QUOTA */ 3649 return 1; 3650 } 3651 3652 /* 3653 * This function is called once a day if we have errors logged 3654 * on the file system 3655 */ 3656 static void print_daily_error_info(struct timer_list *t) 3657 { 3658 struct ext4_sb_info *sbi = timer_container_of(sbi, t, s_err_report); 3659 struct super_block *sb = sbi->s_sb; 3660 struct ext4_super_block *es = sbi->s_es; 3661 3662 if (es->s_error_count) 3663 /* fsck newer than v1.41.13 is needed to clean this condition. 
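		 * Until then this timer renews itself every 24 hours and the
		 * message below is emitted, e.g. (device name and count are
		 * purely illustrative):
		 *
		 *   EXT4-fs (sda1): error count since last fsck: 3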
*/ 3664 ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", 3665 le32_to_cpu(es->s_error_count)); 3666 if (es->s_first_error_time) { 3667 printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d", 3668 sb->s_id, 3669 ext4_get_tstamp(es, s_first_error_time), 3670 (int) sizeof(es->s_first_error_func), 3671 es->s_first_error_func, 3672 le32_to_cpu(es->s_first_error_line)); 3673 if (es->s_first_error_ino) 3674 printk(KERN_CONT ": inode %u", 3675 le32_to_cpu(es->s_first_error_ino)); 3676 if (es->s_first_error_block) 3677 printk(KERN_CONT ": block %llu", (unsigned long long) 3678 le64_to_cpu(es->s_first_error_block)); 3679 printk(KERN_CONT "\n"); 3680 } 3681 if (es->s_last_error_time) { 3682 printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d", 3683 sb->s_id, 3684 ext4_get_tstamp(es, s_last_error_time), 3685 (int) sizeof(es->s_last_error_func), 3686 es->s_last_error_func, 3687 le32_to_cpu(es->s_last_error_line)); 3688 if (es->s_last_error_ino) 3689 printk(KERN_CONT ": inode %u", 3690 le32_to_cpu(es->s_last_error_ino)); 3691 if (es->s_last_error_block) 3692 printk(KERN_CONT ": block %llu", (unsigned long long) 3693 le64_to_cpu(es->s_last_error_block)); 3694 printk(KERN_CONT "\n"); 3695 } 3696 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ 3697 } 3698 3699 /* Find next suitable group and run ext4_init_inode_table */ 3700 static int ext4_run_li_request(struct ext4_li_request *elr) 3701 { 3702 struct ext4_group_desc *gdp = NULL; 3703 struct super_block *sb = elr->lr_super; 3704 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 3705 ext4_group_t group = elr->lr_next_group; 3706 unsigned int prefetch_ios = 0; 3707 int ret = 0; 3708 int nr = EXT4_SB(sb)->s_mb_prefetch; 3709 u64 start_time; 3710 3711 if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) { 3712 elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios); 3713 ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr); 3714 trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr); 3715 if (group >= elr->lr_next_group) { 3716 ret = 1; 3717 if (elr->lr_first_not_zeroed != ngroups && 3718 !ext4_emergency_state(sb) && !sb_rdonly(sb) && 3719 test_opt(sb, INIT_INODE_TABLE)) { 3720 elr->lr_next_group = elr->lr_first_not_zeroed; 3721 elr->lr_mode = EXT4_LI_MODE_ITABLE; 3722 ret = 0; 3723 } 3724 } 3725 return ret; 3726 } 3727 3728 for (; group < ngroups; group++) { 3729 gdp = ext4_get_group_desc(sb, group, NULL); 3730 if (!gdp) { 3731 ret = 1; 3732 break; 3733 } 3734 3735 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3736 break; 3737 } 3738 3739 if (group >= ngroups) 3740 ret = 1; 3741 3742 if (!ret) { 3743 start_time = ktime_get_ns(); 3744 ret = ext4_init_inode_table(sb, group, 3745 elr->lr_timeout ? 0 : 1); 3746 trace_ext4_lazy_itable_init(sb, group); 3747 if (elr->lr_timeout == 0) { 3748 elr->lr_timeout = nsecs_to_jiffies((ktime_get_ns() - start_time) * 3749 EXT4_SB(elr->lr_super)->s_li_wait_mult); 3750 } 3751 elr->lr_next_sched = jiffies + elr->lr_timeout; 3752 elr->lr_next_group = group + 1; 3753 } 3754 return ret; 3755 } 3756 3757 /* 3758 * Remove lr_request from the list_request and free the 3759 * request structure. 
Should be called with li_list_mtx held 3760 */ 3761 static void ext4_remove_li_request(struct ext4_li_request *elr) 3762 { 3763 if (!elr) 3764 return; 3765 3766 list_del(&elr->lr_request); 3767 EXT4_SB(elr->lr_super)->s_li_request = NULL; 3768 kfree(elr); 3769 } 3770 3771 static void ext4_unregister_li_request(struct super_block *sb) 3772 { 3773 mutex_lock(&ext4_li_mtx); 3774 if (!ext4_li_info) { 3775 mutex_unlock(&ext4_li_mtx); 3776 return; 3777 } 3778 3779 mutex_lock(&ext4_li_info->li_list_mtx); 3780 ext4_remove_li_request(EXT4_SB(sb)->s_li_request); 3781 mutex_unlock(&ext4_li_info->li_list_mtx); 3782 mutex_unlock(&ext4_li_mtx); 3783 } 3784 3785 static struct task_struct *ext4_lazyinit_task; 3786 3787 /* 3788 * This is the function where ext4lazyinit thread lives. It walks 3789 * through the request list searching for next scheduled filesystem. 3790 * When such a fs is found, run the lazy initialization request 3791 * (ext4_rn_li_request) and keep track of the time spend in this 3792 * function. Based on that time we compute next schedule time of 3793 * the request. When walking through the list is complete, compute 3794 * next waking time and put itself into sleep. 3795 */ 3796 static int ext4_lazyinit_thread(void *arg) 3797 { 3798 struct ext4_lazy_init *eli = arg; 3799 struct list_head *pos, *n; 3800 struct ext4_li_request *elr; 3801 unsigned long next_wakeup, cur; 3802 3803 BUG_ON(NULL == eli); 3804 set_freezable(); 3805 3806 cont_thread: 3807 while (true) { 3808 bool next_wakeup_initialized = false; 3809 3810 next_wakeup = 0; 3811 mutex_lock(&eli->li_list_mtx); 3812 if (list_empty(&eli->li_request_list)) { 3813 mutex_unlock(&eli->li_list_mtx); 3814 goto exit_thread; 3815 } 3816 list_for_each_safe(pos, n, &eli->li_request_list) { 3817 int err = 0; 3818 int progress = 0; 3819 elr = list_entry(pos, struct ext4_li_request, 3820 lr_request); 3821 3822 if (time_before(jiffies, elr->lr_next_sched)) { 3823 if (!next_wakeup_initialized || 3824 time_before(elr->lr_next_sched, next_wakeup)) { 3825 next_wakeup = elr->lr_next_sched; 3826 next_wakeup_initialized = true; 3827 } 3828 continue; 3829 } 3830 if (down_read_trylock(&elr->lr_super->s_umount)) { 3831 if (sb_start_write_trylock(elr->lr_super)) { 3832 progress = 1; 3833 /* 3834 * We hold sb->s_umount, sb can not 3835 * be removed from the list, it is 3836 * now safe to drop li_list_mtx 3837 */ 3838 mutex_unlock(&eli->li_list_mtx); 3839 err = ext4_run_li_request(elr); 3840 sb_end_write(elr->lr_super); 3841 mutex_lock(&eli->li_list_mtx); 3842 n = pos->next; 3843 } 3844 up_read((&elr->lr_super->s_umount)); 3845 } 3846 /* error, remove the lazy_init job */ 3847 if (err) { 3848 ext4_remove_li_request(elr); 3849 continue; 3850 } 3851 if (!progress) { 3852 elr->lr_next_sched = jiffies + 3853 get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ); 3854 } 3855 if (!next_wakeup_initialized || 3856 time_before(elr->lr_next_sched, next_wakeup)) { 3857 next_wakeup = elr->lr_next_sched; 3858 next_wakeup_initialized = true; 3859 } 3860 } 3861 mutex_unlock(&eli->li_list_mtx); 3862 3863 try_to_freeze(); 3864 3865 cur = jiffies; 3866 if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) { 3867 cond_resched(); 3868 continue; 3869 } 3870 3871 schedule_timeout_interruptible(next_wakeup - cur); 3872 3873 if (kthread_should_stop()) { 3874 ext4_clear_request_list(); 3875 goto exit_thread; 3876 } 3877 } 3878 3879 exit_thread: 3880 /* 3881 * It looks like the request list is empty, but we need 3882 * to check it under the li_list_mtx lock, to prevent any 
3883 * additions into it, and of course we should lock ext4_li_mtx 3884 * to atomically free the list and ext4_li_info, because at 3885 * this point another ext4 filesystem could be registering 3886 * new one. 3887 */ 3888 mutex_lock(&ext4_li_mtx); 3889 mutex_lock(&eli->li_list_mtx); 3890 if (!list_empty(&eli->li_request_list)) { 3891 mutex_unlock(&eli->li_list_mtx); 3892 mutex_unlock(&ext4_li_mtx); 3893 goto cont_thread; 3894 } 3895 mutex_unlock(&eli->li_list_mtx); 3896 kfree(ext4_li_info); 3897 ext4_li_info = NULL; 3898 mutex_unlock(&ext4_li_mtx); 3899 3900 return 0; 3901 } 3902 3903 static void ext4_clear_request_list(void) 3904 { 3905 struct list_head *pos, *n; 3906 struct ext4_li_request *elr; 3907 3908 mutex_lock(&ext4_li_info->li_list_mtx); 3909 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { 3910 elr = list_entry(pos, struct ext4_li_request, 3911 lr_request); 3912 ext4_remove_li_request(elr); 3913 } 3914 mutex_unlock(&ext4_li_info->li_list_mtx); 3915 } 3916 3917 static int ext4_run_lazyinit_thread(void) 3918 { 3919 ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, 3920 ext4_li_info, "ext4lazyinit"); 3921 if (IS_ERR(ext4_lazyinit_task)) { 3922 int err = PTR_ERR(ext4_lazyinit_task); 3923 ext4_clear_request_list(); 3924 kfree(ext4_li_info); 3925 ext4_li_info = NULL; 3926 printk(KERN_CRIT "EXT4-fs: error %d creating inode table " 3927 "initialization thread\n", 3928 err); 3929 return err; 3930 } 3931 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; 3932 return 0; 3933 } 3934 3935 /* 3936 * Check whether it make sense to run itable init. thread or not. 3937 * If there is at least one uninitialized inode table, return 3938 * corresponding group number, else the loop goes through all 3939 * groups and return total number of groups. 3940 */ 3941 static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) 3942 { 3943 ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; 3944 struct ext4_group_desc *gdp = NULL; 3945 3946 if (!ext4_has_group_desc_csum(sb)) 3947 return ngroups; 3948 3949 for (group = 0; group < ngroups; group++) { 3950 gdp = ext4_get_group_desc(sb, group, NULL); 3951 if (!gdp) 3952 continue; 3953 3954 if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3955 break; 3956 } 3957 3958 return group; 3959 } 3960 3961 static int ext4_li_info_new(void) 3962 { 3963 struct ext4_lazy_init *eli = NULL; 3964 3965 eli = kzalloc(sizeof(*eli), GFP_KERNEL); 3966 if (!eli) 3967 return -ENOMEM; 3968 3969 INIT_LIST_HEAD(&eli->li_request_list); 3970 mutex_init(&eli->li_list_mtx); 3971 3972 eli->li_state |= EXT4_LAZYINIT_QUIT; 3973 3974 ext4_li_info = eli; 3975 3976 return 0; 3977 } 3978 3979 static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, 3980 ext4_group_t start) 3981 { 3982 struct ext4_li_request *elr; 3983 3984 elr = kzalloc(sizeof(*elr), GFP_KERNEL); 3985 if (!elr) 3986 return NULL; 3987 3988 elr->lr_super = sb; 3989 elr->lr_first_not_zeroed = start; 3990 if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) { 3991 elr->lr_mode = EXT4_LI_MODE_ITABLE; 3992 elr->lr_next_group = start; 3993 } else { 3994 elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP; 3995 } 3996 3997 /* 3998 * Randomize first schedule time of the request to 3999 * spread the inode table initialization requests 4000 * better. 
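	 * Illustrative timing (the constant itself lives in ext4.h, not in
	 * this file): with a maximum start delay of a few seconds, each new
	 * request gets lr_next_sched = jiffies + random(0 ..
	 * EXT4_DEF_LI_MAX_START_DELAY * HZ), so several filesystems mounted
	 * back to back at boot do not all start zeroing inode tables at the
	 * same instant.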
4001 */ 4002 elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ); 4003 return elr; 4004 } 4005 4006 int ext4_register_li_request(struct super_block *sb, 4007 ext4_group_t first_not_zeroed) 4008 { 4009 struct ext4_sb_info *sbi = EXT4_SB(sb); 4010 struct ext4_li_request *elr = NULL; 4011 ext4_group_t ngroups = sbi->s_groups_count; 4012 int ret = 0; 4013 4014 mutex_lock(&ext4_li_mtx); 4015 if (sbi->s_li_request != NULL) { 4016 /* 4017 * Reset timeout so it can be computed again, because 4018 * s_li_wait_mult might have changed. 4019 */ 4020 sbi->s_li_request->lr_timeout = 0; 4021 goto out; 4022 } 4023 4024 if (ext4_emergency_state(sb) || sb_rdonly(sb) || 4025 (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) && 4026 (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE)))) 4027 goto out; 4028 4029 elr = ext4_li_request_new(sb, first_not_zeroed); 4030 if (!elr) { 4031 ret = -ENOMEM; 4032 goto out; 4033 } 4034 4035 if (NULL == ext4_li_info) { 4036 ret = ext4_li_info_new(); 4037 if (ret) 4038 goto out; 4039 } 4040 4041 mutex_lock(&ext4_li_info->li_list_mtx); 4042 list_add(&elr->lr_request, &ext4_li_info->li_request_list); 4043 mutex_unlock(&ext4_li_info->li_list_mtx); 4044 4045 sbi->s_li_request = elr; 4046 /* 4047 * set elr to NULL here since it has been inserted to 4048 * the request_list and the removal and free of it is 4049 * handled by ext4_clear_request_list from now on. 4050 */ 4051 elr = NULL; 4052 4053 if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { 4054 ret = ext4_run_lazyinit_thread(); 4055 if (ret) 4056 goto out; 4057 } 4058 out: 4059 mutex_unlock(&ext4_li_mtx); 4060 if (ret) 4061 kfree(elr); 4062 return ret; 4063 } 4064 4065 /* 4066 * We do not need to lock anything since this is called on 4067 * module unload. 4068 */ 4069 static void ext4_destroy_lazyinit_thread(void) 4070 { 4071 /* 4072 * If thread exited earlier 4073 * there's nothing to be done. 4074 */ 4075 if (!ext4_li_info || !ext4_lazyinit_task) 4076 return; 4077 4078 kthread_stop(ext4_lazyinit_task); 4079 } 4080 4081 static int set_journal_csum_feature_set(struct super_block *sb) 4082 { 4083 int ret = 1; 4084 int compat, incompat; 4085 struct ext4_sb_info *sbi = EXT4_SB(sb); 4086 4087 if (ext4_has_feature_metadata_csum(sb)) { 4088 /* journal checksum v3 */ 4089 compat = 0; 4090 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; 4091 } else { 4092 /* journal checksum v1 */ 4093 compat = JBD2_FEATURE_COMPAT_CHECKSUM; 4094 incompat = 0; 4095 } 4096 4097 jbd2_journal_clear_features(sbi->s_journal, 4098 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 4099 JBD2_FEATURE_INCOMPAT_CSUM_V3 | 4100 JBD2_FEATURE_INCOMPAT_CSUM_V2); 4101 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 4102 ret = jbd2_journal_set_features(sbi->s_journal, 4103 compat, 0, 4104 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | 4105 incompat); 4106 } else if (test_opt(sb, JOURNAL_CHECKSUM)) { 4107 ret = jbd2_journal_set_features(sbi->s_journal, 4108 compat, 0, 4109 incompat); 4110 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 4111 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 4112 } else { 4113 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 4114 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 4115 } 4116 4117 return ret; 4118 } 4119 4120 /* 4121 * Note: calculating the overhead so we can be compatible with 4122 * historical BSD practice is quite difficult in the face of 4123 * clusters/bigalloc. This is because multiple metadata blocks from 4124 * different block group can end up in the same allocation cluster. 
4125 * Calculating the exact overhead in the face of clustered allocation 4126 * requires either O(all block bitmaps) in memory or O(number of block 4127 * groups**2) in time. We will still calculate the superblock for 4128 * older file systems --- and if we come across with a bigalloc file 4129 * system with zero in s_overhead_clusters the estimate will be close to 4130 * correct especially for very large cluster sizes --- but for newer 4131 * file systems, it's better to calculate this figure once at mkfs 4132 * time, and store it in the superblock. If the superblock value is 4133 * present (even for non-bigalloc file systems), we will use it. 4134 */ 4135 static int count_overhead(struct super_block *sb, ext4_group_t grp, 4136 char *buf) 4137 { 4138 struct ext4_sb_info *sbi = EXT4_SB(sb); 4139 struct ext4_group_desc *gdp; 4140 ext4_fsblk_t first_block, last_block, b; 4141 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4142 int s, j, count = 0; 4143 int has_super = ext4_bg_has_super(sb, grp); 4144 4145 if (!ext4_has_feature_bigalloc(sb)) 4146 return (has_super + ext4_bg_num_gdb(sb, grp) + 4147 (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) + 4148 sbi->s_itb_per_group + 2); 4149 4150 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + 4151 (grp * EXT4_BLOCKS_PER_GROUP(sb)); 4152 last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; 4153 for (i = 0; i < ngroups; i++) { 4154 gdp = ext4_get_group_desc(sb, i, NULL); 4155 b = ext4_block_bitmap(sb, gdp); 4156 if (b >= first_block && b <= last_block) { 4157 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); 4158 count++; 4159 } 4160 b = ext4_inode_bitmap(sb, gdp); 4161 if (b >= first_block && b <= last_block) { 4162 ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); 4163 count++; 4164 } 4165 b = ext4_inode_table(sb, gdp); 4166 if (b >= first_block && b + sbi->s_itb_per_group <= last_block) 4167 for (j = 0; j < sbi->s_itb_per_group; j++, b++) { 4168 int c = EXT4_B2C(sbi, b - first_block); 4169 ext4_set_bit(c, buf); 4170 count++; 4171 } 4172 if (i != grp) 4173 continue; 4174 s = 0; 4175 if (ext4_bg_has_super(sb, grp)) { 4176 ext4_set_bit(s++, buf); 4177 count++; 4178 } 4179 j = ext4_bg_num_gdb(sb, grp); 4180 if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { 4181 ext4_error(sb, "Invalid number of block group " 4182 "descriptor blocks: %d", j); 4183 j = EXT4_BLOCKS_PER_GROUP(sb) - s; 4184 } 4185 count += j; 4186 for (; j > 0; j--) 4187 ext4_set_bit(EXT4_B2C(sbi, s++), buf); 4188 } 4189 if (!count) 4190 return 0; 4191 return EXT4_CLUSTERS_PER_GROUP(sb) - 4192 ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8); 4193 } 4194 4195 /* 4196 * Compute the overhead and stash it in sbi->s_overhead 4197 */ 4198 int ext4_calculate_overhead(struct super_block *sb) 4199 { 4200 struct ext4_sb_info *sbi = EXT4_SB(sb); 4201 struct ext4_super_block *es = sbi->s_es; 4202 struct inode *j_inode; 4203 unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum); 4204 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4205 ext4_fsblk_t overhead = 0; 4206 char *buf = kvmalloc(sb->s_blocksize, GFP_NOFS | __GFP_ZERO); 4207 4208 if (!buf) 4209 return -ENOMEM; 4210 4211 /* 4212 * Compute the overhead (FS structures). This is constant 4213 * for a given filesystem unless the number of block groups 4214 * changes so we cache the previous value until it does. 
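	 *
	 * Rough shape of the result (illustrative, for a non-bigalloc group
	 * that carries a superblock backup): count_overhead() charges one
	 * block for the superblock copy, ext4_bg_num_gdb() descriptor blocks,
	 * s_reserved_gdt_blocks, two bitmap blocks and s_itb_per_group
	 * inode-table blocks; the loop below sums that over all groups, and
	 * the blocks before s_first_data_block plus the internal journal are
	 * added on top.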
4215 */ 4216 4217 /* 4218 * All of the blocks before first_data_block are overhead 4219 */ 4220 overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block)); 4221 4222 /* 4223 * Add the overhead found in each block group 4224 */ 4225 for (i = 0; i < ngroups; i++) { 4226 int blks; 4227 4228 blks = count_overhead(sb, i, buf); 4229 overhead += blks; 4230 if (blks) 4231 memset(buf, 0, sb->s_blocksize); 4232 cond_resched(); 4233 } 4234 4235 /* 4236 * Add the internal journal blocks whether the journal has been 4237 * loaded or not 4238 */ 4239 if (sbi->s_journal && !sbi->s_journal_bdev_file) 4240 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len); 4241 else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { 4242 /* j_inum for internal journal is non-zero */ 4243 j_inode = ext4_get_journal_inode(sb, j_inum); 4244 if (!IS_ERR(j_inode)) { 4245 j_blocks = j_inode->i_size >> sb->s_blocksize_bits; 4246 overhead += EXT4_NUM_B2C(sbi, j_blocks); 4247 iput(j_inode); 4248 } else { 4249 ext4_msg(sb, KERN_ERR, "can't get journal size"); 4250 } 4251 } 4252 sbi->s_overhead = overhead; 4253 smp_wmb(); 4254 kvfree(buf); 4255 return 0; 4256 } 4257 4258 static void ext4_set_resv_clusters(struct super_block *sb) 4259 { 4260 ext4_fsblk_t resv_clusters; 4261 struct ext4_sb_info *sbi = EXT4_SB(sb); 4262 4263 /* 4264 * There's no need to reserve anything when we aren't using extents. 4265 * The space estimates are exact, there are no unwritten extents, 4266 * hole punching doesn't need new metadata... This is needed especially 4267 * to keep ext2/3 backward compatibility. 4268 */ 4269 if (!ext4_has_feature_extents(sb)) 4270 return; 4271 /* 4272 * By default we reserve 2% or 4096 clusters, whichever is smaller. 4273 * This should cover the situations where we can not afford to run 4274 * out of space like for example punch hole, or converting 4275 * unwritten extents in delalloc path. In most cases such 4276 * allocation would require 1, or 2 blocks, higher numbers are 4277 * very rare. 
4278 */ 4279 resv_clusters = (ext4_blocks_count(sbi->s_es) >> 4280 sbi->s_cluster_bits); 4281 4282 do_div(resv_clusters, 50); 4283 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); 4284 4285 atomic64_set(&sbi->s_resv_clusters, resv_clusters); 4286 } 4287 4288 static const char *ext4_quota_mode(struct super_block *sb) 4289 { 4290 #ifdef CONFIG_QUOTA 4291 if (!ext4_quota_capable(sb)) 4292 return "none"; 4293 4294 if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb)) 4295 return "journalled"; 4296 else 4297 return "writeback"; 4298 #else 4299 return "disabled"; 4300 #endif 4301 } 4302 4303 static void ext4_setup_csum_trigger(struct super_block *sb, 4304 enum ext4_journal_trigger_type type, 4305 void (*trigger)( 4306 struct jbd2_buffer_trigger_type *type, 4307 struct buffer_head *bh, 4308 void *mapped_data, 4309 size_t size)) 4310 { 4311 struct ext4_sb_info *sbi = EXT4_SB(sb); 4312 4313 sbi->s_journal_triggers[type].sb = sb; 4314 sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger; 4315 } 4316 4317 static void ext4_free_sbi(struct ext4_sb_info *sbi) 4318 { 4319 if (!sbi) 4320 return; 4321 4322 kfree(sbi->s_blockgroup_lock); 4323 fs_put_dax(sbi->s_daxdev, NULL); 4324 kfree(sbi); 4325 } 4326 4327 static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb) 4328 { 4329 struct ext4_sb_info *sbi; 4330 4331 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 4332 if (!sbi) 4333 return NULL; 4334 4335 sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off, 4336 NULL, NULL); 4337 4338 sbi->s_blockgroup_lock = 4339 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); 4340 4341 if (!sbi->s_blockgroup_lock) 4342 goto err_out; 4343 4344 sb->s_fs_info = sbi; 4345 sbi->s_sb = sb; 4346 return sbi; 4347 err_out: 4348 fs_put_dax(sbi->s_daxdev, NULL); 4349 kfree(sbi); 4350 return NULL; 4351 } 4352 4353 static void ext4_set_def_opts(struct super_block *sb, 4354 struct ext4_super_block *es) 4355 { 4356 unsigned long def_mount_opts; 4357 4358 /* Set defaults before we parse the mount options */ 4359 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 4360 set_opt(sb, INIT_INODE_TABLE); 4361 if (def_mount_opts & EXT4_DEFM_DEBUG) 4362 set_opt(sb, DEBUG); 4363 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) 4364 set_opt(sb, GRPID); 4365 if (def_mount_opts & EXT4_DEFM_UID16) 4366 set_opt(sb, NO_UID32); 4367 /* xattr user namespace & acls are now defaulted on */ 4368 set_opt(sb, XATTR_USER); 4369 #ifdef CONFIG_EXT4_FS_POSIX_ACL 4370 set_opt(sb, POSIX_ACL); 4371 #endif 4372 if (ext4_has_feature_fast_commit(sb)) 4373 set_opt2(sb, JOURNAL_FAST_COMMIT); 4374 /* don't forget to enable journal_csum when metadata_csum is enabled. 
*/ 4375 if (ext4_has_feature_metadata_csum(sb)) 4376 set_opt(sb, JOURNAL_CHECKSUM); 4377 4378 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 4379 set_opt(sb, JOURNAL_DATA); 4380 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 4381 set_opt(sb, ORDERED_DATA); 4382 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 4383 set_opt(sb, WRITEBACK_DATA); 4384 4385 if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC) 4386 set_opt(sb, ERRORS_PANIC); 4387 else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE) 4388 set_opt(sb, ERRORS_CONT); 4389 else 4390 set_opt(sb, ERRORS_RO); 4391 /* block_validity enabled by default; disable with noblock_validity */ 4392 set_opt(sb, BLOCK_VALIDITY); 4393 if (def_mount_opts & EXT4_DEFM_DISCARD) 4394 set_opt(sb, DISCARD); 4395 4396 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) 4397 set_opt(sb, BARRIER); 4398 4399 /* 4400 * enable delayed allocation by default 4401 * Use -o nodelalloc to turn it off 4402 */ 4403 if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && 4404 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 4405 set_opt(sb, DELALLOC); 4406 4407 set_opt(sb, DIOREAD_NOLOCK); 4408 } 4409 4410 static int ext4_handle_clustersize(struct super_block *sb) 4411 { 4412 struct ext4_sb_info *sbi = EXT4_SB(sb); 4413 struct ext4_super_block *es = sbi->s_es; 4414 int clustersize; 4415 4416 /* Handle clustersize */ 4417 clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); 4418 if (ext4_has_feature_bigalloc(sb)) { 4419 if (clustersize < sb->s_blocksize) { 4420 ext4_msg(sb, KERN_ERR, 4421 "cluster size (%d) smaller than " 4422 "block size (%lu)", clustersize, sb->s_blocksize); 4423 return -EINVAL; 4424 } 4425 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - 4426 le32_to_cpu(es->s_log_block_size); 4427 } else { 4428 if (clustersize != sb->s_blocksize) { 4429 ext4_msg(sb, KERN_ERR, 4430 "fragment/cluster size (%d) != " 4431 "block size (%lu)", clustersize, sb->s_blocksize); 4432 return -EINVAL; 4433 } 4434 if (sbi->s_blocks_per_group > sb->s_blocksize * 8) { 4435 ext4_msg(sb, KERN_ERR, 4436 "#blocks per group too big: %lu", 4437 sbi->s_blocks_per_group); 4438 return -EINVAL; 4439 } 4440 sbi->s_cluster_bits = 0; 4441 } 4442 sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group); 4443 if (sbi->s_clusters_per_group > sb->s_blocksize * 8) { 4444 ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu", 4445 sbi->s_clusters_per_group); 4446 return -EINVAL; 4447 } 4448 if (sbi->s_blocks_per_group != 4449 (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) { 4450 ext4_msg(sb, KERN_ERR, 4451 "blocks per group (%lu) and clusters per group (%lu) inconsistent", 4452 sbi->s_blocks_per_group, sbi->s_clusters_per_group); 4453 return -EINVAL; 4454 } 4455 sbi->s_cluster_ratio = clustersize / sb->s_blocksize; 4456 4457 /* Do we have standard group size of clustersize * 8 blocks ? */ 4458 if (sbi->s_blocks_per_group == clustersize << 3) 4459 set_opt2(sb, STD_GROUP_SIZE); 4460 4461 return 0; 4462 } 4463 4464 /* 4465 * ext4_atomic_write_init: Initializes filesystem min & max atomic write units. 4466 * With non-bigalloc filesystem awu will be based upon filesystem blocksize 4467 * & bdev awu units. 4468 * With bigalloc it will be based upon bigalloc cluster size & bdev awu units. 
4469 * @sb: super block 4470 */ 4471 static void ext4_atomic_write_init(struct super_block *sb) 4472 { 4473 struct ext4_sb_info *sbi = EXT4_SB(sb); 4474 struct block_device *bdev = sb->s_bdev; 4475 unsigned int clustersize = EXT4_CLUSTER_SIZE(sb); 4476 4477 if (!bdev_can_atomic_write(bdev)) 4478 return; 4479 4480 if (!ext4_has_feature_extents(sb)) 4481 return; 4482 4483 sbi->s_awu_min = max(sb->s_blocksize, 4484 bdev_atomic_write_unit_min_bytes(bdev)); 4485 sbi->s_awu_max = min(clustersize, 4486 bdev_atomic_write_unit_max_bytes(bdev)); 4487 if (sbi->s_awu_min && sbi->s_awu_max && 4488 sbi->s_awu_min <= sbi->s_awu_max) { 4489 ext4_msg(sb, KERN_NOTICE, "Supports (experimental) DIO atomic writes awu_min: %u, awu_max: %u", 4490 sbi->s_awu_min, sbi->s_awu_max); 4491 } else { 4492 sbi->s_awu_min = 0; 4493 sbi->s_awu_max = 0; 4494 } 4495 } 4496 4497 static void ext4_fast_commit_init(struct super_block *sb) 4498 { 4499 struct ext4_sb_info *sbi = EXT4_SB(sb); 4500 4501 /* Initialize fast commit stuff */ 4502 atomic_set(&sbi->s_fc_subtid, 0); 4503 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]); 4504 INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]); 4505 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]); 4506 INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]); 4507 sbi->s_fc_bytes = 0; 4508 ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); 4509 sbi->s_fc_ineligible_tid = 0; 4510 mutex_init(&sbi->s_fc_lock); 4511 memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); 4512 sbi->s_fc_replay_state.fc_regions = NULL; 4513 sbi->s_fc_replay_state.fc_regions_size = 0; 4514 sbi->s_fc_replay_state.fc_regions_used = 0; 4515 sbi->s_fc_replay_state.fc_regions_valid = 0; 4516 sbi->s_fc_replay_state.fc_modified_inodes = NULL; 4517 sbi->s_fc_replay_state.fc_modified_inodes_size = 0; 4518 sbi->s_fc_replay_state.fc_modified_inodes_used = 0; 4519 } 4520 4521 static int ext4_inode_info_init(struct super_block *sb, 4522 struct ext4_super_block *es) 4523 { 4524 struct ext4_sb_info *sbi = EXT4_SB(sb); 4525 4526 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { 4527 sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; 4528 sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; 4529 } else { 4530 sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 4531 sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 4532 if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { 4533 ext4_msg(sb, KERN_ERR, "invalid first ino: %u", 4534 sbi->s_first_ino); 4535 return -EINVAL; 4536 } 4537 if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 4538 (!is_power_of_2(sbi->s_inode_size)) || 4539 (sbi->s_inode_size > sb->s_blocksize)) { 4540 ext4_msg(sb, KERN_ERR, 4541 "unsupported inode size: %d", 4542 sbi->s_inode_size); 4543 ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize); 4544 return -EINVAL; 4545 } 4546 /* 4547 * i_atime_extra is the last extra field available for 4548 * [acm]times in struct ext4_inode. Checking for that 4549 * field should suffice to ensure we have extra space 4550 * for all three. 
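		 *
		 * Concretely (sizes are common defaults, not requirements of
		 * this code): a 256-byte on-disk inode passes the check below,
		 * giving the VFS 1 ns timestamp granularity and the extended
		 * post-2038 range, while the old 128-byte inode has no room
		 * for the *_extra fields, so granularity falls back to one
		 * second and s_time_max stays at the classic 2038 limit.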
4551 */ 4552 if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + 4553 sizeof(((struct ext4_inode *)0)->i_atime_extra)) { 4554 sb->s_time_gran = 1; 4555 sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; 4556 } else { 4557 sb->s_time_gran = NSEC_PER_SEC; 4558 sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; 4559 } 4560 sb->s_time_min = EXT4_TIMESTAMP_MIN; 4561 } 4562 4563 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 4564 sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 4565 EXT4_GOOD_OLD_INODE_SIZE; 4566 if (ext4_has_feature_extra_isize(sb)) { 4567 unsigned v, max = (sbi->s_inode_size - 4568 EXT4_GOOD_OLD_INODE_SIZE); 4569 4570 v = le16_to_cpu(es->s_want_extra_isize); 4571 if (v > max) { 4572 ext4_msg(sb, KERN_ERR, 4573 "bad s_want_extra_isize: %d", v); 4574 return -EINVAL; 4575 } 4576 if (sbi->s_want_extra_isize < v) 4577 sbi->s_want_extra_isize = v; 4578 4579 v = le16_to_cpu(es->s_min_extra_isize); 4580 if (v > max) { 4581 ext4_msg(sb, KERN_ERR, 4582 "bad s_min_extra_isize: %d", v); 4583 return -EINVAL; 4584 } 4585 if (sbi->s_want_extra_isize < v) 4586 sbi->s_want_extra_isize = v; 4587 } 4588 } 4589 4590 return 0; 4591 } 4592 4593 #if IS_ENABLED(CONFIG_UNICODE) 4594 static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 4595 { 4596 const struct ext4_sb_encodings *encoding_info; 4597 struct unicode_map *encoding; 4598 __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags); 4599 4600 if (!ext4_has_feature_casefold(sb) || sb->s_encoding) 4601 return 0; 4602 4603 encoding_info = ext4_sb_read_encoding(es); 4604 if (!encoding_info) { 4605 ext4_msg(sb, KERN_ERR, 4606 "Encoding requested by superblock is unknown"); 4607 return -EINVAL; 4608 } 4609 4610 encoding = utf8_load(encoding_info->version); 4611 if (IS_ERR(encoding)) { 4612 ext4_msg(sb, KERN_ERR, 4613 "can't mount with superblock charset: %s-%u.%u.%u " 4614 "not supported by the kernel. flags: 0x%x.", 4615 encoding_info->name, 4616 unicode_major(encoding_info->version), 4617 unicode_minor(encoding_info->version), 4618 unicode_rev(encoding_info->version), 4619 encoding_flags); 4620 return -EINVAL; 4621 } 4622 ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: " 4623 "%s-%u.%u.%u with flags 0x%hx", encoding_info->name, 4624 unicode_major(encoding_info->version), 4625 unicode_minor(encoding_info->version), 4626 unicode_rev(encoding_info->version), 4627 encoding_flags); 4628 4629 sb->s_encoding = encoding; 4630 sb->s_encoding_flags = encoding_flags; 4631 4632 return 0; 4633 } 4634 #else 4635 static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 4636 { 4637 return 0; 4638 } 4639 #endif 4640 4641 static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es) 4642 { 4643 struct ext4_sb_info *sbi = EXT4_SB(sb); 4644 4645 /* Warn if metadata_csum and gdt_csum are both set. 
*/ 4646 if (ext4_has_feature_metadata_csum(sb) && 4647 ext4_has_feature_gdt_csum(sb)) 4648 ext4_warning(sb, "metadata_csum and uninit_bg are " 4649 "redundant flags; please run fsck."); 4650 4651 /* Check for a known checksum algorithm */ 4652 if (!ext4_verify_csum_type(sb, es)) { 4653 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4654 "unknown checksum algorithm."); 4655 return -EINVAL; 4656 } 4657 ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, 4658 ext4_orphan_file_block_trigger); 4659 4660 /* Check superblock checksum */ 4661 if (!ext4_superblock_csum_verify(sb, es)) { 4662 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4663 "invalid superblock checksum. Run e2fsck?"); 4664 return -EFSBADCRC; 4665 } 4666 4667 /* Precompute checksum seed for all metadata */ 4668 if (ext4_has_feature_csum_seed(sb)) 4669 sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); 4670 else if (ext4_has_feature_metadata_csum(sb) || 4671 ext4_has_feature_ea_inode(sb)) 4672 sbi->s_csum_seed = ext4_chksum(~0, es->s_uuid, 4673 sizeof(es->s_uuid)); 4674 return 0; 4675 } 4676 4677 static int ext4_check_feature_compatibility(struct super_block *sb, 4678 struct ext4_super_block *es, 4679 int silent) 4680 { 4681 struct ext4_sb_info *sbi = EXT4_SB(sb); 4682 4683 if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 4684 (ext4_has_compat_features(sb) || 4685 ext4_has_ro_compat_features(sb) || 4686 ext4_has_incompat_features(sb))) 4687 ext4_msg(sb, KERN_WARNING, 4688 "feature flags set on rev 0 fs, " 4689 "running e2fsck is recommended"); 4690 4691 if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { 4692 set_opt2(sb, HURD_COMPAT); 4693 if (ext4_has_feature_64bit(sb)) { 4694 ext4_msg(sb, KERN_ERR, 4695 "The Hurd can't support 64-bit file systems"); 4696 return -EINVAL; 4697 } 4698 4699 /* 4700 * ea_inode feature uses l_i_version field which is not 4701 * available in HURD_COMPAT mode. 4702 */ 4703 if (ext4_has_feature_ea_inode(sb)) { 4704 ext4_msg(sb, KERN_ERR, 4705 "ea_inode feature is not supported for Hurd"); 4706 return -EINVAL; 4707 } 4708 } 4709 4710 if (IS_EXT2_SB(sb)) { 4711 if (ext2_feature_set_ok(sb)) 4712 ext4_msg(sb, KERN_INFO, "mounting ext2 file system " 4713 "using the ext4 subsystem"); 4714 else { 4715 /* 4716 * If we're probing be silent, if this looks like 4717 * it's actually an ext[34] filesystem. 4718 */ 4719 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4720 return -EINVAL; 4721 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " 4722 "to feature incompatibilities"); 4723 return -EINVAL; 4724 } 4725 } 4726 4727 if (IS_EXT3_SB(sb)) { 4728 if (ext3_feature_set_ok(sb)) 4729 ext4_msg(sb, KERN_INFO, "mounting ext3 file system " 4730 "using the ext4 subsystem"); 4731 else { 4732 /* 4733 * If we're probing be silent, if this looks like 4734 * it's actually an ext4 filesystem. 4735 */ 4736 if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4737 return -EINVAL; 4738 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " 4739 "to feature incompatibilities"); 4740 return -EINVAL; 4741 } 4742 } 4743 4744 /* 4745 * Check feature flags regardless of the revision level, since we 4746 * previously didn't change the revision level when setting the flags, 4747 * so there is a chance incompat flags are set on a rev 0 filesystem. 
4748 */ 4749 if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) 4750 return -EINVAL; 4751 4752 if (sbi->s_daxdev) { 4753 if (sb->s_blocksize == PAGE_SIZE) 4754 set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); 4755 else 4756 ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n"); 4757 } 4758 4759 if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { 4760 if (ext4_has_feature_inline_data(sb)) { 4761 ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" 4762 " that may contain inline data"); 4763 return -EINVAL; 4764 } 4765 if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { 4766 ext4_msg(sb, KERN_ERR, 4767 "DAX unsupported by block device."); 4768 return -EINVAL; 4769 } 4770 } 4771 4772 if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { 4773 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", 4774 es->s_encryption_level); 4775 return -EINVAL; 4776 } 4777 4778 return 0; 4779 } 4780 4781 static int ext4_check_geometry(struct super_block *sb, 4782 struct ext4_super_block *es) 4783 { 4784 struct ext4_sb_info *sbi = EXT4_SB(sb); 4785 __u64 blocks_count; 4786 int err; 4787 4788 if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) { 4789 ext4_msg(sb, KERN_ERR, 4790 "Number of reserved GDT blocks insanely large: %d", 4791 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); 4792 return -EINVAL; 4793 } 4794 /* 4795 * Test whether we have more sectors than will fit in sector_t, 4796 * and whether the max offset is addressable by the page cache. 4797 */ 4798 err = generic_check_addressable(sb->s_blocksize_bits, 4799 ext4_blocks_count(es)); 4800 if (err) { 4801 ext4_msg(sb, KERN_ERR, "filesystem" 4802 " too large to mount safely on this system"); 4803 return err; 4804 } 4805 4806 /* check blocks count against device size */ 4807 blocks_count = sb_bdev_nr_blocks(sb); 4808 if (blocks_count && ext4_blocks_count(es) > blocks_count) { 4809 ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " 4810 "exceeds size of device (%llu blocks)", 4811 ext4_blocks_count(es), blocks_count); 4812 return -EINVAL; 4813 } 4814 4815 /* 4816 * It makes no sense for the first data block to be beyond the end 4817 * of the filesystem. 
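	 * (s_first_data_block is normally 1 on 1 KiB-block filesystems and 0
	 * otherwise, so a value at or beyond ext4_blocks_count(es) can only
	 * come from a corrupted or crafted superblock; such an image is
	 * rejected here rather than trusted later.)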
4818 */ 4819 if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { 4820 ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4821 "block %u is beyond end of filesystem (%llu)", 4822 le32_to_cpu(es->s_first_data_block), 4823 ext4_blocks_count(es)); 4824 return -EINVAL; 4825 } 4826 if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && 4827 (sbi->s_cluster_ratio == 1)) { 4828 ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4829 "block is 0 with a 1k block and cluster size"); 4830 return -EINVAL; 4831 } 4832 4833 blocks_count = (ext4_blocks_count(es) - 4834 le32_to_cpu(es->s_first_data_block) + 4835 EXT4_BLOCKS_PER_GROUP(sb) - 1); 4836 do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 4837 if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { 4838 ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " 4839 "(block count %llu, first data block %u, " 4840 "blocks per group %lu)", blocks_count, 4841 ext4_blocks_count(es), 4842 le32_to_cpu(es->s_first_data_block), 4843 EXT4_BLOCKS_PER_GROUP(sb)); 4844 return -EINVAL; 4845 } 4846 sbi->s_groups_count = blocks_count; 4847 sbi->s_blockfile_groups = min(sbi->s_groups_count, 4848 (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 4849 if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != 4850 le32_to_cpu(es->s_inodes_count)) { 4851 ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", 4852 le32_to_cpu(es->s_inodes_count), 4853 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); 4854 return -EINVAL; 4855 } 4856 4857 return 0; 4858 } 4859 4860 static int ext4_group_desc_init(struct super_block *sb, 4861 struct ext4_super_block *es, 4862 ext4_fsblk_t logical_sb_block, 4863 ext4_group_t *first_not_zeroed) 4864 { 4865 struct ext4_sb_info *sbi = EXT4_SB(sb); 4866 unsigned int db_count; 4867 ext4_fsblk_t block; 4868 int i; 4869 4870 db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 4871 EXT4_DESC_PER_BLOCK(sb); 4872 if (ext4_has_feature_meta_bg(sb)) { 4873 if (le32_to_cpu(es->s_first_meta_bg) > db_count) { 4874 ext4_msg(sb, KERN_WARNING, 4875 "first meta block group too large: %u " 4876 "(group descriptor block count %u)", 4877 le32_to_cpu(es->s_first_meta_bg), db_count); 4878 return -EINVAL; 4879 } 4880 } 4881 rcu_assign_pointer(sbi->s_group_desc, 4882 kvmalloc_array(db_count, 4883 sizeof(struct buffer_head *), 4884 GFP_KERNEL)); 4885 if (sbi->s_group_desc == NULL) { 4886 ext4_msg(sb, KERN_ERR, "not enough memory"); 4887 return -ENOMEM; 4888 } 4889 4890 bgl_lock_init(sbi->s_blockgroup_lock); 4891 4892 /* Pre-read the descriptors into the buffer cache */ 4893 for (i = 0; i < db_count; i++) { 4894 block = descriptor_loc(sb, logical_sb_block, i); 4895 ext4_sb_breadahead_unmovable(sb, block); 4896 } 4897 4898 for (i = 0; i < db_count; i++) { 4899 struct buffer_head *bh; 4900 4901 block = descriptor_loc(sb, logical_sb_block, i); 4902 bh = ext4_sb_bread_unmovable(sb, block); 4903 if (IS_ERR(bh)) { 4904 ext4_msg(sb, KERN_ERR, 4905 "can't read group descriptor %d", i); 4906 sbi->s_gdb_count = i; 4907 return PTR_ERR(bh); 4908 } 4909 rcu_read_lock(); 4910 rcu_dereference(sbi->s_group_desc)[i] = bh; 4911 rcu_read_unlock(); 4912 } 4913 sbi->s_gdb_count = db_count; 4914 if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) { 4915 ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4916 return -EFSCORRUPTED; 4917 } 4918 4919 return 0; 4920 } 4921 4922 static int ext4_load_and_init_journal(struct super_block *sb, 4923 struct ext4_super_block *es, 4924 struct 
ext4_fs_context *ctx) 4925 { 4926 struct ext4_sb_info *sbi = EXT4_SB(sb); 4927 int err; 4928 4929 err = ext4_load_journal(sb, es, ctx->journal_devnum); 4930 if (err) 4931 return err; 4932 4933 if (ext4_has_feature_64bit(sb) && 4934 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 4935 JBD2_FEATURE_INCOMPAT_64BIT)) { 4936 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 4937 goto out; 4938 } 4939 4940 if (!set_journal_csum_feature_set(sb)) { 4941 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " 4942 "feature set"); 4943 goto out; 4944 } 4945 4946 if (test_opt2(sb, JOURNAL_FAST_COMMIT) && 4947 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 4948 JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) { 4949 ext4_msg(sb, KERN_ERR, 4950 "Failed to set fast commit journal feature"); 4951 goto out; 4952 } 4953 4954 /* We have now updated the journal if required, so we can 4955 * validate the data journaling mode. */ 4956 switch (test_opt(sb, DATA_FLAGS)) { 4957 case 0: 4958 /* No mode set, assume a default based on the journal 4959 * capabilities: ORDERED_DATA if the journal can 4960 * cope, else JOURNAL_DATA 4961 */ 4962 if (jbd2_journal_check_available_features 4963 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 4964 set_opt(sb, ORDERED_DATA); 4965 sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA; 4966 } else { 4967 set_opt(sb, JOURNAL_DATA); 4968 sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; 4969 } 4970 break; 4971 4972 case EXT4_MOUNT_ORDERED_DATA: 4973 case EXT4_MOUNT_WRITEBACK_DATA: 4974 if (!jbd2_journal_check_available_features 4975 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 4976 ext4_msg(sb, KERN_ERR, "Journal does not support " 4977 "requested data journaling mode"); 4978 goto out; 4979 } 4980 break; 4981 default: 4982 break; 4983 } 4984 4985 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && 4986 test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 4987 ext4_msg(sb, KERN_ERR, "can't mount with " 4988 "journal_async_commit in data=ordered mode"); 4989 goto out; 4990 } 4991 4992 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 4993 4994 sbi->s_journal->j_submit_inode_data_buffers = 4995 ext4_journal_submit_inode_data_buffers; 4996 sbi->s_journal->j_finish_inode_data_buffers = 4997 ext4_journal_finish_inode_data_buffers; 4998 4999 return 0; 5000 5001 out: 5002 ext4_journal_destroy(sbi, sbi->s_journal); 5003 return -EINVAL; 5004 } 5005 5006 static int ext4_check_journal_data_mode(struct super_block *sb) 5007 { 5008 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 5009 printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with " 5010 "data=journal disables delayed allocation, " 5011 "dioread_nolock, O_DIRECT and fast_commit support!\n"); 5012 /* can't mount with both data=journal and dioread_nolock. 
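		 * Summary of the checks that follow: dioread_nolock and fast
		 * commit are simply switched off, and an implicit delalloc
		 * default is dropped silently, but an explicit "delalloc" or a
		 * DAX mode combined with data=journal is a hard -EINVAL.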
*/ 5013 clear_opt(sb, DIOREAD_NOLOCK); 5014 clear_opt2(sb, JOURNAL_FAST_COMMIT); 5015 if (test_opt2(sb, EXPLICIT_DELALLOC)) { 5016 ext4_msg(sb, KERN_ERR, "can't mount with " 5017 "both data=journal and delalloc"); 5018 return -EINVAL; 5019 } 5020 if (test_opt(sb, DAX_ALWAYS)) { 5021 ext4_msg(sb, KERN_ERR, "can't mount with " 5022 "both data=journal and dax"); 5023 return -EINVAL; 5024 } 5025 if (ext4_has_feature_encrypt(sb)) { 5026 ext4_msg(sb, KERN_WARNING, 5027 "encrypted files will use data=ordered " 5028 "instead of data journaling mode"); 5029 } 5030 if (test_opt(sb, DELALLOC)) 5031 clear_opt(sb, DELALLOC); 5032 } else { 5033 sb->s_iflags |= SB_I_CGROUPWB; 5034 } 5035 5036 return 0; 5037 } 5038 5039 static const char *ext4_has_journal_option(struct super_block *sb) 5040 { 5041 struct ext4_sb_info *sbi = EXT4_SB(sb); 5042 5043 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) 5044 return "journal_async_commit"; 5045 if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) 5046 return "journal_checksum"; 5047 if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) 5048 return "commit="; 5049 if (EXT4_MOUNT_DATA_FLAGS & 5050 (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) 5051 return "data="; 5052 if (test_opt(sb, DATA_ERR_ABORT)) 5053 return "data_err=abort"; 5054 return NULL; 5055 } 5056 5057 /* 5058 * Limit the maximum folio order to 2048 blocks to prevent overestimation 5059 * of reserve handle credits during the folio writeback in environments 5060 * where the PAGE_SIZE exceeds 4KB. 5061 */ 5062 #define EXT4_MAX_PAGECACHE_ORDER(sb) \ 5063 umin(MAX_PAGECACHE_ORDER, (11 + (sb)->s_blocksize_bits - PAGE_SHIFT)) 5064 static void ext4_set_max_mapping_order(struct super_block *sb) 5065 { 5066 struct ext4_sb_info *sbi = EXT4_SB(sb); 5067 5068 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 5069 sbi->s_max_folio_order = sbi->s_min_folio_order; 5070 else 5071 sbi->s_max_folio_order = EXT4_MAX_PAGECACHE_ORDER(sb); 5072 } 5073 5074 static int ext4_check_large_folio(struct super_block *sb) 5075 { 5076 const char *err_str = NULL; 5077 5078 if (ext4_has_feature_encrypt(sb)) 5079 err_str = "encrypt"; 5080 5081 if (!err_str) { 5082 ext4_set_max_mapping_order(sb); 5083 } else if (sb->s_blocksize > PAGE_SIZE) { 5084 ext4_msg(sb, KERN_ERR, "bs(%lu) > ps(%lu) unsupported for %s", 5085 sb->s_blocksize, PAGE_SIZE, err_str); 5086 return -EINVAL; 5087 } 5088 5089 return 0; 5090 } 5091 5092 static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb, 5093 int silent) 5094 { 5095 struct ext4_sb_info *sbi = EXT4_SB(sb); 5096 struct ext4_super_block *es; 5097 ext4_fsblk_t logical_sb_block; 5098 unsigned long offset = 0; 5099 struct buffer_head *bh; 5100 int ret = -EINVAL; 5101 int blocksize; 5102 5103 blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); 5104 if (!blocksize) { 5105 ext4_msg(sb, KERN_ERR, "unable to set blocksize"); 5106 return -EINVAL; 5107 } 5108 5109 /* 5110 * The ext4 superblock will not be buffer aligned for other than 1kB 5111 * block sizes. We need to calculate the offset from buffer start. 
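	 *
	 * Example with the default sb block of 1 (an assumption; an explicit
	 * "sb=" mount option changes the numbers): the primary superblock
	 * lives at byte offset 1024 on disk, so on a device whose minimum
	 * block size is 4 KiB we get logical_sb_block = 0 and offset = 1024,
	 * i.e. the superblock is parsed 1 KiB into the first buffer.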
5112 */ 5113 if (blocksize != EXT4_MIN_BLOCK_SIZE) { 5114 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE; 5115 offset = do_div(logical_sb_block, blocksize); 5116 } else { 5117 logical_sb_block = sbi->s_sb_block; 5118 } 5119 5120 bh = ext4_sb_bread_unmovable(sb, logical_sb_block); 5121 if (IS_ERR(bh)) { 5122 ext4_msg(sb, KERN_ERR, "unable to read superblock"); 5123 return PTR_ERR(bh); 5124 } 5125 /* 5126 * Note: s_es must be initialized as soon as possible because 5127 * some ext4 macro-instructions depend on its value 5128 */ 5129 es = (struct ext4_super_block *) (bh->b_data + offset); 5130 sbi->s_es = es; 5131 sb->s_magic = le16_to_cpu(es->s_magic); 5132 if (sb->s_magic != EXT4_SUPER_MAGIC) { 5133 if (!silent) 5134 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 5135 goto out; 5136 } 5137 5138 if (le32_to_cpu(es->s_log_block_size) > 5139 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 5140 ext4_msg(sb, KERN_ERR, 5141 "Invalid log block size: %u", 5142 le32_to_cpu(es->s_log_block_size)); 5143 goto out; 5144 } 5145 if (le32_to_cpu(es->s_log_cluster_size) > 5146 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 5147 ext4_msg(sb, KERN_ERR, 5148 "Invalid log cluster size: %u", 5149 le32_to_cpu(es->s_log_cluster_size)); 5150 goto out; 5151 } 5152 5153 blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); 5154 5155 /* 5156 * If the default block size is not the same as the real block size, 5157 * we need to reload it. 5158 */ 5159 if (sb->s_blocksize == blocksize) 5160 goto success; 5161 5162 /* 5163 * bh must be released before kill_bdev(), otherwise 5164 * it won't be freed and its page also. kill_bdev() 5165 * is called by sb_set_blocksize(). 5166 */ 5167 brelse(bh); 5168 /* Validate the filesystem blocksize */ 5169 if (!sb_set_blocksize(sb, blocksize)) { 5170 ext4_msg(sb, KERN_ERR, "bad block size %d", 5171 blocksize); 5172 bh = NULL; 5173 goto out; 5174 } 5175 5176 logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE; 5177 offset = do_div(logical_sb_block, blocksize); 5178 bh = ext4_sb_bread_unmovable(sb, logical_sb_block); 5179 if (IS_ERR(bh)) { 5180 ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); 5181 ret = PTR_ERR(bh); 5182 bh = NULL; 5183 goto out; 5184 } 5185 es = (struct ext4_super_block *)(bh->b_data + offset); 5186 sbi->s_es = es; 5187 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { 5188 ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); 5189 goto out; 5190 } 5191 5192 success: 5193 sbi->s_min_folio_order = get_order(blocksize); 5194 *lsb = logical_sb_block; 5195 sbi->s_sbh = bh; 5196 return 0; 5197 out: 5198 brelse(bh); 5199 return ret; 5200 } 5201 5202 static int ext4_hash_info_init(struct super_block *sb) 5203 { 5204 struct ext4_sb_info *sbi = EXT4_SB(sb); 5205 struct ext4_super_block *es = sbi->s_es; 5206 unsigned int i; 5207 5208 sbi->s_def_hash_version = es->s_def_hash_version; 5209 5210 if (sbi->s_def_hash_version > DX_HASH_LAST) { 5211 ext4_msg(sb, KERN_ERR, 5212 "Invalid default hash set in the superblock"); 5213 return -EINVAL; 5214 } else if (sbi->s_def_hash_version == DX_HASH_SIPHASH) { 5215 ext4_msg(sb, KERN_ERR, 5216 "SIPHASH is not a valid default hash value"); 5217 return -EINVAL; 5218 } 5219 5220 for (i = 0; i < 4; i++) 5221 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 5222 5223 if (ext4_has_feature_dir_index(sb)) { 5224 i = le32_to_cpu(es->s_flags); 5225 if (i & EXT2_FLAGS_UNSIGNED_HASH) 5226 sbi->s_hash_unsigned = 3; 5227 else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 5228 #ifdef 
__CHAR_UNSIGNED__ 5229 if (!sb_rdonly(sb)) 5230 es->s_flags |= 5231 cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 5232 sbi->s_hash_unsigned = 3; 5233 #else 5234 if (!sb_rdonly(sb)) 5235 es->s_flags |= 5236 cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 5237 #endif 5238 } 5239 } 5240 return 0; 5241 } 5242 5243 static int ext4_block_group_meta_init(struct super_block *sb, int silent) 5244 { 5245 struct ext4_sb_info *sbi = EXT4_SB(sb); 5246 struct ext4_super_block *es = sbi->s_es; 5247 int has_huge_files; 5248 5249 has_huge_files = ext4_has_feature_huge_file(sb); 5250 sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, 5251 has_huge_files); 5252 sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); 5253 5254 sbi->s_desc_size = le16_to_cpu(es->s_desc_size); 5255 if (ext4_has_feature_64bit(sb)) { 5256 if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || 5257 sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 5258 !is_power_of_2(sbi->s_desc_size)) { 5259 ext4_msg(sb, KERN_ERR, 5260 "unsupported descriptor size %lu", 5261 sbi->s_desc_size); 5262 return -EINVAL; 5263 } 5264 } else 5265 sbi->s_desc_size = EXT4_MIN_DESC_SIZE; 5266 5267 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); 5268 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); 5269 5270 sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb); 5271 if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) { 5272 if (!silent) 5273 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 5274 return -EINVAL; 5275 } 5276 if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || 5277 sbi->s_inodes_per_group > sb->s_blocksize * 8) { 5278 ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", 5279 sbi->s_inodes_per_group); 5280 return -EINVAL; 5281 } 5282 sbi->s_itb_per_group = sbi->s_inodes_per_group / 5283 sbi->s_inodes_per_block; 5284 sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb); 5285 sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY; 5286 sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); 5287 sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); 5288 5289 return 0; 5290 } 5291 5292 /* 5293 * It's hard to get stripe aligned blocks if stripe is not aligned with 5294 * cluster, just disable stripe and alert user to simplify code and avoid 5295 * stripe aligned allocation which will rarely succeed. 
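 *
 * Example (assumed bigalloc geometry): with a cluster ratio of 16 blocks,
 * a configured stripe of 24 blocks fails the check (24 % 16 != 0), so
 * __ext4_fill_super() warns and clears s_stripe; a stripe of 32 blocks
 * would be kept.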
5296 */ 5297 static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe) 5298 { 5299 struct ext4_sb_info *sbi = EXT4_SB(sb); 5300 return (stripe > 0 && sbi->s_cluster_ratio > 1 && 5301 stripe % sbi->s_cluster_ratio != 0); 5302 } 5303 5304 static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) 5305 { 5306 struct ext4_super_block *es = NULL; 5307 struct ext4_sb_info *sbi = EXT4_SB(sb); 5308 ext4_fsblk_t logical_sb_block; 5309 struct inode *root; 5310 int needs_recovery; 5311 int err; 5312 ext4_group_t first_not_zeroed; 5313 struct ext4_fs_context *ctx = fc->fs_private; 5314 int silent = fc->sb_flags & SB_SILENT; 5315 5316 /* Set defaults for the variables that will be set during parsing */ 5317 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) 5318 ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO; 5319 5320 sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; 5321 sbi->s_sectors_written_start = 5322 part_stat_read(sb->s_bdev, sectors[STAT_WRITE]); 5323 5324 err = ext4_load_super(sb, &logical_sb_block, silent); 5325 if (err) 5326 goto out_fail; 5327 5328 es = sbi->s_es; 5329 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 5330 5331 err = ext4_init_metadata_csum(sb, es); 5332 if (err) 5333 goto failed_mount; 5334 5335 ext4_set_def_opts(sb, es); 5336 5337 sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es)); 5338 sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resuid(es)); 5339 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 5340 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 5341 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 5342 sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB; 5343 sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC; 5344 5345 /* 5346 * set default s_li_wait_mult for lazyinit, for the case there is 5347 * no mount option specified. 5348 */ 5349 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 5350 5351 err = ext4_inode_info_init(sb, es); 5352 if (err) 5353 goto failed_mount; 5354 5355 err = parse_apply_sb_mount_options(sb, ctx); 5356 if (err < 0) 5357 goto failed_mount; 5358 5359 sbi->s_def_mount_opt = sbi->s_mount_opt; 5360 sbi->s_def_mount_opt2 = sbi->s_mount_opt2; 5361 5362 err = ext4_check_opt_consistency(fc, sb); 5363 if (err < 0) 5364 goto failed_mount; 5365 5366 ext4_apply_options(fc, sb); 5367 5368 err = ext4_check_large_folio(sb); 5369 if (err < 0) 5370 goto failed_mount; 5371 5372 err = ext4_encoding_init(sb, es); 5373 if (err) 5374 goto failed_mount; 5375 5376 err = ext4_check_journal_data_mode(sb); 5377 if (err) 5378 goto failed_mount; 5379 5380 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 5381 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 5382 5383 /* HSM events are allowed by default. 
*/ 5384 sb->s_iflags |= SB_I_ALLOW_HSM; 5385 5386 err = ext4_check_feature_compatibility(sb, es, silent); 5387 if (err) 5388 goto failed_mount; 5389 5390 err = ext4_block_group_meta_init(sb, silent); 5391 if (err) 5392 goto failed_mount; 5393 5394 err = ext4_hash_info_init(sb); 5395 if (err) 5396 goto failed_mount; 5397 5398 err = ext4_handle_clustersize(sb); 5399 if (err) 5400 goto failed_mount; 5401 5402 err = ext4_check_geometry(sb, es); 5403 if (err) 5404 goto failed_mount; 5405 5406 timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 5407 spin_lock_init(&sbi->s_error_lock); 5408 INIT_WORK(&sbi->s_sb_upd_work, update_super_work); 5409 5410 err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed); 5411 if (err) 5412 goto failed_mount3; 5413 5414 err = ext4_es_register_shrinker(sbi); 5415 if (err) 5416 goto failed_mount3; 5417 5418 sbi->s_stripe = ext4_get_stripe_size(sbi); 5419 if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) { 5420 ext4_msg(sb, KERN_WARNING, 5421 "stripe (%lu) is not aligned with cluster size (%u), " 5422 "stripe is disabled", 5423 sbi->s_stripe, sbi->s_cluster_ratio); 5424 sbi->s_stripe = 0; 5425 } 5426 sbi->s_extent_max_zeroout_kb = 32; 5427 5428 /* 5429 * set up enough so that it can read an inode 5430 */ 5431 sb->s_op = &ext4_sops; 5432 sb->s_export_op = &ext4_export_ops; 5433 sb->s_xattr = ext4_xattr_handlers; 5434 #ifdef CONFIG_FS_ENCRYPTION 5435 sb->s_cop = &ext4_cryptops; 5436 #endif 5437 #ifdef CONFIG_FS_VERITY 5438 sb->s_vop = &ext4_verityops; 5439 #endif 5440 #ifdef CONFIG_QUOTA 5441 sb->dq_op = &ext4_quota_operations; 5442 if (ext4_has_feature_quota(sb)) 5443 sb->s_qcop = &dquot_quotactl_sysfile_ops; 5444 else 5445 sb->s_qcop = &ext4_qctl_operations; 5446 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 5447 #endif 5448 super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid)); 5449 super_set_sysfs_name_bdev(sb); 5450 5451 INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ 5452 mutex_init(&sbi->s_orphan_lock); 5453 5454 spin_lock_init(&sbi->s_bdev_wb_lock); 5455 5456 ext4_atomic_write_init(sb); 5457 ext4_fast_commit_init(sb); 5458 5459 sb->s_root = NULL; 5460 5461 needs_recovery = (es->s_last_orphan != 0 || 5462 ext4_has_feature_orphan_present(sb) || 5463 ext4_has_feature_journal_needs_recovery(sb)); 5464 5465 if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) { 5466 err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)); 5467 if (err) 5468 goto failed_mount3a; 5469 } 5470 5471 err = -EINVAL; 5472 /* 5473 * The first inode we look at is the journal inode. Don't try 5474 * root first: it may be modified in the journal! 
5475 */ 5476 if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { 5477 err = ext4_load_and_init_journal(sb, es, ctx); 5478 if (err) 5479 goto failed_mount3a; 5480 if (bdev_read_only(sb->s_bdev)) 5481 needs_recovery = 0; 5482 } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && 5483 ext4_has_feature_journal_needs_recovery(sb)) { 5484 ext4_msg(sb, KERN_ERR, "required journal recovery " 5485 "suppressed and not mounted read-only"); 5486 goto failed_mount3a; 5487 } else { 5488 const char *journal_option; 5489 5490 /* Nojournal mode, all journal mount options are illegal */ 5491 journal_option = ext4_has_journal_option(sb); 5492 if (journal_option != NULL) { 5493 ext4_msg(sb, KERN_ERR, 5494 "can't mount with %s, fs mounted w/o journal", 5495 journal_option); 5496 goto failed_mount3a; 5497 } 5498 5499 sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; 5500 clear_opt(sb, JOURNAL_CHECKSUM); 5501 clear_opt(sb, DATA_FLAGS); 5502 clear_opt2(sb, JOURNAL_FAST_COMMIT); 5503 sbi->s_journal = NULL; 5504 needs_recovery = 0; 5505 } 5506 5507 if (!test_opt(sb, NO_MBCACHE)) { 5508 sbi->s_ea_block_cache = ext4_xattr_create_cache(); 5509 if (!sbi->s_ea_block_cache) { 5510 ext4_msg(sb, KERN_ERR, 5511 "Failed to create ea_block_cache"); 5512 err = -EINVAL; 5513 goto failed_mount_wq; 5514 } 5515 5516 if (ext4_has_feature_ea_inode(sb)) { 5517 sbi->s_ea_inode_cache = ext4_xattr_create_cache(); 5518 if (!sbi->s_ea_inode_cache) { 5519 ext4_msg(sb, KERN_ERR, 5520 "Failed to create ea_inode_cache"); 5521 err = -EINVAL; 5522 goto failed_mount_wq; 5523 } 5524 } 5525 } 5526 5527 /* 5528 * Get the # of file system overhead blocks from the 5529 * superblock if present. 5530 */ 5531 sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); 5532 /* ignore the precalculated value if it is ridiculous */ 5533 if (sbi->s_overhead > ext4_blocks_count(es)) 5534 sbi->s_overhead = 0; 5535 /* 5536 * If the bigalloc feature is not enabled recalculating the 5537 * overhead doesn't take long, so we might as well just redo 5538 * it to make sure we are using the correct value. 5539 */ 5540 if (!ext4_has_feature_bigalloc(sb)) 5541 sbi->s_overhead = 0; 5542 if (sbi->s_overhead == 0) { 5543 err = ext4_calculate_overhead(sb); 5544 if (err) 5545 goto failed_mount_wq; 5546 } 5547 5548 /* 5549 * The maximum number of concurrent works can be high and 5550 * concurrency isn't really necessary. Limit it to 1. 5551 */ 5552 EXT4_SB(sb)->rsv_conversion_wq = 5553 alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 5554 if (!EXT4_SB(sb)->rsv_conversion_wq) { 5555 printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); 5556 err = -ENOMEM; 5557 goto failed_mount4; 5558 } 5559 5560 /* 5561 * The jbd2_journal_load will have done any necessary log recovery, 5562 * so we can safely mount the rest of the filesystem now. 
5563 */ 5564 5565 root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL); 5566 if (IS_ERR(root)) { 5567 ext4_msg(sb, KERN_ERR, "get root inode failed"); 5568 err = PTR_ERR(root); 5569 root = NULL; 5570 goto failed_mount4; 5571 } 5572 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 5573 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 5574 iput(root); 5575 err = -EFSCORRUPTED; 5576 goto failed_mount4; 5577 } 5578 5579 generic_set_sb_d_ops(sb); 5580 sb->s_root = d_make_root(root); 5581 if (!sb->s_root) { 5582 ext4_msg(sb, KERN_ERR, "get root dentry failed"); 5583 err = -ENOMEM; 5584 goto failed_mount4; 5585 } 5586 5587 err = ext4_setup_super(sb, es, sb_rdonly(sb)); 5588 if (err == -EROFS) { 5589 sb->s_flags |= SB_RDONLY; 5590 } else if (err) 5591 goto failed_mount4a; 5592 5593 ext4_set_resv_clusters(sb); 5594 5595 if (test_opt(sb, BLOCK_VALIDITY)) { 5596 err = ext4_setup_system_zone(sb); 5597 if (err) { 5598 ext4_msg(sb, KERN_ERR, "failed to initialize system " 5599 "zone (%d)", err); 5600 goto failed_mount4a; 5601 } 5602 } 5603 ext4_fc_replay_cleanup(sb); 5604 5605 ext4_ext_init(sb); 5606 5607 /* 5608 * Enable optimize_scan if number of groups is > threshold. This can be 5609 * turned off by passing "mb_optimize_scan=0". This can also be 5610 * turned on forcefully by passing "mb_optimize_scan=1". 5611 */ 5612 if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) { 5613 if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD) 5614 set_opt2(sb, MB_OPTIMIZE_SCAN); 5615 else 5616 clear_opt2(sb, MB_OPTIMIZE_SCAN); 5617 } 5618 5619 err = ext4_mb_init(sb); 5620 if (err) { 5621 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 5622 err); 5623 goto failed_mount5; 5624 } 5625 5626 /* 5627 * We can only set up the journal commit callback once 5628 * mballoc is initialized 5629 */ 5630 if (sbi->s_journal) 5631 sbi->s_journal->j_commit_callback = 5632 ext4_journal_commit_callback; 5633 5634 err = ext4_percpu_param_init(sbi); 5635 if (err) 5636 goto failed_mount6; 5637 5638 if (ext4_has_feature_flex_bg(sb)) 5639 if (!ext4_fill_flex_info(sb)) { 5640 ext4_msg(sb, KERN_ERR, 5641 "unable to initialize " 5642 "flex_bg meta info!"); 5643 err = -ENOMEM; 5644 goto failed_mount6; 5645 } 5646 5647 err = ext4_register_li_request(sb, first_not_zeroed); 5648 if (err) 5649 goto failed_mount6; 5650 5651 err = ext4_init_orphan_info(sb); 5652 if (err) 5653 goto failed_mount7; 5654 #ifdef CONFIG_QUOTA 5655 /* Enable quota usage during mount. */ 5656 if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) { 5657 err = ext4_enable_quotas(sb); 5658 if (err) 5659 goto failed_mount8; 5660 } 5661 #endif /* CONFIG_QUOTA */ 5662 5663 /* 5664 * Save the original bdev mapping's wb_err value which could be 5665 * used to detect the metadata async write error. 5666 */ 5667 errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err, 5668 &sbi->s_bdev_wb_err); 5669 EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 5670 ext4_orphan_cleanup(sb, es); 5671 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 5672 /* 5673 * Update the checksum after updating free space/inode counters and 5674 * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect 5675 * checksum in the buffer cache until it is written out and 5676 * e2fsprogs programs trying to open a file system immediately 5677 * after it is mounted can fail. 
5678 */ 5679 ext4_superblock_csum_set(sb); 5680 if (needs_recovery) { 5681 ext4_msg(sb, KERN_INFO, "recovery complete"); 5682 err = ext4_mark_recovery_complete(sb, es); 5683 if (err) 5684 goto failed_mount9; 5685 } 5686 5687 if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) { 5688 ext4_msg(sb, KERN_WARNING, 5689 "mounting with \"discard\" option, but the device does not support discard"); 5690 clear_opt(sb, DISCARD); 5691 } 5692 5693 if (es->s_error_count) 5694 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 5695 5696 /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ 5697 ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); 5698 ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); 5699 ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); 5700 atomic_set(&sbi->s_warning_count, 0); 5701 atomic_set(&sbi->s_msg_count, 0); 5702 5703 /* Register sysfs after all initializations are complete. */ 5704 err = ext4_register_sysfs(sb); 5705 if (err) 5706 goto failed_mount9; 5707 5708 return 0; 5709 5710 failed_mount9: 5711 ext4_quotas_off(sb, EXT4_MAXQUOTAS); 5712 failed_mount8: __maybe_unused 5713 ext4_release_orphan_info(sb); 5714 failed_mount7: 5715 ext4_unregister_li_request(sb); 5716 failed_mount6: 5717 ext4_mb_release(sb); 5718 ext4_flex_groups_free(sbi); 5719 ext4_percpu_param_destroy(sbi); 5720 failed_mount5: 5721 ext4_ext_release(sb); 5722 ext4_release_system_zone(sb); 5723 failed_mount4a: 5724 dput(sb->s_root); 5725 sb->s_root = NULL; 5726 failed_mount4: 5727 ext4_msg(sb, KERN_ERR, "mount failed"); 5728 if (EXT4_SB(sb)->rsv_conversion_wq) 5729 destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); 5730 failed_mount_wq: 5731 ext4_xattr_destroy_cache(sbi->s_ea_inode_cache); 5732 sbi->s_ea_inode_cache = NULL; 5733 5734 ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 5735 sbi->s_ea_block_cache = NULL; 5736 5737 if (sbi->s_journal) { 5738 ext4_journal_destroy(sbi, sbi->s_journal); 5739 } 5740 failed_mount3a: 5741 ext4_es_unregister_shrinker(sbi); 5742 failed_mount3: 5743 /* flush s_sb_upd_work before sbi destroy */ 5744 flush_work(&sbi->s_sb_upd_work); 5745 ext4_stop_mmpd(sbi); 5746 timer_delete_sync(&sbi->s_err_report); 5747 ext4_group_desc_free(sbi); 5748 failed_mount: 5749 #if IS_ENABLED(CONFIG_UNICODE) 5750 utf8_unload(sb->s_encoding); 5751 #endif 5752 5753 #ifdef CONFIG_QUOTA 5754 for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++) 5755 kfree(get_qf_name(sb, sbi, i)); 5756 #endif 5757 fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); 5758 brelse(sbi->s_sbh); 5759 if (sbi->s_journal_bdev_file) { 5760 invalidate_bdev(file_bdev(sbi->s_journal_bdev_file)); 5761 bdev_fput(sbi->s_journal_bdev_file); 5762 } 5763 out_fail: 5764 invalidate_bdev(sb->s_bdev); 5765 sb->s_fs_info = NULL; 5766 return err; 5767 } 5768 5769 static int ext4_fill_super(struct super_block *sb, struct fs_context *fc) 5770 { 5771 struct ext4_fs_context *ctx = fc->fs_private; 5772 struct ext4_sb_info *sbi; 5773 const char *descr; 5774 int ret; 5775 5776 sbi = ext4_alloc_sbi(sb); 5777 if (!sbi) 5778 return -ENOMEM; 5779 5780 fc->s_fs_info = sbi; 5781 5782 /* Cleanup superblock name */ 5783 strreplace(sb->s_id, '/', '!'); 5784 5785 sbi->s_sb_block = 1; /* Default super block location */ 5786 if (ctx->spec & EXT4_SPEC_s_sb_block) 5787 sbi->s_sb_block = ctx->s_sb_block; 5788 5789 ret = __ext4_fill_super(fc, sb); 5790 if (ret < 0) 5791 goto free_sbi; 5792 5793 if (sbi->s_journal) { 5794 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 5795 
descr = " journalled data mode"; 5796 else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 5797 descr = " ordered data mode"; 5798 else 5799 descr = " writeback data mode"; 5800 } else 5801 descr = "out journal"; 5802 5803 if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) 5804 ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. " 5805 "Quota mode: %s.", &sb->s_uuid, 5806 sb_rdonly(sb) ? "ro" : "r/w", descr, 5807 ext4_quota_mode(sb)); 5808 5809 /* Update the s_overhead_clusters if necessary */ 5810 ext4_update_overhead(sb, false); 5811 return 0; 5812 5813 free_sbi: 5814 ext4_free_sbi(sbi); 5815 fc->s_fs_info = NULL; 5816 return ret; 5817 } 5818 5819 static int ext4_get_tree(struct fs_context *fc) 5820 { 5821 return get_tree_bdev(fc, ext4_fill_super); 5822 } 5823 5824 /* 5825 * Setup any per-fs journal parameters now. We'll do this both on 5826 * initial mount, once the journal has been initialised but before we've 5827 * done any recovery; and again on any subsequent remount. 5828 */ 5829 static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) 5830 { 5831 struct ext4_sb_info *sbi = EXT4_SB(sb); 5832 5833 journal->j_commit_interval = sbi->s_commit_interval; 5834 journal->j_min_batch_time = sbi->s_min_batch_time; 5835 journal->j_max_batch_time = sbi->s_max_batch_time; 5836 ext4_fc_init(sb, journal); 5837 5838 write_lock(&journal->j_state_lock); 5839 if (test_opt(sb, BARRIER)) 5840 journal->j_flags |= JBD2_BARRIER; 5841 else 5842 journal->j_flags &= ~JBD2_BARRIER; 5843 /* 5844 * Always enable journal cycle record option, letting the journal 5845 * records log transactions continuously between each mount. 5846 */ 5847 journal->j_flags |= JBD2_CYCLE_RECORD; 5848 write_unlock(&journal->j_state_lock); 5849 } 5850 5851 static struct inode *ext4_get_journal_inode(struct super_block *sb, 5852 unsigned int journal_inum) 5853 { 5854 struct inode *journal_inode; 5855 5856 /* 5857 * Test for the existence of a valid inode on disk. Bad things 5858 * happen if we iget() an unused inode, as the subsequent iput() 5859 * will try to delete it. 5860 */ 5861 journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL); 5862 if (IS_ERR(journal_inode)) { 5863 ext4_msg(sb, KERN_ERR, "no journal found"); 5864 return ERR_CAST(journal_inode); 5865 } 5866 if (!journal_inode->i_nlink) { 5867 make_bad_inode(journal_inode); 5868 iput(journal_inode); 5869 ext4_msg(sb, KERN_ERR, "journal inode is deleted"); 5870 return ERR_PTR(-EFSCORRUPTED); 5871 } 5872 if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) { 5873 ext4_msg(sb, KERN_ERR, "invalid journal inode"); 5874 iput(journal_inode); 5875 return ERR_PTR(-EFSCORRUPTED); 5876 } 5877 5878 ext4_debug("Journal inode found at %p: %lld bytes\n", 5879 journal_inode, journal_inode->i_size); 5880 return journal_inode; 5881 } 5882 5883 static int ext4_journal_bmap(journal_t *journal, sector_t *block) 5884 { 5885 struct ext4_map_blocks map; 5886 int ret; 5887 5888 if (journal->j_inode == NULL) 5889 return 0; 5890 5891 map.m_lblk = *block; 5892 map.m_len = 1; 5893 ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0); 5894 if (ret <= 0) { 5895 ext4_msg(journal->j_inode->i_sb, KERN_CRIT, 5896 "journal bmap failed: block %llu ret %d\n", 5897 *block, ret); 5898 jbd2_journal_abort(journal, ret ? 
ret : -EFSCORRUPTED); 5899 return ret; 5900 } 5901 *block = map.m_pblk; 5902 return 0; 5903 } 5904 5905 static journal_t *ext4_open_inode_journal(struct super_block *sb, 5906 unsigned int journal_inum) 5907 { 5908 struct inode *journal_inode; 5909 journal_t *journal; 5910 5911 journal_inode = ext4_get_journal_inode(sb, journal_inum); 5912 if (IS_ERR(journal_inode)) 5913 return ERR_CAST(journal_inode); 5914 5915 journal = jbd2_journal_init_inode(journal_inode); 5916 if (IS_ERR(journal)) { 5917 ext4_msg(sb, KERN_ERR, "Could not load journal inode"); 5918 iput(journal_inode); 5919 return ERR_CAST(journal); 5920 } 5921 journal->j_private = sb; 5922 journal->j_bmap = ext4_journal_bmap; 5923 ext4_init_journal_params(sb, journal); 5924 return journal; 5925 } 5926 5927 static struct file *ext4_get_journal_blkdev(struct super_block *sb, 5928 dev_t j_dev, ext4_fsblk_t *j_start, 5929 ext4_fsblk_t *j_len) 5930 { 5931 struct buffer_head *bh; 5932 struct block_device *bdev; 5933 struct file *bdev_file; 5934 int hblock, blocksize; 5935 ext4_fsblk_t sb_block; 5936 unsigned long offset; 5937 struct ext4_super_block *es; 5938 int errno; 5939 5940 bdev_file = bdev_file_open_by_dev(j_dev, 5941 BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES, 5942 sb, &fs_holder_ops); 5943 if (IS_ERR(bdev_file)) { 5944 ext4_msg(sb, KERN_ERR, 5945 "failed to open journal device unknown-block(%u,%u) %ld", 5946 MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file)); 5947 return bdev_file; 5948 } 5949 5950 bdev = file_bdev(bdev_file); 5951 blocksize = sb->s_blocksize; 5952 hblock = bdev_logical_block_size(bdev); 5953 if (blocksize < hblock) { 5954 ext4_msg(sb, KERN_ERR, 5955 "blocksize too small for journal device"); 5956 errno = -EINVAL; 5957 goto out_bdev; 5958 } 5959 5960 sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; 5961 offset = EXT4_MIN_BLOCK_SIZE % blocksize; 5962 set_blocksize(bdev_file, blocksize); 5963 bh = __bread(bdev, sb_block, blocksize); 5964 if (!bh) { 5965 ext4_msg(sb, KERN_ERR, "couldn't read superblock of " 5966 "external journal"); 5967 errno = -EINVAL; 5968 goto out_bdev; 5969 } 5970 5971 es = (struct ext4_super_block *) (bh->b_data + offset); 5972 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || 5973 !(le32_to_cpu(es->s_feature_incompat) & 5974 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { 5975 ext4_msg(sb, KERN_ERR, "external journal has bad superblock"); 5976 errno = -EFSCORRUPTED; 5977 goto out_bh; 5978 } 5979 5980 if ((le32_to_cpu(es->s_feature_ro_compat) & 5981 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && 5982 es->s_checksum != ext4_superblock_csum(es)) { 5983 ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock"); 5984 errno = -EFSCORRUPTED; 5985 goto out_bh; 5986 } 5987 5988 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { 5989 ext4_msg(sb, KERN_ERR, "journal UUID does not match"); 5990 errno = -EFSCORRUPTED; 5991 goto out_bh; 5992 } 5993 5994 *j_start = sb_block + 1; 5995 *j_len = ext4_blocks_count(es); 5996 brelse(bh); 5997 return bdev_file; 5998 5999 out_bh: 6000 brelse(bh); 6001 out_bdev: 6002 bdev_fput(bdev_file); 6003 return ERR_PTR(errno); 6004 } 6005 6006 static journal_t *ext4_open_dev_journal(struct super_block *sb, 6007 dev_t j_dev) 6008 { 6009 journal_t *journal; 6010 ext4_fsblk_t j_start; 6011 ext4_fsblk_t j_len; 6012 struct file *bdev_file; 6013 int errno = 0; 6014 6015 bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len); 6016 if (IS_ERR(bdev_file)) 6017 return ERR_CAST(bdev_file); 6018 6019 journal = jbd2_journal_init_dev(file_bdev(bdev_file), 
sb->s_bdev, j_start, 6020 j_len, sb->s_blocksize); 6021 if (IS_ERR(journal)) { 6022 ext4_msg(sb, KERN_ERR, "failed to create device journal"); 6023 errno = PTR_ERR(journal); 6024 goto out_bdev; 6025 } 6026 if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { 6027 ext4_msg(sb, KERN_ERR, "External journal has more than one " 6028 "user (unsupported) - %d", 6029 be32_to_cpu(journal->j_superblock->s_nr_users)); 6030 errno = -EINVAL; 6031 goto out_journal; 6032 } 6033 journal->j_private = sb; 6034 EXT4_SB(sb)->s_journal_bdev_file = bdev_file; 6035 ext4_init_journal_params(sb, journal); 6036 return journal; 6037 6038 out_journal: 6039 ext4_journal_destroy(EXT4_SB(sb), journal); 6040 out_bdev: 6041 bdev_fput(bdev_file); 6042 return ERR_PTR(errno); 6043 } 6044 6045 static int ext4_load_journal(struct super_block *sb, 6046 struct ext4_super_block *es, 6047 unsigned long journal_devnum) 6048 { 6049 journal_t *journal; 6050 unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); 6051 dev_t journal_dev; 6052 int err = 0; 6053 int really_read_only; 6054 int journal_dev_ro; 6055 6056 if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 6057 return -EFSCORRUPTED; 6058 6059 if (journal_devnum && 6060 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 6061 ext4_msg(sb, KERN_INFO, "external journal device major/minor " 6062 "numbers have changed"); 6063 journal_dev = new_decode_dev(journal_devnum); 6064 } else 6065 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); 6066 6067 if (journal_inum && journal_dev) { 6068 ext4_msg(sb, KERN_ERR, 6069 "filesystem has both journal inode and journal device!"); 6070 return -EINVAL; 6071 } 6072 6073 if (journal_inum) { 6074 journal = ext4_open_inode_journal(sb, journal_inum); 6075 if (IS_ERR(journal)) 6076 return PTR_ERR(journal); 6077 } else { 6078 journal = ext4_open_dev_journal(sb, journal_dev); 6079 if (IS_ERR(journal)) 6080 return PTR_ERR(journal); 6081 } 6082 6083 journal_dev_ro = bdev_read_only(journal->j_dev); 6084 really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; 6085 6086 if (journal_dev_ro && !sb_rdonly(sb)) { 6087 ext4_msg(sb, KERN_ERR, 6088 "journal device read-only, try mounting with '-o ro'"); 6089 err = -EROFS; 6090 goto err_out; 6091 } 6092 6093 /* 6094 * Are we loading a blank journal or performing recovery after a 6095 * crash? For recovery, we need to check in advance whether we 6096 * can get read-write access to the device. 
6097 */ 6098 if (ext4_has_feature_journal_needs_recovery(sb)) { 6099 if (sb_rdonly(sb)) { 6100 ext4_msg(sb, KERN_INFO, "INFO: recovery " 6101 "required on readonly filesystem"); 6102 if (really_read_only) { 6103 ext4_msg(sb, KERN_ERR, "write access " 6104 "unavailable, cannot proceed " 6105 "(try mounting with noload)"); 6106 err = -EROFS; 6107 goto err_out; 6108 } 6109 ext4_msg(sb, KERN_INFO, "write access will " 6110 "be enabled during recovery"); 6111 } 6112 } 6113 6114 if (!(journal->j_flags & JBD2_BARRIER)) 6115 ext4_msg(sb, KERN_INFO, "barriers disabled"); 6116 6117 if (!ext4_has_feature_journal_needs_recovery(sb)) 6118 err = jbd2_journal_wipe(journal, !really_read_only); 6119 if (!err) { 6120 char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); 6121 __le16 orig_state; 6122 bool changed = false; 6123 6124 if (save) 6125 memcpy(save, ((char *) es) + 6126 EXT4_S_ERR_START, EXT4_S_ERR_LEN); 6127 err = jbd2_journal_load(journal); 6128 if (save && memcmp(((char *) es) + EXT4_S_ERR_START, 6129 save, EXT4_S_ERR_LEN)) { 6130 memcpy(((char *) es) + EXT4_S_ERR_START, 6131 save, EXT4_S_ERR_LEN); 6132 changed = true; 6133 } 6134 kfree(save); 6135 orig_state = es->s_state; 6136 es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state & 6137 EXT4_ERROR_FS); 6138 if (orig_state != es->s_state) 6139 changed = true; 6140 /* Write out restored error information to the superblock */ 6141 if (changed && !really_read_only) { 6142 int err2; 6143 err2 = ext4_commit_super(sb); 6144 err = err ? : err2; 6145 } 6146 } 6147 6148 if (err) { 6149 ext4_msg(sb, KERN_ERR, "error loading journal"); 6150 goto err_out; 6151 } 6152 6153 EXT4_SB(sb)->s_journal = journal; 6154 err = ext4_clear_journal_err(sb, es); 6155 if (err) { 6156 ext4_journal_destroy(EXT4_SB(sb), journal); 6157 return err; 6158 } 6159 6160 if (!really_read_only && journal_devnum && 6161 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 6162 es->s_journal_dev = cpu_to_le32(journal_devnum); 6163 ext4_commit_super(sb); 6164 } 6165 if (!really_read_only && journal_inum && 6166 journal_inum != le32_to_cpu(es->s_journal_inum)) { 6167 es->s_journal_inum = cpu_to_le32(journal_inum); 6168 ext4_commit_super(sb); 6169 } 6170 6171 return 0; 6172 6173 err_out: 6174 ext4_journal_destroy(EXT4_SB(sb), journal); 6175 return err; 6176 } 6177 6178 /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */ 6179 static void ext4_update_super(struct super_block *sb) 6180 { 6181 struct ext4_sb_info *sbi = EXT4_SB(sb); 6182 struct ext4_super_block *es = sbi->s_es; 6183 struct buffer_head *sbh = sbi->s_sbh; 6184 6185 lock_buffer(sbh); 6186 /* 6187 * If the file system is mounted read-only, don't update the 6188 * superblock write time. This avoids updating the superblock 6189 * write time when we are mounting the root file system 6190 * read/only but we need to replay the journal; at that point, 6191 * for people who are east of GMT and who make their clock 6192 * tick in localtime for Windows bug-for-bug compatibility, 6193 * the clock is set in the future, and this will cause e2fsck 6194 * to complain and force a full file system check. 
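 *
 * Worked example (added for illustration; the numbers are made up) of the
 * lifetime-write accounting done just below: the block layer counts writes
 * in 512-byte sectors, so the delta since mount is shifted right by one to
 * convert it to KiB before it is added to the accumulated s_kbytes_written:
 *
 *	delta_sectors = part_stat_read(...) - sbi->s_sectors_written_start;
 *	              = 1000000 - 999000 = 1000 sectors
 *	delta_kib     = delta_sectors >> 1 = 500 KiB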
6195 */ 6196 if (!sb_rdonly(sb)) 6197 ext4_update_tstamp(es, s_wtime); 6198 es->s_kbytes_written = 6199 cpu_to_le64(sbi->s_kbytes_written + 6200 ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) - 6201 sbi->s_sectors_written_start) >> 1)); 6202 if (percpu_counter_initialized(&sbi->s_freeclusters_counter)) 6203 ext4_free_blocks_count_set(es, 6204 EXT4_C2B(sbi, percpu_counter_sum_positive( 6205 &sbi->s_freeclusters_counter))); 6206 if (percpu_counter_initialized(&sbi->s_freeinodes_counter)) 6207 es->s_free_inodes_count = 6208 cpu_to_le32(percpu_counter_sum_positive( 6209 &sbi->s_freeinodes_counter)); 6210 /* Copy error information to the on-disk superblock */ 6211 spin_lock(&sbi->s_error_lock); 6212 if (sbi->s_add_error_count > 0) { 6213 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 6214 if (!es->s_first_error_time && !es->s_first_error_time_hi) { 6215 __ext4_update_tstamp(&es->s_first_error_time, 6216 &es->s_first_error_time_hi, 6217 sbi->s_first_error_time); 6218 strtomem_pad(es->s_first_error_func, 6219 sbi->s_first_error_func, 0); 6220 es->s_first_error_line = 6221 cpu_to_le32(sbi->s_first_error_line); 6222 es->s_first_error_ino = 6223 cpu_to_le32(sbi->s_first_error_ino); 6224 es->s_first_error_block = 6225 cpu_to_le64(sbi->s_first_error_block); 6226 es->s_first_error_errcode = 6227 ext4_errno_to_code(sbi->s_first_error_code); 6228 } 6229 __ext4_update_tstamp(&es->s_last_error_time, 6230 &es->s_last_error_time_hi, 6231 sbi->s_last_error_time); 6232 strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0); 6233 es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line); 6234 es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino); 6235 es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block); 6236 es->s_last_error_errcode = 6237 ext4_errno_to_code(sbi->s_last_error_code); 6238 /* 6239 * Start the daily error reporting function if it hasn't been 6240 * started already 6241 */ 6242 if (!es->s_error_count) 6243 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); 6244 le32_add_cpu(&es->s_error_count, sbi->s_add_error_count); 6245 sbi->s_add_error_count = 0; 6246 } 6247 spin_unlock(&sbi->s_error_lock); 6248 6249 ext4_superblock_csum_set(sb); 6250 unlock_buffer(sbh); 6251 } 6252 6253 static int ext4_commit_super(struct super_block *sb) 6254 { 6255 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; 6256 6257 if (!sbh) 6258 return -EINVAL; 6259 6260 ext4_update_super(sb); 6261 6262 lock_buffer(sbh); 6263 /* Buffer got discarded which means block device got invalidated */ 6264 if (!buffer_mapped(sbh)) { 6265 unlock_buffer(sbh); 6266 return -EIO; 6267 } 6268 6269 if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { 6270 /* 6271 * Oh, dear. A previous attempt to write the 6272 * superblock failed. This could happen because the 6273 * USB device was yanked out. Or it could happen to 6274 * be a transient write error and maybe the block will 6275 * be remapped. Nothing we can do but to retry the 6276 * write and hope for the best. 6277 */ 6278 ext4_msg(sb, KERN_ERR, "previous I/O error to " 6279 "superblock detected"); 6280 clear_buffer_write_io_error(sbh); 6281 set_buffer_uptodate(sbh); 6282 } 6283 get_bh(sbh); 6284 /* Clear potential dirty bit if it was journalled update */ 6285 clear_buffer_dirty(sbh); 6286 sbh->b_end_io = end_buffer_write_sync; 6287 submit_bh(REQ_OP_WRITE | REQ_SYNC | 6288 (test_opt(sb, BARRIER) ? 
REQ_FUA : 0), sbh); 6289 wait_on_buffer(sbh); 6290 if (buffer_write_io_error(sbh)) { 6291 ext4_msg(sb, KERN_ERR, "I/O error while writing " 6292 "superblock"); 6293 clear_buffer_write_io_error(sbh); 6294 set_buffer_uptodate(sbh); 6295 return -EIO; 6296 } 6297 return 0; 6298 } 6299 6300 /* 6301 * Have we just finished recovery? If so, and if we are mounting (or 6302 * remounting) the filesystem readonly, then we will end up with a 6303 * consistent fs on disk. Record that fact. 6304 */ 6305 static int ext4_mark_recovery_complete(struct super_block *sb, 6306 struct ext4_super_block *es) 6307 { 6308 int err; 6309 journal_t *journal = EXT4_SB(sb)->s_journal; 6310 6311 if (!ext4_has_feature_journal(sb)) { 6312 if (journal != NULL) { 6313 ext4_error(sb, "Journal got removed while the fs was " 6314 "mounted!"); 6315 return -EFSCORRUPTED; 6316 } 6317 return 0; 6318 } 6319 jbd2_journal_lock_updates(journal); 6320 err = jbd2_journal_flush(journal, 0); 6321 if (err < 0) 6322 goto out; 6323 6324 if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) || 6325 ext4_has_feature_orphan_present(sb))) { 6326 if (!ext4_orphan_file_empty(sb)) { 6327 ext4_error(sb, "Orphan file not empty on read-only fs."); 6328 err = -EFSCORRUPTED; 6329 goto out; 6330 } 6331 ext4_clear_feature_journal_needs_recovery(sb); 6332 ext4_clear_feature_orphan_present(sb); 6333 ext4_commit_super(sb); 6334 } 6335 out: 6336 jbd2_journal_unlock_updates(journal); 6337 return err; 6338 } 6339 6340 /* 6341 * If we are mounting (or read-write remounting) a filesystem whose journal 6342 * has recorded an error from a previous lifetime, move that error to the 6343 * main filesystem now. 6344 */ 6345 static int ext4_clear_journal_err(struct super_block *sb, 6346 struct ext4_super_block *es) 6347 { 6348 journal_t *journal; 6349 int j_errno; 6350 const char *errstr; 6351 6352 if (!ext4_has_feature_journal(sb)) { 6353 ext4_error(sb, "Journal got removed while the fs was mounted!"); 6354 return -EFSCORRUPTED; 6355 } 6356 6357 journal = EXT4_SB(sb)->s_journal; 6358 6359 /* 6360 * Now check for any error status which may have been recorded in the 6361 * journal by a prior ext4_error() or ext4_abort() 6362 */ 6363 6364 j_errno = jbd2_journal_errno(journal); 6365 if (j_errno) { 6366 char nbuf[16]; 6367 6368 errstr = ext4_decode_error(sb, j_errno, nbuf); 6369 ext4_warning(sb, "Filesystem error recorded " 6370 "from previous mount: %s", errstr); 6371 6372 EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 6373 es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 6374 j_errno = ext4_commit_super(sb); 6375 if (j_errno) 6376 return j_errno; 6377 ext4_warning(sb, "Marked fs in need of filesystem check."); 6378 6379 jbd2_journal_clear_err(journal); 6380 jbd2_journal_update_sb_errno(journal); 6381 } 6382 return 0; 6383 } 6384 6385 /* 6386 * Force the running and committing transactions to commit, 6387 * and wait on the commit. 
6388 */ 6389 int ext4_force_commit(struct super_block *sb) 6390 { 6391 return ext4_journal_force_commit(EXT4_SB(sb)->s_journal); 6392 } 6393 6394 static int ext4_sync_fs(struct super_block *sb, int wait) 6395 { 6396 int ret = 0; 6397 tid_t target; 6398 bool needs_barrier = false; 6399 struct ext4_sb_info *sbi = EXT4_SB(sb); 6400 6401 ret = ext4_emergency_state(sb); 6402 if (unlikely(ret)) 6403 return ret; 6404 6405 trace_ext4_sync_fs(sb, wait); 6406 flush_workqueue(sbi->rsv_conversion_wq); 6407 /* 6408 * Writeback quota in non-journalled quota case - journalled quota has 6409 * no dirty dquots 6410 */ 6411 dquot_writeback_dquots(sb, -1); 6412 /* 6413 * Data writeback is possible w/o journal transaction, so barrier must 6414 * be sent at the end of the function. But we can skip it if 6415 * transaction_commit will do it for us. 6416 */ 6417 if (sbi->s_journal) { 6418 target = jbd2_get_latest_transaction(sbi->s_journal); 6419 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && 6420 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) 6421 needs_barrier = true; 6422 6423 if (jbd2_journal_start_commit(sbi->s_journal, &target)) { 6424 if (wait) 6425 ret = jbd2_log_wait_commit(sbi->s_journal, 6426 target); 6427 } 6428 } else if (wait && test_opt(sb, BARRIER)) 6429 needs_barrier = true; 6430 if (needs_barrier) { 6431 int err; 6432 err = blkdev_issue_flush(sb->s_bdev); 6433 if (!ret) 6434 ret = err; 6435 } 6436 6437 return ret; 6438 } 6439 6440 /* 6441 * LVM calls this function before a (read-only) snapshot is created. This 6442 * gives us a chance to flush the journal completely and mark the fs clean. 6443 * 6444 * Note that this function alone cannot bring the filesystem into a clean 6445 * state; it relies on the upper layer to stop all data & metadata 6446 * modifications. 6447 */ 6448 static int ext4_freeze(struct super_block *sb) 6449 { 6450 int error = 0; 6451 journal_t *journal = EXT4_SB(sb)->s_journal; 6452 6453 if (journal) { 6454 /* Now we set up the journal barrier. */ 6455 jbd2_journal_lock_updates(journal); 6456 6457 /* 6458 * Don't clear the needs_recovery flag if we failed to 6459 * flush the journal. 6460 */ 6461 error = jbd2_journal_flush(journal, 0); 6462 if (error < 0) 6463 goto out; 6464 6465 /* Journal blocked and flushed, clear needs_recovery flag. */ 6466 ext4_clear_feature_journal_needs_recovery(sb); 6467 if (ext4_orphan_file_empty(sb)) 6468 ext4_clear_feature_orphan_present(sb); 6469 } 6470 6471 error = ext4_commit_super(sb); 6472 out: 6473 if (journal) 6474 /* we rely on upper layer to stop further updates */ 6475 jbd2_journal_unlock_updates(journal); 6476 return error; 6477 } 6478 6479 /* 6480 * Called by LVM after the snapshot is done. We need to reset the RECOVER 6481 * flag here, even though the filesystem is not technically dirty yet. 6482 */ 6483 static int ext4_unfreeze(struct super_block *sb) 6484 { 6485 if (ext4_emergency_state(sb)) 6486 return 0; 6487 6488 if (EXT4_SB(sb)->s_journal) { 6489 /* Reset the needs_recovery flag before the fs is unlocked.
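 *
 * For reference (illustrative userspace sketch, not part of this file):
 * besides LVM, the ext4_freeze()/ext4_unfreeze() pair above can be driven
 * from a privileged process (CAP_SYS_ADMIN) through the generic
 * FIFREEZE/FITHAW ioctls, which is what fsfreeze(8) does:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>			// FIFREEZE, FITHAW
 *
 *	int fd = open("/mnt/point", O_RDONLY);	// any fd on the filesystem
 *	ioctl(fd, FIFREEZE, 0);			// freeze_super() -> ext4_freeze()
 *	// ... take the snapshot or backup ...
 *	ioctl(fd, FITHAW, 0);			// thaw_super() -> ext4_unfreeze()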
*/ 6490 ext4_set_feature_journal_needs_recovery(sb); 6491 if (ext4_has_feature_orphan_file(sb)) 6492 ext4_set_feature_orphan_present(sb); 6493 } 6494 6495 ext4_commit_super(sb); 6496 return 0; 6497 } 6498 6499 /* 6500 * Structure to save mount options for ext4_remount's benefit 6501 */ 6502 struct ext4_mount_options { 6503 unsigned long s_mount_opt; 6504 unsigned long s_mount_opt2; 6505 kuid_t s_resuid; 6506 kgid_t s_resgid; 6507 unsigned long s_commit_interval; 6508 u32 s_min_batch_time, s_max_batch_time; 6509 #ifdef CONFIG_QUOTA 6510 int s_jquota_fmt; 6511 char *s_qf_names[EXT4_MAXQUOTAS]; 6512 #endif 6513 }; 6514 6515 static int __ext4_remount(struct fs_context *fc, struct super_block *sb) 6516 { 6517 struct ext4_fs_context *ctx = fc->fs_private; 6518 struct ext4_super_block *es; 6519 struct ext4_sb_info *sbi = EXT4_SB(sb); 6520 unsigned long old_sb_flags; 6521 struct ext4_mount_options old_opts; 6522 ext4_group_t g; 6523 int err = 0; 6524 int alloc_ctx; 6525 #ifdef CONFIG_QUOTA 6526 int enable_quota = 0; 6527 int i, j; 6528 char *to_free[EXT4_MAXQUOTAS]; 6529 #endif 6530 6531 6532 /* Store the original options */ 6533 old_sb_flags = sb->s_flags; 6534 old_opts.s_mount_opt = sbi->s_mount_opt; 6535 old_opts.s_mount_opt2 = sbi->s_mount_opt2; 6536 old_opts.s_resuid = sbi->s_resuid; 6537 old_opts.s_resgid = sbi->s_resgid; 6538 old_opts.s_commit_interval = sbi->s_commit_interval; 6539 old_opts.s_min_batch_time = sbi->s_min_batch_time; 6540 old_opts.s_max_batch_time = sbi->s_max_batch_time; 6541 #ifdef CONFIG_QUOTA 6542 old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 6543 for (i = 0; i < EXT4_MAXQUOTAS; i++) 6544 if (sbi->s_qf_names[i]) { 6545 char *qf_name = get_qf_name(sb, sbi, i); 6546 6547 old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL); 6548 if (!old_opts.s_qf_names[i]) { 6549 for (j = 0; j < i; j++) 6550 kfree(old_opts.s_qf_names[j]); 6551 return -ENOMEM; 6552 } 6553 } else 6554 old_opts.s_qf_names[i] = NULL; 6555 #endif 6556 if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) { 6557 if (sbi->s_journal && sbi->s_journal->j_task->io_context) 6558 ctx->journal_ioprio = 6559 sbi->s_journal->j_task->io_context->ioprio; 6560 else 6561 ctx->journal_ioprio = EXT4_DEF_JOURNAL_IOPRIO; 6562 6563 } 6564 6565 if ((ctx->spec & EXT4_SPEC_s_stripe) && 6566 ext4_is_stripe_incompatible(sb, ctx->s_stripe)) { 6567 ext4_msg(sb, KERN_WARNING, 6568 "stripe (%lu) is not aligned with cluster size (%u), " 6569 "stripe is disabled", 6570 ctx->s_stripe, sbi->s_cluster_ratio); 6571 ctx->s_stripe = 0; 6572 } 6573 6574 /* 6575 * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause 6576 * two calls to ext4_should_dioread_nolock() to return inconsistent 6577 * values, triggering WARN_ON in ext4_add_complete_io(). we grab 6578 * here s_writepages_rwsem to avoid race between writepages ops and 6579 * remount. 
6580 */ 6581 alloc_ctx = ext4_writepages_down_write(sb); 6582 ext4_apply_options(fc, sb); 6583 ext4_writepages_up_write(sb, alloc_ctx); 6584 6585 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ 6586 test_opt(sb, JOURNAL_CHECKSUM)) { 6587 ext4_msg(sb, KERN_ERR, "changing journal_checksum " 6588 "during remount not supported; ignoring"); 6589 sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; 6590 } 6591 6592 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 6593 if (test_opt2(sb, EXPLICIT_DELALLOC)) { 6594 ext4_msg(sb, KERN_ERR, "can't mount with " 6595 "both data=journal and delalloc"); 6596 err = -EINVAL; 6597 goto restore_opts; 6598 } 6599 if (test_opt(sb, DIOREAD_NOLOCK)) { 6600 ext4_msg(sb, KERN_ERR, "can't mount with " 6601 "both data=journal and dioread_nolock"); 6602 err = -EINVAL; 6603 goto restore_opts; 6604 } 6605 } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { 6606 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 6607 ext4_msg(sb, KERN_ERR, "can't mount with " 6608 "journal_async_commit in data=ordered mode"); 6609 err = -EINVAL; 6610 goto restore_opts; 6611 } 6612 } 6613 6614 if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) { 6615 ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount"); 6616 err = -EINVAL; 6617 goto restore_opts; 6618 } 6619 6620 if ((old_opts.s_mount_opt & EXT4_MOUNT_DELALLOC) && 6621 !test_opt(sb, DELALLOC)) { 6622 ext4_msg(sb, KERN_ERR, "can't disable delalloc during remount"); 6623 err = -EINVAL; 6624 goto restore_opts; 6625 } 6626 6627 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 6628 (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 6629 6630 es = sbi->s_es; 6631 6632 if (sbi->s_journal) { 6633 ext4_init_journal_params(sb, sbi->s_journal); 6634 set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 6635 } 6636 6637 /* Flush outstanding errors before changing fs state */ 6638 flush_work(&sbi->s_sb_upd_work); 6639 6640 if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { 6641 if (ext4_emergency_state(sb)) { 6642 err = -EROFS; 6643 goto restore_opts; 6644 } 6645 6646 if (fc->sb_flags & SB_RDONLY) { 6647 err = sync_filesystem(sb); 6648 if (err < 0) 6649 goto restore_opts; 6650 err = dquot_suspend(sb, -1); 6651 if (err < 0) 6652 goto restore_opts; 6653 6654 /* 6655 * First of all, the unconditional stuff we have to do 6656 * to disable replay of the journal when we next remount 6657 */ 6658 sb->s_flags |= SB_RDONLY; 6659 6660 /* 6661 * OK, test if we are remounting a valid rw partition 6662 * readonly, and if so set the rdonly flag and then 6663 * mark the partition as valid again. 6664 */ 6665 if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && 6666 (sbi->s_mount_state & EXT4_VALID_FS)) 6667 es->s_state = cpu_to_le16(sbi->s_mount_state); 6668 6669 if (sbi->s_journal) { 6670 /* 6671 * We let remount-ro finish even if marking fs 6672 * as clean failed... 6673 */ 6674 ext4_mark_recovery_complete(sb, es); 6675 } 6676 } else { 6677 /* Make sure we can mount this feature set readwrite */ 6678 if (ext4_has_feature_readonly(sb) || 6679 !ext4_feature_set_ok(sb, 0)) { 6680 err = -EROFS; 6681 goto restore_opts; 6682 } 6683 /* 6684 * Make sure the group descriptor checksums 6685 * are sane. If they aren't, refuse to remount r/w. 
6686 */ 6687 for (g = 0; g < sbi->s_groups_count; g++) { 6688 struct ext4_group_desc *gdp = 6689 ext4_get_group_desc(sb, g, NULL); 6690 6691 if (!ext4_group_desc_csum_verify(sb, g, gdp)) { 6692 ext4_msg(sb, KERN_ERR, 6693 "ext4_remount: Checksum for group %u failed (%u!=%u)", 6694 g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), 6695 le16_to_cpu(gdp->bg_checksum)); 6696 err = -EFSBADCRC; 6697 goto restore_opts; 6698 } 6699 } 6700 6701 /* 6702 * If we have an unprocessed orphan list hanging 6703 * around from a previously readonly bdev mount, 6704 * require a full umount/remount for now. 6705 */ 6706 if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) { 6707 ext4_msg(sb, KERN_WARNING, "Couldn't " 6708 "remount RDWR because of unprocessed " 6709 "orphan inode list. Please " 6710 "umount/remount instead"); 6711 err = -EINVAL; 6712 goto restore_opts; 6713 } 6714 6715 /* 6716 * Mounting a RDONLY partition read-write, so reread 6717 * and store the current valid flag. (It may have 6718 * been changed by e2fsck since we originally mounted 6719 * the partition.) 6720 */ 6721 if (sbi->s_journal) { 6722 err = ext4_clear_journal_err(sb, es); 6723 if (err) 6724 goto restore_opts; 6725 } 6726 sbi->s_mount_state = (le16_to_cpu(es->s_state) & 6727 ~EXT4_FC_REPLAY); 6728 6729 err = ext4_setup_super(sb, es, 0); 6730 if (err) 6731 goto restore_opts; 6732 6733 sb->s_flags &= ~SB_RDONLY; 6734 if (ext4_has_feature_mmp(sb)) { 6735 err = ext4_multi_mount_protect(sb, 6736 le64_to_cpu(es->s_mmp_block)); 6737 if (err) 6738 goto restore_opts; 6739 } 6740 #ifdef CONFIG_QUOTA 6741 enable_quota = 1; 6742 #endif 6743 } 6744 } 6745 6746 /* 6747 * Handle creation of system zone data early because it can fail. 6748 * Releasing of existing data is done when we are sure remount will 6749 * succeed. 6750 */ 6751 if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) { 6752 err = ext4_setup_system_zone(sb); 6753 if (err) 6754 goto restore_opts; 6755 } 6756 6757 if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { 6758 err = ext4_commit_super(sb); 6759 if (err) 6760 goto restore_opts; 6761 } 6762 6763 #ifdef CONFIG_QUOTA 6764 if (enable_quota) { 6765 if (sb_any_quota_suspended(sb)) 6766 dquot_resume(sb, -1); 6767 else if (ext4_has_feature_quota(sb)) { 6768 err = ext4_enable_quotas(sb); 6769 if (err) 6770 goto restore_opts; 6771 } 6772 } 6773 /* Release old quota file names */ 6774 for (i = 0; i < EXT4_MAXQUOTAS; i++) 6775 kfree(old_opts.s_qf_names[i]); 6776 #endif 6777 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 6778 ext4_release_system_zone(sb); 6779 6780 /* 6781 * Reinitialize lazy itable initialization thread based on 6782 * current settings 6783 */ 6784 if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) 6785 ext4_unregister_li_request(sb); 6786 else { 6787 ext4_group_t first_not_zeroed; 6788 first_not_zeroed = ext4_has_uninit_itable(sb); 6789 ext4_register_li_request(sb, first_not_zeroed); 6790 } 6791 6792 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 6793 ext4_stop_mmpd(sbi); 6794 6795 /* 6796 * Handle aborting the filesystem as the last thing during remount to 6797 * avoid obsure errors during remount when some option changes fail to 6798 * apply due to shutdown filesystem. 
6799 */ 6800 if (test_opt2(sb, ABORT)) 6801 ext4_abort(sb, ESHUTDOWN, "Abort forced by user"); 6802 6803 return 0; 6804 6805 restore_opts: 6806 /* 6807 * If there was a failing r/w to ro transition, we may need to 6808 * re-enable quota 6809 */ 6810 if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) && 6811 sb_any_quota_suspended(sb)) 6812 dquot_resume(sb, -1); 6813 6814 alloc_ctx = ext4_writepages_down_write(sb); 6815 sb->s_flags = old_sb_flags; 6816 sbi->s_mount_opt = old_opts.s_mount_opt; 6817 sbi->s_mount_opt2 = old_opts.s_mount_opt2; 6818 sbi->s_resuid = old_opts.s_resuid; 6819 sbi->s_resgid = old_opts.s_resgid; 6820 sbi->s_commit_interval = old_opts.s_commit_interval; 6821 sbi->s_min_batch_time = old_opts.s_min_batch_time; 6822 sbi->s_max_batch_time = old_opts.s_max_batch_time; 6823 ext4_writepages_up_write(sb, alloc_ctx); 6824 6825 if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 6826 ext4_release_system_zone(sb); 6827 #ifdef CONFIG_QUOTA 6828 sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 6829 for (i = 0; i < EXT4_MAXQUOTAS; i++) { 6830 to_free[i] = get_qf_name(sb, sbi, i); 6831 rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]); 6832 } 6833 synchronize_rcu(); 6834 for (i = 0; i < EXT4_MAXQUOTAS; i++) 6835 kfree(to_free[i]); 6836 #endif 6837 if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 6838 ext4_stop_mmpd(sbi); 6839 return err; 6840 } 6841 6842 static int ext4_reconfigure(struct fs_context *fc) 6843 { 6844 struct super_block *sb = fc->root->d_sb; 6845 int ret; 6846 bool old_ro = sb_rdonly(sb); 6847 6848 fc->s_fs_info = EXT4_SB(sb); 6849 6850 ret = ext4_check_opt_consistency(fc, sb); 6851 if (ret < 0) 6852 return ret; 6853 6854 ret = __ext4_remount(fc, sb); 6855 if (ret < 0) 6856 return ret; 6857 6858 ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.", 6859 &sb->s_uuid, 6860 (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? 
" ro" : " r/w") : ""); 6861 6862 return 0; 6863 } 6864 6865 #ifdef CONFIG_QUOTA 6866 static int ext4_statfs_project(struct super_block *sb, 6867 kprojid_t projid, struct kstatfs *buf) 6868 { 6869 struct kqid qid; 6870 struct dquot *dquot; 6871 u64 limit; 6872 u64 curblock; 6873 6874 qid = make_kqid_projid(projid); 6875 dquot = dqget(sb, qid); 6876 if (IS_ERR(dquot)) 6877 return PTR_ERR(dquot); 6878 spin_lock(&dquot->dq_dqb_lock); 6879 6880 limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, 6881 dquot->dq_dqb.dqb_bhardlimit); 6882 limit >>= sb->s_blocksize_bits; 6883 6884 if (limit) { 6885 uint64_t remaining = 0; 6886 6887 curblock = (dquot->dq_dqb.dqb_curspace + 6888 dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; 6889 if (limit > curblock) 6890 remaining = limit - curblock; 6891 6892 buf->f_blocks = min(buf->f_blocks, limit); 6893 buf->f_bfree = min(buf->f_bfree, remaining); 6894 buf->f_bavail = min(buf->f_bavail, remaining); 6895 } 6896 6897 limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, 6898 dquot->dq_dqb.dqb_ihardlimit); 6899 if (limit) { 6900 uint64_t remaining = 0; 6901 6902 if (limit > dquot->dq_dqb.dqb_curinodes) 6903 remaining = limit - dquot->dq_dqb.dqb_curinodes; 6904 6905 buf->f_files = min(buf->f_files, limit); 6906 buf->f_ffree = min(buf->f_ffree, remaining); 6907 } 6908 6909 spin_unlock(&dquot->dq_dqb_lock); 6910 dqput(dquot); 6911 return 0; 6912 } 6913 #endif 6914 6915 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) 6916 { 6917 struct super_block *sb = dentry->d_sb; 6918 struct ext4_sb_info *sbi = EXT4_SB(sb); 6919 struct ext4_super_block *es = sbi->s_es; 6920 ext4_fsblk_t overhead = 0, resv_blocks; 6921 s64 bfree; 6922 resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); 6923 6924 if (!test_opt(sb, MINIX_DF)) 6925 overhead = sbi->s_overhead; 6926 6927 buf->f_type = EXT4_SUPER_MAGIC; 6928 buf->f_bsize = sb->s_blocksize; 6929 buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); 6930 bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - 6931 percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); 6932 /* prevent underflow in case that few free space is available */ 6933 buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); 6934 buf->f_bavail = buf->f_bfree - 6935 (ext4_r_blocks_count(es) + resv_blocks); 6936 if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) 6937 buf->f_bavail = 0; 6938 buf->f_files = le32_to_cpu(es->s_inodes_count); 6939 buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); 6940 buf->f_namelen = EXT4_NAME_LEN; 6941 buf->f_fsid = uuid_to_fsid(es->s_uuid); 6942 6943 #ifdef CONFIG_QUOTA 6944 if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) && 6945 sb_has_quota_limits_enabled(sb, PRJQUOTA)) 6946 ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf); 6947 #endif 6948 return 0; 6949 } 6950 6951 6952 #ifdef CONFIG_QUOTA 6953 6954 /* 6955 * Helper functions so that transaction is started before we acquire dqio_sem 6956 * to keep correct lock ordering of transaction > dqio_sem 6957 */ 6958 static inline struct inode *dquot_to_inode(struct dquot *dquot) 6959 { 6960 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 6961 } 6962 6963 static int ext4_write_dquot(struct dquot *dquot) 6964 { 6965 int ret, err; 6966 handle_t *handle; 6967 struct inode *inode; 6968 6969 inode = dquot_to_inode(dquot); 6970 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 6971 EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 6972 if (IS_ERR(handle)) 6973 return 
PTR_ERR(handle); 6974 ret = dquot_commit(dquot); 6975 if (ret < 0) 6976 ext4_error_err(dquot->dq_sb, -ret, 6977 "Failed to commit dquot type %d", 6978 dquot->dq_id.type); 6979 err = ext4_journal_stop(handle); 6980 if (!ret) 6981 ret = err; 6982 return ret; 6983 } 6984 6985 static int ext4_acquire_dquot(struct dquot *dquot) 6986 { 6987 int ret, err; 6988 handle_t *handle; 6989 6990 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6991 EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 6992 if (IS_ERR(handle)) 6993 return PTR_ERR(handle); 6994 ret = dquot_acquire(dquot); 6995 if (ret < 0) 6996 ext4_error_err(dquot->dq_sb, -ret, 6997 "Failed to acquire dquot type %d", 6998 dquot->dq_id.type); 6999 err = ext4_journal_stop(handle); 7000 if (!ret) 7001 ret = err; 7002 return ret; 7003 } 7004 7005 static int ext4_release_dquot(struct dquot *dquot) 7006 { 7007 int ret, err; 7008 handle_t *handle; 7009 bool freeze_protected = false; 7010 7011 /* 7012 * Trying to sb_start_intwrite() in a running transaction 7013 * can result in a deadlock. Further, running transactions 7014 * are already protected from freezing. 7015 */ 7016 if (!ext4_journal_current_handle()) { 7017 sb_start_intwrite(dquot->dq_sb); 7018 freeze_protected = true; 7019 } 7020 7021 handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 7022 EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); 7023 if (IS_ERR(handle)) { 7024 /* Release dquot anyway to avoid endless cycle in dqput() */ 7025 dquot_release(dquot); 7026 if (freeze_protected) 7027 sb_end_intwrite(dquot->dq_sb); 7028 return PTR_ERR(handle); 7029 } 7030 ret = dquot_release(dquot); 7031 if (ret < 0) 7032 ext4_error_err(dquot->dq_sb, -ret, 7033 "Failed to release dquot type %d", 7034 dquot->dq_id.type); 7035 err = ext4_journal_stop(handle); 7036 if (!ret) 7037 ret = err; 7038 7039 if (freeze_protected) 7040 sb_end_intwrite(dquot->dq_sb); 7041 7042 return ret; 7043 } 7044 7045 static int ext4_mark_dquot_dirty(struct dquot *dquot) 7046 { 7047 struct super_block *sb = dquot->dq_sb; 7048 7049 if (ext4_is_quota_journalled(sb)) { 7050 dquot_mark_dquot_dirty(dquot); 7051 return ext4_write_dquot(dquot); 7052 } else { 7053 return dquot_mark_dquot_dirty(dquot); 7054 } 7055 } 7056 7057 static int ext4_write_info(struct super_block *sb, int type) 7058 { 7059 int ret, err; 7060 handle_t *handle; 7061 7062 /* Data block + inode block */ 7063 handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2); 7064 if (IS_ERR(handle)) 7065 return PTR_ERR(handle); 7066 ret = dquot_commit_info(sb, type); 7067 err = ext4_journal_stop(handle); 7068 if (!ret) 7069 ret = err; 7070 return ret; 7071 } 7072 7073 static void lockdep_set_quota_inode(struct inode *inode, int subclass) 7074 { 7075 struct ext4_inode_info *ei = EXT4_I(inode); 7076 7077 /* The first argument of lockdep_set_subclass has to be 7078 * *exactly* the same as the argument to init_rwsem() --- in 7079 * this case, in init_once() --- or lockdep gets unhappy 7080 * because the name of the lock is set using the 7081 * stringification of the argument to init_rwsem(). 7082 */ 7083 (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ 7084 lockdep_set_subclass(&ei->i_data_sem, subclass); 7085 } 7086 7087 /* 7088 * Standard function to be called on quota_on 7089 */ 7090 static int ext4_quota_on(struct super_block *sb, int type, int format_id, 7091 const struct path *path) 7092 { 7093 int err; 7094 7095 if (!test_opt(sb, QUOTA)) 7096 return -EINVAL; 7097 7098 /* Quotafile not on the same filesystem? 
*/ 7099 if (path->dentry->d_sb != sb) 7100 return -EXDEV; 7101 7102 /* Quota already enabled for this file? */ 7103 if (IS_NOQUOTA(d_inode(path->dentry))) 7104 return -EBUSY; 7105 7106 /* Journaling quota? */ 7107 if (EXT4_SB(sb)->s_qf_names[type]) { 7108 /* Quotafile not in fs root? */ 7109 if (path->dentry->d_parent != sb->s_root) 7110 ext4_msg(sb, KERN_WARNING, 7111 "Quota file not on filesystem root. " 7112 "Journaled quota will not work"); 7113 sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY; 7114 } else { 7115 /* 7116 * Clear the flag just in case mount options changed since 7117 * last time. 7118 */ 7119 sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY; 7120 } 7121 7122 lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); 7123 err = dquot_quota_on(sb, type, format_id, path); 7124 if (!err) { 7125 struct inode *inode = d_inode(path->dentry); 7126 handle_t *handle; 7127 7128 /* 7129 * Set inode flags to prevent userspace from messing with quota 7130 * files. If this fails, we return success anyway since quotas 7131 * are already enabled and this is not a hard failure. 7132 */ 7133 inode_lock(inode); 7134 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 7135 if (IS_ERR(handle)) 7136 goto unlock_inode; 7137 EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL; 7138 inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, 7139 S_NOATIME | S_IMMUTABLE); 7140 err = ext4_mark_inode_dirty(handle, inode); 7141 ext4_journal_stop(handle); 7142 unlock_inode: 7143 inode_unlock(inode); 7144 if (err) 7145 dquot_quota_off(sb, type); 7146 } 7147 if (err) 7148 lockdep_set_quota_inode(path->dentry->d_inode, 7149 I_DATA_SEM_NORMAL); 7150 return err; 7151 } 7152 7153 static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) 7154 { 7155 switch (type) { 7156 case USRQUOTA: 7157 return qf_inum == EXT4_USR_QUOTA_INO; 7158 case GRPQUOTA: 7159 return qf_inum == EXT4_GRP_QUOTA_INO; 7160 case PRJQUOTA: 7161 return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; 7162 default: 7163 BUG(); 7164 } 7165 } 7166 7167 static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 7168 unsigned int flags) 7169 { 7170 int err; 7171 struct inode *qf_inode; 7172 unsigned long qf_inums[EXT4_MAXQUOTAS] = { 7173 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 7174 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 7175 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 7176 }; 7177 7178 BUG_ON(!ext4_has_feature_quota(sb)); 7179 7180 if (!qf_inums[type]) 7181 return -EPERM; 7182 7183 if (!ext4_check_quota_inum(type, qf_inums[type])) { 7184 ext4_error(sb, "Bad quota inum: %lu, type: %d", 7185 qf_inums[type], type); 7186 return -EUCLEAN; 7187 } 7188 7189 qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); 7190 if (IS_ERR(qf_inode)) { 7191 ext4_error(sb, "Bad quota inode: %lu, type: %d", 7192 qf_inums[type], type); 7193 return PTR_ERR(qf_inode); 7194 } 7195 7196 /* Don't account quota for quota files to avoid recursion */ 7197 qf_inode->i_flags |= S_NOQUOTA; 7198 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA); 7199 err = dquot_load_quota_inode(qf_inode, type, format_id, flags); 7200 if (err) 7201 lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL); 7202 iput(qf_inode); 7203 7204 return err; 7205 } 7206 7207 /* Enable usage tracking for all quota types. 
*/ 7208 int ext4_enable_quotas(struct super_block *sb) 7209 { 7210 int type, err = 0; 7211 unsigned long qf_inums[EXT4_MAXQUOTAS] = { 7212 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 7213 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 7214 le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 7215 }; 7216 bool quota_mopt[EXT4_MAXQUOTAS] = { 7217 test_opt(sb, USRQUOTA), 7218 test_opt(sb, GRPQUOTA), 7219 test_opt(sb, PRJQUOTA), 7220 }; 7221 7222 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 7223 for (type = 0; type < EXT4_MAXQUOTAS; type++) { 7224 if (qf_inums[type]) { 7225 err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 7226 DQUOT_USAGE_ENABLED | 7227 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 7228 if (err) { 7229 ext4_warning(sb, 7230 "Failed to enable quota tracking " 7231 "(type=%d, err=%d, ino=%lu). " 7232 "Please run e2fsck to fix.", type, 7233 err, qf_inums[type]); 7234 7235 ext4_quotas_off(sb, type); 7236 return err; 7237 } 7238 } 7239 } 7240 return 0; 7241 } 7242 7243 static int ext4_quota_off(struct super_block *sb, int type) 7244 { 7245 struct inode *inode = sb_dqopt(sb)->files[type]; 7246 handle_t *handle; 7247 int err; 7248 7249 /* Force all delayed allocation blocks to be allocated. 7250 * Caller already holds s_umount sem */ 7251 if (test_opt(sb, DELALLOC)) 7252 sync_filesystem(sb); 7253 7254 if (!inode || !igrab(inode)) 7255 goto out; 7256 7257 err = dquot_quota_off(sb, type); 7258 if (err || ext4_has_feature_quota(sb)) 7259 goto out_put; 7260 /* 7261 * When the filesystem was remounted read-only first, we cannot cleanup 7262 * inode flags here. Bad luck but people should be using QUOTA feature 7263 * these days anyway. 7264 */ 7265 if (sb_rdonly(sb)) 7266 goto out_put; 7267 7268 inode_lock(inode); 7269 /* 7270 * Update modification times of quota files when userspace can 7271 * start looking at them. If we fail, we return success anyway since 7272 * this is not a hard failure and quotas are already disabled. 7273 */ 7274 handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 7275 if (IS_ERR(handle)) { 7276 err = PTR_ERR(handle); 7277 goto out_unlock; 7278 } 7279 EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL); 7280 inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); 7281 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 7282 err = ext4_mark_inode_dirty(handle, inode); 7283 ext4_journal_stop(handle); 7284 out_unlock: 7285 inode_unlock(inode); 7286 out_put: 7287 lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL); 7288 iput(inode); 7289 return err; 7290 out: 7291 return dquot_quota_off(sb, type); 7292 } 7293 7294 /* Read data from quotafile - avoid pagecache and such because we cannot afford 7295 * acquiring the locks... 
As quota files are never truncated and quota code 7296 * itself serializes the operations (and no one else should touch the files) 7297 * we don't have to be afraid of races */ 7298 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 7299 size_t len, loff_t off) 7300 { 7301 struct inode *inode = sb_dqopt(sb)->files[type]; 7302 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 7303 int offset = off & (sb->s_blocksize - 1); 7304 int tocopy; 7305 size_t toread; 7306 struct buffer_head *bh; 7307 loff_t i_size = i_size_read(inode); 7308 7309 if (off > i_size) 7310 return 0; 7311 if (off+len > i_size) 7312 len = i_size-off; 7313 toread = len; 7314 while (toread > 0) { 7315 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); 7316 bh = ext4_bread(NULL, inode, blk, 0); 7317 if (IS_ERR(bh)) 7318 return PTR_ERR(bh); 7319 if (!bh) /* A hole? */ 7320 memset(data, 0, tocopy); 7321 else 7322 memcpy(data, bh->b_data+offset, tocopy); 7323 brelse(bh); 7324 offset = 0; 7325 toread -= tocopy; 7326 data += tocopy; 7327 blk++; 7328 } 7329 return len; 7330 } 7331 7332 /* Write to quotafile (we know the transaction is already started and has 7333 * enough credits) */ 7334 static ssize_t ext4_quota_write(struct super_block *sb, int type, 7335 const char *data, size_t len, loff_t off) 7336 { 7337 struct inode *inode = sb_dqopt(sb)->files[type]; 7338 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 7339 int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1); 7340 int retries = 0; 7341 struct buffer_head *bh; 7342 handle_t *handle = journal_current_handle(); 7343 7344 if (!handle) { 7345 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 7346 " cancelled because transaction is not started", 7347 (unsigned long long)off, (unsigned long long)len); 7348 return -EIO; 7349 } 7350 /* 7351 * Since we account only one data block in transaction credits, 7352 * then it is impossible to cross a block boundary. 7353 */ 7354 if (sb->s_blocksize - offset < len) { 7355 ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 7356 " cancelled because not block aligned", 7357 (unsigned long long)off, (unsigned long long)len); 7358 return -EIO; 7359 } 7360 7361 do { 7362 bh = ext4_bread(handle, inode, blk, 7363 EXT4_GET_BLOCKS_CREATE | 7364 EXT4_GET_BLOCKS_METADATA_NOFAIL); 7365 } while (PTR_ERR(bh) == -ENOSPC && 7366 ext4_should_retry_alloc(inode->i_sb, &retries)); 7367 if (IS_ERR(bh)) 7368 return PTR_ERR(bh); 7369 if (!bh) 7370 goto out; 7371 BUFFER_TRACE(bh, "get write access"); 7372 err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 7373 if (err) { 7374 brelse(bh); 7375 return err; 7376 } 7377 lock_buffer(bh); 7378 memcpy(bh->b_data+offset, data, len); 7379 flush_dcache_folio(bh->b_folio); 7380 unlock_buffer(bh); 7381 err = ext4_handle_dirty_metadata(handle, NULL, bh); 7382 brelse(bh); 7383 out: 7384 if (inode->i_size < off + len) { 7385 i_size_write(inode, off + len); 7386 EXT4_I(inode)->i_disksize = inode->i_size; 7387 err2 = ext4_mark_inode_dirty(handle, inode); 7388 if (unlikely(err2 && !err)) 7389 err = err2; 7390 } 7391 return err ? 

#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static inline void register_as_ext2(void)
{
	int err = register_filesystem(&ext2_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
}

static inline void unregister_as_ext2(void)
{
	unregister_filesystem(&ext2_fs_type);
}

static inline int ext2_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext2_incompat_features(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext2_ro_compat_features(sb))
		return 0;
	return 1;
}
#else
static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
#endif

static inline void register_as_ext3(void)
{
	int err = register_filesystem(&ext3_fs_type);
	if (err)
		printk(KERN_WARNING
		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
}

static inline void unregister_as_ext3(void)
{
	unregister_filesystem(&ext3_fs_type);
}

static inline int ext3_feature_set_ok(struct super_block *sb)
{
	if (ext4_has_unknown_ext3_incompat_features(sb))
		return 0;
	if (!ext4_has_feature_journal(sb))
		return 0;
	if (sb_rdonly(sb))
		return 1;
	if (ext4_has_unknown_ext3_ro_compat_features(sb))
		return 0;
	return 1;
}
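
/*
 * Note: if the filesystem uses an external journal, the reference to the
 * journal's block device is dropped only after the generic superblock
 * teardown, which may still touch the journal device while releasing the
 * journal.
 */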

static void ext4_kill_sb(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;

	kill_block_super(sb);

	if (bdev_file)
		bdev_fput(bdev_file);
}

static struct file_system_type ext4_fs_type = {
	.owner = THIS_MODULE,
	.name = "ext4",
	.init_fs_context = ext4_init_fs_context,
	.parameters = ext4_param_specs,
	.kill_sb = ext4_kill_sb,
	.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
		    FS_LBS,
};
MODULE_ALIAS_FS("ext4");

static int __init ext4_init_fs(void)
{
	int err;

	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
	ext4_li_info = NULL;

	/* Build-time check for flags consistency */
	ext4_check_flag_values();

	err = ext4_init_es();
	if (err)
		return err;

	err = ext4_init_pending();
	if (err)
		goto out7;

	err = ext4_init_post_read_processing();
	if (err)
		goto out6;

	err = ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
out:
	unregister_as_ext2();
	unregister_as_ext3();
	ext4_fc_destroy_dentry_cache();
out05:
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_post_read_processing();
out6:
	ext4_exit_pending();
out7:
	ext4_exit_es();

	return err;
}

static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	ext4_fc_destroy_dentry_cache();
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)