1f5166768STheodore Ts'o // SPDX-License-Identifier: GPL-2.0 2ac27a0ecSDave Kleikamp /* 3617ba13bSMingming Cao * linux/fs/ext4/super.c 4ac27a0ecSDave Kleikamp * 5ac27a0ecSDave Kleikamp * Copyright (C) 1992, 1993, 1994, 1995 6ac27a0ecSDave Kleikamp * Remy Card (card@masi.ibp.fr) 7ac27a0ecSDave Kleikamp * Laboratoire MASI - Institut Blaise Pascal 8ac27a0ecSDave Kleikamp * Universite Pierre et Marie Curie (Paris VI) 9ac27a0ecSDave Kleikamp * 10ac27a0ecSDave Kleikamp * from 11ac27a0ecSDave Kleikamp * 12ac27a0ecSDave Kleikamp * linux/fs/minix/inode.c 13ac27a0ecSDave Kleikamp * 14ac27a0ecSDave Kleikamp * Copyright (C) 1991, 1992 Linus Torvalds 15ac27a0ecSDave Kleikamp * 16ac27a0ecSDave Kleikamp * Big-endian to little-endian byte-swapping/bitmaps by 17ac27a0ecSDave Kleikamp * David S. Miller (davem@caip.rutgers.edu), 1995 18ac27a0ecSDave Kleikamp */ 19ac27a0ecSDave Kleikamp 20ac27a0ecSDave Kleikamp #include <linux/module.h> 21ac27a0ecSDave Kleikamp #include <linux/string.h> 22ac27a0ecSDave Kleikamp #include <linux/fs.h> 23ac27a0ecSDave Kleikamp #include <linux/time.h> 24c5ca7c76STheodore Ts'o #include <linux/vmalloc.h> 25ac27a0ecSDave Kleikamp #include <linux/slab.h> 26ac27a0ecSDave Kleikamp #include <linux/init.h> 27ac27a0ecSDave Kleikamp #include <linux/blkdev.h> 2866114cadSTejun Heo #include <linux/backing-dev.h> 29ac27a0ecSDave Kleikamp #include <linux/parser.h> 30ac27a0ecSDave Kleikamp #include <linux/buffer_head.h> 31a5694255SChristoph Hellwig #include <linux/exportfs.h> 32ac27a0ecSDave Kleikamp #include <linux/vfs.h> 33ac27a0ecSDave Kleikamp #include <linux/random.h> 34ac27a0ecSDave Kleikamp #include <linux/mount.h> 35ac27a0ecSDave Kleikamp #include <linux/namei.h> 36ac27a0ecSDave Kleikamp #include <linux/quotaops.h> 37ac27a0ecSDave Kleikamp #include <linux/seq_file.h> 383197ebdbSTheodore Ts'o #include <linux/ctype.h> 391330593eSVignesh Babu #include <linux/log2.h> 40717d50e4SAndreas Dilger #include <linux/crc16.h> 41ef510424SDan Williams #include <linux/dax.h> 
427c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 43ee73f9a5SJeff Layton #include <linux/iversion.h> 44c83ad55eSGabriel Krisman Bertazi #include <linux/unicode.h> 45c6a564ffSChristoph Hellwig #include <linux/part_stat.h> 46bfff6873SLukas Czerner #include <linux/kthread.h> 47bfff6873SLukas Czerner #include <linux/freezer.h> 489a089b21SGabriel Krisman Bertazi #include <linux/fsnotify.h> 49e5a185c2SLukas Czerner #include <linux/fs_context.h> 50e5a185c2SLukas Czerner #include <linux/fs_parser.h> 51bfff6873SLukas Czerner 523dcf5451SChristoph Hellwig #include "ext4.h" 534a092d73STheodore Ts'o #include "ext4_extents.h" /* Needed for trace points definition */ 543dcf5451SChristoph Hellwig #include "ext4_jbd2.h" 55ac27a0ecSDave Kleikamp #include "xattr.h" 56ac27a0ecSDave Kleikamp #include "acl.h" 573661d286STheodore Ts'o #include "mballoc.h" 580c9ec4beSDarrick J. Wong #include "fsmap.h" 59ac27a0ecSDave Kleikamp 609bffad1eSTheodore Ts'o #define CREATE_TRACE_POINTS 619bffad1eSTheodore Ts'o #include <trace/events/ext4.h> 629bffad1eSTheodore Ts'o 630b75a840SLukas Czerner static struct ext4_lazy_init *ext4_li_info; 6459ebc7fdSZheng Yongjun static DEFINE_MUTEX(ext4_li_mtx); 65e294a537STheodore Ts'o static struct ratelimit_state ext4_mount_msg_ratelimit; 669f6200bbSTheodore Ts'o 67617ba13bSMingming Cao static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 68ac27a0ecSDave Kleikamp unsigned long journal_devnum); 692adf6da8STheodore Ts'o static int ext4_show_options(struct seq_file *seq, struct dentry *root); 702d01ddc8SJan Kara static void ext4_update_super(struct super_block *sb); 714392fbc4SJan Kara static int ext4_commit_super(struct super_block *sb); 7211215630SJan Kara static int ext4_mark_recovery_complete(struct super_block *sb, 73617ba13bSMingming Cao struct ext4_super_block *es); 7411215630SJan Kara static int ext4_clear_journal_err(struct super_block *sb, 75617ba13bSMingming Cao struct ext4_super_block *es); 76617ba13bSMingming Cao static int 
ext4_sync_fs(struct super_block *sb, int wait); 77617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); 78c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb); 79c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb); 802035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb); 812035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb); 82bfff6873SLukas Czerner static void ext4_destroy_lazyinit_thread(void); 83bfff6873SLukas Czerner static void ext4_unregister_li_request(struct super_block *sb); 848f1f7453SEric Sandeen static void ext4_clear_request_list(void); 85c6cb7e77SEric Whitney static struct inode *ext4_get_journal_inode(struct super_block *sb, 86c6cb7e77SEric Whitney unsigned int journal_inum); 87da812f61SLukas Czerner static int ext4_validate_options(struct fs_context *fc); 88b6bd2435SLukas Czerner static int ext4_check_opt_consistency(struct fs_context *fc, 89e6e268cbSLukas Czerner struct super_block *sb); 9085456054SEric Biggers static void ext4_apply_options(struct fs_context *fc, struct super_block *sb); 9102f960f8SLukas Czerner static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param); 92cebe85d5SLukas Czerner static int ext4_get_tree(struct fs_context *fc); 93cebe85d5SLukas Czerner static int ext4_reconfigure(struct fs_context *fc); 94cebe85d5SLukas Czerner static void ext4_fc_free(struct fs_context *fc); 95cebe85d5SLukas Czerner static int ext4_init_fs_context(struct fs_context *fc); 961489dffdSChristoph Hellwig static void ext4_kill_sb(struct super_block *sb); 97cebe85d5SLukas Czerner static const struct fs_parameter_spec ext4_param_specs[]; 98ac27a0ecSDave Kleikamp 99e74031fdSJan Kara /* 100e74031fdSJan Kara * Lock ordering 101e74031fdSJan Kara * 102e74031fdSJan Kara * page fault path: 103d4f5258eSJan Kara * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start 104d4f5258eSJan Kara * 
-> page lock -> i_data_sem (rw) 105e74031fdSJan Kara * 106e74031fdSJan Kara * buffered write path: 107c1e8d7c6SMichel Lespinasse * sb_start_write -> i_mutex -> mmap_lock 108e74031fdSJan Kara * sb_start_write -> i_mutex -> transaction start -> page lock -> 109e74031fdSJan Kara * i_data_sem (rw) 110e74031fdSJan Kara * 111e74031fdSJan Kara * truncate: 112d4f5258eSJan Kara * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) -> 113d4f5258eSJan Kara * page lock 114d4f5258eSJan Kara * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start -> 1151d39834fSNikolay Borisov * i_data_sem (rw) 116e74031fdSJan Kara * 117e74031fdSJan Kara * direct IO: 118c1e8d7c6SMichel Lespinasse * sb_start_write -> i_mutex -> mmap_lock 1191d39834fSNikolay Borisov * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw) 120e74031fdSJan Kara * 121e74031fdSJan Kara * writepages: 122e74031fdSJan Kara * transaction start -> page lock(s) -> i_data_sem (rw) 123e74031fdSJan Kara */ 124e74031fdSJan Kara 12502f960f8SLukas Czerner static const struct fs_context_operations ext4_context_ops = { 12602f960f8SLukas Czerner .parse_param = ext4_parse_param, 127cebe85d5SLukas Czerner .get_tree = ext4_get_tree, 128cebe85d5SLukas Czerner .reconfigure = ext4_reconfigure, 129cebe85d5SLukas Czerner .free = ext4_fc_free, 13002f960f8SLukas Czerner }; 13102f960f8SLukas Czerner 13202f960f8SLukas Czerner 133c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) 1342035e776STheodore Ts'o static struct file_system_type ext2_fs_type = { 1352035e776STheodore Ts'o .owner = THIS_MODULE, 1362035e776STheodore Ts'o .name = "ext2", 137cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 138cebe85d5SLukas Czerner .parameters = ext4_param_specs, 1391489dffdSChristoph Hellwig .kill_sb = ext4_kill_sb, 1402035e776STheodore Ts'o .fs_flags = FS_REQUIRES_DEV, 1412035e776STheodore Ts'o }; 1427f78e035SEric W. 
Biederman MODULE_ALIAS_FS("ext2"); 143fa7614ddSEric W. Biederman MODULE_ALIAS("ext2"); 1444b41828bSChristoph Hellwig #define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type) 1452035e776STheodore Ts'o #else 1462035e776STheodore Ts'o #define IS_EXT2_SB(sb) (0) 1472035e776STheodore Ts'o #endif 1482035e776STheodore Ts'o 1492035e776STheodore Ts'o 150ba69f9abSJan Kara static struct file_system_type ext3_fs_type = { 151ba69f9abSJan Kara .owner = THIS_MODULE, 152ba69f9abSJan Kara .name = "ext3", 153cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 154cebe85d5SLukas Czerner .parameters = ext4_param_specs, 1551489dffdSChristoph Hellwig .kill_sb = ext4_kill_sb, 156ba69f9abSJan Kara .fs_flags = FS_REQUIRES_DEV, 157ba69f9abSJan Kara }; 1587f78e035SEric W. Biederman MODULE_ALIAS_FS("ext3"); 159fa7614ddSEric W. Biederman MODULE_ALIAS("ext3"); 1604b41828bSChristoph Hellwig #define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type) 161bd81d8eeSLaurent Vivier 162fa491b14Szhangyi (F) 16367c0f556SBart Van Assche static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, 164fa491b14Szhangyi (F) bh_end_io_t *end_io) 165fa491b14Szhangyi (F) { 166fb265c9cSTheodore Ts'o /* 167fa491b14Szhangyi (F) * buffer's verified bit is no longer valid after reading from 168fa491b14Szhangyi (F) * disk again due to write out error, clear it to make sure we 169fa491b14Szhangyi (F) * recheck the buffer contents. 170fa491b14Szhangyi (F) */ 171fa491b14Szhangyi (F) clear_buffer_verified(bh); 172fa491b14Szhangyi (F) 173fa491b14Szhangyi (F) bh->b_end_io = end_io ? 
end_io : end_buffer_read_sync; 174fa491b14Szhangyi (F) get_bh(bh); 1751420c4a5SBart Van Assche submit_bh(REQ_OP_READ | op_flags, bh); 176fa491b14Szhangyi (F) } 177fa491b14Szhangyi (F) 17867c0f556SBart Van Assche void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags, 179fa491b14Szhangyi (F) bh_end_io_t *end_io) 180fa491b14Szhangyi (F) { 181fa491b14Szhangyi (F) BUG_ON(!buffer_locked(bh)); 182fa491b14Szhangyi (F) 183fa491b14Szhangyi (F) if (ext4_buffer_uptodate(bh)) { 184fa491b14Szhangyi (F) unlock_buffer(bh); 185fa491b14Szhangyi (F) return; 186fa491b14Szhangyi (F) } 187fa491b14Szhangyi (F) __ext4_read_bh(bh, op_flags, end_io); 188fa491b14Szhangyi (F) } 189fa491b14Szhangyi (F) 19067c0f556SBart Van Assche int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, bh_end_io_t *end_io) 191fa491b14Szhangyi (F) { 192fa491b14Szhangyi (F) BUG_ON(!buffer_locked(bh)); 193fa491b14Szhangyi (F) 194fa491b14Szhangyi (F) if (ext4_buffer_uptodate(bh)) { 195fa491b14Szhangyi (F) unlock_buffer(bh); 196fa491b14Szhangyi (F) return 0; 197fa491b14Szhangyi (F) } 198fa491b14Szhangyi (F) 199fa491b14Szhangyi (F) __ext4_read_bh(bh, op_flags, end_io); 200fa491b14Szhangyi (F) 201fa491b14Szhangyi (F) wait_on_buffer(bh); 202fa491b14Szhangyi (F) if (buffer_uptodate(bh)) 203fa491b14Szhangyi (F) return 0; 204fa491b14Szhangyi (F) return -EIO; 205fa491b14Szhangyi (F) } 206fa491b14Szhangyi (F) 20767c0f556SBart Van Assche int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait) 208fa491b14Szhangyi (F) { 2090b73284cSZhang Yi lock_buffer(bh); 2100b73284cSZhang Yi if (!wait) { 211fa491b14Szhangyi (F) ext4_read_bh_nowait(bh, op_flags, NULL); 212fa491b14Szhangyi (F) return 0; 213fa491b14Szhangyi (F) } 2140b73284cSZhang Yi return ext4_read_bh(bh, op_flags, NULL); 215fa491b14Szhangyi (F) } 216fa491b14Szhangyi (F) 217fb265c9cSTheodore Ts'o /* 2188394a6abSzhangyi (F) * This works like __bread_gfp() except it uses ERR_PTR for error 219fb265c9cSTheodore Ts'o * returns. 
Currently with sb_bread it's impossible to distinguish 220fb265c9cSTheodore Ts'o * between ENOMEM and EIO situations (since both result in a NULL 221fb265c9cSTheodore Ts'o * return. 222fb265c9cSTheodore Ts'o */ 2238394a6abSzhangyi (F) static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb, 22467c0f556SBart Van Assche sector_t block, 22567c0f556SBart Van Assche blk_opf_t op_flags, gfp_t gfp) 226fb265c9cSTheodore Ts'o { 2272d069c08Szhangyi (F) struct buffer_head *bh; 2282d069c08Szhangyi (F) int ret; 229fb265c9cSTheodore Ts'o 2308394a6abSzhangyi (F) bh = sb_getblk_gfp(sb, block, gfp); 231fb265c9cSTheodore Ts'o if (bh == NULL) 232fb265c9cSTheodore Ts'o return ERR_PTR(-ENOMEM); 233cf2834a5STheodore Ts'o if (ext4_buffer_uptodate(bh)) 234fb265c9cSTheodore Ts'o return bh; 2352d069c08Szhangyi (F) 2362d069c08Szhangyi (F) ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true); 2372d069c08Szhangyi (F) if (ret) { 238fb265c9cSTheodore Ts'o put_bh(bh); 2392d069c08Szhangyi (F) return ERR_PTR(ret); 2402d069c08Szhangyi (F) } 2412d069c08Szhangyi (F) return bh; 242fb265c9cSTheodore Ts'o } 243fb265c9cSTheodore Ts'o 2448394a6abSzhangyi (F) struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, 24567c0f556SBart Van Assche blk_opf_t op_flags) 2468394a6abSzhangyi (F) { 247*8a83ac54SMatthew Wilcox (Oracle) gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping, 248*8a83ac54SMatthew Wilcox (Oracle) ~__GFP_FS) | __GFP_MOVABLE; 249*8a83ac54SMatthew Wilcox (Oracle) 250*8a83ac54SMatthew Wilcox (Oracle) return __ext4_sb_bread_gfp(sb, block, op_flags, gfp); 2518394a6abSzhangyi (F) } 2528394a6abSzhangyi (F) 2538394a6abSzhangyi (F) struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, 2548394a6abSzhangyi (F) sector_t block) 2558394a6abSzhangyi (F) { 256*8a83ac54SMatthew Wilcox (Oracle) gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping, 257*8a83ac54SMatthew Wilcox (Oracle) ~__GFP_FS); 258*8a83ac54SMatthew Wilcox (Oracle) 
259*8a83ac54SMatthew Wilcox (Oracle) return __ext4_sb_bread_gfp(sb, block, 0, gfp); 2608394a6abSzhangyi (F) } 2618394a6abSzhangyi (F) 2625df1d412Szhangyi (F) void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) 2635df1d412Szhangyi (F) { 264e509ad4dSMatthew Wilcox (Oracle) struct buffer_head *bh = bdev_getblk(sb->s_bdev, block, 265e509ad4dSMatthew Wilcox (Oracle) sb->s_blocksize, GFP_NOWAIT); 2665df1d412Szhangyi (F) 2675df1d412Szhangyi (F) if (likely(bh)) { 2680b73284cSZhang Yi if (trylock_buffer(bh)) 2690b73284cSZhang Yi ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL); 2705df1d412Szhangyi (F) brelse(bh); 2715df1d412Szhangyi (F) } 272c197855eSStephen Hemminger } 273a9c47317SDarrick J. Wong 274a9c47317SDarrick J. Wong static int ext4_verify_csum_type(struct super_block *sb, 2759aa5d32bSDmitry Monakhov struct ext4_super_block *es) 276a9c47317SDarrick J. Wong { 277a9c47317SDarrick J. Wong if (!ext4_has_feature_metadata_csum(sb)) 278a9c47317SDarrick J. Wong return 1; 279a9c47317SDarrick J. Wong 280a9c47317SDarrick J. Wong return es->s_checksum_type == EXT4_CRC32C_CHKSUM; 28106db49e6STheodore Ts'o } 282a9c47317SDarrick J. Wong 283bbc605cdSLukas Czerner __le32 ext4_superblock_csum(struct super_block *sb, 284a9c47317SDarrick J. Wong struct ext4_super_block *es) 285a9c47317SDarrick J. Wong { 286a9c47317SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(sb); 287a9c47317SDarrick J. Wong int offset = offsetof(struct ext4_super_block, s_checksum); 288a9c47317SDarrick J. Wong __u32 csum; 289a9c47317SDarrick J. Wong 290a9c47317SDarrick J. Wong csum = ext4_chksum(sbi, ~0, (char *)es, offset); 291a9c47317SDarrick J. Wong 292a9c47317SDarrick J. Wong return cpu_to_le32(csum); 293a9c47317SDarrick J. Wong } 294a9c47317SDarrick J. Wong 295a9c47317SDarrick J. Wong static int ext4_superblock_csum_verify(struct super_block *sb, 296a9c47317SDarrick J. Wong struct ext4_super_block *es) 297a9c47317SDarrick J. Wong { 298a9c47317SDarrick J. 
Wong if (!ext4_has_metadata_csum(sb)) 299a9c47317SDarrick J. Wong return 1; 300a9c47317SDarrick J. Wong 301a9c47317SDarrick J. Wong return es->s_checksum == ext4_superblock_csum(sb, es); 302a9c47317SDarrick J. Wong } 303a9c47317SDarrick J. Wong 304a9c47317SDarrick J. Wong void ext4_superblock_csum_set(struct super_block *sb) 305a9c47317SDarrick J. Wong { 30606db49e6STheodore Ts'o struct ext4_super_block *es = EXT4_SB(sb)->s_es; 30706db49e6STheodore Ts'o 3089aa5d32bSDmitry Monakhov if (!ext4_has_metadata_csum(sb)) 309a9c47317SDarrick J. Wong return; 310a9c47317SDarrick J. Wong 311a9c47317SDarrick J. Wong es->s_checksum = ext4_superblock_csum(sb, es); 312a9c47317SDarrick J. Wong } 313a9c47317SDarrick J. Wong 3148fadc143SAlexandre Ratchov ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, 3158fadc143SAlexandre Ratchov struct ext4_group_desc *bg) 316bd81d8eeSLaurent Vivier { 3173a14589cSAneesh Kumar K.V return le32_to_cpu(bg->bg_block_bitmap_lo) | 3188fadc143SAlexandre Ratchov (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 3198fadc143SAlexandre Ratchov (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0); 320bd81d8eeSLaurent Vivier } 321bd81d8eeSLaurent Vivier 3228fadc143SAlexandre Ratchov ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, 3238fadc143SAlexandre Ratchov struct ext4_group_desc *bg) 324bd81d8eeSLaurent Vivier { 3255272f837SAneesh Kumar K.V return le32_to_cpu(bg->bg_inode_bitmap_lo) | 3268fadc143SAlexandre Ratchov (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 3278fadc143SAlexandre Ratchov (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0); 328bd81d8eeSLaurent Vivier } 329bd81d8eeSLaurent Vivier 3308fadc143SAlexandre Ratchov ext4_fsblk_t ext4_inode_table(struct super_block *sb, 3318fadc143SAlexandre Ratchov struct ext4_group_desc *bg) 332bd81d8eeSLaurent Vivier { 3335272f837SAneesh Kumar K.V return le32_to_cpu(bg->bg_inode_table_lo) | 3348fadc143SAlexandre Ratchov (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
3358fadc143SAlexandre Ratchov (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); 336bd81d8eeSLaurent Vivier } 337bd81d8eeSLaurent Vivier 338021b65bbSTheodore Ts'o __u32 ext4_free_group_clusters(struct super_block *sb, 339560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 340560671a0SAneesh Kumar K.V { 341560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_free_blocks_count_lo) | 342560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 343560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0); 344560671a0SAneesh Kumar K.V } 345560671a0SAneesh Kumar K.V 346560671a0SAneesh Kumar K.V __u32 ext4_free_inodes_count(struct super_block *sb, 347560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 348560671a0SAneesh Kumar K.V { 349560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_free_inodes_count_lo) | 350560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 351560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); 352560671a0SAneesh Kumar K.V } 353560671a0SAneesh Kumar K.V 354560671a0SAneesh Kumar K.V __u32 ext4_used_dirs_count(struct super_block *sb, 355560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 356560671a0SAneesh Kumar K.V { 357560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_used_dirs_count_lo) | 358560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 359560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0); 360560671a0SAneesh Kumar K.V } 361560671a0SAneesh Kumar K.V 362560671a0SAneesh Kumar K.V __u32 ext4_itable_unused_count(struct super_block *sb, 363560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 364560671a0SAneesh Kumar K.V { 365560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_itable_unused_lo) | 366560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
367560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0); 368560671a0SAneesh Kumar K.V } 369560671a0SAneesh Kumar K.V 3708fadc143SAlexandre Ratchov void ext4_block_bitmap_set(struct super_block *sb, 3718fadc143SAlexandre Ratchov struct ext4_group_desc *bg, ext4_fsblk_t blk) 372bd81d8eeSLaurent Vivier { 3733a14589cSAneesh Kumar K.V bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk); 3748fadc143SAlexandre Ratchov if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 3758fadc143SAlexandre Ratchov bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32); 376bd81d8eeSLaurent Vivier } 377bd81d8eeSLaurent Vivier 3788fadc143SAlexandre Ratchov void ext4_inode_bitmap_set(struct super_block *sb, 3798fadc143SAlexandre Ratchov struct ext4_group_desc *bg, ext4_fsblk_t blk) 380bd81d8eeSLaurent Vivier { 3815272f837SAneesh Kumar K.V bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk); 3828fadc143SAlexandre Ratchov if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 3838fadc143SAlexandre Ratchov bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32); 384bd81d8eeSLaurent Vivier } 385bd81d8eeSLaurent Vivier 3868fadc143SAlexandre Ratchov void ext4_inode_table_set(struct super_block *sb, 3878fadc143SAlexandre Ratchov struct ext4_group_desc *bg, ext4_fsblk_t blk) 388bd81d8eeSLaurent Vivier { 3895272f837SAneesh Kumar K.V bg->bg_inode_table_lo = cpu_to_le32((u32)blk); 3908fadc143SAlexandre Ratchov if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 3918fadc143SAlexandre Ratchov bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); 392bd81d8eeSLaurent Vivier } 393bd81d8eeSLaurent Vivier 394021b65bbSTheodore Ts'o void ext4_free_group_clusters_set(struct super_block *sb, 395560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 396560671a0SAneesh Kumar K.V { 397560671a0SAneesh Kumar K.V bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count); 398560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 399560671a0SAneesh Kumar K.V bg->bg_free_blocks_count_hi = 
cpu_to_le16(count >> 16); 400560671a0SAneesh Kumar K.V } 401560671a0SAneesh Kumar K.V 402560671a0SAneesh Kumar K.V void ext4_free_inodes_set(struct super_block *sb, 403560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 404560671a0SAneesh Kumar K.V { 405560671a0SAneesh Kumar K.V bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); 406560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 407560671a0SAneesh Kumar K.V bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); 408560671a0SAneesh Kumar K.V } 409560671a0SAneesh Kumar K.V 410560671a0SAneesh Kumar K.V void ext4_used_dirs_set(struct super_block *sb, 411560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 412560671a0SAneesh Kumar K.V { 413560671a0SAneesh Kumar K.V bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count); 414560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 415560671a0SAneesh Kumar K.V bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16); 416560671a0SAneesh Kumar K.V } 417560671a0SAneesh Kumar K.V 418560671a0SAneesh Kumar K.V void ext4_itable_unused_set(struct super_block *sb, 419560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 420560671a0SAneesh Kumar K.V { 421560671a0SAneesh Kumar K.V bg->bg_itable_unused_lo = cpu_to_le16((__u16)count); 422560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 423560671a0SAneesh Kumar K.V bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); 424560671a0SAneesh Kumar K.V } 425560671a0SAneesh Kumar K.V 426c92dc856SJan Kara static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now) 4276a0678a7SArnd Bergmann { 4286a0678a7SArnd Bergmann now = clamp_val(now, 0, (1ull << 40) - 1); 4296a0678a7SArnd Bergmann 4306a0678a7SArnd Bergmann *lo = cpu_to_le32(lower_32_bits(now)); 4316a0678a7SArnd Bergmann *hi = upper_32_bits(now); 4326a0678a7SArnd Bergmann } 4336a0678a7SArnd Bergmann 4346a0678a7SArnd Bergmann static time64_t 
__ext4_get_tstamp(__le32 *lo, __u8 *hi) 4356a0678a7SArnd Bergmann { 4366a0678a7SArnd Bergmann return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo); 4376a0678a7SArnd Bergmann } 4386a0678a7SArnd Bergmann #define ext4_update_tstamp(es, tstamp) \ 439c92dc856SJan Kara __ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \ 440c92dc856SJan Kara ktime_get_real_seconds()) 4416a0678a7SArnd Bergmann #define ext4_get_tstamp(es, tstamp) \ 4426a0678a7SArnd Bergmann __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi) 443d3d1faf6SCurt Wohlgemuth 444ff0722deSVitaliy Kuznetsov #define EXT4_SB_REFRESH_INTERVAL_SEC (3600) /* seconds (1 hour) */ 445ff0722deSVitaliy Kuznetsov #define EXT4_SB_REFRESH_INTERVAL_KB (16384) /* kilobytes (16MB) */ 446ff0722deSVitaliy Kuznetsov 447ff0722deSVitaliy Kuznetsov /* 448ff0722deSVitaliy Kuznetsov * The ext4_maybe_update_superblock() function checks and updates the 449ff0722deSVitaliy Kuznetsov * superblock if needed. 450ff0722deSVitaliy Kuznetsov * 451ff0722deSVitaliy Kuznetsov * This function is designed to update the on-disk superblock only under 452ff0722deSVitaliy Kuznetsov * certain conditions to prevent excessive disk writes and unnecessary 453ff0722deSVitaliy Kuznetsov * waking of the disk from sleep. The superblock will be updated if: 454ff0722deSVitaliy Kuznetsov * 1. More than an hour has passed since the last superblock update, and 455ff0722deSVitaliy Kuznetsov * 2. More than 16MB have been written since the last superblock update. 
456ff0722deSVitaliy Kuznetsov * 457ff0722deSVitaliy Kuznetsov * @sb: The superblock 458ff0722deSVitaliy Kuznetsov */ 459ff0722deSVitaliy Kuznetsov static void ext4_maybe_update_superblock(struct super_block *sb) 460ff0722deSVitaliy Kuznetsov { 461ff0722deSVitaliy Kuznetsov struct ext4_sb_info *sbi = EXT4_SB(sb); 462ff0722deSVitaliy Kuznetsov struct ext4_super_block *es = sbi->s_es; 463ff0722deSVitaliy Kuznetsov journal_t *journal = sbi->s_journal; 464ff0722deSVitaliy Kuznetsov time64_t now; 465ff0722deSVitaliy Kuznetsov __u64 last_update; 466ff0722deSVitaliy Kuznetsov __u64 lifetime_write_kbytes; 467ff0722deSVitaliy Kuznetsov __u64 diff_size; 468ff0722deSVitaliy Kuznetsov 469ff0722deSVitaliy Kuznetsov if (sb_rdonly(sb) || !(sb->s_flags & SB_ACTIVE) || 470ff0722deSVitaliy Kuznetsov !journal || (journal->j_flags & JBD2_UNMOUNT)) 471ff0722deSVitaliy Kuznetsov return; 472ff0722deSVitaliy Kuznetsov 473ff0722deSVitaliy Kuznetsov now = ktime_get_real_seconds(); 474ff0722deSVitaliy Kuznetsov last_update = ext4_get_tstamp(es, s_wtime); 475ff0722deSVitaliy Kuznetsov 476ff0722deSVitaliy Kuznetsov if (likely(now - last_update < EXT4_SB_REFRESH_INTERVAL_SEC)) 477ff0722deSVitaliy Kuznetsov return; 478ff0722deSVitaliy Kuznetsov 479ff0722deSVitaliy Kuznetsov lifetime_write_kbytes = sbi->s_kbytes_written + 480ff0722deSVitaliy Kuznetsov ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) - 481ff0722deSVitaliy Kuznetsov sbi->s_sectors_written_start) >> 1); 482ff0722deSVitaliy Kuznetsov 483ff0722deSVitaliy Kuznetsov /* Get the number of kilobytes not written to disk to account 484ff0722deSVitaliy Kuznetsov * for statistics and compare with a multiple of 16 MB. This 485ff0722deSVitaliy Kuznetsov * is used to determine when the next superblock commit should 486ff0722deSVitaliy Kuznetsov * occur (i.e. not more often than once per 16MB if there was 487ff0722deSVitaliy Kuznetsov * less written in an hour). 
488ff0722deSVitaliy Kuznetsov */ 489ff0722deSVitaliy Kuznetsov diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written); 490ff0722deSVitaliy Kuznetsov 491ff0722deSVitaliy Kuznetsov if (diff_size > EXT4_SB_REFRESH_INTERVAL_KB) 492bb15cea2STheodore Ts'o schedule_work(&EXT4_SB(sb)->s_sb_upd_work); 493ff0722deSVitaliy Kuznetsov } 494ff0722deSVitaliy Kuznetsov 495bdfe0cbdSTheodore Ts'o /* 496bdfe0cbdSTheodore Ts'o * The del_gendisk() function uninitializes the disk-specific data 497bdfe0cbdSTheodore Ts'o * structures, including the bdi structure, without telling anyone 498bdfe0cbdSTheodore Ts'o * else. Once this happens, any attempt to call mark_buffer_dirty() 499bdfe0cbdSTheodore Ts'o * (for example, by ext4_commit_super), will cause a kernel OOPS. 500bdfe0cbdSTheodore Ts'o * This is a kludge to prevent these oops until we can put in a proper 501bdfe0cbdSTheodore Ts'o * hook in del_gendisk() to inform the VFS and file system layers. 502bdfe0cbdSTheodore Ts'o */ 503bdfe0cbdSTheodore Ts'o static int block_device_ejected(struct super_block *sb) 504bdfe0cbdSTheodore Ts'o { 505bdfe0cbdSTheodore Ts'o struct inode *bd_inode = sb->s_bdev->bd_inode; 506bdfe0cbdSTheodore Ts'o struct backing_dev_info *bdi = inode_to_bdi(bd_inode); 507bdfe0cbdSTheodore Ts'o 508bdfe0cbdSTheodore Ts'o return bdi->dev == NULL; 509bdfe0cbdSTheodore Ts'o } 510bdfe0cbdSTheodore Ts'o 51118aadd47SBobi Jam static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) 51218aadd47SBobi Jam { 51318aadd47SBobi Jam struct super_block *sb = journal->j_private; 51418aadd47SBobi Jam struct ext4_sb_info *sbi = EXT4_SB(sb); 51518aadd47SBobi Jam int error = is_journal_aborted(journal); 5165d3ee208SDmitry Monakhov struct ext4_journal_cb_entry *jce; 51718aadd47SBobi Jam 5185d3ee208SDmitry Monakhov BUG_ON(txn->t_state == T_FINISHED); 519a0154344SDaeho Jeong 520a0154344SDaeho Jeong ext4_process_freed_data(sb, txn->t_tid); 521ff0722deSVitaliy Kuznetsov ext4_maybe_update_superblock(sb); 
522a0154344SDaeho Jeong 52318aadd47SBobi Jam spin_lock(&sbi->s_md_lock); 5245d3ee208SDmitry Monakhov while (!list_empty(&txn->t_private_list)) { 5255d3ee208SDmitry Monakhov jce = list_entry(txn->t_private_list.next, 5265d3ee208SDmitry Monakhov struct ext4_journal_cb_entry, jce_list); 52718aadd47SBobi Jam list_del_init(&jce->jce_list); 52818aadd47SBobi Jam spin_unlock(&sbi->s_md_lock); 52918aadd47SBobi Jam jce->jce_func(sb, jce, error); 53018aadd47SBobi Jam spin_lock(&sbi->s_md_lock); 53118aadd47SBobi Jam } 53218aadd47SBobi Jam spin_unlock(&sbi->s_md_lock); 53318aadd47SBobi Jam } 5341c13d5c0STheodore Ts'o 535afb585a9SMauricio Faria de Oliveira /* 536afb585a9SMauricio Faria de Oliveira * This writepage callback for write_cache_pages() 537afb585a9SMauricio Faria de Oliveira * takes care of a few cases after page cleaning. 538afb585a9SMauricio Faria de Oliveira * 539afb585a9SMauricio Faria de Oliveira * write_cache_pages() already checks for dirty pages 540afb585a9SMauricio Faria de Oliveira * and calls clear_page_dirty_for_io(), which we want, 541afb585a9SMauricio Faria de Oliveira * to write protect the pages. 542afb585a9SMauricio Faria de Oliveira * 543afb585a9SMauricio Faria de Oliveira * However, we may have to redirty a page (see below.) 
544afb585a9SMauricio Faria de Oliveira */ 545d585bdbeSMatthew Wilcox (Oracle) static int ext4_journalled_writepage_callback(struct folio *folio, 546afb585a9SMauricio Faria de Oliveira struct writeback_control *wbc, 547afb585a9SMauricio Faria de Oliveira void *data) 548afb585a9SMauricio Faria de Oliveira { 549afb585a9SMauricio Faria de Oliveira transaction_t *transaction = (transaction_t *) data; 550afb585a9SMauricio Faria de Oliveira struct buffer_head *bh, *head; 551afb585a9SMauricio Faria de Oliveira struct journal_head *jh; 552afb585a9SMauricio Faria de Oliveira 553d585bdbeSMatthew Wilcox (Oracle) bh = head = folio_buffers(folio); 554afb585a9SMauricio Faria de Oliveira do { 555afb585a9SMauricio Faria de Oliveira /* 556afb585a9SMauricio Faria de Oliveira * We have to redirty a page in these cases: 557afb585a9SMauricio Faria de Oliveira * 1) If buffer is dirty, it means the page was dirty because it 558afb585a9SMauricio Faria de Oliveira * contains a buffer that needs checkpointing. So the dirty bit 559afb585a9SMauricio Faria de Oliveira * needs to be preserved so that checkpointing writes the buffer 560afb585a9SMauricio Faria de Oliveira * properly. 561afb585a9SMauricio Faria de Oliveira * 2) If buffer is not part of the committing transaction 562afb585a9SMauricio Faria de Oliveira * (we may have just accidentally come across this buffer because 563afb585a9SMauricio Faria de Oliveira * inode range tracking is not exact) or if the currently running 564afb585a9SMauricio Faria de Oliveira * transaction already contains this buffer as well, dirty bit 565afb585a9SMauricio Faria de Oliveira * needs to be preserved so that the buffer gets writeprotected 566afb585a9SMauricio Faria de Oliveira * properly on running transaction's commit. 
567afb585a9SMauricio Faria de Oliveira */ 568afb585a9SMauricio Faria de Oliveira jh = bh2jh(bh); 569afb585a9SMauricio Faria de Oliveira if (buffer_dirty(bh) || 570afb585a9SMauricio Faria de Oliveira (jh && (jh->b_transaction != transaction || 571afb585a9SMauricio Faria de Oliveira jh->b_next_transaction))) { 572d585bdbeSMatthew Wilcox (Oracle) folio_redirty_for_writepage(wbc, folio); 573afb585a9SMauricio Faria de Oliveira goto out; 574afb585a9SMauricio Faria de Oliveira } 575afb585a9SMauricio Faria de Oliveira } while ((bh = bh->b_this_page) != head); 576afb585a9SMauricio Faria de Oliveira 577afb585a9SMauricio Faria de Oliveira out: 578afb585a9SMauricio Faria de Oliveira return AOP_WRITEPAGE_ACTIVATE; 579afb585a9SMauricio Faria de Oliveira } 580afb585a9SMauricio Faria de Oliveira 581afb585a9SMauricio Faria de Oliveira static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode) 582afb585a9SMauricio Faria de Oliveira { 583afb585a9SMauricio Faria de Oliveira struct address_space *mapping = jinode->i_vfs_inode->i_mapping; 584afb585a9SMauricio Faria de Oliveira struct writeback_control wbc = { 585afb585a9SMauricio Faria de Oliveira .sync_mode = WB_SYNC_ALL, 586afb585a9SMauricio Faria de Oliveira .nr_to_write = LONG_MAX, 587afb585a9SMauricio Faria de Oliveira .range_start = jinode->i_dirty_start, 588afb585a9SMauricio Faria de Oliveira .range_end = jinode->i_dirty_end, 589afb585a9SMauricio Faria de Oliveira }; 590afb585a9SMauricio Faria de Oliveira 591afb585a9SMauricio Faria de Oliveira return write_cache_pages(mapping, &wbc, 592afb585a9SMauricio Faria de Oliveira ext4_journalled_writepage_callback, 593afb585a9SMauricio Faria de Oliveira jinode->i_transaction); 594afb585a9SMauricio Faria de Oliveira } 595afb585a9SMauricio Faria de Oliveira 596afb585a9SMauricio Faria de Oliveira static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode) 597afb585a9SMauricio Faria de Oliveira { 598afb585a9SMauricio Faria de Oliveira int ret; 
/*
 * jbd2 j_finish_inode_data_buffers hook: wait for outstanding data I/O.
 * For data=journal inodes there is nothing to wait for, since the submit
 * hook above only write-protected buffers instead of issuing writeback.
 */
static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

/* True when the machine is halting/powering off/restarting. */
static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

/* Maps a kernel errno value to the EXT4_ERR_* code stored on disk. */
struct ext4_err_translation {
	int code;	/* EXT4_ERR_* on-disk error code */
	int errno;	/* corresponding kernel errno */
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

/* Table of all errno values that have a dedicated on-disk error code. */
static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};
63702a7780eSJan Kara EXT4_ERR_TRANSLATE(EROFS), 63802a7780eSJan Kara EXT4_ERR_TRANSLATE(EFBIG), 63902a7780eSJan Kara EXT4_ERR_TRANSLATE(EEXIST), 64002a7780eSJan Kara EXT4_ERR_TRANSLATE(ERANGE), 64102a7780eSJan Kara EXT4_ERR_TRANSLATE(EOVERFLOW), 64202a7780eSJan Kara EXT4_ERR_TRANSLATE(EBUSY), 64302a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOTDIR), 64402a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOTEMPTY), 64502a7780eSJan Kara EXT4_ERR_TRANSLATE(ESHUTDOWN), 64602a7780eSJan Kara EXT4_ERR_TRANSLATE(EFAULT), 64702a7780eSJan Kara }; 64802a7780eSJan Kara 64902a7780eSJan Kara static int ext4_errno_to_code(int errno) 65002a7780eSJan Kara { 65102a7780eSJan Kara int i; 65202a7780eSJan Kara 65302a7780eSJan Kara for (i = 0; i < ARRAY_SIZE(err_translation); i++) 65402a7780eSJan Kara if (err_translation[i].errno == errno) 65502a7780eSJan Kara return err_translation[i].code; 65602a7780eSJan Kara return EXT4_ERR_UNKNOWN; 65702a7780eSJan Kara } 65802a7780eSJan Kara 6592d01ddc8SJan Kara static void save_error_info(struct super_block *sb, int error, 66040676623SJan Kara __u32 ino, __u64 block, 66140676623SJan Kara const char *func, unsigned int line) 66240676623SJan Kara { 663c92dc856SJan Kara struct ext4_sb_info *sbi = EXT4_SB(sb); 66440676623SJan Kara 66502a7780eSJan Kara /* We default to EFSCORRUPTED error... 
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.  On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 *
 * If force_ro is set, we unconditionally force the filesystem into an
 * ABORT|READONLY state, unless the error response on the fs has been set to
 * panic in which case we take the easy way out and panic immediately. This is
 * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
 * at a critical moment in log management.
 */
static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
			      __u32 ino, __u64 block,
			      const char *func, unsigned int line)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;
	/* errors=continue keeps the fs writable unless force_ro overrides. */
	bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	if (test_opt(sb, WARN_ON_ERROR))
		WARN_ON_ONCE(1);

	/* Not continuing and still writable: shut down and abort the journal. */
	if (!continue_fs && !sb_rdonly(sb)) {
		set_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}

	if (!bdev_read_only(sb->s_bdev)) {
		save_error_info(sb, error, ino, block, func, line);
		/*
		 * In case the fs should keep running, we need to writeout
		 * superblock through the journal. Due to lock ordering
		 * constraints, it may not be safe to do it right here so we
		 * defer superblock flushing to a workqueue.
		 */
		if (continue_fs && journal)
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		else
			ext4_commit_super(sb);
	}

	/*
	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
	 * could panic during 'reboot -f' as the underlying device got already
	 * disabled.
	 */
	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
		panic("EXT4-fs (device %s): panic forced after error\n",
		      sb->s_id);
	}

	/* Already read-only, or allowed to keep running: nothing more to do. */
	if (sb_rdonly(sb) || continue_fs)
		return;

	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
	/*
	 * Make sure updated value of ->s_mount_flags will be visible before
	 * ->s_flags update
	 */
	smp_wmb();
	sb->s_flags |= SB_RDONLY;
}
7322d01ddc8SJan Kara */ 733bb9464e0Syangerkun if (continue_fs && journal) 734bb15cea2STheodore Ts'o schedule_work(&EXT4_SB(sb)->s_sb_upd_work); 7352d01ddc8SJan Kara else 7362d01ddc8SJan Kara ext4_commit_super(sb); 7372d01ddc8SJan Kara } 7382d01ddc8SJan Kara 7391dc1097fSJan Kara /* 7401dc1097fSJan Kara * We force ERRORS_RO behavior when system is rebooting. Otherwise we 7411dc1097fSJan Kara * could panic during 'reboot -f' as the underlying device got already 7421dc1097fSJan Kara * disabled. 7431dc1097fSJan Kara */ 744014c9caaSJan Kara if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) { 745617ba13bSMingming Cao panic("EXT4-fs (device %s): panic forced after error\n", 746ac27a0ecSDave Kleikamp sb->s_id); 747ac27a0ecSDave Kleikamp } 748ac2f7ca5SYe Bin 749ac2f7ca5SYe Bin if (sb_rdonly(sb) || continue_fs) 750ac2f7ca5SYe Bin return; 751ac2f7ca5SYe Bin 752014c9caaSJan Kara ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); 753014c9caaSJan Kara /* 754014c9caaSJan Kara * Make sure updated value of ->s_mount_flags will be visible before 755014c9caaSJan Kara * ->s_flags update 756014c9caaSJan Kara */ 757014c9caaSJan Kara smp_wmb(); 758014c9caaSJan Kara sb->s_flags |= SB_RDONLY; 7594327ba52SDaeho Jeong } 760ac27a0ecSDave Kleikamp 761bb15cea2STheodore Ts'o static void update_super_work(struct work_struct *work) 762c92dc856SJan Kara { 763c92dc856SJan Kara struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info, 764bb15cea2STheodore Ts'o s_sb_upd_work); 7652d01ddc8SJan Kara journal_t *journal = sbi->s_journal; 7662d01ddc8SJan Kara handle_t *handle; 767c92dc856SJan Kara 7682d01ddc8SJan Kara /* 7692d01ddc8SJan Kara * If the journal is still running, we have to write out superblock 7702d01ddc8SJan Kara * through the journal to avoid collisions of other journalled sb 7712d01ddc8SJan Kara * updates. 
7722d01ddc8SJan Kara * 7732d01ddc8SJan Kara * We use directly jbd2 functions here to avoid recursing back into 7742d01ddc8SJan Kara * ext4 error handling code during handling of previous errors. 7752d01ddc8SJan Kara */ 7762d01ddc8SJan Kara if (!sb_rdonly(sbi->s_sb) && journal) { 777558d6450SYe Bin struct buffer_head *sbh = sbi->s_sbh; 778ff0722deSVitaliy Kuznetsov bool call_notify_err; 7792d01ddc8SJan Kara handle = jbd2_journal_start(journal, 1); 7802d01ddc8SJan Kara if (IS_ERR(handle)) 7812d01ddc8SJan Kara goto write_directly; 782558d6450SYe Bin if (jbd2_journal_get_write_access(handle, sbh)) { 7832d01ddc8SJan Kara jbd2_journal_stop(handle); 7842d01ddc8SJan Kara goto write_directly; 7852d01ddc8SJan Kara } 786ff0722deSVitaliy Kuznetsov 787ff0722deSVitaliy Kuznetsov if (sbi->s_add_error_count > 0) 788ff0722deSVitaliy Kuznetsov call_notify_err = true; 789ff0722deSVitaliy Kuznetsov 7902d01ddc8SJan Kara ext4_update_super(sbi->s_sb); 791558d6450SYe Bin if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { 792558d6450SYe Bin ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to " 793558d6450SYe Bin "superblock detected"); 794558d6450SYe Bin clear_buffer_write_io_error(sbh); 795558d6450SYe Bin set_buffer_uptodate(sbh); 796558d6450SYe Bin } 797558d6450SYe Bin 798558d6450SYe Bin if (jbd2_journal_dirty_metadata(handle, sbh)) { 7992d01ddc8SJan Kara jbd2_journal_stop(handle); 8002d01ddc8SJan Kara goto write_directly; 8012d01ddc8SJan Kara } 8022d01ddc8SJan Kara jbd2_journal_stop(handle); 803ff0722deSVitaliy Kuznetsov 804ff0722deSVitaliy Kuznetsov if (call_notify_err) 805d578b994SJonathan Davies ext4_notify_error_sysfs(sbi); 806ff0722deSVitaliy Kuznetsov 8072d01ddc8SJan Kara return; 8082d01ddc8SJan Kara } 8092d01ddc8SJan Kara write_directly: 8102d01ddc8SJan Kara /* 8112d01ddc8SJan Kara * Write through journal failed. Write sb directly to get error info 8122d01ddc8SJan Kara * out and hope for the best. 
/* Rate-limit gate shared by all "EXT4-fs error" printk paths below. */
#define ext4_error_ratelimit(sb)					\
		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
			     "EXT4-fs error")

/*
 * Report a filesystem-level error: log it (rate-limited), notify fsnotify
 * listeners, and run the mount-option-dependent error response via
 * ext4_handle_error().  No-op once the fs is already shut down.
 */
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, bool force_ro, int error, __u64 block,
		  const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT
		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
		       sb->s_id, function, line, current->comm, &vaf);
		va_end(args);
	}
	/* error == 0 means "unspecified corruption" for listeners too. */
	fsnotify_sb_error(sb, NULL, error ? error : EFSCORRUPTED);

	ext4_handle_error(sb, force_ro, error, 0, block, function, line);
}

/*
 * As __ext4_error(), but attributed to a specific inode (and optionally a
 * block within it).  Never forces read-only by itself.
 */
void __ext4_error_inode(struct inode *inode, const char *function,
			unsigned int line, ext4_fsblk_t block, int error,
			const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: block %llu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, &vaf);
		else
			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
			       "inode #%lu: comm %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, error ? error : EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
			  function, line);
}

/*
 * As __ext4_error_inode(), but for errors detected through a struct file;
 * additionally logs the file's path (best effort, 80-byte buffer).
 */
void __ext4_error_file(struct file *file, const char *function,
		       unsigned int line, ext4_fsblk_t block,
		       const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct inode *inode = file_inode(file);
	char pathname[80], *path;

	if (unlikely(ext4_forced_shutdown(inode->i_sb)))
		return;

	trace_ext4_error(inode->i_sb, function, line);
	if (ext4_error_ratelimit(inode->i_sb)) {
		path = file_path(file, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		if (block)
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "block %llu: comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       block, current->comm, path, &vaf);
		else
			printk(KERN_CRIT
			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
			       "comm %s: path %s: %pV\n",
			       inode->i_sb->s_id, function, line, inode->i_ino,
			       current->comm, path, &vaf);
		va_end(args);
	}
	fsnotify_sb_error(inode->i_sb, inode, EFSCORRUPTED);

	ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
			  function, line);
}
/*
 * Translate an errno into a human-readable string for log messages.
 * Returns a static string for known codes; for unknown codes, formats
 * "error %d" into the caller-supplied nbuf (if any) and returns that;
 * otherwise returns NULL.
 */
const char *ext4_decode_error(struct super_block *sb, int errno,
			      char nbuf[16])
{
	char *errstr = NULL;

	switch (errno) {
	case -EFSCORRUPTED:
		errstr = "Corrupt filesystem";
		break;
	case -EFSBADCRC:
		errstr = "Filesystem failed CRC";
		break;
	case -EIO:
		errstr = "IO failure";
		break;
	case -ENOMEM:
		errstr = "Out of memory";
		break;
	case -EROFS:
		/* sb may be NULL when called before the sb is fully set up. */
		if (!sb || (EXT4_SB(sb)->s_journal &&
			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
			errstr = "Journal has aborted";
		else
			errstr = "Readonly filesystem";
		break;
	default:
		/* If the caller passed in an extra buffer for unknown
		 * errors, textualise them now.  Else we just return
		 * NULL. */
		if (nbuf) {
			/* Check for truncated error codes... */
			/*
			 * NOTE(review): snprintf returns the length that
			 * *would* have been written, so ">= 0" does not
			 * actually reject truncated output — a truncated
			 * nbuf is still returned.  Longstanding behavior;
			 * confirm before changing.
			 */
			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
				errstr = nbuf;
		}
		break;
	}

	return errstr;
}

/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response.
 */
/*
 * Standard error response for errno values returned by journaling
 * functions: log a decoded message (rate-limited), notify fsnotify, and
 * run ext4_handle_error().  Note errno is negative on entry and negated
 * when recorded.
 */
void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	if (unlikely(ext4_forced_shutdown(sb)))
		return;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
		return;

	if (ext4_error_ratelimit(sb)) {
		errstr = ext4_decode_error(sb, errno, nbuf);
		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
		       sb->s_id, function, line, errstr);
	}
	fsnotify_sb_error(sb, NULL, errno ? errno : EFSCORRUPTED);

	ext4_handle_error(sb, false, -errno, 0, 0, function, line);
}

/*
 * General-purpose, rate-limited printk helper for ext4 messages.
 * prefix is a KERN_* level string; sb may be NULL (e.g. module-level
 * messages), in which case no device name is printed and no rate
 * limiting is applied.
 */
void __ext4_msg(struct super_block *sb,
		const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (sb) {
		atomic_inc(&EXT4_SB(sb)->s_msg_count);
		if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state),
				  "EXT4-fs"))
			return;
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	else
		printk("%sEXT4-fs: %pV\n", prefix, &vaf);
	va_end(args);
}

/* Count the warning and return nonzero if it may be printed now. */
static int ext4_warning_ratelimit(struct super_block *sb)
{
	atomic_inc(&EXT4_SB(sb)->s_warning_count);
	return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
			    "EXT4-fs warning");
}

/*
 * Rate-limited KERN_WARNING message attributed to a source location.
 * Unlike the __ext4_error*() family, this does not alter fs state.
 */
void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}
/*
 * As __ext4_warning(), but attributed to a specific inode; also logs the
 * current task's comm.  Does not alter fs state.
 */
void __ext4_warning_inode(const struct inode *inode, const char *function,
			  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!ext4_warning_ratelimit(inode->i_sb))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
	       function, line, inode->i_ino, current->comm, &vaf);
	va_end(args);
}
/*
 * Error reporting for callers that hold a block-group lock.  In the
 * ERRORS_CONT case the error is recorded and the deferred sb update is
 * scheduled without dropping the lock.  Otherwise the group lock is
 * dropped around ext4_handle_error() (which may sleep/abort the journal)
 * and reacquired before returning — see the sparse annotations below.
 */
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;

	if (unlikely(ext4_forced_shutdown(sb)))
		return;

	trace_ext4_error(sb, function, line);
	if (ext4_error_ratelimit(sb)) {
		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
		       sb->s_id, function, line, grp);
		if (ino)
			printk(KERN_CONT "inode %lu: ", ino);
		if (block)
			printk(KERN_CONT "block %llu:",
			       (unsigned long long) block);
		printk(KERN_CONT "%pV\n", &vaf);
		va_end(args);
	}

	if (test_opt(sb, ERRORS_CONT)) {
		if (test_opt(sb, WARN_ON_ERROR))
			WARN_ON_ONCE(1);
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		if (!bdev_read_only(sb->s_bdev)) {
			save_error_info(sb, EFSCORRUPTED, ino, block, function,
					line);
			/* Deferred: can't write the sb under the group lock. */
			schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
		}
		return;
	}
	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might what to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
case; relocking the group 10975d1b1b3fSAneesh Kumar K.V * may be dangerous, but nothing bad will happen since the 10985d1b1b3fSAneesh Kumar K.V * filesystem will have already been marked read/only and the 10995d1b1b3fSAneesh Kumar K.V * journal has been aborted. We return 1 as a hint to callers 11005d1b1b3fSAneesh Kumar K.V * who might what to use the return value from 110125985edcSLucas De Marchi * ext4_grp_locked_error() to distinguish between the 11025d1b1b3fSAneesh Kumar K.V * ERRORS_CONT and ERRORS_RO case, and perhaps return more 11035d1b1b3fSAneesh Kumar K.V * aggressively from the ext4 function in question, with a 11045d1b1b3fSAneesh Kumar K.V * more appropriate error code. 11055d1b1b3fSAneesh Kumar K.V */ 11065d1b1b3fSAneesh Kumar K.V ext4_lock_group(sb, grp); 11075d1b1b3fSAneesh Kumar K.V return; 11085d1b1b3fSAneesh Kumar K.V } 11095d1b1b3fSAneesh Kumar K.V 1110db79e6d1SWang Shilong void ext4_mark_group_bitmap_corrupted(struct super_block *sb, 1111db79e6d1SWang Shilong ext4_group_t group, 1112db79e6d1SWang Shilong unsigned int flags) 1113db79e6d1SWang Shilong { 1114db79e6d1SWang Shilong struct ext4_sb_info *sbi = EXT4_SB(sb); 1115db79e6d1SWang Shilong struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1116db79e6d1SWang Shilong struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); 11179af0b3d1SWang Shilong int ret; 1118db79e6d1SWang Shilong 11195354b2afSTheodore Ts'o if (!grp || !gdp) 11205354b2afSTheodore Ts'o return; 11219af0b3d1SWang Shilong if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) { 11229af0b3d1SWang Shilong ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, 11239af0b3d1SWang Shilong &grp->bb_state); 11249af0b3d1SWang Shilong if (!ret) 1125db79e6d1SWang Shilong percpu_counter_sub(&sbi->s_freeclusters_counter, 1126db79e6d1SWang Shilong grp->bb_free); 1127db79e6d1SWang Shilong } 1128db79e6d1SWang Shilong 11299af0b3d1SWang Shilong if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) { 11309af0b3d1SWang Shilong ret = 
ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, 11319af0b3d1SWang Shilong &grp->bb_state); 11329af0b3d1SWang Shilong if (!ret && gdp) { 1133db79e6d1SWang Shilong int count; 1134db79e6d1SWang Shilong 1135db79e6d1SWang Shilong count = ext4_free_inodes_count(sb, gdp); 1136db79e6d1SWang Shilong percpu_counter_sub(&sbi->s_freeinodes_counter, 1137db79e6d1SWang Shilong count); 1138db79e6d1SWang Shilong } 1139db79e6d1SWang Shilong } 1140db79e6d1SWang Shilong } 1141db79e6d1SWang Shilong 1142617ba13bSMingming Cao void ext4_update_dynamic_rev(struct super_block *sb) 1143ac27a0ecSDave Kleikamp { 1144617ba13bSMingming Cao struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1145ac27a0ecSDave Kleikamp 1146617ba13bSMingming Cao if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) 1147ac27a0ecSDave Kleikamp return; 1148ac27a0ecSDave Kleikamp 114912062dddSEric Sandeen ext4_warning(sb, 1150ac27a0ecSDave Kleikamp "updating to rev %d because of new feature flag, " 1151ac27a0ecSDave Kleikamp "running e2fsck is recommended", 1152617ba13bSMingming Cao EXT4_DYNAMIC_REV); 1153ac27a0ecSDave Kleikamp 1154617ba13bSMingming Cao es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); 1155617ba13bSMingming Cao es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); 1156617ba13bSMingming Cao es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); 1157ac27a0ecSDave Kleikamp /* leave es->s_feature_*compat flags alone */ 1158ac27a0ecSDave Kleikamp /* es->s_uuid will be set by e2fsck if empty */ 1159ac27a0ecSDave Kleikamp 1160ac27a0ecSDave Kleikamp /* 1161ac27a0ecSDave Kleikamp * The rest of the superblock fields should be zero, and if not it 1162ac27a0ecSDave Kleikamp * means they are likely already in use, so leave them alone. We 1163ac27a0ecSDave Kleikamp * can leave it up to e2fsck to clean up any inconsistencies there. 
/* Map an s_orphan list node back to its VFS inode. */
static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}

/* Debug dump of the in-memory orphan list (used at unmount on leaks). */
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	/*
	 * NOTE(review): s_last_orphan is __le32, so le32_to_cpu() yields an
	 * unsigned value printed with %d — arguably should be %u; confirm
	 * before changing the format string.
	 */
	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}

#ifdef CONFIG_QUOTA
static int ext4_quota_off(struct super_block *sb, int type);

/* Turn off quotas for all types below 'type' (exclusive upper bound). */
static inline void ext4_quotas_off(struct super_block *sb, int type)
{
	BUG_ON(type > EXT4_MAXQUOTAS);

	/* Use our quota_off function to clear inode flags etc. */
	for (type--; type >= 0; type--)
		ext4_quota_off(sb, type);
}

/*
 * This is a helper function which is used in the mount/remount
 * codepaths (which holds s_umount) to fetch the quota file name.
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
/* No-op stub when quota support is compiled out. */
static inline void ext4_quotas_off(struct super_block *sb, int type)
{
}
#endif

/*
 * Initialize the per-cpu counters and the writepages rwsem used by the
 * sb; also seeds the on-disk free block/inode counts from a full scan.
 * Returns 0 or a negative errno (logging "insufficient memory" on error).
 * Partially-initialized counters are torn down by the caller via
 * ext4_percpu_param_destroy().
 */
static int ext4_percpu_param_init(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t block;
	int err;

	block = ext4_count_free_clusters(sbi->s_sb);
	ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block));
	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (!err) {
		unsigned long freei = ext4_count_free_inodes(sbi->s_sb);
		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
					  GFP_KERNEL);
	}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirs_counter,
					  ext4_count_dirs(sbi->s_sb), GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err)
		ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");

	return err;
}
	if (!err)
		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
					  GFP_KERNEL);
	if (!err)
		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);

	if (err)
		ext4_msg(sbi->s_sb, KERN_ERR, "insufficient memory");

	return err;
}

/* Tear down everything allocated by ext4_percpu_param_init(). */
static void ext4_percpu_param_destroy(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
}

/*
 * Drop the buffer heads backing the cached group descriptors and free the
 * descriptor array itself (published via RCU).
 */
static void ext4_group_desc_free(struct ext4_sb_info *sbi)
{
	struct buffer_head **group_desc;
	int i;

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	rcu_read_unlock();
}

/* Free the flex-group array (published via RCU), if it was allocated. */
static void ext4_flex_groups_free(struct ext4_sb_info *sbi)
{
	struct flex_groups **flex_groups;
	int i;

	rcu_read_lock();
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
}

/*
 * Release all superblock state at unmount time.  The teardown order below
 * is deliberate (see the comments); do not reorder without checking the
 * dependencies between sysfs, the error work item and the journal.
 */
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int aborted = 0;
	int err;

	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL
	 * Unregister sysfs before flush sbi->s_sb_upd_work.
	 * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If
	 * read metadata verify failed then will queue error work.
	 * update_super_work will call start_this_handle may trigger
	 * BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
			 &sb->s_uuid);

	ext4_unregister_li_request(sb);
	ext4_quotas_off(sb, EXT4_MAXQUOTAS);

	flush_work(&sbi->s_sb_upd_work);
	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	timer_shutdown_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	/* Mark the fs cleanly unmounted unless the journal aborted. */
	if (!sb_rdonly(sb) && !aborted) {
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb);

	ext4_group_desc_free(sbi);
	ext4_flex_groups_free(sbi);
	ext4_percpu_param_destroy(sbi);
#ifdef CONFIG_QUOTA
	for (int i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	inode_set_iversion(&ei->vfs_inode, 1);
	ei->i_flags = 0;
	spin_lock_init(&ei->i_raw_lock);
	ei->i_prealloc_node = RB_ROOT;
	atomic_set(&ei->i_prealloc_active, 0);
	rwlock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

/* ->drop_inode: also drop if fscrypt wants the inode evicted. */
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

/* RCU-delayed ->free_inode: return the in-core inode to the slab cache. */
static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

/*
 * ->destroy_inode: sanity checks only — complain loudly if the inode is
 * still on the orphan list or has unreleased delalloc reservations.
 */
static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
			       EXT4_I(inode), sizeof(struct ext4_inode_info),
			       true);
		dump_stack();
	}

	if (EXT4_I(inode)->i_reserved_data_blocks)
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}

static void ext4_shutdown(struct super_block *sb)
{
	ext4_force_shutdown(sb, EXT4_GOING_FLAGS_NOLOGFLUSH);
}

/* Slab constructor: one-time init of locks/lists that survive reuse. */
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode, 0);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}

static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/* Flush an inode's metadata for NFS COMMIT semantics. */
static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

/* Quota callbacks implemented later in this file. */
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.show_options	= ext4_show_options,
	.shutdown	= ext4_shutdown,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

/* Mount-option tokens used by the fs_context parser below. */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_acl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};

/* Value tables for enum-typed mount parameters (errors=, data=, ...). */
static const struct constant_table ext4_param_errors[] = {
	{"continue",	EXT4_MOUNT_ERRORS_CONT},
	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
	{}
};

static const struct constant_table ext4_param_data[] = {
	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
	{}
};

static const struct constant_table ext4_param_data_err[] = {
	{"abort",	Opt_data_err_abort},
	{"ignore",	Opt_data_err_ignore},
	{}
};

static const struct constant_table ext4_param_jqfmt[] = {
	{"vfsold",	QFMT_VFS_OLD},
	{"vfsv0",	QFMT_VFS_V0},
	{"vfsv1",	QFMT_VFS_V1},
	{}
};

static const struct constant_table ext4_param_dax[] = {
	{"always",	Opt_dax_always},
	{"inode",	Opt_dax_inode},
	{"never",	Opt_dax_never},
	{}
};

/* String parameter that allows empty argument */
#define fsparam_string_empty(NAME, OPT) \
	__fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)

/*
 * Mount option specification
 * We don't use fsparam_flag_no because of the way we set the
 * options and the way we show them in _ext4_show_options(). To
 * keep the changes to a minimum, let's keep the negative options
 * separate for now.
 */
static const struct fs_parameter_spec ext4_param_specs[] = {
	fsparam_flag	("bsddf",		Opt_bsd_df),
	fsparam_flag	("minixdf",		Opt_minix_df),
	fsparam_flag	("grpid",		Opt_grpid),
	fsparam_flag	("bsdgroups",		Opt_grpid),
	fsparam_flag	("nogrpid",		Opt_nogrpid),
	fsparam_flag	("sysvgroups",		Opt_nogrpid),
	fsparam_u32	("resgid",		Opt_resgid),
	fsparam_u32	("resuid",		Opt_resuid),
	fsparam_u32	("sb",			Opt_sb),
	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
	fsparam_flag	("nouid32",		Opt_nouid32),
	fsparam_flag	("debug",		Opt_debug),
	fsparam_flag	("oldalloc",		Opt_removed),
	fsparam_flag	("orlov",		Opt_removed),
	fsparam_flag	("user_xattr",		Opt_user_xattr),
	fsparam_flag	("acl",			Opt_acl),
	fsparam_flag	("norecovery",		Opt_noload),
	fsparam_flag	("noload",		Opt_noload),
	fsparam_flag	("bh",			Opt_removed),
	fsparam_flag	("nobh",		Opt_removed),
	fsparam_u32	("commit",		Opt_commit),
	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
	fsparam_u32	("journal_dev",		Opt_journal_dev),
	fsparam_bdev	("journal_path",	Opt_journal_path),
	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
	fsparam_flag	("journal_async_commit",Opt_journal_async_commit),
	fsparam_flag	("abort",		Opt_abort),
	fsparam_enum	("data",		Opt_data, ext4_param_data),
	fsparam_enum	("data_err",		Opt_data_err,
						ext4_param_data_err),
	fsparam_string_empty
			("usrjquota",		Opt_usrjquota),
	fsparam_string_empty
			("grpjquota",		Opt_grpjquota),
	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
	fsparam_flag	("grpquota",		Opt_grpquota),
	fsparam_flag	("quota",		Opt_quota),
	fsparam_flag	("noquota",		Opt_noquota),
	fsparam_flag	("usrquota",		Opt_usrquota),
	fsparam_flag	("prjquota",		Opt_prjquota),
	fsparam_flag	("barrier",		Opt_barrier),
	fsparam_u32	("barrier",		Opt_barrier),
	fsparam_flag	("nobarrier",		Opt_nobarrier),
	fsparam_flag	("i_version",		Opt_removed),
	fsparam_flag	("dax",			Opt_dax),
	fsparam_enum	("dax",			Opt_dax_type, ext4_param_dax),
	fsparam_u32	("stripe",		Opt_stripe),
	fsparam_flag	("delalloc",		Opt_delalloc),
	fsparam_flag	("nodelalloc",		Opt_nodelalloc),
	fsparam_flag	("warn_on_error",	Opt_warn_on_error),
	fsparam_flag	("nowarn_on_error",	Opt_nowarn_on_error),
	fsparam_u32	("debug_want_extra_isize",
						Opt_debug_want_extra_isize),
	fsparam_flag	("mblk_io_submit",	Opt_removed),
	fsparam_flag	("nomblk_io_submit",	Opt_removed),
	fsparam_flag	("block_validity",	Opt_block_validity),
	fsparam_flag	("noblock_validity",	Opt_noblock_validity),
	fsparam_u32	("inode_readahead_blks",
						Opt_inode_readahead_blks),
	fsparam_u32	("journal_ioprio",	Opt_journal_ioprio),
	fsparam_u32	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("noauto_da_alloc",	Opt_noauto_da_alloc),
	fsparam_flag	("dioread_nolock",	Opt_dioread_nolock),
	fsparam_flag	("nodioread_nolock",	Opt_dioread_lock),
	fsparam_flag	("dioread_lock",	Opt_dioread_lock),
	fsparam_flag	("discard",		Opt_discard),
	fsparam_flag	("nodiscard",		Opt_nodiscard),
	fsparam_u32	("init_itable",		Opt_init_itable),
	fsparam_flag	("init_itable",		Opt_init_itable),
	fsparam_flag	("noinit_itable",	Opt_noinit_itable),
#ifdef CONFIG_EXT4_DEBUG
	fsparam_flag	("fc_debug_force",	Opt_fc_debug_force),
	fsparam_u32	("fc_debug_max_replay",	Opt_fc_debug_max_replay),
#endif
	fsparam_u32	("max_dir_size_kb",	Opt_max_dir_size_kb),
	fsparam_flag	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_string	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_flag	("inlinecrypt",		Opt_inlinecrypt),
	fsparam_flag	("nombcache",		Opt_nombcache),
	fsparam_flag	("no_mbcache",		Opt_nombcache),	/* for backward compatibility */
	fsparam_flag	("prefetch_block_bitmaps",
						Opt_removed),
	fsparam_flag	("no_prefetch_block_bitmaps",
						Opt_no_prefetch_block_bitmaps),
	fsparam_s32	("mb_optimize_scan",	Opt_mb_optimize_scan),
	fsparam_string	("check",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("nocheck",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("reservation",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("noreservation",	Opt_removed),	/* mount option from ext2/3 */
	fsparam_u32	("journal",		Opt_removed),	/* mount option from ext2/3 */
	{}
};

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

/* Flags describing how each token in ext4_mount_opts[] is applied. */
#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0010
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_NO_EXT2	0x0020
#define MOPT_NO_EXT3	0x0040
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_SKIP	0x0080
#define MOPT_2		0x0100

static const struct mount_opts {
int token; 185626092bf5STheodore Ts'o int mount_opt; 185726092bf5STheodore Ts'o int flags; 185826092bf5STheodore Ts'o } ext4_mount_opts[] = { 185926092bf5STheodore Ts'o {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET}, 186026092bf5STheodore Ts'o {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR}, 186126092bf5STheodore Ts'o {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET}, 186226092bf5STheodore Ts'o {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR}, 186326092bf5STheodore Ts'o {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET}, 186426092bf5STheodore Ts'o {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR}, 18658dc0aa8cSTheodore Ts'o {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, 18668dc0aa8cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_SET}, 18678dc0aa8cSTheodore Ts'o {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, 18688dc0aa8cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_CLEAR}, 186926092bf5STheodore Ts'o {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, 187026092bf5STheodore Ts'o {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, 18718dc0aa8cSTheodore Ts'o {Opt_delalloc, EXT4_MOUNT_DELALLOC, 18728dc0aa8cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 18738dc0aa8cSTheodore Ts'o {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, 187459d9fa5cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_CLEAR}, 1875327eaf73STheodore Ts'o {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, 1876327eaf73STheodore Ts'o {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, 1877cb8435dcSEric Biggers {Opt_commit, 0, MOPT_NO_EXT2}, 1878c6d3d56dSDarrick J. Wong {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1879c6d3d56dSDarrick J. 
Wong MOPT_EXT4_ONLY | MOPT_CLEAR}, 18808dc0aa8cSTheodore Ts'o {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 18811e381f60SDmitry Monakhov MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 188226092bf5STheodore Ts'o {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | 18838dc0aa8cSTheodore Ts'o EXT4_MOUNT_JOURNAL_CHECKSUM), 18841e381f60SDmitry Monakhov MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 18858dc0aa8cSTheodore Ts'o {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, 1886ba2e524dSLukas Czerner {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2}, 188726092bf5STheodore Ts'o {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, 188826092bf5STheodore Ts'o {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, 188926092bf5STheodore Ts'o {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, 189026092bf5STheodore Ts'o {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, 189126092bf5STheodore Ts'o {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, 1892ba2e524dSLukas Czerner {Opt_dax_type, 0, MOPT_EXT4_ONLY}, 1893ba2e524dSLukas Czerner {Opt_journal_dev, 0, MOPT_NO_EXT2}, 1894ba2e524dSLukas Czerner {Opt_journal_path, 0, MOPT_NO_EXT2}, 1895ba2e524dSLukas Czerner {Opt_journal_ioprio, 0, MOPT_NO_EXT2}, 1896ba2e524dSLukas Czerner {Opt_data, 0, MOPT_NO_EXT2}, 189726092bf5STheodore Ts'o {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, 189826092bf5STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL 189926092bf5STheodore Ts'o {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, 190026092bf5STheodore Ts'o #else 190126092bf5STheodore Ts'o {Opt_acl, 0, MOPT_NOSUPPORT}, 190226092bf5STheodore Ts'o #endif 190326092bf5STheodore Ts'o {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, 190426092bf5STheodore Ts'o {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, 190526092bf5STheodore Ts'o {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, 190626092bf5STheodore Ts'o {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, 190726092bf5STheodore Ts'o MOPT_SET | 
MOPT_Q}, 190826092bf5STheodore Ts'o {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, 190926092bf5STheodore Ts'o MOPT_SET | MOPT_Q}, 191049da9392SJan Kara {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA, 191149da9392SJan Kara MOPT_SET | MOPT_Q}, 191226092bf5STheodore Ts'o {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 191349da9392SJan Kara EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), 191449da9392SJan Kara MOPT_CLEAR | MOPT_Q}, 1915ba2e524dSLukas Czerner {Opt_usrjquota, 0, MOPT_Q}, 1916ba2e524dSLukas Czerner {Opt_grpjquota, 0, MOPT_Q}, 1917ba2e524dSLukas Czerner {Opt_jqfmt, 0, MOPT_QFMT}, 1918cdb7ee4cSTahsin Erdogan {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, 191921175ca4SHarshad Shirwadkar {Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS, 19203d392b26STheodore Ts'o MOPT_SET}, 192199c880deSHarshad Shirwadkar #ifdef CONFIG_EXT4_DEBUG 19220f0672ffSHarshad Shirwadkar {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, 19230f0672ffSHarshad Shirwadkar MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY}, 19248016e29fSHarshad Shirwadkar #endif 192522b8d707SJan Kara {Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2}, 192626092bf5STheodore Ts'o {Opt_err, 0, 0} 192726092bf5STheodore Ts'o }; 192826092bf5STheodore Ts'o 19295298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 1930c83ad55eSGabriel Krisman Bertazi static const struct ext4_sb_encodings { 1931c83ad55eSGabriel Krisman Bertazi __u16 magic; 1932c83ad55eSGabriel Krisman Bertazi char *name; 193349bd03ccSChristoph Hellwig unsigned int version; 1934c83ad55eSGabriel Krisman Bertazi } ext4_sb_encoding_map[] = { 193549bd03ccSChristoph Hellwig {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)}, 1936c83ad55eSGabriel Krisman Bertazi }; 1937c83ad55eSGabriel Krisman Bertazi 1938aa8bf298SChristoph Hellwig static const struct ext4_sb_encodings * 1939aa8bf298SChristoph Hellwig ext4_sb_read_encoding(const struct ext4_super_block *es) 1940c83ad55eSGabriel Krisman Bertazi { 1941c83ad55eSGabriel 
Krisman Bertazi __u16 magic = le16_to_cpu(es->s_encoding); 1942c83ad55eSGabriel Krisman Bertazi int i; 1943c83ad55eSGabriel Krisman Bertazi 1944c83ad55eSGabriel Krisman Bertazi for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++) 1945c83ad55eSGabriel Krisman Bertazi if (magic == ext4_sb_encoding_map[i].magic) 1946aa8bf298SChristoph Hellwig return &ext4_sb_encoding_map[i]; 1947c83ad55eSGabriel Krisman Bertazi 1948aa8bf298SChristoph Hellwig return NULL; 1949c83ad55eSGabriel Krisman Bertazi } 1950c83ad55eSGabriel Krisman Bertazi #endif 1951c83ad55eSGabriel Krisman Bertazi 19526e47a3ccSLukas Czerner #define EXT4_SPEC_JQUOTA (1 << 0) 19536e47a3ccSLukas Czerner #define EXT4_SPEC_JQFMT (1 << 1) 19546e47a3ccSLukas Czerner #define EXT4_SPEC_DATAJ (1 << 2) 19556e47a3ccSLukas Czerner #define EXT4_SPEC_SB_BLOCK (1 << 3) 19566e47a3ccSLukas Czerner #define EXT4_SPEC_JOURNAL_DEV (1 << 4) 19576e47a3ccSLukas Czerner #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5) 19586e47a3ccSLukas Czerner #define EXT4_SPEC_s_want_extra_isize (1 << 7) 19596e47a3ccSLukas Czerner #define EXT4_SPEC_s_max_batch_time (1 << 8) 19606e47a3ccSLukas Czerner #define EXT4_SPEC_s_min_batch_time (1 << 9) 19616e47a3ccSLukas Czerner #define EXT4_SPEC_s_inode_readahead_blks (1 << 10) 19626e47a3ccSLukas Czerner #define EXT4_SPEC_s_li_wait_mult (1 << 11) 19636e47a3ccSLukas Czerner #define EXT4_SPEC_s_max_dir_size_kb (1 << 12) 19646e47a3ccSLukas Czerner #define EXT4_SPEC_s_stripe (1 << 13) 19656e47a3ccSLukas Czerner #define EXT4_SPEC_s_resuid (1 << 14) 19666e47a3ccSLukas Czerner #define EXT4_SPEC_s_resgid (1 << 15) 19676e47a3ccSLukas Czerner #define EXT4_SPEC_s_commit_interval (1 << 16) 19686e47a3ccSLukas Czerner #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17) 19697edfd85bSLukas Czerner #define EXT4_SPEC_s_sb_block (1 << 18) 197027b38686SOjaswin Mujoo #define EXT4_SPEC_mb_optimize_scan (1 << 19) 19716e47a3ccSLukas Czerner 1972461c3af0SLukas Czerner struct ext4_fs_context { 1973e6e268cbSLukas Czerner char 
*s_qf_names[EXT4_MAXQUOTAS]; 197485456054SEric Biggers struct fscrypt_dummy_policy dummy_enc_policy; 1975e6e268cbSLukas Czerner int s_jquota_fmt; /* Format of quota to use */ 19766e47a3ccSLukas Czerner #ifdef CONFIG_EXT4_DEBUG 19776e47a3ccSLukas Czerner int s_fc_debug_max_replay; 19786e47a3ccSLukas Czerner #endif 19796e47a3ccSLukas Czerner unsigned short qname_spec; 19806e47a3ccSLukas Czerner unsigned long vals_s_flags; /* Bits to set in s_flags */ 19816e47a3ccSLukas Czerner unsigned long mask_s_flags; /* Bits changed in s_flags */ 19826e47a3ccSLukas Czerner unsigned long journal_devnum; 19836e47a3ccSLukas Czerner unsigned long s_commit_interval; 19846e47a3ccSLukas Czerner unsigned long s_stripe; 19856e47a3ccSLukas Czerner unsigned int s_inode_readahead_blks; 19866e47a3ccSLukas Czerner unsigned int s_want_extra_isize; 19876e47a3ccSLukas Czerner unsigned int s_li_wait_mult; 19886e47a3ccSLukas Czerner unsigned int s_max_dir_size_kb; 19896e47a3ccSLukas Czerner unsigned int journal_ioprio; 19906e47a3ccSLukas Czerner unsigned int vals_s_mount_opt; 19916e47a3ccSLukas Czerner unsigned int mask_s_mount_opt; 19926e47a3ccSLukas Czerner unsigned int vals_s_mount_opt2; 19936e47a3ccSLukas Czerner unsigned int mask_s_mount_opt2; 1994b6bd2435SLukas Czerner unsigned int opt_flags; /* MOPT flags */ 19956e47a3ccSLukas Czerner unsigned int spec; 19966e47a3ccSLukas Czerner u32 s_max_batch_time; 19976e47a3ccSLukas Czerner u32 s_min_batch_time; 19986e47a3ccSLukas Czerner kuid_t s_resuid; 19996e47a3ccSLukas Czerner kgid_t s_resgid; 20007edfd85bSLukas Czerner ext4_fsblk_t s_sb_block; 2001b237e304SHarshad Shirwadkar }; 2002b237e304SHarshad Shirwadkar 2003cebe85d5SLukas Czerner static void ext4_fc_free(struct fs_context *fc) 2004cebe85d5SLukas Czerner { 2005cebe85d5SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2006cebe85d5SLukas Czerner int i; 2007cebe85d5SLukas Czerner 2008cebe85d5SLukas Czerner if (!ctx) 2009cebe85d5SLukas Czerner return; 2010cebe85d5SLukas Czerner 
2011cebe85d5SLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) 2012cebe85d5SLukas Czerner kfree(ctx->s_qf_names[i]); 2013cebe85d5SLukas Czerner 201485456054SEric Biggers fscrypt_free_dummy_policy(&ctx->dummy_enc_policy); 2015cebe85d5SLukas Czerner kfree(ctx); 2016cebe85d5SLukas Czerner } 2017cebe85d5SLukas Czerner 2018cebe85d5SLukas Czerner int ext4_init_fs_context(struct fs_context *fc) 2019cebe85d5SLukas Czerner { 2020da9e4802SDan Carpenter struct ext4_fs_context *ctx; 2021cebe85d5SLukas Czerner 2022cebe85d5SLukas Czerner ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 2023cebe85d5SLukas Czerner if (!ctx) 2024cebe85d5SLukas Czerner return -ENOMEM; 2025cebe85d5SLukas Czerner 2026cebe85d5SLukas Czerner fc->fs_private = ctx; 2027cebe85d5SLukas Czerner fc->ops = &ext4_context_ops; 2028cebe85d5SLukas Czerner 2029cebe85d5SLukas Czerner return 0; 2030cebe85d5SLukas Czerner } 2031cebe85d5SLukas Czerner 2032e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 2033e6e268cbSLukas Czerner /* 2034e6e268cbSLukas Czerner * Note the name of the specified quota file. 
2035e6e268cbSLukas Czerner */ 2036e6e268cbSLukas Czerner static int note_qf_name(struct fs_context *fc, int qtype, 2037e6e268cbSLukas Czerner struct fs_parameter *param) 2038e6e268cbSLukas Czerner { 2039e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2040e6e268cbSLukas Czerner char *qname; 2041e6e268cbSLukas Czerner 2042e6e268cbSLukas Czerner if (param->size < 1) { 2043e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "Missing quota name"); 2044e6e268cbSLukas Czerner return -EINVAL; 2045e6e268cbSLukas Czerner } 2046e6e268cbSLukas Czerner if (strchr(param->string, '/')) { 2047e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 2048e6e268cbSLukas Czerner "quotafile must be on filesystem root"); 2049e6e268cbSLukas Czerner return -EINVAL; 2050e6e268cbSLukas Czerner } 2051e6e268cbSLukas Czerner if (ctx->s_qf_names[qtype]) { 2052e6e268cbSLukas Czerner if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) { 2053e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 2054e6e268cbSLukas Czerner "%s quota file already specified", 2055e6e268cbSLukas Czerner QTYPE2NAME(qtype)); 2056e6e268cbSLukas Czerner return -EINVAL; 2057e6e268cbSLukas Czerner } 2058e6e268cbSLukas Czerner return 0; 2059e6e268cbSLukas Czerner } 2060e6e268cbSLukas Czerner 2061e6e268cbSLukas Czerner qname = kmemdup_nul(param->string, param->size, GFP_KERNEL); 2062e6e268cbSLukas Czerner if (!qname) { 2063e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 2064e6e268cbSLukas Czerner "Not enough memory for storing quotafile name"); 2065e6e268cbSLukas Czerner return -ENOMEM; 2066e6e268cbSLukas Czerner } 2067e6e268cbSLukas Czerner ctx->s_qf_names[qtype] = qname; 2068e6e268cbSLukas Czerner ctx->qname_spec |= 1 << qtype; 20696e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JQUOTA; 2070e6e268cbSLukas Czerner return 0; 2071e6e268cbSLukas Czerner } 2072e6e268cbSLukas Czerner 2073e6e268cbSLukas Czerner /* 2074e6e268cbSLukas Czerner * Clear the name of the specified quota file. 
2075e6e268cbSLukas Czerner */ 2076e6e268cbSLukas Czerner static int unnote_qf_name(struct fs_context *fc, int qtype) 2077e6e268cbSLukas Czerner { 2078e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2079e6e268cbSLukas Czerner 2080e6e268cbSLukas Czerner if (ctx->s_qf_names[qtype]) 2081e6e268cbSLukas Czerner kfree(ctx->s_qf_names[qtype]); 2082e6e268cbSLukas Czerner 2083e6e268cbSLukas Czerner ctx->s_qf_names[qtype] = NULL; 2084e6e268cbSLukas Czerner ctx->qname_spec |= 1 << qtype; 20856e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JQUOTA; 2086e6e268cbSLukas Czerner return 0; 2087e6e268cbSLukas Czerner } 2088e6e268cbSLukas Czerner #endif 2089e6e268cbSLukas Czerner 209085456054SEric Biggers static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param, 209185456054SEric Biggers struct ext4_fs_context *ctx) 209285456054SEric Biggers { 209385456054SEric Biggers int err; 209485456054SEric Biggers 209585456054SEric Biggers if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { 209685456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 209785456054SEric Biggers "test_dummy_encryption option not supported"); 209885456054SEric Biggers return -EINVAL; 209985456054SEric Biggers } 210085456054SEric Biggers err = fscrypt_parse_test_dummy_encryption(param, 210185456054SEric Biggers &ctx->dummy_enc_policy); 210285456054SEric Biggers if (err == -EINVAL) { 210385456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 210485456054SEric Biggers "Value of option \"%s\" is unrecognized", param->key); 210585456054SEric Biggers } else if (err == -EEXIST) { 210685456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 210785456054SEric Biggers "Conflicting test_dummy_encryption options"); 210885456054SEric Biggers return -EINVAL; 210985456054SEric Biggers } 211085456054SEric Biggers return err; 211185456054SEric Biggers } 211285456054SEric Biggers 21136e47a3ccSLukas Czerner #define EXT4_SET_CTX(name) \ 21144c246728SLukas Czerner static inline void ctx_set_##name(struct ext4_fs_context *ctx, 
\ 21154c246728SLukas Czerner unsigned long flag) \ 21166e47a3ccSLukas Czerner { \ 21176e47a3ccSLukas Czerner ctx->mask_s_##name |= flag; \ 21186e47a3ccSLukas Czerner ctx->vals_s_##name |= flag; \ 2119e3952fccSLukas Czerner } 2120e3952fccSLukas Czerner 2121e3952fccSLukas Czerner #define EXT4_CLEAR_CTX(name) \ 21224c246728SLukas Czerner static inline void ctx_clear_##name(struct ext4_fs_context *ctx, \ 21234c246728SLukas Czerner unsigned long flag) \ 21246e47a3ccSLukas Czerner { \ 21256e47a3ccSLukas Czerner ctx->mask_s_##name |= flag; \ 21266e47a3ccSLukas Czerner ctx->vals_s_##name &= ~flag; \ 2127e3952fccSLukas Czerner } 2128e3952fccSLukas Czerner 2129e3952fccSLukas Czerner #define EXT4_TEST_CTX(name) \ 21304c246728SLukas Czerner static inline unsigned long \ 21314c246728SLukas Czerner ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 21326e47a3ccSLukas Czerner { \ 21334c246728SLukas Czerner return (ctx->vals_s_##name & flag); \ 2134e3952fccSLukas Czerner } 21356e47a3ccSLukas Czerner 2136e3952fccSLukas Czerner EXT4_SET_CTX(flags); /* set only */ 21376e47a3ccSLukas Czerner EXT4_SET_CTX(mount_opt); 2138e3952fccSLukas Czerner EXT4_CLEAR_CTX(mount_opt); 2139e3952fccSLukas Czerner EXT4_TEST_CTX(mount_opt); 21406e47a3ccSLukas Czerner EXT4_SET_CTX(mount_opt2); 2141e3952fccSLukas Czerner EXT4_CLEAR_CTX(mount_opt2); 2142e3952fccSLukas Czerner EXT4_TEST_CTX(mount_opt2); 2143e3952fccSLukas Czerner 214402f960f8SLukas Czerner static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) 214526092bf5STheodore Ts'o { 2146461c3af0SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2147461c3af0SLukas Czerner struct fs_parse_result result; 214826092bf5STheodore Ts'o const struct mount_opts *m; 2149461c3af0SLukas Czerner int is_remount; 215008cefc7aSEric W. Biederman kuid_t uid; 215108cefc7aSEric W. 
Biederman kgid_t gid; 2152461c3af0SLukas Czerner int token; 2153461c3af0SLukas Czerner 2154461c3af0SLukas Czerner token = fs_parse(fc, ext4_param_specs, param, &result); 2155461c3af0SLukas Czerner if (token < 0) 2156461c3af0SLukas Czerner return token; 2157461c3af0SLukas Czerner is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 215826092bf5STheodore Ts'o 2159ba2e524dSLukas Czerner for (m = ext4_mount_opts; m->token != Opt_err; m++) 2160ba2e524dSLukas Czerner if (token == m->token) 2161ba2e524dSLukas Czerner break; 2162ba2e524dSLukas Czerner 2163ba2e524dSLukas Czerner ctx->opt_flags |= m->flags; 2164ba2e524dSLukas Czerner 2165ba2e524dSLukas Czerner if (m->flags & MOPT_EXPLICIT) { 2166ba2e524dSLukas Czerner if (m->mount_opt & EXT4_MOUNT_DELALLOC) { 2167ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC); 2168ba2e524dSLukas Czerner } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { 2169ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, 2170ba2e524dSLukas Czerner EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM); 2171ba2e524dSLukas Czerner } else 2172ba2e524dSLukas Czerner return -EINVAL; 2173ba2e524dSLukas Czerner } 2174ba2e524dSLukas Czerner 2175ba2e524dSLukas Czerner if (m->flags & MOPT_NOSUPPORT) { 2176ba2e524dSLukas Czerner ext4_msg(NULL, KERN_ERR, "%s option not supported", 2177ba2e524dSLukas Czerner param->key); 2178ba2e524dSLukas Czerner return 0; 2179ba2e524dSLukas Czerner } 2180ba2e524dSLukas Czerner 2181ba2e524dSLukas Czerner switch (token) { 218257f73c2cSTheodore Ts'o #ifdef CONFIG_QUOTA 2183ba2e524dSLukas Czerner case Opt_usrjquota: 2184461c3af0SLukas Czerner if (!*param->string) 2185e6e268cbSLukas Czerner return unnote_qf_name(fc, USRQUOTA); 2186461c3af0SLukas Czerner else 2187e6e268cbSLukas Czerner return note_qf_name(fc, USRQUOTA, param); 2188ba2e524dSLukas Czerner case Opt_grpjquota: 2189461c3af0SLukas Czerner if (!*param->string) 2190e6e268cbSLukas Czerner return unnote_qf_name(fc, GRPQUOTA); 2191461c3af0SLukas Czerner else 
2192e6e268cbSLukas Czerner return note_qf_name(fc, GRPQUOTA, param); 219357f73c2cSTheodore Ts'o #endif 219426092bf5STheodore Ts'o case Opt_sb: 21957edfd85bSLukas Czerner if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 21967edfd85bSLukas Czerner ext4_msg(NULL, KERN_WARNING, 21977edfd85bSLukas Czerner "Ignoring %s option on remount", param->key); 21987edfd85bSLukas Czerner } else { 21997edfd85bSLukas Czerner ctx->s_sb_block = result.uint_32; 22007edfd85bSLukas Czerner ctx->spec |= EXT4_SPEC_s_sb_block; 22017edfd85bSLukas Czerner } 220202f960f8SLukas Czerner return 0; 220326092bf5STheodore Ts'o case Opt_removed: 2204da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option", 2205461c3af0SLukas Czerner param->key); 220602f960f8SLukas Czerner return 0; 22074f74d15fSEric Biggers case Opt_inlinecrypt: 22084f74d15fSEric Biggers #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT 22096e47a3ccSLukas Czerner ctx_set_flags(ctx, SB_INLINECRYPT); 22104f74d15fSEric Biggers #else 2211da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "inline encryption not supported"); 22124f74d15fSEric Biggers #endif 221302f960f8SLukas Czerner return 0; 2214461c3af0SLukas Czerner case Opt_errors: 22156e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK); 2216ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, result.uint_32); 2217ba2e524dSLukas Czerner return 0; 2218ba2e524dSLukas Czerner #ifdef CONFIG_QUOTA 2219ba2e524dSLukas Czerner case Opt_jqfmt: 2220ba2e524dSLukas Czerner ctx->s_jquota_fmt = result.uint_32; 2221ba2e524dSLukas Czerner ctx->spec |= EXT4_SPEC_JQFMT; 2222ba2e524dSLukas Czerner return 0; 2223ba2e524dSLukas Czerner #endif 2224ba2e524dSLukas Czerner case Opt_data: 2225ba2e524dSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2226ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, result.uint_32); 2227ba2e524dSLukas Czerner ctx->spec |= EXT4_SPEC_DATAJ; 2228ba2e524dSLukas Czerner return 0; 2229ba2e524dSLukas Czerner case Opt_commit: 2230461c3af0SLukas 
Czerner if (result.uint_32 == 0) 2231934b0de1SWang Jianjian result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE; 2232461c3af0SLukas Czerner else if (result.uint_32 > INT_MAX / HZ) { 2233da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 22349ba55543Szhangyi (F) "Invalid commit interval %d, " 22359ba55543Szhangyi (F) "must be smaller than %d", 2236461c3af0SLukas Czerner result.uint_32, INT_MAX / HZ); 2237da812f61SLukas Czerner return -EINVAL; 22389ba55543Szhangyi (F) } 22396e47a3ccSLukas Czerner ctx->s_commit_interval = HZ * result.uint_32; 22406e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_commit_interval; 2241ba2e524dSLukas Czerner return 0; 2242ba2e524dSLukas Czerner case Opt_debug_want_extra_isize: 22436e47a3ccSLukas Czerner if ((result.uint_32 & 1) || (result.uint_32 < 4)) { 2244da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2245461c3af0SLukas Czerner "Invalid want_extra_isize %d", result.uint_32); 2246da812f61SLukas Czerner return -EINVAL; 22479803387cSTheodore Ts'o } 22486e47a3ccSLukas Czerner ctx->s_want_extra_isize = result.uint_32; 22496e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_want_extra_isize; 2250ba2e524dSLukas Czerner return 0; 2251ba2e524dSLukas Czerner case Opt_max_batch_time: 22526e47a3ccSLukas Czerner ctx->s_max_batch_time = result.uint_32; 22536e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_max_batch_time; 2254ba2e524dSLukas Czerner return 0; 2255ba2e524dSLukas Czerner case Opt_min_batch_time: 22566e47a3ccSLukas Czerner ctx->s_min_batch_time = result.uint_32; 22576e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_min_batch_time; 2258ba2e524dSLukas Czerner return 0; 2259ba2e524dSLukas Czerner case Opt_inode_readahead_blks: 2260461c3af0SLukas Czerner if (result.uint_32 && 2261461c3af0SLukas Czerner (result.uint_32 > (1 << 30) || 2262461c3af0SLukas Czerner !is_power_of_2(result.uint_32))) { 2263da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2264e33e60eaSJan Kara "EXT4-fs: inode_readahead_blks must be " 2265e33e60eaSJan Kara "0 or a power of 2 smaller than 
2^31"); 2266da812f61SLukas Czerner return -EINVAL; 226726092bf5STheodore Ts'o } 22686e47a3ccSLukas Czerner ctx->s_inode_readahead_blks = result.uint_32; 22696e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_inode_readahead_blks; 2270ba2e524dSLukas Czerner return 0; 2271ba2e524dSLukas Czerner case Opt_init_itable: 22726e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE); 22736e47a3ccSLukas Czerner ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 2274461c3af0SLukas Czerner if (param->type == fs_value_is_string) 22756e47a3ccSLukas Czerner ctx->s_li_wait_mult = result.uint_32; 22766e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_li_wait_mult; 2277ba2e524dSLukas Czerner return 0; 2278ba2e524dSLukas Czerner case Opt_max_dir_size_kb: 22796e47a3ccSLukas Czerner ctx->s_max_dir_size_kb = result.uint_32; 22806e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_max_dir_size_kb; 2281ba2e524dSLukas Czerner return 0; 22828016e29fSHarshad Shirwadkar #ifdef CONFIG_EXT4_DEBUG 2283ba2e524dSLukas Czerner case Opt_fc_debug_max_replay: 22846e47a3ccSLukas Czerner ctx->s_fc_debug_max_replay = result.uint_32; 22856e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay; 2286ba2e524dSLukas Czerner return 0; 22878016e29fSHarshad Shirwadkar #endif 2288ba2e524dSLukas Czerner case Opt_stripe: 22896e47a3ccSLukas Czerner ctx->s_stripe = result.uint_32; 22906e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_stripe; 2291ba2e524dSLukas Czerner return 0; 2292ba2e524dSLukas Czerner case Opt_resuid: 2293461c3af0SLukas Czerner uid = make_kuid(current_user_ns(), result.uint_32); 22940efb3b23SJan Kara if (!uid_valid(uid)) { 2295da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid uid value %d", 2296461c3af0SLukas Czerner result.uint_32); 2297da812f61SLukas Czerner return -EINVAL; 22980efb3b23SJan Kara } 22996e47a3ccSLukas Czerner ctx->s_resuid = uid; 23006e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_resuid; 2301ba2e524dSLukas Czerner return 0; 2302ba2e524dSLukas Czerner case 
Opt_resgid: 2303461c3af0SLukas Czerner gid = make_kgid(current_user_ns(), result.uint_32); 23040efb3b23SJan Kara if (!gid_valid(gid)) { 2305da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid gid value %d", 2306461c3af0SLukas Czerner result.uint_32); 2307da812f61SLukas Czerner return -EINVAL; 23080efb3b23SJan Kara } 23096e47a3ccSLukas Czerner ctx->s_resgid = gid; 23106e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_resgid; 2311ba2e524dSLukas Czerner return 0; 2312ba2e524dSLukas Czerner case Opt_journal_dev: 23130efb3b23SJan Kara if (is_remount) { 2314da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 23150efb3b23SJan Kara "Cannot specify journal on remount"); 2316da812f61SLukas Czerner return -EINVAL; 23170efb3b23SJan Kara } 2318461c3af0SLukas Czerner ctx->journal_devnum = result.uint_32; 23196e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2320ba2e524dSLukas Czerner return 0; 2321ba2e524dSLukas Czerner case Opt_journal_path: 2322ba2e524dSLukas Czerner { 2323ad4eec61SEric Sandeen struct inode *journal_inode; 2324ad4eec61SEric Sandeen struct path path; 2325ad4eec61SEric Sandeen int error; 2326ad4eec61SEric Sandeen 2327ad4eec61SEric Sandeen if (is_remount) { 2328da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2329ad4eec61SEric Sandeen "Cannot specify journal on remount"); 2330da812f61SLukas Czerner return -EINVAL; 2331ad4eec61SEric Sandeen } 2332ad4eec61SEric Sandeen 2333e3ea75eeSLukas Czerner error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path); 2334ad4eec61SEric Sandeen if (error) { 2335da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "error: could not find " 2336461c3af0SLukas Czerner "journal device path"); 2337da812f61SLukas Czerner return -EINVAL; 2338ad4eec61SEric Sandeen } 2339ad4eec61SEric Sandeen 23402b0143b5SDavid Howells journal_inode = d_inode(path.dentry); 2341461c3af0SLukas Czerner ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev); 23426e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2343ad4eec61SEric Sandeen 
path_put(&path); 2344ba2e524dSLukas Czerner return 0; 2345ba2e524dSLukas Czerner } 2346ba2e524dSLukas Czerner case Opt_journal_ioprio: 2347461c3af0SLukas Czerner if (result.uint_32 > 7) { 2348da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority" 23490efb3b23SJan Kara " (must be 0-7)"); 2350da812f61SLukas Czerner return -EINVAL; 23510efb3b23SJan Kara } 2352461c3af0SLukas Czerner ctx->journal_ioprio = 2353461c3af0SLukas Czerner IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32); 23546e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO; 2355ba2e524dSLukas Czerner return 0; 2356ba2e524dSLukas Czerner case Opt_test_dummy_encryption: 235785456054SEric Biggers return ext4_parse_test_dummy_encryption(param, ctx); 2358ba2e524dSLukas Czerner case Opt_dax: 2359ba2e524dSLukas Czerner case Opt_dax_type: 2360ef83b6e8SDan Williams #ifdef CONFIG_FS_DAX 2361ba2e524dSLukas Czerner { 2362ba2e524dSLukas Czerner int type = (token == Opt_dax) ? 2363ba2e524dSLukas Czerner Opt_dax : result.uint_32; 2364ba2e524dSLukas Czerner 2365ba2e524dSLukas Czerner switch (type) { 23669cb20f94SIra Weiny case Opt_dax: 23679cb20f94SIra Weiny case Opt_dax_always: 2368ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 23696e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 23709cb20f94SIra Weiny break; 23719cb20f94SIra Weiny case Opt_dax_never: 2372ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 23736e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 23749cb20f94SIra Weiny break; 23759cb20f94SIra Weiny case Opt_dax_inode: 23766e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 23776e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 23789cb20f94SIra Weiny /* Strictly for printing options */ 2379ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE); 23809cb20f94SIra Weiny break; 23819cb20f94SIra Weiny } 2382ba2e524dSLukas Czerner return 0; 
2383ba2e524dSLukas Czerner } 2384ef83b6e8SDan Williams #else 2385da812f61SLukas Czerner ext4_msg(NULL, KERN_INFO, "dax option not supported"); 2386da812f61SLukas Czerner return -EINVAL; 2387923ae0ffSRoss Zwisler #endif 2388ba2e524dSLukas Czerner case Opt_data_err: 2389ba2e524dSLukas Czerner if (result.uint_32 == Opt_data_err_abort) 23906e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, m->mount_opt); 2391ba2e524dSLukas Czerner else if (result.uint_32 == Opt_data_err_ignore) 23926e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, m->mount_opt); 2393ba2e524dSLukas Czerner return 0; 2394ba2e524dSLukas Czerner case Opt_mb_optimize_scan: 239527b38686SOjaswin Mujoo if (result.int_32 == 1) { 239627b38686SOjaswin Mujoo ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 239727b38686SOjaswin Mujoo ctx->spec |= EXT4_SPEC_mb_optimize_scan; 239827b38686SOjaswin Mujoo } else if (result.int_32 == 0) { 239927b38686SOjaswin Mujoo ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 240027b38686SOjaswin Mujoo ctx->spec |= EXT4_SPEC_mb_optimize_scan; 240127b38686SOjaswin Mujoo } else { 2402da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, 2403196e402aSHarshad Shirwadkar "mb_optimize_scan should be set to 0 or 1."); 2404da812f61SLukas Czerner return -EINVAL; 2405196e402aSHarshad Shirwadkar } 2406ba2e524dSLukas Czerner return 0; 2407ba2e524dSLukas Czerner } 2408ba2e524dSLukas Czerner 2409ba2e524dSLukas Czerner /* 2410ba2e524dSLukas Czerner * At this point we should only be getting options requiring MOPT_SET, 2411ba2e524dSLukas Czerner * or MOPT_CLEAR. 
Anything else is a bug 2412ba2e524dSLukas Czerner */ 2413ba2e524dSLukas Czerner if (m->token == Opt_err) { 2414ba2e524dSLukas Czerner ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s", 2415ba2e524dSLukas Czerner param->key); 2416ba2e524dSLukas Czerner WARN_ON(1); 2417ba2e524dSLukas Czerner return -EINVAL; 2418ba2e524dSLukas Czerner } 2419ba2e524dSLukas Czerner 2420ba2e524dSLukas Czerner else { 2421461c3af0SLukas Czerner unsigned int set = 0; 2422461c3af0SLukas Czerner 2423461c3af0SLukas Czerner if ((param->type == fs_value_is_flag) || 2424461c3af0SLukas Czerner result.uint_32 > 0) 2425461c3af0SLukas Czerner set = 1; 2426461c3af0SLukas Czerner 242726092bf5STheodore Ts'o if (m->flags & MOPT_CLEAR) 2428461c3af0SLukas Czerner set = !set; 242926092bf5STheodore Ts'o else if (unlikely(!(m->flags & MOPT_SET))) { 2430da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, 2431461c3af0SLukas Czerner "buggy handling of option %s", 2432461c3af0SLukas Czerner param->key); 243326092bf5STheodore Ts'o WARN_ON(1); 2434da812f61SLukas Czerner return -EINVAL; 243526092bf5STheodore Ts'o } 2436995a3ed6SHarshad Shirwadkar if (m->flags & MOPT_2) { 2437461c3af0SLukas Czerner if (set != 0) 24386e47a3ccSLukas Czerner ctx_set_mount_opt2(ctx, m->mount_opt); 2439995a3ed6SHarshad Shirwadkar else 24406e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, m->mount_opt); 2441995a3ed6SHarshad Shirwadkar } else { 2442461c3af0SLukas Czerner if (set != 0) 24436e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, m->mount_opt); 244426092bf5STheodore Ts'o else 24456e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, m->mount_opt); 244626092bf5STheodore Ts'o } 2447995a3ed6SHarshad Shirwadkar } 2448ba2e524dSLukas Czerner 244902f960f8SLukas Czerner return 0; 245026092bf5STheodore Ts'o } 245126092bf5STheodore Ts'o 24527edfd85bSLukas Czerner static int parse_options(struct fs_context *fc, char *options) 2453ac27a0ecSDave Kleikamp { 2454461c3af0SLukas Czerner struct fs_parameter param; 2455461c3af0SLukas Czerner int ret; 
2456461c3af0SLukas Czerner char *key; 2457ac27a0ecSDave Kleikamp 2458ac27a0ecSDave Kleikamp if (!options) 24597edfd85bSLukas Czerner return 0; 2460461c3af0SLukas Czerner 2461461c3af0SLukas Czerner while ((key = strsep(&options, ",")) != NULL) { 2462461c3af0SLukas Czerner if (*key) { 2463461c3af0SLukas Czerner size_t v_len = 0; 2464461c3af0SLukas Czerner char *value = strchr(key, '='); 2465461c3af0SLukas Czerner 2466461c3af0SLukas Czerner param.type = fs_value_is_flag; 2467461c3af0SLukas Czerner param.string = NULL; 2468461c3af0SLukas Czerner 2469461c3af0SLukas Czerner if (value) { 2470461c3af0SLukas Czerner if (value == key) 2471ac27a0ecSDave Kleikamp continue; 2472461c3af0SLukas Czerner 2473461c3af0SLukas Czerner *value++ = 0; 2474461c3af0SLukas Czerner v_len = strlen(value); 2475461c3af0SLukas Czerner param.string = kmemdup_nul(value, v_len, 2476461c3af0SLukas Czerner GFP_KERNEL); 2477461c3af0SLukas Czerner if (!param.string) 24787edfd85bSLukas Czerner return -ENOMEM; 2479461c3af0SLukas Czerner param.type = fs_value_is_string; 2480461c3af0SLukas Czerner } 2481461c3af0SLukas Czerner 2482461c3af0SLukas Czerner param.key = key; 2483461c3af0SLukas Czerner param.size = v_len; 2484461c3af0SLukas Czerner 248502f960f8SLukas Czerner ret = ext4_parse_param(fc, ¶m); 2486461c3af0SLukas Czerner if (param.string) 2487461c3af0SLukas Czerner kfree(param.string); 2488461c3af0SLukas Czerner if (ret < 0) 24897edfd85bSLukas Czerner return ret; 2490ac27a0ecSDave Kleikamp } 2491461c3af0SLukas Czerner } 2492461c3af0SLukas Czerner 24937edfd85bSLukas Czerner ret = ext4_validate_options(fc); 2494da812f61SLukas Czerner if (ret < 0) 24957edfd85bSLukas Czerner return ret; 24967edfd85bSLukas Czerner 24977edfd85bSLukas Czerner return 0; 24987edfd85bSLukas Czerner } 24997edfd85bSLukas Czerner 25007edfd85bSLukas Czerner static int parse_apply_sb_mount_options(struct super_block *sb, 25017edfd85bSLukas Czerner struct ext4_fs_context *m_ctx) 25027edfd85bSLukas Czerner { 25037edfd85bSLukas Czerner 
struct ext4_sb_info *sbi = EXT4_SB(sb); 25047edfd85bSLukas Czerner char *s_mount_opts = NULL; 25057edfd85bSLukas Czerner struct ext4_fs_context *s_ctx = NULL; 25067edfd85bSLukas Czerner struct fs_context *fc = NULL; 25077edfd85bSLukas Czerner int ret = -ENOMEM; 25087edfd85bSLukas Czerner 25097edfd85bSLukas Czerner if (!sbi->s_es->s_mount_opts[0]) 2510da812f61SLukas Czerner return 0; 2511da812f61SLukas Czerner 25127edfd85bSLukas Czerner s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, 25137edfd85bSLukas Czerner sizeof(sbi->s_es->s_mount_opts), 25147edfd85bSLukas Czerner GFP_KERNEL); 25157edfd85bSLukas Czerner if (!s_mount_opts) 25167edfd85bSLukas Czerner return ret; 2517e6e268cbSLukas Czerner 25187edfd85bSLukas Czerner fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); 25197edfd85bSLukas Czerner if (!fc) 25207edfd85bSLukas Czerner goto out_free; 2521e6e268cbSLukas Czerner 25227edfd85bSLukas Czerner s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 25237edfd85bSLukas Czerner if (!s_ctx) 25247edfd85bSLukas Czerner goto out_free; 25257edfd85bSLukas Czerner 25267edfd85bSLukas Czerner fc->fs_private = s_ctx; 25277edfd85bSLukas Czerner fc->s_fs_info = sbi; 25287edfd85bSLukas Czerner 25297edfd85bSLukas Czerner ret = parse_options(fc, s_mount_opts); 25307edfd85bSLukas Czerner if (ret < 0) 25317edfd85bSLukas Czerner goto parse_failed; 25327edfd85bSLukas Czerner 25337edfd85bSLukas Czerner ret = ext4_check_opt_consistency(fc, sb); 25347edfd85bSLukas Czerner if (ret < 0) { 25357edfd85bSLukas Czerner parse_failed: 25367edfd85bSLukas Czerner ext4_msg(sb, KERN_WARNING, 25377edfd85bSLukas Czerner "failed to parse options in superblock: %s", 25387edfd85bSLukas Czerner s_mount_opts); 25397edfd85bSLukas Czerner ret = 0; 25407edfd85bSLukas Czerner goto out_free; 25417edfd85bSLukas Czerner } 25427edfd85bSLukas Czerner 25437edfd85bSLukas Czerner if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV) 25447edfd85bSLukas Czerner m_ctx->journal_devnum = s_ctx->journal_devnum; 
25457edfd85bSLukas Czerner if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO) 25467edfd85bSLukas Czerner m_ctx->journal_ioprio = s_ctx->journal_ioprio; 25477edfd85bSLukas Czerner 254885456054SEric Biggers ext4_apply_options(fc, sb); 254985456054SEric Biggers ret = 0; 25507edfd85bSLukas Czerner 25517edfd85bSLukas Czerner out_free: 2552c069db76SEric Biggers if (fc) { 2553c069db76SEric Biggers ext4_fc_free(fc); 25547edfd85bSLukas Czerner kfree(fc); 2555c069db76SEric Biggers } 25567edfd85bSLukas Czerner kfree(s_mount_opts); 25577edfd85bSLukas Czerner return ret; 25584c94bff9SLukas Czerner } 25594c94bff9SLukas Czerner 2560e6e268cbSLukas Czerner static void ext4_apply_quota_options(struct fs_context *fc, 2561e6e268cbSLukas Czerner struct super_block *sb) 2562e6e268cbSLukas Czerner { 2563e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 25646e47a3ccSLukas Czerner bool quota_feature = ext4_has_feature_quota(sb); 2565e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2566e6e268cbSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 2567e6e268cbSLukas Czerner char *qname; 2568e6e268cbSLukas Czerner int i; 2569e6e268cbSLukas Czerner 25706e47a3ccSLukas Czerner if (quota_feature) 25716e47a3ccSLukas Czerner return; 25726e47a3ccSLukas Czerner 25736e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQUOTA) { 2574e6e268cbSLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2575e6e268cbSLukas Czerner if (!(ctx->qname_spec & (1 << i))) 2576e6e268cbSLukas Czerner continue; 25776e47a3ccSLukas Czerner 2578e6e268cbSLukas Czerner qname = ctx->s_qf_names[i]; /* May be NULL */ 25794c1bd5a9SLukas Czerner if (qname) 25804c1bd5a9SLukas Czerner set_opt(sb, QUOTA); 2581e6e268cbSLukas Czerner ctx->s_qf_names[i] = NULL; 258213b215a9SLukas Czerner qname = rcu_replace_pointer(sbi->s_qf_names[i], qname, 258313b215a9SLukas Czerner lockdep_is_held(&sb->s_umount)); 258413b215a9SLukas Czerner if (qname) 258510e4f310SUladzislau Rezki (Sony) kfree_rcu_mightsleep(qname); 2586e6e268cbSLukas Czerner } 
25876e47a3ccSLukas Czerner } 25886e47a3ccSLukas Czerner 25896e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQFMT) 25906e47a3ccSLukas Czerner sbi->s_jquota_fmt = ctx->s_jquota_fmt; 2591e6e268cbSLukas Czerner #endif 2592e6e268cbSLukas Czerner } 2593e6e268cbSLukas Czerner 2594e6e268cbSLukas Czerner /* 2595e6e268cbSLukas Czerner * Check quota settings consistency. 2596e6e268cbSLukas Czerner */ 2597e6e268cbSLukas Czerner static int ext4_check_quota_consistency(struct fs_context *fc, 2598e6e268cbSLukas Czerner struct super_block *sb) 2599e6e268cbSLukas Czerner { 2600e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 2601e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2602e6e268cbSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 2603e6e268cbSLukas Czerner bool quota_feature = ext4_has_feature_quota(sb); 2604e6e268cbSLukas Czerner bool quota_loaded = sb_any_quota_loaded(sb); 26056e47a3ccSLukas Czerner bool usr_qf_name, grp_qf_name, usrquota, grpquota; 26066e47a3ccSLukas Czerner int quota_flags, i; 2607e6e268cbSLukas Czerner 26086e47a3ccSLukas Czerner /* 26096e47a3ccSLukas Czerner * We do the test below only for project quotas. 'usrquota' and 26106e47a3ccSLukas Czerner * 'grpquota' mount options are allowed even without quota feature 26116e47a3ccSLukas Czerner * to support legacy quotas in quota files. 26126e47a3ccSLukas Czerner */ 26136e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) && 26146e47a3ccSLukas Czerner !ext4_has_feature_project(sb)) { 26156e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. 
" 26166e47a3ccSLukas Czerner "Cannot enable project quota enforcement."); 26176e47a3ccSLukas Czerner return -EINVAL; 26186e47a3ccSLukas Czerner } 26196e47a3ccSLukas Czerner 26206e47a3ccSLukas Czerner quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 26216e47a3ccSLukas Czerner EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA; 26226e47a3ccSLukas Czerner if (quota_loaded && 26236e47a3ccSLukas Czerner ctx->mask_s_mount_opt & quota_flags && 26246e47a3ccSLukas Czerner !ctx_test_mount_opt(ctx, quota_flags)) 26256e47a3ccSLukas Czerner goto err_quota_change; 26266e47a3ccSLukas Czerner 26276e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQUOTA) { 2628e6e268cbSLukas Czerner 2629e6e268cbSLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2630e6e268cbSLukas Czerner if (!(ctx->qname_spec & (1 << i))) 2631e6e268cbSLukas Czerner continue; 2632e6e268cbSLukas Czerner 26336e47a3ccSLukas Czerner if (quota_loaded && 26346e47a3ccSLukas Czerner !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i]) 2635e6e268cbSLukas Czerner goto err_jquota_change; 2636e6e268cbSLukas Czerner 2637e6e268cbSLukas Czerner if (sbi->s_qf_names[i] && ctx->s_qf_names[i] && 263813b215a9SLukas Czerner strcmp(get_qf_name(sb, sbi, i), 2639e6e268cbSLukas Czerner ctx->s_qf_names[i]) != 0) 2640e6e268cbSLukas Czerner goto err_jquota_specified; 2641e6e268cbSLukas Czerner } 26426e47a3ccSLukas Czerner 26436e47a3ccSLukas Czerner if (quota_feature) { 26446e47a3ccSLukas Czerner ext4_msg(NULL, KERN_INFO, 26456e47a3ccSLukas Czerner "Journaled quota options ignored when " 26466e47a3ccSLukas Czerner "QUOTA feature is enabled"); 26476e47a3ccSLukas Czerner return 0; 26486e47a3ccSLukas Czerner } 2649e6e268cbSLukas Czerner } 2650e6e268cbSLukas Czerner 26516e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQFMT) { 2652e6e268cbSLukas Czerner if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded) 26536e47a3ccSLukas Czerner goto err_jquota_change; 2654e6e268cbSLukas Czerner if (quota_feature) { 2655e6e268cbSLukas Czerner ext4_msg(NULL, 
KERN_INFO, "Quota format mount options " 2656e6e268cbSLukas Czerner "ignored when QUOTA feature is enabled"); 2657e6e268cbSLukas Czerner return 0; 2658e6e268cbSLukas Czerner } 2659e6e268cbSLukas Czerner } 26606e47a3ccSLukas Czerner 26616e47a3ccSLukas Czerner /* Make sure we don't mix old and new quota format */ 26626e47a3ccSLukas Czerner usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) || 26636e47a3ccSLukas Czerner ctx->s_qf_names[USRQUOTA]); 26646e47a3ccSLukas Czerner grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) || 26656e47a3ccSLukas Czerner ctx->s_qf_names[GRPQUOTA]); 26666e47a3ccSLukas Czerner 26676e47a3ccSLukas Czerner usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 26686e47a3ccSLukas Czerner test_opt(sb, USRQUOTA)); 26696e47a3ccSLukas Czerner 26706e47a3ccSLukas Czerner grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) || 26716e47a3ccSLukas Czerner test_opt(sb, GRPQUOTA)); 26726e47a3ccSLukas Czerner 26736e47a3ccSLukas Czerner if (usr_qf_name) { 26746e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 26756e47a3ccSLukas Czerner usrquota = false; 26766e47a3ccSLukas Czerner } 26776e47a3ccSLukas Czerner if (grp_qf_name) { 26786e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 26796e47a3ccSLukas Czerner grpquota = false; 26806e47a3ccSLukas Czerner } 26816e47a3ccSLukas Czerner 26826e47a3ccSLukas Czerner if (usr_qf_name || grp_qf_name) { 26836e47a3ccSLukas Czerner if (usrquota || grpquota) { 26846e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "old and new quota " 26856e47a3ccSLukas Czerner "format mixing"); 26866e47a3ccSLukas Czerner return -EINVAL; 26876e47a3ccSLukas Czerner } 26886e47a3ccSLukas Czerner 26896e47a3ccSLukas Czerner if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) { 26906e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "journaled quota format " 26916e47a3ccSLukas Czerner "not specified"); 26926e47a3ccSLukas Czerner return -EINVAL; 26936e47a3ccSLukas Czerner } 26946e47a3ccSLukas Czerner } 
26956e47a3ccSLukas Czerner 2696e6e268cbSLukas Czerner return 0; 2697e6e268cbSLukas Czerner 2698e6e268cbSLukas Czerner err_quota_change: 2699e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 2700e6e268cbSLukas Czerner "Cannot change quota options when quota turned on"); 2701e6e268cbSLukas Czerner return -EINVAL; 2702e6e268cbSLukas Czerner err_jquota_change: 2703e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota " 2704e6e268cbSLukas Czerner "options when quota turned on"); 2705e6e268cbSLukas Czerner return -EINVAL; 2706e6e268cbSLukas Czerner err_jquota_specified: 2707e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "%s quota file already specified", 2708e6e268cbSLukas Czerner QTYPE2NAME(i)); 2709e6e268cbSLukas Czerner return -EINVAL; 2710e6e268cbSLukas Czerner #else 2711e6e268cbSLukas Czerner return 0; 2712e6e268cbSLukas Czerner #endif 2713e6e268cbSLukas Czerner } 2714e6e268cbSLukas Czerner 27155f41fdaeSEric Biggers static int ext4_check_test_dummy_encryption(const struct fs_context *fc, 27165f41fdaeSEric Biggers struct super_block *sb) 27175f41fdaeSEric Biggers { 27185f41fdaeSEric Biggers const struct ext4_fs_context *ctx = fc->fs_private; 27195f41fdaeSEric Biggers const struct ext4_sb_info *sbi = EXT4_SB(sb); 27205f41fdaeSEric Biggers 272185456054SEric Biggers if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy)) 27225f41fdaeSEric Biggers return 0; 27235f41fdaeSEric Biggers 27245f41fdaeSEric Biggers if (!ext4_has_feature_encrypt(sb)) { 27255f41fdaeSEric Biggers ext4_msg(NULL, KERN_WARNING, 27265f41fdaeSEric Biggers "test_dummy_encryption requires encrypt feature"); 27275f41fdaeSEric Biggers return -EINVAL; 27285f41fdaeSEric Biggers } 27295f41fdaeSEric Biggers /* 27305f41fdaeSEric Biggers * This mount option is just for testing, and it's not worthwhile to 27315f41fdaeSEric Biggers * implement the extra complexity (e.g. RCU protection) that would be 27325f41fdaeSEric Biggers * needed to allow it to be set or changed during remount. 
We do allow 27335f41fdaeSEric Biggers * it to be specified during remount, but only if there is no change. 27345f41fdaeSEric Biggers */ 273585456054SEric Biggers if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 273685456054SEric Biggers if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 273785456054SEric Biggers &ctx->dummy_enc_policy)) 273885456054SEric Biggers return 0; 27395f41fdaeSEric Biggers ext4_msg(NULL, KERN_WARNING, 274085456054SEric Biggers "Can't set or change test_dummy_encryption on remount"); 27415f41fdaeSEric Biggers return -EINVAL; 27425f41fdaeSEric Biggers } 274385456054SEric Biggers /* Also make sure s_mount_opts didn't contain a conflicting value. */ 274485456054SEric Biggers if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) { 274585456054SEric Biggers if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 274685456054SEric Biggers &ctx->dummy_enc_policy)) 27475f41fdaeSEric Biggers return 0; 274885456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 274985456054SEric Biggers "Conflicting test_dummy_encryption options"); 275085456054SEric Biggers return -EINVAL; 275185456054SEric Biggers } 27527959eb19SEric Biggers return 0; 275385456054SEric Biggers } 275485456054SEric Biggers 275585456054SEric Biggers static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx, 275685456054SEric Biggers struct super_block *sb) 275785456054SEric Biggers { 275885456054SEric Biggers if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) || 275985456054SEric Biggers /* if already set, it was already verified to be the same */ 276085456054SEric Biggers fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy)) 276185456054SEric Biggers return; 276285456054SEric Biggers EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy; 276385456054SEric Biggers memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy)); 276485456054SEric Biggers ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); 27655f41fdaeSEric Biggers 
} 27665f41fdaeSEric Biggers 2767b6bd2435SLukas Czerner static int ext4_check_opt_consistency(struct fs_context *fc, 2768b6bd2435SLukas Czerner struct super_block *sb) 2769b6bd2435SLukas Czerner { 2770b6bd2435SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 27716e47a3ccSLukas Czerner struct ext4_sb_info *sbi = fc->s_fs_info; 27726e47a3ccSLukas Czerner int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 27735f41fdaeSEric Biggers int err; 2774b6bd2435SLukas Czerner 2775b6bd2435SLukas Czerner if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { 2776b6bd2435SLukas Czerner ext4_msg(NULL, KERN_ERR, 2777b6bd2435SLukas Czerner "Mount option(s) incompatible with ext2"); 2778b6bd2435SLukas Czerner return -EINVAL; 2779b6bd2435SLukas Czerner } 2780b6bd2435SLukas Czerner if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { 2781b6bd2435SLukas Czerner ext4_msg(NULL, KERN_ERR, 2782b6bd2435SLukas Czerner "Mount option(s) incompatible with ext3"); 2783b6bd2435SLukas Czerner return -EINVAL; 2784b6bd2435SLukas Czerner } 2785b6bd2435SLukas Czerner 27866e47a3ccSLukas Czerner if (ctx->s_want_extra_isize > 27876e47a3ccSLukas Czerner (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) { 27886e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, 27896e47a3ccSLukas Czerner "Invalid want_extra_isize %d", 27906e47a3ccSLukas Czerner ctx->s_want_extra_isize); 27916e47a3ccSLukas Czerner return -EINVAL; 27926e47a3ccSLukas Czerner } 27936e47a3ccSLukas Czerner 27946e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DIOREAD_NOLOCK)) { 27956e47a3ccSLukas Czerner int blocksize = 27966e47a3ccSLukas Czerner BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 27976e47a3ccSLukas Czerner if (blocksize < PAGE_SIZE) 27986e47a3ccSLukas Czerner ext4_msg(NULL, KERN_WARNING, "Warning: mounting with an " 27996e47a3ccSLukas Czerner "experimental mount option 'dioread_nolock' " 28006e47a3ccSLukas Czerner "for blocksize < PAGE_SIZE"); 28016e47a3ccSLukas Czerner } 28026e47a3ccSLukas 
Czerner 28035f41fdaeSEric Biggers err = ext4_check_test_dummy_encryption(fc, sb); 28045f41fdaeSEric Biggers if (err) 28055f41fdaeSEric Biggers return err; 28066e47a3ccSLukas Czerner 28076e47a3ccSLukas Czerner if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) { 28086e47a3ccSLukas Czerner if (!sbi->s_journal) { 28096e47a3ccSLukas Czerner ext4_msg(NULL, KERN_WARNING, 28106e47a3ccSLukas Czerner "Remounting file system with no journal " 28116e47a3ccSLukas Czerner "so ignoring journalled data option"); 28126e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 28134c246728SLukas Czerner } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) != 28144c246728SLukas Czerner test_opt(sb, DATA_FLAGS)) { 28156e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "Cannot change data mode " 28166e47a3ccSLukas Czerner "on remount"); 28176e47a3ccSLukas Czerner return -EINVAL; 28186e47a3ccSLukas Czerner } 28196e47a3ccSLukas Czerner } 28206e47a3ccSLukas Czerner 28216e47a3ccSLukas Czerner if (is_remount) { 28226e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 28236e47a3ccSLukas Czerner (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 28246e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "can't mount with " 28256e47a3ccSLukas Czerner "both data=journal and dax"); 28266e47a3ccSLukas Czerner return -EINVAL; 28276e47a3ccSLukas Czerner } 28286e47a3ccSLukas Czerner 28296e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 28306e47a3ccSLukas Czerner (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 28316e47a3ccSLukas Czerner (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) { 28326e47a3ccSLukas Czerner fail_dax_change_remount: 28336e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "can't change " 28346e47a3ccSLukas Czerner "dax mount option while remounting"); 28356e47a3ccSLukas Czerner return -EINVAL; 28366e47a3ccSLukas Czerner } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) && 28376e47a3ccSLukas Czerner 
(!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 28386e47a3ccSLukas Czerner (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) { 28396e47a3ccSLukas Czerner goto fail_dax_change_remount; 28406e47a3ccSLukas Czerner } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) && 28416e47a3ccSLukas Czerner ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 28426e47a3ccSLukas Czerner (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 28436e47a3ccSLukas Czerner !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) { 28446e47a3ccSLukas Czerner goto fail_dax_change_remount; 28456e47a3ccSLukas Czerner } 28466e47a3ccSLukas Czerner } 28476e47a3ccSLukas Czerner 2848b6bd2435SLukas Czerner return ext4_check_quota_consistency(fc, sb); 2849b6bd2435SLukas Czerner } 2850b6bd2435SLukas Czerner 285185456054SEric Biggers static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) 28526e47a3ccSLukas Czerner { 28536e47a3ccSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 28546e47a3ccSLukas Czerner struct ext4_sb_info *sbi = fc->s_fs_info; 28556e47a3ccSLukas Czerner 28566e47a3ccSLukas Czerner sbi->s_mount_opt &= ~ctx->mask_s_mount_opt; 28576e47a3ccSLukas Czerner sbi->s_mount_opt |= ctx->vals_s_mount_opt; 28586e47a3ccSLukas Czerner sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2; 28596e47a3ccSLukas Czerner sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2; 28606e47a3ccSLukas Czerner sb->s_flags &= ~ctx->mask_s_flags; 28616e47a3ccSLukas Czerner sb->s_flags |= ctx->vals_s_flags; 28626e47a3ccSLukas Czerner 28636e47a3ccSLukas Czerner #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; }) 28646e47a3ccSLukas Czerner APPLY(s_commit_interval); 28656e47a3ccSLukas Czerner APPLY(s_stripe); 28666e47a3ccSLukas Czerner APPLY(s_max_batch_time); 28676e47a3ccSLukas Czerner APPLY(s_min_batch_time); 28686e47a3ccSLukas Czerner APPLY(s_want_extra_isize); 28696e47a3ccSLukas Czerner APPLY(s_inode_readahead_blks); 28706e47a3ccSLukas Czerner APPLY(s_max_dir_size_kb); 28716e47a3ccSLukas Czerner 
APPLY(s_li_wait_mult); 28726e47a3ccSLukas Czerner APPLY(s_resgid); 28736e47a3ccSLukas Czerner APPLY(s_resuid); 28746e47a3ccSLukas Czerner 28756e47a3ccSLukas Czerner #ifdef CONFIG_EXT4_DEBUG 28766e47a3ccSLukas Czerner APPLY(s_fc_debug_max_replay); 28776e47a3ccSLukas Czerner #endif 28786e47a3ccSLukas Czerner 28796e47a3ccSLukas Czerner ext4_apply_quota_options(fc, sb); 288085456054SEric Biggers ext4_apply_test_dummy_encryption(ctx, sb); 28816e47a3ccSLukas Czerner } 28826e47a3ccSLukas Czerner 28836e47a3ccSLukas Czerner 2884da812f61SLukas Czerner static int ext4_validate_options(struct fs_context *fc) 28854c94bff9SLukas Czerner { 2886ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 28876e47a3ccSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 28884c94bff9SLukas Czerner char *usr_qf_name, *grp_qf_name; 28896e47a3ccSLukas Czerner 28906e47a3ccSLukas Czerner usr_qf_name = ctx->s_qf_names[USRQUOTA]; 28916e47a3ccSLukas Czerner grp_qf_name = ctx->s_qf_names[GRPQUOTA]; 28926e47a3ccSLukas Czerner 289333458eabSTheodore Ts'o if (usr_qf_name || grp_qf_name) { 28946e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name) 28956e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2896ac27a0ecSDave Kleikamp 28976e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name) 28986e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2899ac27a0ecSDave Kleikamp 29006e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 29016e47a3ccSLukas Czerner ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) { 2902da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "old and new quota " 2903b31e1552SEric Sandeen "format mixing"); 2904da812f61SLukas Czerner return -EINVAL; 2905ac27a0ecSDave Kleikamp } 2906ac27a0ecSDave Kleikamp } 2907ac27a0ecSDave Kleikamp #endif 29086e47a3ccSLukas Czerner return 1; 2909ac27a0ecSDave Kleikamp } 2910ac27a0ecSDave Kleikamp 29112adf6da8STheodore Ts'o static inline void 
ext4_show_quota_options(struct seq_file *seq, 29122adf6da8STheodore Ts'o struct super_block *sb) 29132adf6da8STheodore Ts'o { 29142adf6da8STheodore Ts'o #if defined(CONFIG_QUOTA) 29152adf6da8STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 291633458eabSTheodore Ts'o char *usr_qf_name, *grp_qf_name; 29172adf6da8STheodore Ts'o 29182adf6da8STheodore Ts'o if (sbi->s_jquota_fmt) { 29192adf6da8STheodore Ts'o char *fmtname = ""; 29202adf6da8STheodore Ts'o 29212adf6da8STheodore Ts'o switch (sbi->s_jquota_fmt) { 29222adf6da8STheodore Ts'o case QFMT_VFS_OLD: 29232adf6da8STheodore Ts'o fmtname = "vfsold"; 29242adf6da8STheodore Ts'o break; 29252adf6da8STheodore Ts'o case QFMT_VFS_V0: 29262adf6da8STheodore Ts'o fmtname = "vfsv0"; 29272adf6da8STheodore Ts'o break; 29282adf6da8STheodore Ts'o case QFMT_VFS_V1: 29292adf6da8STheodore Ts'o fmtname = "vfsv1"; 29302adf6da8STheodore Ts'o break; 29312adf6da8STheodore Ts'o } 29322adf6da8STheodore Ts'o seq_printf(seq, ",jqfmt=%s", fmtname); 29332adf6da8STheodore Ts'o } 29342adf6da8STheodore Ts'o 293533458eabSTheodore Ts'o rcu_read_lock(); 293633458eabSTheodore Ts'o usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]); 293733458eabSTheodore Ts'o grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]); 293833458eabSTheodore Ts'o if (usr_qf_name) 293933458eabSTheodore Ts'o seq_show_option(seq, "usrjquota", usr_qf_name); 294033458eabSTheodore Ts'o if (grp_qf_name) 294133458eabSTheodore Ts'o seq_show_option(seq, "grpjquota", grp_qf_name); 294233458eabSTheodore Ts'o rcu_read_unlock(); 29432adf6da8STheodore Ts'o #endif 29442adf6da8STheodore Ts'o } 29452adf6da8STheodore Ts'o 29465a916be1STheodore Ts'o static const char *token2str(int token) 29475a916be1STheodore Ts'o { 294897d8a670SLukas Czerner const struct fs_parameter_spec *spec; 29495a916be1STheodore Ts'o 295097d8a670SLukas Czerner for (spec = ext4_param_specs; spec->name != NULL; spec++) 295197d8a670SLukas Czerner if (spec->opt == token && !spec->type) 29525a916be1STheodore Ts'o 
break; 295397d8a670SLukas Czerner return spec->name; 29545a916be1STheodore Ts'o } 29555a916be1STheodore Ts'o 29562adf6da8STheodore Ts'o /* 29572adf6da8STheodore Ts'o * Show an option if 29582adf6da8STheodore Ts'o * - it's set to a non-default value OR 29592adf6da8STheodore Ts'o * - if the per-sb default is different from the global default 29602adf6da8STheodore Ts'o */ 296166acdcf4STheodore Ts'o static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, 296266acdcf4STheodore Ts'o int nodefs) 29632adf6da8STheodore Ts'o { 29642adf6da8STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 29652adf6da8STheodore Ts'o struct ext4_super_block *es = sbi->s_es; 2966e3645d72SZhang Yi int def_errors; 29675a916be1STheodore Ts'o const struct mount_opts *m; 296866acdcf4STheodore Ts'o char sep = nodefs ? '\n' : ','; 29692adf6da8STheodore Ts'o 297066acdcf4STheodore Ts'o #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) 297166acdcf4STheodore Ts'o #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) 29722adf6da8STheodore Ts'o 29732adf6da8STheodore Ts'o if (sbi->s_sb_block != 1) 29745a916be1STheodore Ts'o SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); 29755a916be1STheodore Ts'o 29765a916be1STheodore Ts'o for (m = ext4_mount_opts; m->token != Opt_err; m++) { 29775a916be1STheodore Ts'o int want_set = m->flags & MOPT_SET; 2978e3645d72SZhang Yi int opt_2 = m->flags & MOPT_2; 2979e3645d72SZhang Yi unsigned int mount_opt, def_mount_opt; 2980e3645d72SZhang Yi 29815a916be1STheodore Ts'o if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || 2982ba2e524dSLukas Czerner m->flags & MOPT_SKIP) 29835a916be1STheodore Ts'o continue; 2984e3645d72SZhang Yi 2985e3645d72SZhang Yi if (opt_2) { 2986e3645d72SZhang Yi mount_opt = sbi->s_mount_opt2; 2987e3645d72SZhang Yi def_mount_opt = sbi->s_def_mount_opt2; 2988e3645d72SZhang Yi } else { 2989e3645d72SZhang Yi mount_opt = sbi->s_mount_opt; 2990e3645d72SZhang Yi def_mount_opt = sbi->s_def_mount_opt; 2991e3645d72SZhang Yi } 
2992e3645d72SZhang Yi /* skip if same as the default */ 2993e3645d72SZhang Yi if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt))) 2994e3645d72SZhang Yi continue; 2995e3645d72SZhang Yi /* select Opt_noFoo vs Opt_Foo */ 29965a916be1STheodore Ts'o if ((want_set && 2997e3645d72SZhang Yi (mount_opt & m->mount_opt) != m->mount_opt) || 2998e3645d72SZhang Yi (!want_set && (mount_opt & m->mount_opt))) 2999e3645d72SZhang Yi continue; 30005a916be1STheodore Ts'o SEQ_OPTS_PRINT("%s", token2str(m->token)); 30015a916be1STheodore Ts'o } 30025a916be1STheodore Ts'o 300308cefc7aSEric W. Biederman if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || 30045a916be1STheodore Ts'o le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) 300508cefc7aSEric W. Biederman SEQ_OPTS_PRINT("resuid=%u", 300608cefc7aSEric W. Biederman from_kuid_munged(&init_user_ns, sbi->s_resuid)); 300708cefc7aSEric W. Biederman if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || 30085a916be1STheodore Ts'o le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) 300908cefc7aSEric W. Biederman SEQ_OPTS_PRINT("resgid=%u", 301008cefc7aSEric W. Biederman from_kgid_munged(&init_user_ns, sbi->s_resgid)); 301166acdcf4STheodore Ts'o def_errors = nodefs ? 
-1 : le16_to_cpu(es->s_errors); 30125a916be1STheodore Ts'o if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) 30135a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=remount-ro"); 30142adf6da8STheodore Ts'o if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) 30155a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=continue"); 30162adf6da8STheodore Ts'o if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) 30175a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=panic"); 301866acdcf4STheodore Ts'o if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) 30195a916be1STheodore Ts'o SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); 302066acdcf4STheodore Ts'o if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) 30215a916be1STheodore Ts'o SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 302266acdcf4STheodore Ts'o if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 30235a916be1STheodore Ts'o SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 302466acdcf4STheodore Ts'o if (nodefs || sbi->s_stripe) 30255a916be1STheodore Ts'o SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 302668afa7e0STyson Nottingham if (nodefs || EXT4_MOUNT_DATA_FLAGS & 3027e3645d72SZhang Yi (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 30282adf6da8STheodore Ts'o if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 30295a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=journal"); 30302adf6da8STheodore Ts'o else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 30315a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=ordered"); 30322adf6da8STheodore Ts'o else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 30335a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=writeback"); 30345a916be1STheodore Ts'o } 303566acdcf4STheodore Ts'o if (nodefs || 303666acdcf4STheodore Ts'o sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) 30375a916be1STheodore Ts'o SEQ_OPTS_PRINT("inode_readahead_blks=%u", 30382adf6da8STheodore Ts'o 
sbi->s_inode_readahead_blks); 30392adf6da8STheodore Ts'o 3040ceec0376STyson Nottingham if (test_opt(sb, INIT_INODE_TABLE) && (nodefs || 304166acdcf4STheodore Ts'o (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) 30425a916be1STheodore Ts'o SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); 3043df981d03STheodore Ts'o if (nodefs || sbi->s_max_dir_size_kb) 3044df981d03STheodore Ts'o SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); 30457915a861SAles Novak if (test_opt(sb, DATA_ERR_ABORT)) 30467915a861SAles Novak SEQ_OPTS_PUTS("data_err=abort"); 3047ed318a6cSEric Biggers 3048ed318a6cSEric Biggers fscrypt_show_test_dummy_encryption(seq, sep, sb); 30492adf6da8STheodore Ts'o 30504f74d15fSEric Biggers if (sb->s_flags & SB_INLINECRYPT) 30514f74d15fSEric Biggers SEQ_OPTS_PUTS("inlinecrypt"); 30524f74d15fSEric Biggers 30539cb20f94SIra Weiny if (test_opt(sb, DAX_ALWAYS)) { 30549cb20f94SIra Weiny if (IS_EXT2_SB(sb)) 30559cb20f94SIra Weiny SEQ_OPTS_PUTS("dax"); 30569cb20f94SIra Weiny else 30579cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=always"); 30589cb20f94SIra Weiny } else if (test_opt2(sb, DAX_NEVER)) { 30599cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=never"); 30609cb20f94SIra Weiny } else if (test_opt2(sb, DAX_INODE)) { 30619cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=inode"); 30629cb20f94SIra Weiny } 30633fa5d23eSOjaswin Mujoo 30643fa5d23eSOjaswin Mujoo if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 30653fa5d23eSOjaswin Mujoo !test_opt2(sb, MB_OPTIMIZE_SCAN)) { 30663fa5d23eSOjaswin Mujoo SEQ_OPTS_PUTS("mb_optimize_scan=0"); 30673fa5d23eSOjaswin Mujoo } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 30683fa5d23eSOjaswin Mujoo test_opt2(sb, MB_OPTIMIZE_SCAN)) { 30693fa5d23eSOjaswin Mujoo SEQ_OPTS_PUTS("mb_optimize_scan=1"); 30703fa5d23eSOjaswin Mujoo } 30713fa5d23eSOjaswin Mujoo 30722adf6da8STheodore Ts'o ext4_show_quota_options(seq, sb); 30732adf6da8STheodore Ts'o return 0; 30742adf6da8STheodore Ts'o } 30752adf6da8STheodore Ts'o 
/* ->show_options: delegate to the common helper with nodefs == 0, i.e.
 * print only options that differ from the mount defaults. */
static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

/* seq_file show callback; seq->private carries the super_block.  Prefixes
 * the full (nodefs == 1) option list with the current ro/rw state. */
int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

/*
 * Finish superblock setup at mount time: refuse revisions we do not
 * support, warn when running e2fsck is advisable (unchecked fs, recorded
 * errors, mount count or check interval exceeded), bump the mount count
 * and mtime, set the journal recovery / orphan-present feature bits, and
 * write the superblock back out.  Read-only mounts skip all on-disk
 * updates.  Returns 0 on success, -EROFS for an unsupported revision, or
 * the error from ext4_commit_super().
 */
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	/* Without a journal the VALID_FS bit is cleared until clean umount. */
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal) {
		ext4_set_feature_journal_needs_recovery(sb);
		if (ext4_has_feature_orphan_file(sb))
			ext4_set_feature_orphan_present(sb);
	}

	err = ext4_commit_super(sb);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);
	return err;
}

/*
 * Grow sbi->s_flex_groups so it covers at least @ngroup block groups.
 * A new pointer array (plus per-flex-group entries for the newly covered
 * range) is populated first and then published with rcu_assign_pointer();
 * the old array is handed to ext4_kvfree_array_rcu() so readers holding
 * rcu_read_lock() stay safe.  Returns 0 on success or -ENOMEM.
 */
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			/* Unwind only the entries allocated in this call. */
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}

/*
 * Populate the per-flex-group free inode / free cluster / used dir
 * counters by summing the on-disk group descriptors.  Returns 1 on
 * success, 0 on failure (note: NOT the usual 0/-errno convention).
 */
static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct flex_groups *fg;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &fg->free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
	}

	return 1;
failed:
	return 0;
}

/*
 * Compute the checksum of group descriptor @gdp, salted with the group
 * number.  Uses the metadata_csum (crc32c-derived) algorithm when that
 * feature is enabled, otherwise falls back to the legacy crc16 over the
 * fs UUID + group number + descriptor (skipping bg_checksum itself).
 */
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		/* Checksum the bg_checksum field itself as zero. */
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
		crc = crc16(crc, (__u8 *)gdp + offset,
			    sbi->s_desc_size - offset);

out:
	return cpu_to_le16(crc);
}

/* Returns 1 if the stored checksum matches (or descriptor checksums are
 * not in use), 0 on mismatch. */
int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

/* Recompute and store the descriptor checksum; no-op when the feature is
 * disabled. */
void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
/*
 * Sanity-check every group descriptor: bitmaps and inode tables must lie
 * inside their group and must not overlap the superblock or the GDT, and
 * descriptor checksums must verify.  Returns 1 if the descriptors look
 * sane, 0 otherwise.  Overlap/checksum failures are tolerated (logged
 * only) on read-only mounts.  Also reports, via @first_not_zeroed, the
 * first group whose inode table has not been zeroed (for lazy init).
 */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		/* With flex_bg, metadata may live anywhere in the fs. */
		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		if ((grp == sbi->s_groups_count) &&
		    !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Block bitmap for group %u not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode bitmap for group %u not in group "
			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
			       "Inode table for group %u not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However there is other limiting factor. We do store extents in the form
 * of starting block and length, hence the resulting length of the extent
 * covering maximum file size must fit into on-disk format containers as
 * well. Given that length is always by 1 unit bigger than max unit (because
 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));

	if (!has_huge_files) {
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}

/*
 * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	/* pointers (block numbers) per indirect block */
	unsigned int ppb = 1 << (bits - 2);

	/*
	 * This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files) {
		/*
		 * !has_huge_files or implies that the inode i_block field
		 * represents total file blocks in 2^32 512-byte sectors ==
		 * size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* Compute how many blocks we can address by block tree */
	res += ppb;
	res += ppb * ppb;
	res += ((loff_t)ppb) * ppb * ppb;
	/* Compute how many metadata blocks are needed */
	meta_blocks = 1;
	meta_blocks += 1 + ppb;
	meta_blocks += 1 + ppb + ppb * ppb;
	/* Does block tree limit file size? */
	if (res + meta_blocks <= upper_limit)
		goto check_lfs;

	res = upper_limit;
	/* How many metadata blocks are needed for addressing upper_limit? */
	upper_limit -= EXT4_NDIR_BLOCKS;
	/* indirect blocks */
	meta_blocks = 1;
	upper_limit -= ppb;
	/* double indirect blocks */
	if (upper_limit < ppb * ppb) {
		meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
		res -= meta_blocks;
		goto check_lfs;
	}
	meta_blocks += 1 + ppb;
	upper_limit -= ppb * ppb;
	/* tripple indirect blocks for the rest */
	meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
		DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
	res -= meta_blocks;
check_lfs:
	res <<= bits;
	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

/*
 * Return the block number holding backup group descriptor @nr, relative
 * to the superblock at @logical_sb_block.  Without meta_bg the GDT simply
 * follows the superblock; with meta_bg each descriptor block lives in its
 * own meta block group.
 */
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
		le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

#if !IS_ENABLED(CONFIG_UNICODE)
	if (ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}
#endif

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
			  ~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition. */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned int prefetch_ios = 0;
	int ret = 0;
	int nr = EXT4_SB(sb)->s_mb_prefetch;
	u64 start_time;

	/* Prefetch mode: read ahead block bitmaps; once done, fall back to
	 * lazy inode table init if any group still needs zeroing. */
	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
		ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		start_time = ktime_get_real_ns();
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		trace_ext4_lazy_itable_init(sb, group);
		/* First pass measures how long zeroing takes and scales the
		 * reschedule delay by s_li_wait_mult. */
		if (elr->lr_timeout == 0) {
			elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) *
				EXT4_SB(elr->lr_super)->s_li_wait_mult);
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	return ret;
}

/*
 * Remove lr_request from the list_request and free the
 * request structure.
 * Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	if (!elr)
		return;

	list_del(&elr->lr_request);
	/* the superblock no longer has a pending lazy-init request */
	EXT4_SB(elr->lr_super)->s_li_request = NULL;
	kfree(elr);
}

/*
 * Drop this filesystem's lazy-init request, if one is queued.
 * Takes ext4_li_mtx then li_list_mtx (same order as the thread).
 */
static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where ext4lazyinit thread lives. It walks
 * through the request list searching for next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spend in this
 * function. Based on that time we compute next schedule time of
 * the request. When walking through the list is complete, compute
 * next waking time and put itself into sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);
	set_freezable();

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_before(jiffies, elr->lr_next_sched)) {
				/* not due yet; remember earliest deadline */
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			/* trylocks: never block umount or a frozen fs */
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					/* list may have changed while unlocked */
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				/* couldn't run it now; retry at a random delay */
				elr->lr_next_sched = jiffies +
					get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

/* Remove and free every queued request (thread is shutting down). */
static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

/*
 * Spawn the ext4lazyinit kthread.  On failure the pending request
 * list and ext4_li_info are torn down; returns 0 or -errno.
 */
static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it make sense to run itable init. thread or not.
 * If there is at least one uninitialized inode table, return
 * corresponding group number, else the loop goes through all
 * groups and return total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	/* without group descriptor checksums the ZEROED flag is meaningless */
	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

/* Allocate and initialize the global ext4_li_info singleton. */
static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

/*
 * Allocate a lazy-init request for @sb.  Starts in bitmap-prefetch
 * mode unless prefetching is disabled by mount option.  Returns NULL
 * on allocation failure.
 */
static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
						   ext4_group_t start)
{
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_first_not_zeroed = start;
	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
		elr->lr_mode = EXT4_LI_MODE_ITABLE;
		elr->lr_next_group = start;
	} else {
		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
	}

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
	return elr;
}

/*
 * Queue a lazy-init request for @sb and start the ext4lazyinit
 * thread if it is not already running.  Re-registering an existing
 * request only resets its timeout.  Returns 0 or -errno.
 */
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	/* nothing to do for ro mounts, or when neither mode has work */
	if (sb_rdonly(sb) ||
	    (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
	     (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

/*
 * Select the jbd2 checksum features matching this filesystem's
 * checksum capabilities and async-commit mount option.  Returns the
 * result of jbd2_journal_set_features() (1 on success) or 1 when only
 * clearing was needed.
 */
static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	/* start from a clean slate, then set only what this mount needs */
	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across with a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp;
	ext4_fsblk_t first_block, last_block, b;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int s, j, count = 0;
	int has_super = ext4_bg_has_super(sb, grp);

	/* non-bigalloc: overhead is a simple closed-form count */
	if (!ext4_has_feature_bigalloc(sb))
		return (has_super + ext4_bg_num_gdb(sb, grp) +
			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
			sbi->s_itb_per_group + 2);

	/*
	 * bigalloc: mark every metadata block that lands inside this
	 * group's block range in @buf, then count occupied clusters.
	 */
	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		/* this group itself: superblock backup + GDT blocks */
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		/* buf was dirtied only if blks != 0; reuse it zeroed */
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (!IS_ERR(j_inode)) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	/* publish s_overhead before any reader observes it */
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

/*
 * Compute and store the number of clusters reserved for metadata
 * allocations (2% of the fs, capped at 4096 clusters).
 */
static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path.  In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

/* Human-readable quota mode, used for mount-time reporting. */
static const char *ext4_quota_mode(struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	if (!ext4_quota_capable(sb))
		return "none";

	if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
		return "journalled";
	else
		return "writeback";
#else
	return "disabled";
#endif
}

/* Install a jbd2 frozen-data trigger used for metadata checksumming. */
static void ext4_setup_csum_trigger(struct super_block *sb,
				    enum ext4_journal_trigger_type type,
				    void (*trigger)(
					struct jbd2_buffer_trigger_type *type,
					struct buffer_head *bh,
					void *mapped_data,
					size_t size))
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	sbi->s_journal_triggers[type].sb = sb;
	sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
}

/* Counterpart of ext4_alloc_sbi(); tolerates a NULL argument. */
static void ext4_free_sbi(struct ext4_sb_info *sbi)
{
	if (!sbi)
		return;

	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	kfree(sbi);
}

/*
 * Allocate the per-mount ext4_sb_info, grab the DAX device reference
 * and the blockgroup lock, and link it into sb->s_fs_info.
 * Returns NULL on allocation failure (freed via ext4_free_sbi()).
 */
static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
{
	struct ext4_sb_info *sbi;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return NULL;

	sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
					   NULL, NULL);

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);

	if (!sbi->s_blockgroup_lock)
		goto err_out;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	return sbi;
err_out:
	fs_put_dax(sbi->s_daxdev, NULL);
	kfree(sbi);
	return NULL;
}

/* Apply the default mount options recorded in the on-disk superblock. */
static void ext4_set_def_opts(struct super_block *sb,
			      struct ext4_super_block *es)
{
	unsigned long def_mount_opts;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS) 43685f6d662dSJason Yan set_opt(sb, GRPID); 43695f6d662dSJason Yan if (def_mount_opts & EXT4_DEFM_UID16) 43705f6d662dSJason Yan set_opt(sb, NO_UID32); 43715f6d662dSJason Yan /* xattr user namespace & acls are now defaulted on */ 43725f6d662dSJason Yan set_opt(sb, XATTR_USER); 43735f6d662dSJason Yan #ifdef CONFIG_EXT4_FS_POSIX_ACL 43745f6d662dSJason Yan set_opt(sb, POSIX_ACL); 43755f6d662dSJason Yan #endif 43765f6d662dSJason Yan if (ext4_has_feature_fast_commit(sb)) 43775f6d662dSJason Yan set_opt2(sb, JOURNAL_FAST_COMMIT); 43785f6d662dSJason Yan /* don't forget to enable journal_csum when metadata_csum is enabled. */ 43795f6d662dSJason Yan if (ext4_has_metadata_csum(sb)) 43805f6d662dSJason Yan set_opt(sb, JOURNAL_CHECKSUM); 43815f6d662dSJason Yan 43825f6d662dSJason Yan if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 43835f6d662dSJason Yan set_opt(sb, JOURNAL_DATA); 43845f6d662dSJason Yan else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 43855f6d662dSJason Yan set_opt(sb, ORDERED_DATA); 43865f6d662dSJason Yan else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 43875f6d662dSJason Yan set_opt(sb, WRITEBACK_DATA); 43885f6d662dSJason Yan 43895f6d662dSJason Yan if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC) 43905f6d662dSJason Yan set_opt(sb, ERRORS_PANIC); 43915f6d662dSJason Yan else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE) 43925f6d662dSJason Yan set_opt(sb, ERRORS_CONT); 43935f6d662dSJason Yan else 43945f6d662dSJason Yan set_opt(sb, ERRORS_RO); 43955f6d662dSJason Yan /* block_validity enabled by default; disable with noblock_validity */ 43965f6d662dSJason Yan set_opt(sb, BLOCK_VALIDITY); 43975f6d662dSJason Yan if (def_mount_opts & EXT4_DEFM_DISCARD) 43985f6d662dSJason Yan set_opt(sb, DISCARD); 43995f6d662dSJason Yan 44005f6d662dSJason Yan if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) 44015f6d662dSJason Yan set_opt(sb, BARRIER); 44025f6d662dSJason Yan 
44035f6d662dSJason Yan /* 44045f6d662dSJason Yan * enable delayed allocation by default 44055f6d662dSJason Yan * Use -o nodelalloc to turn it off 44065f6d662dSJason Yan */ 44075f6d662dSJason Yan if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && 44085f6d662dSJason Yan ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 44095f6d662dSJason Yan set_opt(sb, DELALLOC); 44103df11e27SJason Yan 44113df11e27SJason Yan if (sb->s_blocksize == PAGE_SIZE) 44123df11e27SJason Yan set_opt(sb, DIOREAD_NOLOCK); 44135f6d662dSJason Yan } 44145f6d662dSJason Yan 4415c8267c51SJason Yan static int ext4_handle_clustersize(struct super_block *sb) 44164a8557b0SJason Yan { 44174a8557b0SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 44184a8557b0SJason Yan struct ext4_super_block *es = sbi->s_es; 44194a8557b0SJason Yan int clustersize; 44204a8557b0SJason Yan 44214a8557b0SJason Yan /* Handle clustersize */ 44224a8557b0SJason Yan clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); 44234a8557b0SJason Yan if (ext4_has_feature_bigalloc(sb)) { 4424c8267c51SJason Yan if (clustersize < sb->s_blocksize) { 44254a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 44264a8557b0SJason Yan "cluster size (%d) smaller than " 4427c8267c51SJason Yan "block size (%lu)", clustersize, sb->s_blocksize); 44284a8557b0SJason Yan return -EINVAL; 44294a8557b0SJason Yan } 44304a8557b0SJason Yan sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - 44314a8557b0SJason Yan le32_to_cpu(es->s_log_block_size); 44324a8557b0SJason Yan sbi->s_clusters_per_group = 44334a8557b0SJason Yan le32_to_cpu(es->s_clusters_per_group); 4434c8267c51SJason Yan if (sbi->s_clusters_per_group > sb->s_blocksize * 8) { 44354a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 44364a8557b0SJason Yan "#clusters per group too big: %lu", 44374a8557b0SJason Yan sbi->s_clusters_per_group); 44384a8557b0SJason Yan return -EINVAL; 44394a8557b0SJason Yan } 44404a8557b0SJason Yan if (sbi->s_blocks_per_group != 4441c8267c51SJason Yan (sbi->s_clusters_per_group * (clustersize 
/ sb->s_blocksize))) { 44424a8557b0SJason Yan ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " 44434a8557b0SJason Yan "clusters per group (%lu) inconsistent", 44444a8557b0SJason Yan sbi->s_blocks_per_group, 44454a8557b0SJason Yan sbi->s_clusters_per_group); 44464a8557b0SJason Yan return -EINVAL; 44474a8557b0SJason Yan } 44484a8557b0SJason Yan } else { 4449c8267c51SJason Yan if (clustersize != sb->s_blocksize) { 44504a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 44514a8557b0SJason Yan "fragment/cluster size (%d) != " 4452c8267c51SJason Yan "block size (%lu)", clustersize, sb->s_blocksize); 44534a8557b0SJason Yan return -EINVAL; 44544a8557b0SJason Yan } 4455c8267c51SJason Yan if (sbi->s_blocks_per_group > sb->s_blocksize * 8) { 44564a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 44574a8557b0SJason Yan "#blocks per group too big: %lu", 44584a8557b0SJason Yan sbi->s_blocks_per_group); 44594a8557b0SJason Yan return -EINVAL; 44604a8557b0SJason Yan } 44614a8557b0SJason Yan sbi->s_clusters_per_group = sbi->s_blocks_per_group; 44624a8557b0SJason Yan sbi->s_cluster_bits = 0; 44634a8557b0SJason Yan } 4464c8267c51SJason Yan sbi->s_cluster_ratio = clustersize / sb->s_blocksize; 44654a8557b0SJason Yan 44664a8557b0SJason Yan /* Do we have standard group size of clustersize * 8 blocks ? 
*/ 44674a8557b0SJason Yan if (sbi->s_blocks_per_group == clustersize << 3) 44684a8557b0SJason Yan set_opt2(sb, STD_GROUP_SIZE); 44694a8557b0SJason Yan 44704a8557b0SJason Yan return 0; 44714a8557b0SJason Yan } 44724a8557b0SJason Yan 4473f7314a67SJason Yan static void ext4_fast_commit_init(struct super_block *sb) 4474f7314a67SJason Yan { 4475f7314a67SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4476f7314a67SJason Yan 4477f7314a67SJason Yan /* Initialize fast commit stuff */ 4478f7314a67SJason Yan atomic_set(&sbi->s_fc_subtid, 0); 4479f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]); 4480f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]); 4481f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]); 4482f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]); 4483f7314a67SJason Yan sbi->s_fc_bytes = 0; 4484f7314a67SJason Yan ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); 4485f7314a67SJason Yan sbi->s_fc_ineligible_tid = 0; 4486f7314a67SJason Yan spin_lock_init(&sbi->s_fc_lock); 4487f7314a67SJason Yan memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); 4488f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions = NULL; 4489f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_size = 0; 4490f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_used = 0; 4491f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_valid = 0; 4492f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes = NULL; 4493f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes_size = 0; 4494f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes_used = 0; 4495f7314a67SJason Yan } 4496f7314a67SJason Yan 44970e495f7cSJason Yan static int ext4_inode_info_init(struct super_block *sb, 4498c8267c51SJason Yan struct ext4_super_block *es) 44990e495f7cSJason Yan { 45000e495f7cSJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 45010e495f7cSJason Yan 45020e495f7cSJason Yan if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { 45030e495f7cSJason 
Yan sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; 45040e495f7cSJason Yan sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; 45050e495f7cSJason Yan } else { 45060e495f7cSJason Yan sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 45070e495f7cSJason Yan sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 45080e495f7cSJason Yan if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { 45090e495f7cSJason Yan ext4_msg(sb, KERN_ERR, "invalid first ino: %u", 45100e495f7cSJason Yan sbi->s_first_ino); 45110e495f7cSJason Yan return -EINVAL; 45120e495f7cSJason Yan } 45130e495f7cSJason Yan if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 45140e495f7cSJason Yan (!is_power_of_2(sbi->s_inode_size)) || 4515c8267c51SJason Yan (sbi->s_inode_size > sb->s_blocksize)) { 45160e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 45170e495f7cSJason Yan "unsupported inode size: %d", 45180e495f7cSJason Yan sbi->s_inode_size); 4519c8267c51SJason Yan ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize); 45200e495f7cSJason Yan return -EINVAL; 45210e495f7cSJason Yan } 45220e495f7cSJason Yan /* 45230e495f7cSJason Yan * i_atime_extra is the last extra field available for 45240e495f7cSJason Yan * [acm]times in struct ext4_inode. Checking for that 45250e495f7cSJason Yan * field should suffice to ensure we have extra space 45260e495f7cSJason Yan * for all three. 
45270e495f7cSJason Yan */ 45280e495f7cSJason Yan if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + 45290e495f7cSJason Yan sizeof(((struct ext4_inode *)0)->i_atime_extra)) { 45300e495f7cSJason Yan sb->s_time_gran = 1; 45310e495f7cSJason Yan sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; 45320e495f7cSJason Yan } else { 45330e495f7cSJason Yan sb->s_time_gran = NSEC_PER_SEC; 45340e495f7cSJason Yan sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; 45350e495f7cSJason Yan } 45360e495f7cSJason Yan sb->s_time_min = EXT4_TIMESTAMP_MIN; 45370e495f7cSJason Yan } 45380e495f7cSJason Yan 45390e495f7cSJason Yan if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 45400e495f7cSJason Yan sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 45410e495f7cSJason Yan EXT4_GOOD_OLD_INODE_SIZE; 45420e495f7cSJason Yan if (ext4_has_feature_extra_isize(sb)) { 45430e495f7cSJason Yan unsigned v, max = (sbi->s_inode_size - 45440e495f7cSJason Yan EXT4_GOOD_OLD_INODE_SIZE); 45450e495f7cSJason Yan 45460e495f7cSJason Yan v = le16_to_cpu(es->s_want_extra_isize); 45470e495f7cSJason Yan if (v > max) { 45480e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 45490e495f7cSJason Yan "bad s_want_extra_isize: %d", v); 45500e495f7cSJason Yan return -EINVAL; 45510e495f7cSJason Yan } 45520e495f7cSJason Yan if (sbi->s_want_extra_isize < v) 45530e495f7cSJason Yan sbi->s_want_extra_isize = v; 45540e495f7cSJason Yan 45550e495f7cSJason Yan v = le16_to_cpu(es->s_min_extra_isize); 45560e495f7cSJason Yan if (v > max) { 45570e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 45580e495f7cSJason Yan "bad s_min_extra_isize: %d", v); 45590e495f7cSJason Yan return -EINVAL; 45600e495f7cSJason Yan } 45610e495f7cSJason Yan if (sbi->s_want_extra_isize < v) 45620e495f7cSJason Yan sbi->s_want_extra_isize = v; 45630e495f7cSJason Yan } 45640e495f7cSJason Yan } 45650e495f7cSJason Yan 45660e495f7cSJason Yan return 0; 45670e495f7cSJason Yan } 45680e495f7cSJason Yan 456939c135b0SJason Yan #if IS_ENABLED(CONFIG_UNICODE) 457039c135b0SJason Yan 
static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 457139c135b0SJason Yan { 457239c135b0SJason Yan const struct ext4_sb_encodings *encoding_info; 457339c135b0SJason Yan struct unicode_map *encoding; 457439c135b0SJason Yan __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags); 457539c135b0SJason Yan 457639c135b0SJason Yan if (!ext4_has_feature_casefold(sb) || sb->s_encoding) 457739c135b0SJason Yan return 0; 457839c135b0SJason Yan 457939c135b0SJason Yan encoding_info = ext4_sb_read_encoding(es); 458039c135b0SJason Yan if (!encoding_info) { 458139c135b0SJason Yan ext4_msg(sb, KERN_ERR, 458239c135b0SJason Yan "Encoding requested by superblock is unknown"); 458339c135b0SJason Yan return -EINVAL; 458439c135b0SJason Yan } 458539c135b0SJason Yan 458639c135b0SJason Yan encoding = utf8_load(encoding_info->version); 458739c135b0SJason Yan if (IS_ERR(encoding)) { 458839c135b0SJason Yan ext4_msg(sb, KERN_ERR, 458939c135b0SJason Yan "can't mount with superblock charset: %s-%u.%u.%u " 459039c135b0SJason Yan "not supported by the kernel. 
flags: 0x%x.", 459139c135b0SJason Yan encoding_info->name, 459239c135b0SJason Yan unicode_major(encoding_info->version), 459339c135b0SJason Yan unicode_minor(encoding_info->version), 459439c135b0SJason Yan unicode_rev(encoding_info->version), 459539c135b0SJason Yan encoding_flags); 459639c135b0SJason Yan return -EINVAL; 459739c135b0SJason Yan } 459839c135b0SJason Yan ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: " 459939c135b0SJason Yan "%s-%u.%u.%u with flags 0x%hx", encoding_info->name, 460039c135b0SJason Yan unicode_major(encoding_info->version), 460139c135b0SJason Yan unicode_minor(encoding_info->version), 460239c135b0SJason Yan unicode_rev(encoding_info->version), 460339c135b0SJason Yan encoding_flags); 460439c135b0SJason Yan 460539c135b0SJason Yan sb->s_encoding = encoding; 460639c135b0SJason Yan sb->s_encoding_flags = encoding_flags; 460739c135b0SJason Yan 460839c135b0SJason Yan return 0; 460939c135b0SJason Yan } 461039c135b0SJason Yan #else 461139c135b0SJason Yan static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 461239c135b0SJason Yan { 461339c135b0SJason Yan return 0; 461439c135b0SJason Yan } 461539c135b0SJason Yan #endif 461639c135b0SJason Yan 4617b26458d1SJason Yan static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es) 4618b26458d1SJason Yan { 4619b26458d1SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4620b26458d1SJason Yan 4621b26458d1SJason Yan /* Warn if metadata_csum and gdt_csum are both set. 
*/ 4622b26458d1SJason Yan if (ext4_has_feature_metadata_csum(sb) && 4623b26458d1SJason Yan ext4_has_feature_gdt_csum(sb)) 4624b26458d1SJason Yan ext4_warning(sb, "metadata_csum and uninit_bg are " 4625b26458d1SJason Yan "redundant flags; please run fsck."); 4626b26458d1SJason Yan 4627b26458d1SJason Yan /* Check for a known checksum algorithm */ 4628b26458d1SJason Yan if (!ext4_verify_csum_type(sb, es)) { 4629b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4630b26458d1SJason Yan "unknown checksum algorithm."); 4631b26458d1SJason Yan return -EINVAL; 4632b26458d1SJason Yan } 4633b26458d1SJason Yan ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, 4634b26458d1SJason Yan ext4_orphan_file_block_trigger); 4635b26458d1SJason Yan 4636b26458d1SJason Yan /* Load the checksum driver */ 4637b26458d1SJason Yan sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); 4638b26458d1SJason Yan if (IS_ERR(sbi->s_chksum_driver)) { 4639b26458d1SJason Yan int ret = PTR_ERR(sbi->s_chksum_driver); 4640b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); 4641b26458d1SJason Yan sbi->s_chksum_driver = NULL; 4642b26458d1SJason Yan return ret; 4643b26458d1SJason Yan } 4644b26458d1SJason Yan 4645b26458d1SJason Yan /* Check superblock checksum */ 4646b26458d1SJason Yan if (!ext4_superblock_csum_verify(sb, es)) { 4647b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4648b26458d1SJason Yan "invalid superblock checksum. 
Run e2fsck?"); 4649b26458d1SJason Yan return -EFSBADCRC; 4650b26458d1SJason Yan } 4651b26458d1SJason Yan 4652b26458d1SJason Yan /* Precompute checksum seed for all metadata */ 4653b26458d1SJason Yan if (ext4_has_feature_csum_seed(sb)) 4654b26458d1SJason Yan sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); 4655b26458d1SJason Yan else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb)) 4656b26458d1SJason Yan sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, 4657b26458d1SJason Yan sizeof(es->s_uuid)); 4658b26458d1SJason Yan return 0; 4659b26458d1SJason Yan } 4660b26458d1SJason Yan 4661d7f3542bSJason Yan static int ext4_check_feature_compatibility(struct super_block *sb, 4662d7f3542bSJason Yan struct ext4_super_block *es, 4663d7f3542bSJason Yan int silent) 4664d7f3542bSJason Yan { 466554902099SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 466654902099SJason Yan 4667d7f3542bSJason Yan if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 4668d7f3542bSJason Yan (ext4_has_compat_features(sb) || 4669d7f3542bSJason Yan ext4_has_ro_compat_features(sb) || 4670d7f3542bSJason Yan ext4_has_incompat_features(sb))) 4671d7f3542bSJason Yan ext4_msg(sb, KERN_WARNING, 4672d7f3542bSJason Yan "feature flags set on rev 0 fs, " 4673d7f3542bSJason Yan "running e2fsck is recommended"); 4674d7f3542bSJason Yan 4675d7f3542bSJason Yan if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { 4676d7f3542bSJason Yan set_opt2(sb, HURD_COMPAT); 4677d7f3542bSJason Yan if (ext4_has_feature_64bit(sb)) { 4678d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, 4679d7f3542bSJason Yan "The Hurd can't support 64-bit file systems"); 4680d7f3542bSJason Yan return -EINVAL; 4681d7f3542bSJason Yan } 4682d7f3542bSJason Yan 4683d7f3542bSJason Yan /* 4684d7f3542bSJason Yan * ea_inode feature uses l_i_version field which is not 4685d7f3542bSJason Yan * available in HURD_COMPAT mode. 
4686d7f3542bSJason Yan */ 4687d7f3542bSJason Yan if (ext4_has_feature_ea_inode(sb)) { 4688d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, 4689d7f3542bSJason Yan "ea_inode feature is not supported for Hurd"); 4690d7f3542bSJason Yan return -EINVAL; 4691d7f3542bSJason Yan } 4692d7f3542bSJason Yan } 4693d7f3542bSJason Yan 4694d7f3542bSJason Yan if (IS_EXT2_SB(sb)) { 4695d7f3542bSJason Yan if (ext2_feature_set_ok(sb)) 4696d7f3542bSJason Yan ext4_msg(sb, KERN_INFO, "mounting ext2 file system " 4697d7f3542bSJason Yan "using the ext4 subsystem"); 4698d7f3542bSJason Yan else { 4699d7f3542bSJason Yan /* 4700d7f3542bSJason Yan * If we're probing be silent, if this looks like 4701d7f3542bSJason Yan * it's actually an ext[34] filesystem. 4702d7f3542bSJason Yan */ 4703d7f3542bSJason Yan if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4704d7f3542bSJason Yan return -EINVAL; 4705d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " 4706d7f3542bSJason Yan "to feature incompatibilities"); 4707d7f3542bSJason Yan return -EINVAL; 4708d7f3542bSJason Yan } 4709d7f3542bSJason Yan } 4710d7f3542bSJason Yan 4711d7f3542bSJason Yan if (IS_EXT3_SB(sb)) { 4712d7f3542bSJason Yan if (ext3_feature_set_ok(sb)) 4713d7f3542bSJason Yan ext4_msg(sb, KERN_INFO, "mounting ext3 file system " 4714d7f3542bSJason Yan "using the ext4 subsystem"); 4715d7f3542bSJason Yan else { 4716d7f3542bSJason Yan /* 4717d7f3542bSJason Yan * If we're probing be silent, if this looks like 4718d7f3542bSJason Yan * it's actually an ext4 filesystem. 
4719d7f3542bSJason Yan */ 4720d7f3542bSJason Yan if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4721d7f3542bSJason Yan return -EINVAL; 4722d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " 4723d7f3542bSJason Yan "to feature incompatibilities"); 4724d7f3542bSJason Yan return -EINVAL; 4725d7f3542bSJason Yan } 4726d7f3542bSJason Yan } 4727d7f3542bSJason Yan 4728d7f3542bSJason Yan /* 4729d7f3542bSJason Yan * Check feature flags regardless of the revision level, since we 4730d7f3542bSJason Yan * previously didn't change the revision level when setting the flags, 4731d7f3542bSJason Yan * so there is a chance incompat flags are set on a rev 0 filesystem. 4732d7f3542bSJason Yan */ 4733d7f3542bSJason Yan if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) 4734d7f3542bSJason Yan return -EINVAL; 4735d7f3542bSJason Yan 473654902099SJason Yan if (sbi->s_daxdev) { 473754902099SJason Yan if (sb->s_blocksize == PAGE_SIZE) 473854902099SJason Yan set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); 473954902099SJason Yan else 474054902099SJason Yan ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n"); 474154902099SJason Yan } 474254902099SJason Yan 474354902099SJason Yan if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { 474454902099SJason Yan if (ext4_has_feature_inline_data(sb)) { 474554902099SJason Yan ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" 474654902099SJason Yan " that may contain inline data"); 474754902099SJason Yan return -EINVAL; 474854902099SJason Yan } 474954902099SJason Yan if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { 475054902099SJason Yan ext4_msg(sb, KERN_ERR, 475154902099SJason Yan "DAX unsupported by block device."); 475254902099SJason Yan return -EINVAL; 475354902099SJason Yan } 475454902099SJason Yan } 475554902099SJason Yan 475654902099SJason Yan if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { 475754902099SJason Yan ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", 475854902099SJason 
Yan es->s_encryption_level); 475954902099SJason Yan return -EINVAL; 476054902099SJason Yan } 476154902099SJason Yan 4762d7f3542bSJason Yan return 0; 4763d7f3542bSJason Yan } 4764d7f3542bSJason Yan 476568e62439SJason Yan static int ext4_check_geometry(struct super_block *sb, 4766bc62dbf9SJason Yan struct ext4_super_block *es) 4767bc62dbf9SJason Yan { 4768bc62dbf9SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4769bc62dbf9SJason Yan __u64 blocks_count; 4770269e9226SJason Yan int err; 4771269e9226SJason Yan 4772269e9226SJason Yan if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) { 4773269e9226SJason Yan ext4_msg(sb, KERN_ERR, 4774269e9226SJason Yan "Number of reserved GDT blocks insanely large: %d", 4775269e9226SJason Yan le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); 4776269e9226SJason Yan return -EINVAL; 4777269e9226SJason Yan } 4778269e9226SJason Yan /* 4779269e9226SJason Yan * Test whether we have more sectors than will fit in sector_t, 4780269e9226SJason Yan * and whether the max offset is addressable by the page cache. 
4781269e9226SJason Yan */ 4782269e9226SJason Yan err = generic_check_addressable(sb->s_blocksize_bits, 4783269e9226SJason Yan ext4_blocks_count(es)); 4784269e9226SJason Yan if (err) { 4785269e9226SJason Yan ext4_msg(sb, KERN_ERR, "filesystem" 4786269e9226SJason Yan " too large to mount safely on this system"); 4787269e9226SJason Yan return err; 4788269e9226SJason Yan } 4789bc62dbf9SJason Yan 4790bc62dbf9SJason Yan /* check blocks count against device size */ 4791bc62dbf9SJason Yan blocks_count = sb_bdev_nr_blocks(sb); 4792bc62dbf9SJason Yan if (blocks_count && ext4_blocks_count(es) > blocks_count) { 4793bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " 4794bc62dbf9SJason Yan "exceeds size of device (%llu blocks)", 4795bc62dbf9SJason Yan ext4_blocks_count(es), blocks_count); 4796bc62dbf9SJason Yan return -EINVAL; 4797bc62dbf9SJason Yan } 4798bc62dbf9SJason Yan 4799bc62dbf9SJason Yan /* 4800bc62dbf9SJason Yan * It makes no sense for the first data block to be beyond the end 4801bc62dbf9SJason Yan * of the filesystem. 
4802bc62dbf9SJason Yan */ 4803bc62dbf9SJason Yan if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { 4804bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4805bc62dbf9SJason Yan "block %u is beyond end of filesystem (%llu)", 4806bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block), 4807bc62dbf9SJason Yan ext4_blocks_count(es)); 4808bc62dbf9SJason Yan return -EINVAL; 4809bc62dbf9SJason Yan } 4810bc62dbf9SJason Yan if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && 4811bc62dbf9SJason Yan (sbi->s_cluster_ratio == 1)) { 4812bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4813bc62dbf9SJason Yan "block is 0 with a 1k block and cluster size"); 4814bc62dbf9SJason Yan return -EINVAL; 4815bc62dbf9SJason Yan } 4816bc62dbf9SJason Yan 4817bc62dbf9SJason Yan blocks_count = (ext4_blocks_count(es) - 4818bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block) + 4819bc62dbf9SJason Yan EXT4_BLOCKS_PER_GROUP(sb) - 1); 4820bc62dbf9SJason Yan do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 4821bc62dbf9SJason Yan if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { 4822bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " 4823bc62dbf9SJason Yan "(block count %llu, first data block %u, " 4824bc62dbf9SJason Yan "blocks per group %lu)", blocks_count, 4825bc62dbf9SJason Yan ext4_blocks_count(es), 4826bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block), 4827bc62dbf9SJason Yan EXT4_BLOCKS_PER_GROUP(sb)); 4828bc62dbf9SJason Yan return -EINVAL; 4829bc62dbf9SJason Yan } 4830bc62dbf9SJason Yan sbi->s_groups_count = blocks_count; 4831bc62dbf9SJason Yan sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 4832bc62dbf9SJason Yan (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 4833bc62dbf9SJason Yan if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != 4834bc62dbf9SJason Yan le32_to_cpu(es->s_inodes_count)) { 4835bc62dbf9SJason Yan ext4_msg(sb, 
KERN_ERR, "inodes count not valid: %u vs %llu", 4836bc62dbf9SJason Yan le32_to_cpu(es->s_inodes_count), 4837bc62dbf9SJason Yan ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); 4838bc62dbf9SJason Yan return -EINVAL; 4839bc62dbf9SJason Yan } 4840bc62dbf9SJason Yan 4841bc62dbf9SJason Yan return 0; 4842bc62dbf9SJason Yan } 4843bc62dbf9SJason Yan 4844a4e6a511SJason Yan static int ext4_group_desc_init(struct super_block *sb, 4845a4e6a511SJason Yan struct ext4_super_block *es, 4846a4e6a511SJason Yan ext4_fsblk_t logical_sb_block, 4847a4e6a511SJason Yan ext4_group_t *first_not_zeroed) 4848a4e6a511SJason Yan { 4849a4e6a511SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4850a4e6a511SJason Yan unsigned int db_count; 4851a4e6a511SJason Yan ext4_fsblk_t block; 4852a4e6a511SJason Yan int i; 4853a4e6a511SJason Yan 4854a4e6a511SJason Yan db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 4855a4e6a511SJason Yan EXT4_DESC_PER_BLOCK(sb); 4856a4e6a511SJason Yan if (ext4_has_feature_meta_bg(sb)) { 4857a4e6a511SJason Yan if (le32_to_cpu(es->s_first_meta_bg) > db_count) { 4858a4e6a511SJason Yan ext4_msg(sb, KERN_WARNING, 4859a4e6a511SJason Yan "first meta block group too large: %u " 4860a4e6a511SJason Yan "(group descriptor block count %u)", 4861a4e6a511SJason Yan le32_to_cpu(es->s_first_meta_bg), db_count); 4862a4e6a511SJason Yan return -EINVAL; 4863a4e6a511SJason Yan } 4864a4e6a511SJason Yan } 4865a4e6a511SJason Yan rcu_assign_pointer(sbi->s_group_desc, 4866a4e6a511SJason Yan kvmalloc_array(db_count, 4867a4e6a511SJason Yan sizeof(struct buffer_head *), 4868a4e6a511SJason Yan GFP_KERNEL)); 4869a4e6a511SJason Yan if (sbi->s_group_desc == NULL) { 4870a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, "not enough memory"); 4871a4e6a511SJason Yan return -ENOMEM; 4872a4e6a511SJason Yan } 4873a4e6a511SJason Yan 4874a4e6a511SJason Yan bgl_lock_init(sbi->s_blockgroup_lock); 4875a4e6a511SJason Yan 4876a4e6a511SJason Yan /* Pre-read the descriptors into the buffer cache */ 
4877a4e6a511SJason Yan for (i = 0; i < db_count; i++) { 4878a4e6a511SJason Yan block = descriptor_loc(sb, logical_sb_block, i); 4879a4e6a511SJason Yan ext4_sb_breadahead_unmovable(sb, block); 4880a4e6a511SJason Yan } 4881a4e6a511SJason Yan 4882a4e6a511SJason Yan for (i = 0; i < db_count; i++) { 4883a4e6a511SJason Yan struct buffer_head *bh; 4884a4e6a511SJason Yan 4885a4e6a511SJason Yan block = descriptor_loc(sb, logical_sb_block, i); 4886a4e6a511SJason Yan bh = ext4_sb_bread_unmovable(sb, block); 4887a4e6a511SJason Yan if (IS_ERR(bh)) { 4888a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, 4889a4e6a511SJason Yan "can't read group descriptor %d", i); 4890a4e6a511SJason Yan sbi->s_gdb_count = i; 4891172e344eSYe Bin return PTR_ERR(bh); 4892a4e6a511SJason Yan } 4893a4e6a511SJason Yan rcu_read_lock(); 4894a4e6a511SJason Yan rcu_dereference(sbi->s_group_desc)[i] = bh; 4895a4e6a511SJason Yan rcu_read_unlock(); 4896a4e6a511SJason Yan } 4897a4e6a511SJason Yan sbi->s_gdb_count = db_count; 4898a4e6a511SJason Yan if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) { 4899a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4900172e344eSYe Bin return -EFSCORRUPTED; 4901a4e6a511SJason Yan } 4902172e344eSYe Bin 4903a4e6a511SJason Yan return 0; 4904a4e6a511SJason Yan } 4905a4e6a511SJason Yan 49069c1dd22dSJason Yan static int ext4_load_and_init_journal(struct super_block *sb, 49079c1dd22dSJason Yan struct ext4_super_block *es, 49089c1dd22dSJason Yan struct ext4_fs_context *ctx) 49099c1dd22dSJason Yan { 49109c1dd22dSJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 49119c1dd22dSJason Yan int err; 49129c1dd22dSJason Yan 49139c1dd22dSJason Yan err = ext4_load_journal(sb, es, ctx->journal_devnum); 49149c1dd22dSJason Yan if (err) 49159c1dd22dSJason Yan return err; 49169c1dd22dSJason Yan 49179c1dd22dSJason Yan if (ext4_has_feature_64bit(sb) && 49189c1dd22dSJason Yan !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 49199c1dd22dSJason Yan 
JBD2_FEATURE_INCOMPAT_64BIT)) { 49209c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 49219c1dd22dSJason Yan goto out; 49229c1dd22dSJason Yan } 49239c1dd22dSJason Yan 49249c1dd22dSJason Yan if (!set_journal_csum_feature_set(sb)) { 49259c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " 49269c1dd22dSJason Yan "feature set"); 49279c1dd22dSJason Yan goto out; 49289c1dd22dSJason Yan } 49299c1dd22dSJason Yan 49309c1dd22dSJason Yan if (test_opt2(sb, JOURNAL_FAST_COMMIT) && 49319c1dd22dSJason Yan !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 49329c1dd22dSJason Yan JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) { 49339c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, 49349c1dd22dSJason Yan "Failed to set fast commit journal feature"); 49359c1dd22dSJason Yan goto out; 49369c1dd22dSJason Yan } 49379c1dd22dSJason Yan 49389c1dd22dSJason Yan /* We have now updated the journal if required, so we can 49399c1dd22dSJason Yan * validate the data journaling mode. 
*/ 49409c1dd22dSJason Yan switch (test_opt(sb, DATA_FLAGS)) { 49419c1dd22dSJason Yan case 0: 49429c1dd22dSJason Yan /* No mode set, assume a default based on the journal 49439c1dd22dSJason Yan * capabilities: ORDERED_DATA if the journal can 49449c1dd22dSJason Yan * cope, else JOURNAL_DATA 49459c1dd22dSJason Yan */ 49469c1dd22dSJason Yan if (jbd2_journal_check_available_features 49479c1dd22dSJason Yan (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 49489c1dd22dSJason Yan set_opt(sb, ORDERED_DATA); 49499c1dd22dSJason Yan sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA; 49509c1dd22dSJason Yan } else { 49519c1dd22dSJason Yan set_opt(sb, JOURNAL_DATA); 49529c1dd22dSJason Yan sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; 49539c1dd22dSJason Yan } 49549c1dd22dSJason Yan break; 49559c1dd22dSJason Yan 49569c1dd22dSJason Yan case EXT4_MOUNT_ORDERED_DATA: 49579c1dd22dSJason Yan case EXT4_MOUNT_WRITEBACK_DATA: 49589c1dd22dSJason Yan if (!jbd2_journal_check_available_features 49599c1dd22dSJason Yan (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 49609c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Journal does not support " 49619c1dd22dSJason Yan "requested data journaling mode"); 49629c1dd22dSJason Yan goto out; 49639c1dd22dSJason Yan } 49649c1dd22dSJason Yan break; 49659c1dd22dSJason Yan default: 49669c1dd22dSJason Yan break; 49679c1dd22dSJason Yan } 49689c1dd22dSJason Yan 49699c1dd22dSJason Yan if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && 49709c1dd22dSJason Yan test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 49719c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "can't mount with " 49729c1dd22dSJason Yan "journal_async_commit in data=ordered mode"); 49739c1dd22dSJason Yan goto out; 49749c1dd22dSJason Yan } 49759c1dd22dSJason Yan 49769c1dd22dSJason Yan set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 49779c1dd22dSJason Yan 49789c1dd22dSJason Yan sbi->s_journal->j_submit_inode_data_buffers = 49799c1dd22dSJason Yan ext4_journal_submit_inode_data_buffers; 
	sbi->s_journal->j_finish_inode_data_buffers =
		ext4_journal_finish_inode_data_buffers;

	return 0;

out:
	/* flush s_sb_upd_work before destroying the journal. */
	flush_work(&sbi->s_sb_upd_work);
	jbd2_journal_destroy(sbi->s_journal);
	sbi->s_journal = NULL;
	return -EINVAL;
}

/*
 * Check mount options against data=journal mode and adjust or reject
 * them as needed.
 *
 * In data=journal mode, dioread_nolock, fast_commit and delalloc are
 * force-cleared (an *explicit* delalloc request, or DAX, is a hard
 * error instead).  Outside data=journal mode, cgroup writeback is
 * enabled on the superblock.
 *
 * Returns 0 on success, -EINVAL if an incompatible option combination
 * was explicitly requested.
 */
static int ext4_check_journal_data_mode(struct super_block *sb)
{
	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with "
			    "data=journal disables delayed allocation, "
			    "dioread_nolock, O_DIRECT and fast_commit support!\n");
		/* can't mount with both data=journal and dioread_nolock.
		 */
		clear_opt(sb, DIOREAD_NOLOCK);
		clear_opt2(sb, JOURNAL_FAST_COMMIT);
		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
			/* delalloc was asked for by name: refuse rather
			 * than silently dropping it. */
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and delalloc");
			return -EINVAL;
		}
		if (test_opt(sb, DAX_ALWAYS)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "both data=journal and dax");
			return -EINVAL;
		}
		if (ext4_has_feature_encrypt(sb)) {
			ext4_msg(sb, KERN_WARNING,
				 "encrypted files will use data=ordered "
				 "instead of data journaling mode");
		}
		/* Implicit (default) delalloc is silently dropped. */
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
		sb->s_iflags |= SB_I_CGROUPWB;
	}

	return 0;
}

/*
 * Read and validate the on-disk ext4 superblock.
 *
 * Sets the device block size (retrying with the real filesystem block
 * size if it differs from the minimum probe size), verifies the magic
 * number and the log block/cluster size fields, and on success stores
 * the superblock buffer_head in sbi->s_sbh, the parsed superblock in
 * sbi->s_es, and the logical superblock block number in *lsb.
 *
 * Returns 0 on success and a negative errno on failure.
 */
static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
			   int silent)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es;
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	struct buffer_head *bh;
	int ret = -EINVAL;
	int blocksize;

	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		return -EINVAL;
	}
	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes. We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sbi->s_sb_block;
	}

	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		return PTR_ERR(bh);
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC) {
		if (!silent)
			ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
		goto out;
	}

	/* Reject log sizes that would overflow the blocksize shift below. */
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto out;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto out;
	}

	blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	/*
	 * If the default block size is not the same as the real block size,
	 * we need to reload it.
	 */
	if (sb->s_blocksize == blocksize) {
		/* First read already used the right block size: done. */
		*lsb = logical_sb_block;
		sbi->s_sbh = bh;
		return 0;
	}

	/*
	 * bh must be released before kill_bdev(), otherwise
	 * it won't be freed and its page also. kill_bdev()
	 * is called by sb_set_blocksize().
	 */
	brelse(bh);
	/* Validate the filesystem blocksize */
	if (!sb_set_blocksize(sb, blocksize)) {
		ext4_msg(sb, KERN_ERR, "bad block size %d",
			 blocksize);
		/* bh was already released above; don't brelse() it again. */
		bh = NULL;
		goto out;
	}

	/* Re-read the superblock at the real block size. */
	logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
	offset = do_div(logical_sb_block, blocksize);
	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
		ret = PTR_ERR(bh);
		bh = NULL;
		goto out;
	}
	es = (struct ext4_super_block *)(bh->b_data + offset);
	sbi->s_es = es;
	if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
		ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
		goto out;
	}
	*lsb = logical_sb_block;
	sbi->s_sbh = bh;
	return 0;
out:
	brelse(bh);
	return ret;
}

/*
 * Initialize the htree directory hash state in the in-memory superblock
 * info from the on-disk superblock: copy the hash seed and default hash
 * version, and derive signed/unsigned char hash handling from the
 * EXT2_FLAGS_{UNSIGNED,SIGNED}_HASH flags (defaulting by the build's
 * char signedness, and recording the choice on disk on rw mounts).
 */
static void ext4_hash_info_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	unsigned int i;

	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);

	sbi->s_def_hash_version = es->s_def_hash_version;
	if (ext4_has_feature_dir_index(sb)) {
		i = le32_to_cpu(es->s_flags);
		if (i & EXT2_FLAGS_UNSIGNED_HASH)
			sbi->s_hash_unsigned = 3;
		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
			/* Neither flag set: pick by this build's char
			 * signedness and persist the choice if writable. */
#ifdef __CHAR_UNSIGNED__
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
			sbi->s_hash_unsigned = 3;
#else
			if (!sb_rdonly(sb))
				es->s_flags |=
					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
		}
	}
}

static int ext4_block_group_meta_init(struct super_block *sb, int silent)
{
	struct ext4_sb_info *sbi =
EXT4_SB(sb); 5168107d2be9SJason Yan struct ext4_super_block *es = sbi->s_es; 5169107d2be9SJason Yan int has_huge_files; 5170107d2be9SJason Yan 5171107d2be9SJason Yan has_huge_files = ext4_has_feature_huge_file(sb); 5172107d2be9SJason Yan sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, 5173107d2be9SJason Yan has_huge_files); 5174107d2be9SJason Yan sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); 5175107d2be9SJason Yan 5176107d2be9SJason Yan sbi->s_desc_size = le16_to_cpu(es->s_desc_size); 5177107d2be9SJason Yan if (ext4_has_feature_64bit(sb)) { 5178107d2be9SJason Yan if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || 5179107d2be9SJason Yan sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 5180107d2be9SJason Yan !is_power_of_2(sbi->s_desc_size)) { 5181107d2be9SJason Yan ext4_msg(sb, KERN_ERR, 5182107d2be9SJason Yan "unsupported descriptor size %lu", 5183107d2be9SJason Yan sbi->s_desc_size); 5184107d2be9SJason Yan return -EINVAL; 5185107d2be9SJason Yan } 5186107d2be9SJason Yan } else 5187107d2be9SJason Yan sbi->s_desc_size = EXT4_MIN_DESC_SIZE; 5188107d2be9SJason Yan 5189107d2be9SJason Yan sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); 5190107d2be9SJason Yan sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); 5191107d2be9SJason Yan 5192107d2be9SJason Yan sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb); 5193107d2be9SJason Yan if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) { 5194107d2be9SJason Yan if (!silent) 5195107d2be9SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 5196107d2be9SJason Yan return -EINVAL; 5197107d2be9SJason Yan } 5198107d2be9SJason Yan if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || 5199107d2be9SJason Yan sbi->s_inodes_per_group > sb->s_blocksize * 8) { 5200107d2be9SJason Yan ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", 5201107d2be9SJason Yan sbi->s_inodes_per_group); 5202107d2be9SJason Yan return -EINVAL; 
5203107d2be9SJason Yan } 5204107d2be9SJason Yan sbi->s_itb_per_group = sbi->s_inodes_per_group / 5205107d2be9SJason Yan sbi->s_inodes_per_block; 5206107d2be9SJason Yan sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb); 5207107d2be9SJason Yan sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY; 5208107d2be9SJason Yan sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); 5209107d2be9SJason Yan sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); 5210107d2be9SJason Yan 5211107d2be9SJason Yan return 0; 5212107d2be9SJason Yan } 5213107d2be9SJason Yan 5214960e0ab6SLukas Czerner static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) 52157edfd85bSLukas Czerner { 5216617ba13bSMingming Cao struct ext4_super_block *es = NULL; 52177edfd85bSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 521870bbb3e0SAndrew Morton ext4_fsblk_t logical_sb_block; 5219ac27a0ecSDave Kleikamp struct inode *root; 5220107d2be9SJason Yan int needs_recovery; 5221d4fab7b2STheodore Ts'o int err; 5222bfff6873SLukas Czerner ext4_group_t first_not_zeroed; 52237edfd85bSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 5224960e0ab6SLukas Czerner int silent = fc->sb_flags & SB_SILENT; 5225b237e304SHarshad Shirwadkar 5226b237e304SHarshad Shirwadkar /* Set defaults for the variables that will be set during parsing */ 5227e4e58e5dSOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) 52287edfd85bSLukas Czerner ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 5229ac27a0ecSDave Kleikamp 5230240799cdSTheodore Ts'o sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; 5231f613dfcbSTheodore Ts'o sbi->s_sectors_written_start = 52328446fe92SChristoph Hellwig part_stat_read(sb->s_bdev, sectors[STAT_WRITE]); 5233ac27a0ecSDave Kleikamp 5234a7a79c29SJason Yan err = ext4_load_super(sb, &logical_sb_block, silent); 5235a7a79c29SJason Yan if (err) 5236ac27a0ecSDave Kleikamp goto out_fail; 5237ac27a0ecSDave Kleikamp 5238a7a79c29SJason Yan es = 
sbi->s_es; 5239afc32f7eSTheodore Ts'o sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 5240ac27a0ecSDave Kleikamp 5241b26458d1SJason Yan err = ext4_init_metadata_csum(sb, es); 5242b26458d1SJason Yan if (err) 5243a5fc5119SJason Yan goto failed_mount; 5244a9c47317SDarrick J. Wong 52455f6d662dSJason Yan ext4_set_def_opts(sb, es); 5246ac27a0ecSDave Kleikamp 524708cefc7aSEric W. Biederman sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); 524808cefc7aSEric W. Biederman sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); 524930773840STheodore Ts'o sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 525030773840STheodore Ts'o sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 525130773840STheodore Ts'o sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 5252ac27a0ecSDave Kleikamp 525351ce6511SLukas Czerner /* 525451ce6511SLukas Czerner * set default s_li_wait_mult for lazyinit, for the case there is 525551ce6511SLukas Czerner * no mount option specified. 
525651ce6511SLukas Czerner */ 525751ce6511SLukas Czerner sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 525851ce6511SLukas Czerner 5259d4fab7b2STheodore Ts'o err = ext4_inode_info_init(sb, es); 5260d4fab7b2STheodore Ts'o if (err) 52619803387cSTheodore Ts'o goto failed_mount; 52629803387cSTheodore Ts'o 52637edfd85bSLukas Czerner err = parse_apply_sb_mount_options(sb, ctx); 52647edfd85bSLukas Czerner if (err < 0) 52655aee0f8aSTheodore Ts'o goto failed_mount; 52667edfd85bSLukas Czerner 52675a916be1STheodore Ts'o sbi->s_def_mount_opt = sbi->s_mount_opt; 5268e3645d72SZhang Yi sbi->s_def_mount_opt2 = sbi->s_mount_opt2; 52697edfd85bSLukas Czerner 52707edfd85bSLukas Czerner err = ext4_check_opt_consistency(fc, sb); 52717edfd85bSLukas Czerner if (err < 0) 52727edfd85bSLukas Czerner goto failed_mount; 52737edfd85bSLukas Czerner 527485456054SEric Biggers ext4_apply_options(fc, sb); 5275ac27a0ecSDave Kleikamp 5276d4fab7b2STheodore Ts'o err = ext4_encoding_init(sb, es); 5277d4fab7b2STheodore Ts'o if (err) 5278c83ad55eSGabriel Krisman Bertazi goto failed_mount; 5279c83ad55eSGabriel Krisman Bertazi 5280d4fab7b2STheodore Ts'o err = ext4_check_journal_data_mode(sb); 5281d4fab7b2STheodore Ts'o if (err) 528256889787STheodore Ts'o goto failed_mount; 528356889787STheodore Ts'o 52841751e8a6SLinus Torvalds sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 52851751e8a6SLinus Torvalds (test_opt(sb, POSIX_ACL) ? 
SB_POSIXACL : 0); 5286ac27a0ecSDave Kleikamp 52871ff20307SJeff Layton /* i_version is always enabled now */ 52881ff20307SJeff Layton sb->s_flags |= SB_I_VERSION; 52891ff20307SJeff Layton 5290d4fab7b2STheodore Ts'o err = ext4_check_feature_compatibility(sb, es, silent); 5291d4fab7b2STheodore Ts'o if (err) 5292ac27a0ecSDave Kleikamp goto failed_mount; 5293a13fb1a4SEric Sandeen 5294d4fab7b2STheodore Ts'o err = ext4_block_group_meta_init(sb, silent); 5295d4fab7b2STheodore Ts'o if (err) 52965b9554dcSTheodore Ts'o goto failed_mount; 52975b9554dcSTheodore Ts'o 5298db9345d9SJason Yan ext4_hash_info_init(sb); 5299ac27a0ecSDave Kleikamp 5300d4fab7b2STheodore Ts'o err = ext4_handle_clustersize(sb); 5301d4fab7b2STheodore Ts'o if (err) 5302281b5995STheodore Ts'o goto failed_mount; 5303960fd856STheodore Ts'o 5304d4fab7b2STheodore Ts'o err = ext4_check_geometry(sb, es); 5305d4fab7b2STheodore Ts'o if (err) 53060f2ddca6SFrom: Thiemo Nagel goto failed_mount; 53070f2ddca6SFrom: Thiemo Nagel 5308235699a8SKees Cook timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 5309c92dc856SJan Kara spin_lock_init(&sbi->s_error_lock); 5310bb15cea2STheodore Ts'o INIT_WORK(&sbi->s_sb_upd_work, update_super_work); 531104496411STao Ma 5312172e344eSYe Bin err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed); 5313172e344eSYe Bin if (err) 5314172e344eSYe Bin goto failed_mount3; 5315172e344eSYe Bin 5316d4fab7b2STheodore Ts'o err = ext4_es_register_shrinker(sbi); 5317d4fab7b2STheodore Ts'o if (err) 5318ce7e010aSTheodore Ts'o goto failed_mount3; 5319ce7e010aSTheodore Ts'o 5320c9de560dSAlex Tomas sbi->s_stripe = ext4_get_stripe_size(sbi); 5321c3defd99SKemeng Shi /* 5322c3defd99SKemeng Shi * It's hard to get stripe aligned blocks if stripe is not aligned with 5323c3defd99SKemeng Shi * cluster, just disable stripe and alert user to simpfy code and avoid 5324c3defd99SKemeng Shi * stripe aligned allocation which will rarely successes. 
5325c3defd99SKemeng Shi */ 5326c3defd99SKemeng Shi if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 && 5327c3defd99SKemeng Shi sbi->s_stripe % sbi->s_cluster_ratio != 0) { 5328c3defd99SKemeng Shi ext4_msg(sb, KERN_WARNING, 5329c3defd99SKemeng Shi "stripe (%lu) is not aligned with cluster size (%u), " 5330c3defd99SKemeng Shi "stripe is disabled", 5331c3defd99SKemeng Shi sbi->s_stripe, sbi->s_cluster_ratio); 5332c3defd99SKemeng Shi sbi->s_stripe = 0; 5333c3defd99SKemeng Shi } 533467a5da56SZheng Liu sbi->s_extent_max_zeroout_kb = 32; 5335c9de560dSAlex Tomas 5336f9ae9cf5STheodore Ts'o /* 5337f9ae9cf5STheodore Ts'o * set up enough so that it can read an inode 5338f9ae9cf5STheodore Ts'o */ 5339f9ae9cf5STheodore Ts'o sb->s_op = &ext4_sops; 5340617ba13bSMingming Cao sb->s_export_op = &ext4_export_ops; 5341617ba13bSMingming Cao sb->s_xattr = ext4_xattr_handlers; 5342643fa961SChandan Rajendra #ifdef CONFIG_FS_ENCRYPTION 5343a7550b30SJaegeuk Kim sb->s_cop = &ext4_cryptops; 5344ffcc4182SEric Biggers #endif 5345c93d8f88SEric Biggers #ifdef CONFIG_FS_VERITY 5346c93d8f88SEric Biggers sb->s_vop = &ext4_verityops; 5347c93d8f88SEric Biggers #endif 5348ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 5349617ba13bSMingming Cao sb->dq_op = &ext4_quota_operations; 5350e2b911c5SDarrick J. 
Wong if (ext4_has_feature_quota(sb)) 53511fa5efe3SJan Kara sb->s_qcop = &dquot_quotactl_sysfile_ops; 5352262b4662SJan Kara else 5353262b4662SJan Kara sb->s_qcop = &ext4_qctl_operations; 5354689c958cSLi Xi sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 5355ac27a0ecSDave Kleikamp #endif 535685787090SChristoph Hellwig memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); 5357f2fa2ffcSAneesh Kumar K.V 5358ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ 53593b9d4ed2STheodore Ts'o mutex_init(&sbi->s_orphan_lock); 5360ac27a0ecSDave Kleikamp 5361f7314a67SJason Yan ext4_fast_commit_init(sb); 5362aa75f4d3SHarshad Shirwadkar 5363ac27a0ecSDave Kleikamp sb->s_root = NULL; 5364ac27a0ecSDave Kleikamp 5365ac27a0ecSDave Kleikamp needs_recovery = (es->s_last_orphan != 0 || 536602f310fcSJan Kara ext4_has_feature_orphan_present(sb) || 5367e2b911c5SDarrick J. Wong ext4_has_feature_journal_needs_recovery(sb)); 5368ac27a0ecSDave Kleikamp 53693b50d501STheodore Ts'o if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) { 53703b50d501STheodore Ts'o err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)); 53713b50d501STheodore Ts'o if (err) 537250460fe8SDarrick J. Wong goto failed_mount3a; 53733b50d501STheodore Ts'o } 5374c5e06d10SJohann Lombardi 5375d4fab7b2STheodore Ts'o err = -EINVAL; 5376ac27a0ecSDave Kleikamp /* 5377ac27a0ecSDave Kleikamp * The first inode we look at is the journal inode. Don't try 5378ac27a0ecSDave Kleikamp * root first: it may be modified in the journal! 5379ac27a0ecSDave Kleikamp */ 5380e2b911c5SDarrick J. Wong if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { 53819c1dd22dSJason Yan err = ext4_load_and_init_journal(sb, es, ctx); 53824753d8a2STheodore Ts'o if (err) 538350460fe8SDarrick J. Wong goto failed_mount3a; 5384bc98a42cSDavid Howells } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && 5385e2b911c5SDarrick J. 
Wong ext4_has_feature_journal_needs_recovery(sb)) { 5386b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "required journal recovery " 5387b31e1552SEric Sandeen "suppressed and not mounted read-only"); 538843bd6f1bSJason Yan goto failed_mount3a; 5389ac27a0ecSDave Kleikamp } else { 53901e381f60SDmitry Monakhov /* Nojournal mode, all journal mount options are illegal */ 53911e381f60SDmitry Monakhov if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 53921e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 53931e381f60SDmitry Monakhov "journal_async_commit, fs mounted w/o journal"); 539443bd6f1bSJason Yan goto failed_mount3a; 53951e381f60SDmitry Monakhov } 539689481b5fSBaokun Li 539789481b5fSBaokun Li if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { 539889481b5fSBaokun Li ext4_msg(sb, KERN_ERR, "can't mount with " 539989481b5fSBaokun Li "journal_checksum, fs mounted w/o journal"); 540089481b5fSBaokun Li goto failed_mount3a; 540189481b5fSBaokun Li } 54021e381f60SDmitry Monakhov if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { 54031e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 54041e381f60SDmitry Monakhov "commit=%lu, fs mounted w/o journal", 54051e381f60SDmitry Monakhov sbi->s_commit_interval / HZ); 540643bd6f1bSJason Yan goto failed_mount3a; 54071e381f60SDmitry Monakhov } 54081e381f60SDmitry Monakhov if (EXT4_MOUNT_DATA_FLAGS & 54091e381f60SDmitry Monakhov (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 54101e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 54111e381f60SDmitry Monakhov "data=, fs mounted w/o journal"); 541243bd6f1bSJason Yan goto failed_mount3a; 54131e381f60SDmitry Monakhov } 541450b29d8fSDebabrata Banerjee sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; 54151e381f60SDmitry Monakhov clear_opt(sb, JOURNAL_CHECKSUM); 5416fd8c37ecSTheodore Ts'o clear_opt(sb, DATA_FLAGS); 5417995a3ed6SHarshad Shirwadkar clear_opt2(sb, JOURNAL_FAST_COMMIT); 54180390131bSFrank Mayhar sbi->s_journal = NULL; 
54190390131bSFrank Mayhar needs_recovery = 0; 5420ac27a0ecSDave Kleikamp } 5421ac27a0ecSDave Kleikamp 5422cdb7ee4cSTahsin Erdogan if (!test_opt(sb, NO_MBCACHE)) { 542347387409STahsin Erdogan sbi->s_ea_block_cache = ext4_xattr_create_cache(); 542447387409STahsin Erdogan if (!sbi->s_ea_block_cache) { 5425cdb7ee4cSTahsin Erdogan ext4_msg(sb, KERN_ERR, 5426cdb7ee4cSTahsin Erdogan "Failed to create ea_block_cache"); 5427d4fab7b2STheodore Ts'o err = -EINVAL; 54289c191f70ST Makphaibulchoke goto failed_mount_wq; 54299c191f70ST Makphaibulchoke } 54309c191f70ST Makphaibulchoke 5431dec214d0STahsin Erdogan if (ext4_has_feature_ea_inode(sb)) { 5432dec214d0STahsin Erdogan sbi->s_ea_inode_cache = ext4_xattr_create_cache(); 5433dec214d0STahsin Erdogan if (!sbi->s_ea_inode_cache) { 5434dec214d0STahsin Erdogan ext4_msg(sb, KERN_ERR, 5435dec214d0STahsin Erdogan "Failed to create ea_inode_cache"); 5436d4fab7b2STheodore Ts'o err = -EINVAL; 5437dec214d0STahsin Erdogan goto failed_mount_wq; 5438dec214d0STahsin Erdogan } 5439dec214d0STahsin Erdogan } 5440cdb7ee4cSTahsin Erdogan } 5441dec214d0STahsin Erdogan 5442fd89d5f2STejun Heo /* 5443952fc18eSTheodore Ts'o * Get the # of file system overhead blocks from the 5444952fc18eSTheodore Ts'o * superblock if present. 5445952fc18eSTheodore Ts'o */ 5446952fc18eSTheodore Ts'o sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); 544785d825dbSTheodore Ts'o /* ignore the precalculated value if it is ridiculous */ 544885d825dbSTheodore Ts'o if (sbi->s_overhead > ext4_blocks_count(es)) 544985d825dbSTheodore Ts'o sbi->s_overhead = 0; 545085d825dbSTheodore Ts'o /* 545185d825dbSTheodore Ts'o * If the bigalloc feature is not enabled recalculating the 545285d825dbSTheodore Ts'o * overhead doesn't take long, so we might as well just redo 545385d825dbSTheodore Ts'o * it to make sure we are using the correct value. 
545485d825dbSTheodore Ts'o */ 545585d825dbSTheodore Ts'o if (!ext4_has_feature_bigalloc(sb)) 545685d825dbSTheodore Ts'o sbi->s_overhead = 0; 545785d825dbSTheodore Ts'o if (sbi->s_overhead == 0) { 545807aa2ea1SLukas Czerner err = ext4_calculate_overhead(sb); 545907aa2ea1SLukas Czerner if (err) 5460952fc18eSTheodore Ts'o goto failed_mount_wq; 5461952fc18eSTheodore Ts'o } 5462952fc18eSTheodore Ts'o 5463952fc18eSTheodore Ts'o /* 5464fd89d5f2STejun Heo * The maximum number of concurrent works can be high and 5465fd89d5f2STejun Heo * concurrency isn't really necessary. Limit it to 1. 5466fd89d5f2STejun Heo */ 54672e8fa54eSJan Kara EXT4_SB(sb)->rsv_conversion_wq = 54682e8fa54eSJan Kara alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 54692e8fa54eSJan Kara if (!EXT4_SB(sb)->rsv_conversion_wq) { 54702e8fa54eSJan Kara printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); 5471d4fab7b2STheodore Ts'o err = -ENOMEM; 54722e8fa54eSJan Kara goto failed_mount4; 54732e8fa54eSJan Kara } 54742e8fa54eSJan Kara 5475ac27a0ecSDave Kleikamp /* 5476dab291afSMingming Cao * The jbd2_journal_load will have done any necessary log recovery, 5477ac27a0ecSDave Kleikamp * so we can safely mount the rest of the filesystem now. 
5478ac27a0ecSDave Kleikamp */ 5479ac27a0ecSDave Kleikamp 54808a363970STheodore Ts'o root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL); 54811d1fe1eeSDavid Howells if (IS_ERR(root)) { 5482b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "get root inode failed"); 5483d4fab7b2STheodore Ts'o err = PTR_ERR(root); 548432a9bb57SManish Katiyar root = NULL; 5485ac27a0ecSDave Kleikamp goto failed_mount4; 5486ac27a0ecSDave Kleikamp } 5487ac27a0ecSDave Kleikamp if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 5488b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 548994bf608aSAl Viro iput(root); 5490d4fab7b2STheodore Ts'o err = -EFSCORRUPTED; 5491ac27a0ecSDave Kleikamp goto failed_mount4; 5492ac27a0ecSDave Kleikamp } 5493b886ee3eSGabriel Krisman Bertazi 549448fde701SAl Viro sb->s_root = d_make_root(root); 54951d1fe1eeSDavid Howells if (!sb->s_root) { 5496b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "get root dentry failed"); 5497d4fab7b2STheodore Ts'o err = -ENOMEM; 54981d1fe1eeSDavid Howells goto failed_mount4; 54991d1fe1eeSDavid Howells } 5500ac27a0ecSDave Kleikamp 5501d4fab7b2STheodore Ts'o err = ext4_setup_super(sb, es, sb_rdonly(sb)); 5502d4fab7b2STheodore Ts'o if (err == -EROFS) { 55031751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 5504d4fab7b2STheodore Ts'o } else if (err) 5505c89128a0SJaegeuk Kim goto failed_mount4a; 5506ef7f3835SKalpak Shah 5507b5799018STheodore Ts'o ext4_set_resv_clusters(sb); 550827dd4385SLukas Czerner 55090f5bde1dSJan Kara if (test_opt(sb, BLOCK_VALIDITY)) { 55106fd058f7STheodore Ts'o err = ext4_setup_system_zone(sb); 55116fd058f7STheodore Ts'o if (err) { 5512b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "failed to initialize system " 5513fbe845ddSCurt Wohlgemuth "zone (%d)", err); 5514f9ae9cf5STheodore Ts'o goto failed_mount4a; 5515f9ae9cf5STheodore Ts'o } 55160f5bde1dSJan Kara } 55178016e29fSHarshad Shirwadkar ext4_fc_replay_cleanup(sb); 5518f9ae9cf5STheodore Ts'o 5519f9ae9cf5STheodore Ts'o 
ext4_ext_init(sb); 5520196e402aSHarshad Shirwadkar 5521196e402aSHarshad Shirwadkar /* 5522196e402aSHarshad Shirwadkar * Enable optimize_scan if number of groups is > threshold. This can be 5523196e402aSHarshad Shirwadkar * turned off by passing "mb_optimize_scan=0". This can also be 5524196e402aSHarshad Shirwadkar * turned on forcefully by passing "mb_optimize_scan=1". 5525196e402aSHarshad Shirwadkar */ 552627b38686SOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) { 552727b38686SOjaswin Mujoo if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD) 5528196e402aSHarshad Shirwadkar set_opt2(sb, MB_OPTIMIZE_SCAN); 552927b38686SOjaswin Mujoo else 5530196e402aSHarshad Shirwadkar clear_opt2(sb, MB_OPTIMIZE_SCAN); 553127b38686SOjaswin Mujoo } 5532196e402aSHarshad Shirwadkar 5533f9ae9cf5STheodore Ts'o err = ext4_mb_init(sb); 5534f9ae9cf5STheodore Ts'o if (err) { 5535f9ae9cf5STheodore Ts'o ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 5536f9ae9cf5STheodore Ts'o err); 5537dcf2d804STao Ma goto failed_mount5; 5538c2774d84SAneesh Kumar K.V } 5539c2774d84SAneesh Kumar K.V 5540027f14f5STheodore Ts'o /* 5541027f14f5STheodore Ts'o * We can only set up the journal commit callback once 5542027f14f5STheodore Ts'o * mballoc is initialized 5543027f14f5STheodore Ts'o */ 5544027f14f5STheodore Ts'o if (sbi->s_journal) 5545027f14f5STheodore Ts'o sbi->s_journal->j_commit_callback = 5546027f14f5STheodore Ts'o ext4_journal_commit_callback; 5547027f14f5STheodore Ts'o 5548d5e72c4eSTheodore Ts'o err = ext4_percpu_param_init(sbi); 5549d5e72c4eSTheodore Ts'o if (err) 5550d5e03cbbSTheodore Ts'o goto failed_mount6; 5551d5e03cbbSTheodore Ts'o 5552e2b911c5SDarrick J. 
Wong if (ext4_has_feature_flex_bg(sb)) 5553d5e03cbbSTheodore Ts'o if (!ext4_fill_flex_info(sb)) { 5554d5e03cbbSTheodore Ts'o ext4_msg(sb, KERN_ERR, 5555d5e03cbbSTheodore Ts'o "unable to initialize " 5556d5e03cbbSTheodore Ts'o "flex_bg meta info!"); 5557d4fab7b2STheodore Ts'o err = -ENOMEM; 5558d5e03cbbSTheodore Ts'o goto failed_mount6; 5559d5e03cbbSTheodore Ts'o } 5560d5e03cbbSTheodore Ts'o 5561bfff6873SLukas Czerner err = ext4_register_li_request(sb, first_not_zeroed); 5562bfff6873SLukas Czerner if (err) 5563dcf2d804STao Ma goto failed_mount6; 5564bfff6873SLukas Czerner 5565b5799018STheodore Ts'o err = ext4_register_sysfs(sb); 5566dcf2d804STao Ma if (err) 5567dcf2d804STao Ma goto failed_mount7; 55683197ebdbSTheodore Ts'o 556902f310fcSJan Kara err = ext4_init_orphan_info(sb); 557002f310fcSJan Kara if (err) 557102f310fcSJan Kara goto failed_mount8; 55729b2ff357SJan Kara #ifdef CONFIG_QUOTA 55739b2ff357SJan Kara /* Enable quota usage during mount. */ 5574bc98a42cSDavid Howells if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) { 55759b2ff357SJan Kara err = ext4_enable_quotas(sb); 55769b2ff357SJan Kara if (err) 557702f310fcSJan Kara goto failed_mount9; 55789b2ff357SJan Kara } 55799b2ff357SJan Kara #endif /* CONFIG_QUOTA */ 55809b2ff357SJan Kara 5581bc71726cSzhangyi (F) /* 5582bc71726cSzhangyi (F) * Save the original bdev mapping's wb_err value which could be 5583bc71726cSzhangyi (F) * used to detect the metadata async write error. 
5584bc71726cSzhangyi (F) */ 5585bc71726cSzhangyi (F) spin_lock_init(&sbi->s_bdev_wb_lock); 5586bc71726cSzhangyi (F) errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, 5587bc71726cSzhangyi (F) &sbi->s_bdev_wb_err); 5588617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 5589617ba13bSMingming Cao ext4_orphan_cleanup(sb, es); 5590617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 55919b6641ddSYe Bin /* 55929b6641ddSYe Bin * Update the checksum after updating free space/inode counters and 55939b6641ddSYe Bin * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect 55949b6641ddSYe Bin * checksum in the buffer cache until it is written out and 55959b6641ddSYe Bin * e2fsprogs programs trying to open a file system immediately 55969b6641ddSYe Bin * after it is mounted can fail. 55979b6641ddSYe Bin */ 55989b6641ddSYe Bin ext4_superblock_csum_set(sb); 55990390131bSFrank Mayhar if (needs_recovery) { 5600b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "recovery complete"); 560111215630SJan Kara err = ext4_mark_recovery_complete(sb, es); 560211215630SJan Kara if (err) 5603d13f9963SBaokun Li goto failed_mount10; 56040390131bSFrank Mayhar } 56050390131bSFrank Mayhar 560670200574SChristoph Hellwig if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) 560779add3a3SLukas Czerner ext4_msg(sb, KERN_WARNING, 560870200574SChristoph Hellwig "mounting with \"discard\" option, but the device does not support discard"); 560979add3a3SLukas Czerner 561066e61a9eSTheodore Ts'o if (es->s_error_count) 561166e61a9eSTheodore Ts'o mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 5612ac27a0ecSDave Kleikamp 5613efbed4dcSTheodore Ts'o /* Enable message ratelimiting. Default is 10 messages per 5 secs. 
*/ 5614efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); 5615efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); 5616efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); 56171cf006edSDmitry Monakhov atomic_set(&sbi->s_warning_count, 0); 56181cf006edSDmitry Monakhov atomic_set(&sbi->s_msg_count, 0); 5619efbed4dcSTheodore Ts'o 5620ac27a0ecSDave Kleikamp return 0; 5621ac27a0ecSDave Kleikamp 5622d13f9963SBaokun Li failed_mount10: 5623f3c1c42eSBaokun Li ext4_quotas_off(sb, EXT4_MAXQUOTAS); 5624d13f9963SBaokun Li failed_mount9: __maybe_unused 562502f310fcSJan Kara ext4_release_orphan_info(sb); 562672ba7450STheodore Ts'o failed_mount8: 5627ebd173beSTheodore Ts'o ext4_unregister_sysfs(sb); 5628cb8d53d2SEric Biggers kobject_put(&sbi->s_kobj); 5629dcf2d804STao Ma failed_mount7: 5630dcf2d804STao Ma ext4_unregister_li_request(sb); 5631dcf2d804STao Ma failed_mount6: 5632f9ae9cf5STheodore Ts'o ext4_mb_release(sb); 5633dcbf8758SJason Yan ext4_flex_groups_free(sbi); 56341f79467cSJason Yan ext4_percpu_param_destroy(sbi); 563500764937SAzat Khuzhin failed_mount5: 5636f9ae9cf5STheodore Ts'o ext4_ext_release(sb); 5637f9ae9cf5STheodore Ts'o ext4_release_system_zone(sb); 5638f9ae9cf5STheodore Ts'o failed_mount4a: 563994bf608aSAl Viro dput(sb->s_root); 564032a9bb57SManish Katiyar sb->s_root = NULL; 564194bf608aSAl Viro failed_mount4: 5642b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "mount failed"); 56432e8fa54eSJan Kara if (EXT4_SB(sb)->rsv_conversion_wq) 56442e8fa54eSJan Kara destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); 56454c0425ffSMingming Cao failed_mount_wq: 5646dec214d0STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_inode_cache); 5647dec214d0STahsin Erdogan sbi->s_ea_inode_cache = NULL; 564850c15df6SChengguang Xu 564947387409STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 565047387409STahsin Erdogan sbi->s_ea_block_cache = NULL; 565150c15df6SChengguang 
Xu 56520390131bSFrank Mayhar if (sbi->s_journal) { 5653bb15cea2STheodore Ts'o /* flush s_sb_upd_work before journal destroy. */ 5654bb15cea2STheodore Ts'o flush_work(&sbi->s_sb_upd_work); 5655dab291afSMingming Cao jbd2_journal_destroy(sbi->s_journal); 565647b4a50bSJan Kara sbi->s_journal = NULL; 56570390131bSFrank Mayhar } 565850460fe8SDarrick J. Wong failed_mount3a: 5659d3922a77SZheng Liu ext4_es_unregister_shrinker(sbi); 5660eb68d0e2SZheng Liu failed_mount3: 5661bb15cea2STheodore Ts'o /* flush s_sb_upd_work before sbi destroy */ 5662bb15cea2STheodore Ts'o flush_work(&sbi->s_sb_upd_work); 56632a4ae3bcSJan Kara del_timer_sync(&sbi->s_err_report); 5664618f0031SPavel Skripkin ext4_stop_mmpd(sbi); 5665a4e6a511SJason Yan ext4_group_desc_free(sbi); 5666ac27a0ecSDave Kleikamp failed_mount: 56670441984aSDarrick J. Wong if (sbi->s_chksum_driver) 56680441984aSDarrick J. Wong crypto_free_shash(sbi->s_chksum_driver); 5669c83ad55eSGabriel Krisman Bertazi 56705298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 5671f8f4acb6SDaniel Rosenberg utf8_unload(sb->s_encoding); 5672c83ad55eSGabriel Krisman Bertazi #endif 5673c83ad55eSGabriel Krisman Bertazi 5674ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 5675856dd6c5SNathan Chancellor for (unsigned int i = 0; i < EXT4_MAXQUOTAS; i++) 56760ba33facSTheodore Ts'o kfree(get_qf_name(sb, sbi, i)); 5677ac27a0ecSDave Kleikamp #endif 5678ac4acb1fSEric Biggers fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); 5679a7a79c29SJason Yan brelse(sbi->s_sbh); 56801489dffdSChristoph Hellwig if (sbi->s_journal_bdev) { 56811489dffdSChristoph Hellwig invalidate_bdev(sbi->s_journal_bdev); 56821489dffdSChristoph Hellwig blkdev_put(sbi->s_journal_bdev, sb); 56831489dffdSChristoph Hellwig } 5684ac27a0ecSDave Kleikamp out_fail: 568526fb5290SZhihao Cheng invalidate_bdev(sb->s_bdev); 5686ac27a0ecSDave Kleikamp sb->s_fs_info = NULL; 5687d4fab7b2STheodore Ts'o return err; 5688ac27a0ecSDave Kleikamp } 5689ac27a0ecSDave Kleikamp 5690cebe85d5SLukas Czerner static 
int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi;
	const char *descr;
	int ret;

	/*
	 * Allocate the per-filesystem info and hand it to the fs_context;
	 * on any failure below we must free it AND clear fc->s_fs_info so
	 * the generic mount teardown does not see a stale pointer.
	 */
	sbi = ext4_alloc_sbi(sb);
	if (!sbi)
		return -ENOMEM;

	fc->s_fs_info = sbi;

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	sbi->s_sb_block = 1;	/* Default super block location */
	if (ctx->spec & EXT4_SPEC_s_sb_block)
		sbi->s_sb_block = ctx->s_sb_block;

	/* All of the real mount work happens in __ext4_fill_super(). */
	ret = __ext4_fill_super(fc, sb);
	if (ret < 0)
		goto free_sbi;

	/*
	 * Pick the suffix for the "mounted ... with%s" message below:
	 * each journalled-mode string starts with a space (" ... mode"),
	 * while the no-journal case completes the word as "without journal".
	 */
	if (sbi->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	/* Rate-limited so mount storms do not flood the log. */
	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem %pU %s with%s. "
			 "Quota mode: %s.", &sb->s_uuid,
			 sb_rdonly(sb) ? "ro" : "r/w", descr,
			 ext4_quota_mode(sb));

	/* Update the s_overhead_clusters if necessary */
	ext4_update_overhead(sb, false);
	return 0;

free_sbi:
	ext4_free_sbi(sbi);
	fc->s_fs_info = NULL;
	return ret;
}

/* fs_context ->get_tree hook: standard block-device mount path. */
static int ext4_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, ext4_fill_super);
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Mirror the current mount-tunable values into the journal. */
	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;
	ext4_fc_init(sb, journal);

	/* j_flags updates must be serialized by j_state_lock. */
	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	/*
	 * Always enable journal cycle record option, letting the journal
	 * records log transactions continuously between each mount.
	 */
	journal->j_flags |= JBD2_CYCLE_RECORD;
	write_unlock(&journal->j_state_lock);
}

/*
 * Look up and sanity-check the inode holding an internal journal.
 * Returns the inode on success or an ERR_PTR() on failure.
 */
static struct inode *ext4_get_journal_inode(struct super_block *sb,
		unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return ERR_CAST(journal_inode);
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return ERR_PTR(-EFSCORRUPTED);
	}
	/* The journal must be a regular, unencrypted file. */
	if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return ERR_PTR(-EFSCORRUPTED);
	}

	ext4_debug("Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	return journal_inode;
}

/*
 * jbd2 ->j_bmap callback: translate a journal-inode logical block number
 * into a physical block number.  A lookup failure or a hole in the
 * journal file is fatal, so the journal is aborted.
 */
static int ext4_journal_bmap(journal_t *journal, sector_t *block)
{
	struct ext4_map_blocks map;
	int ret;

	/* Device journals need no translation. */
	if (journal->j_inode == NULL)
		return 0;

	map.m_lblk = *block;
	map.m_len = 1;
	ret = ext4_map_blocks(NULL, journal->j_inode, &map, 0);
	if (ret <= 0) {
		ext4_msg(journal->j_inode->i_sb, KERN_CRIT,
			 "journal bmap failed: block %llu ret %d\n",
			 *block, ret);
		/* ret == 0 means a hole in the journal file: abort with -EIO. */
		jbd2_journal_abort(journal, ret ? ret : -EIO);
		return ret;
	}
	*block = map.m_pblk;
	return 0;
}

/*
 * Open the internal (inode-based) journal and attach it to this
 * superblock.  Returns the journal or an ERR_PTR().
 */
static journal_t *ext4_open_inode_journal(struct super_block *sb,
					  unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (IS_ERR(journal_inode))
		return ERR_CAST(journal_inode);

	journal = jbd2_journal_init_inode(journal_inode);
	if (IS_ERR(journal)) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		/* jbd2 did not take the inode reference; drop ours. */
		iput(journal_inode);
		return ERR_CAST(journal);
	}
	journal->j_private = sb;
	journal->j_bmap = ext4_journal_bmap;
	ext4_init_journal_params(sb, journal);
	return journal;
}

/*
 * Open the external journal block device and validate its on-disk
 * superblock.  On success *j_start/*j_len describe the journal extent
 * and the opened bdev is returned; on failure an ERR_PTR() is returned.
 */
static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
					dev_t j_dev, ext4_fsblk_t *j_start,
					ext4_fsblk_t *j_len)
{
	struct buffer_head *bh;
	struct block_device *bdev;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	int errno;

	/* see get_tree_bdev why this is needed and safe */
	up_write(&sb->s_umount);
	bdev = blkdev_get_by_dev(j_dev, BLK_OPEN_READ | BLK_OPEN_WRITE, sb,
				 &fs_holder_ops);
	down_write(&sb->s_umount);
	if (IS_ERR(bdev)) {
		ext4_msg(sb, KERN_ERR,
			 "failed to open journal device unknown-block(%u,%u) %ld",
			 MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev));
		return ERR_CAST(bdev);
	}

	/*
	 * The journal device's sector size must not exceed the filesystem
	 * block size, otherwise we cannot address it in fs-block units.
	 */
	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		errno = -EINVAL;
		goto out_bdev;
	}

	/* Locate the journal superblock, which lives at fixed byte offset
	 * EXT4_MIN_BLOCK_SIZE, within whatever fs block that falls in. */
	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	bh = __bread(bdev, sb_block, blocksize);
	if (!bh) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			 "external journal");
		errno = -EINVAL;
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	/* Must look like an ext4 superblock with the JOURNAL_DEV feature. */
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has bad superblock");
		errno = -EFSCORRUPTED;
		goto out_bh;
	}

	/* Verify the journal superblock checksum when metadata_csum is set. */
	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has corrupt superblock");
		errno = -EFSCORRUPTED;
		goto out_bh;
	}

	/* The journal device must be the one this fs was created with. */
	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		errno = -EFSCORRUPTED;
		goto out_bh;
	}

	/* Journal data starts right after the journal superblock. */
	*j_start = sb_block + 1;
	*j_len = ext4_blocks_count(es);
	brelse(bh);
	return bdev;

out_bh:
	brelse(bh);
out_bdev:
	blkdev_put(bdev, sb);
	return ERR_PTR(errno);
}

/*
 * Open an external (device-based) journal and attach it to this
 * superblock.  Returns the journal or an ERR_PTR().
 */
static journal_t *ext4_open_dev_journal(struct super_block *sb,
					dev_t j_dev)
{
	journal_t *journal;
	ext4_fsblk_t j_start;
	ext4_fsblk_t j_len;
	struct block_device *journal_bdev;
	int errno = 0;

	journal_bdev = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
	if (IS_ERR(journal_bdev))
		return ERR_CAST(journal_bdev);

	journal = jbd2_journal_init_dev(journal_bdev, sb->s_bdev, j_start,
					j_len, sb->s_blocksize);
	if (IS_ERR(journal)) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		errno = PTR_ERR(journal);
		goto out_bdev;
	}
	/* Sharing one external journal between filesystems is unsupported. */
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
			 "user (unsupported) - %d",
			 be32_to_cpu(journal->j_superblock->s_nr_users));
		errno = -EINVAL;
		goto out_journal;
	}
	journal->j_private = sb;
	/* Remember the bdev so unmount/error paths can release it. */
	EXT4_SB(sb)->s_journal_bdev = journal_bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	blkdev_put(journal_bdev, sb);
	return ERR_PTR(errno);
}

/*
 * Locate, open and load the journal named by the on-disk superblock
 * (internal inode journal or external journal device), replaying it
 * if recovery is needed.  Called at mount time.
 */
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
	int journal_dev_ro;

	/* Caller guarantees the journal feature; corrupted fs otherwise. */
	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;

	/* A mount-option journal_dev overrides the one recorded on disk. */
	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	/* A filesystem has either an internal or an external journal. */
	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_open_inode_journal(sb, journal_inum);
		if (IS_ERR(journal))
			return PTR_ERR(journal);
	} else {
		journal = ext4_open_dev_journal(sb, journal_dev);
		if (IS_ERR(journal))
			return PTR_ERR(journal);
	}

	journal_dev_ro = bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	/* A read-only journal device cannot back a read-write mount. */
	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				err = -EROFS;
				goto err_out;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	/* No recovery pending: discard any stale journal contents. */
	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		__le16 orig_state;
		bool changed = false;

		/*
		 * Journal replay may rewrite the superblock's error area;
		 * snapshot it first so the pre-replay error record can be
		 * put back afterwards (best effort: skipped if OOM).
		 */
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save && memcmp(((char *) es) + EXT4_S_ERR_START,
				   save, EXT4_S_ERR_LEN)) {
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
			changed = true;
		}
		kfree(save);
		orig_state = es->s_state;
		es->s_state |= cpu_to_le16(EXT4_SB(sb)->s_mount_state &
					   EXT4_ERROR_FS);
		if (orig_state != es->s_state)
			changed = true;
		/* Write out restored error information to the superblock */
		if (changed && !really_read_only) {
			int err2;
			err2 = ext4_commit_super(sb);
			/* Keep the earlier error, if any (GNU ?: extension). */
			err = err ? : err2;
		}
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		goto err_out;
	}

	EXT4_SB(sb)->s_journal = journal;
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}

	/* Persist a changed journal location for the next mount. */
	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);
		ext4_commit_super(sb);
	}
	if (!really_read_only && journal_inum &&
	    journal_inum != le32_to_cpu(es->s_journal_inum)) {
		es->s_journal_inum = cpu_to_le32(journal_inum);
		ext4_commit_super(sb);
	}

	return 0;

err_out:
	jbd2_journal_destroy(journal);
	return err;
}

/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
static void ext4_update_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *sbh = sbi->s_sbh;

	/* Buffer stays locked for the whole update so readers see a
	 * consistent on-disk image; unlocked at the end of this function. */
	lock_buffer(sbh);
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!sb_rdonly(sb))
		ext4_update_tstamp(es, s_wtime);
	/* Lifetime kbytes written = baseline + device sectors since mount,
	 * converted to KiB (>> 1). */
	es->s_kbytes_written =
		cpu_to_le64(sbi->s_kbytes_written +
		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		      sbi->s_sectors_written_start) >> 1));
	/* Counters may not exist yet on early-mount failures; check first. */
	if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(sbi, percpu_counter_sum_positive(
				&sbi->s_freeclusters_counter)));
	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&sbi->s_freeinodes_counter));
	/* Copy error information to the on-disk superblock */
	spin_lock(&sbi->s_error_lock);
	if (sbi->s_add_error_count > 0) {
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		/* Record "first error" fields only once, while still unset. */
		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
			__ext4_update_tstamp(&es->s_first_error_time,
					     &es->s_first_error_time_hi,
					     sbi->s_first_error_time);
			/*
			 * NOTE(review): strncpy may leave the fixed-width
			 * on-disk func field without NUL termination when the
			 * name fills it — presumably intentional for this
			 * on-disk format; confirm before changing.
			 */
			strncpy(es->s_first_error_func, sbi->s_first_error_func,
				sizeof(es->s_first_error_func));
			es->s_first_error_line =
				cpu_to_le32(sbi->s_first_error_line);
			es->s_first_error_ino =
				cpu_to_le32(sbi->s_first_error_ino);
			es->s_first_error_block =
				cpu_to_le64(sbi->s_first_error_block);
			es->s_first_error_errcode =
				ext4_errno_to_code(sbi->s_first_error_code);
		}
		/* "Last error" fields are refreshed on every update. */
		__ext4_update_tstamp(&es->s_last_error_time,
				     &es->s_last_error_time_hi,
				     sbi->s_last_error_time);
		strncpy(es->s_last_error_func, sbi->s_last_error_func,
			sizeof(es->s_last_error_func));
		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
		es->s_last_error_errcode =
				ext4_errno_to_code(sbi->s_last_error_code);
		/*
		 * Start the daily error reporting function if it hasn't been
		 * started already
		 */
		if (!es->s_error_count)
			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
		sbi->s_add_error_count = 0;
	}
	spin_unlock(&sbi->s_error_lock);

	/* Checksum must be computed last, over the finished contents. */
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbh);
}

/*
 * Synchronously write the primary superblock to disk, refreshing its
 * contents from in-memory state first.  Returns 0 or a negative errno.
 */
static int ext4_commit_super(struct super_block *sb)
{
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;

	if (!sbh)
		return -EINVAL;
	if (block_device_ejected(sb))
		return -ENODEV;

	ext4_update_super(sb);

	lock_buffer(sbh);
	/* Buffer got discarded which means block device got invalidated */
	if (!buffer_mapped(sbh)) {
		unlock_buffer(sbh);
		return -EIO;
	}

	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	/* Extra reference for the in-flight I/O; released by the end_io. */
	get_bh(sbh);
	/* Clear potential dirty bit if it was journalled update */
	clear_buffer_dirty(sbh);
	sbh->b_end_io = end_buffer_write_sync;
	/* With the barrier mount option, force the write to stable media. */
	submit_bh(REQ_OP_WRITE | REQ_SYNC |
		  (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
	wait_on_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		ext4_msg(sb, KERN_ERR, "I/O error while writing "
		       "superblock");
		/* Reset the buffer state so a later retry is possible. */
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
		return -EIO;
	}
	return 0;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es)
{
	int err;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	if (!ext4_has_feature_journal(sb)) {
		/* Feature flag and in-memory journal must agree. */
		if (journal != NULL) {
			ext4_error(sb, "Journal got removed while the fs was "
				   "mounted!");
			return -EFSCORRUPTED;
		}
		return 0;
	}
	/* Quiesce the journal and push everything to disk before clearing
	 * the needs-recovery flag. */
	jbd2_journal_lock_updates(journal);
	err = jbd2_journal_flush(journal, 0);
	if (err < 0)
		goto out;

	if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
	    ext4_has_feature_orphan_present(sb))) {
		/* orphan_present must not be cleared while orphans remain. */
		if (!ext4_orphan_file_empty(sb)) {
			ext4_error(sb, "Orphan file not empty on read-only fs.");
			err = -EFSCORRUPTED;
			goto out;
		}
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		ext4_commit_super(sb);
	}
out:
	jbd2_journal_unlock_updates(journal);
	return err;
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	if (!ext4_has_feature_journal(sb)) {
		ext4_error(sb, "Journal got removed while the fs was mounted!");
		return -EFSCORRUPTED;
	}

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */
	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);

		/* Mark the fs itself as errored so fsck will run. */
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		/* Persist the error flag before clearing it from the journal,
		 * so the record cannot be lost across a crash in between. */
		j_errno = ext4_commit_super(sb);
		if (j_errno)
			return j_errno;
		ext4_warning(sb, "Marked fs in need of filesystem check.");

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
	return 0;
}

/*
6315ac27a0ecSDave Kleikamp * Force the running and committing transactions to commit, 6316ac27a0ecSDave Kleikamp * and wait on the commit. 6317ac27a0ecSDave Kleikamp */ 6318617ba13bSMingming Cao int ext4_force_commit(struct super_block *sb) 6319ac27a0ecSDave Kleikamp { 6320889860e4SJan Kara return ext4_journal_force_commit(EXT4_SB(sb)->s_journal); 6321ac27a0ecSDave Kleikamp } 6322ac27a0ecSDave Kleikamp 6323617ba13bSMingming Cao static int ext4_sync_fs(struct super_block *sb, int wait) 6324ac27a0ecSDave Kleikamp { 632514ce0cb4STheodore Ts'o int ret = 0; 63269eddacf9SJan Kara tid_t target; 632706a407f1SDmitry Monakhov bool needs_barrier = false; 63288d5d02e6SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6329ac27a0ecSDave Kleikamp 6330eb8ab444SJan Kara if (unlikely(ext4_forced_shutdown(sb))) 63310db1ff22STheodore Ts'o return 0; 63320db1ff22STheodore Ts'o 63339bffad1eSTheodore Ts'o trace_ext4_sync_fs(sb, wait); 63342e8fa54eSJan Kara flush_workqueue(sbi->rsv_conversion_wq); 6335a1177825SJan Kara /* 6336a1177825SJan Kara * Writeback quota in non-journalled quota case - journalled quota has 6337a1177825SJan Kara * no dirty dquots 6338a1177825SJan Kara */ 6339a1177825SJan Kara dquot_writeback_dquots(sb, -1); 634006a407f1SDmitry Monakhov /* 634106a407f1SDmitry Monakhov * Data writeback is possible w/o journal transaction, so barrier must 634206a407f1SDmitry Monakhov * being sent at the end of the function. But we can skip it if 634306a407f1SDmitry Monakhov * transaction_commit will do it for us. 
634406a407f1SDmitry Monakhov */ 6345bda32530STheodore Ts'o if (sbi->s_journal) { 634606a407f1SDmitry Monakhov target = jbd2_get_latest_transaction(sbi->s_journal); 634706a407f1SDmitry Monakhov if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && 634806a407f1SDmitry Monakhov !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) 634906a407f1SDmitry Monakhov needs_barrier = true; 635006a407f1SDmitry Monakhov 63518d5d02e6SMingming Cao if (jbd2_journal_start_commit(sbi->s_journal, &target)) { 6352ac27a0ecSDave Kleikamp if (wait) 6353bda32530STheodore Ts'o ret = jbd2_log_wait_commit(sbi->s_journal, 6354bda32530STheodore Ts'o target); 63550390131bSFrank Mayhar } 6356bda32530STheodore Ts'o } else if (wait && test_opt(sb, BARRIER)) 6357bda32530STheodore Ts'o needs_barrier = true; 635806a407f1SDmitry Monakhov if (needs_barrier) { 635906a407f1SDmitry Monakhov int err; 6360c6bf3f0eSChristoph Hellwig err = blkdev_issue_flush(sb->s_bdev); 636106a407f1SDmitry Monakhov if (!ret) 636206a407f1SDmitry Monakhov ret = err; 636306a407f1SDmitry Monakhov } 636406a407f1SDmitry Monakhov 636506a407f1SDmitry Monakhov return ret; 636606a407f1SDmitry Monakhov } 636706a407f1SDmitry Monakhov 6368ac27a0ecSDave Kleikamp /* 6369ac27a0ecSDave Kleikamp * LVM calls this function before a (read-only) snapshot is created. This 6370ac27a0ecSDave Kleikamp * gives us a chance to flush the journal completely and mark the fs clean. 6371be4f27d3SYongqiang Yang * 6372be4f27d3SYongqiang Yang * Note that only this function cannot bring a filesystem to be in a clean 63738e8ad8a5SJan Kara * state independently. It relies on upper layer to stop all data & metadata 63748e8ad8a5SJan Kara * modifications. 
6375ac27a0ecSDave Kleikamp */ 6376c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb) 6377ac27a0ecSDave Kleikamp { 6378c4be0c1dSTakashi Sato int error = 0; 637998175720SJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 6380ac27a0ecSDave Kleikamp 6381bb044576STheodore Ts'o if (journal) { 6382ac27a0ecSDave Kleikamp /* Now we set up the journal barrier. */ 6383dab291afSMingming Cao jbd2_journal_lock_updates(journal); 63847ffe1ea8SHidehiro Kawai 63857ffe1ea8SHidehiro Kawai /* 6386bb044576STheodore Ts'o * Don't clear the needs_recovery flag if we failed to 6387bb044576STheodore Ts'o * flush the journal. 63887ffe1ea8SHidehiro Kawai */ 638901d5d965SLeah Rumancik error = jbd2_journal_flush(journal, 0); 63906b0310fbSEric Sandeen if (error < 0) 63916b0310fbSEric Sandeen goto out; 6392ac27a0ecSDave Kleikamp 6393ac27a0ecSDave Kleikamp /* Journal blocked and flushed, clear needs_recovery flag. */ 6394e2b911c5SDarrick J. Wong ext4_clear_feature_journal_needs_recovery(sb); 639502f310fcSJan Kara if (ext4_orphan_file_empty(sb)) 639602f310fcSJan Kara ext4_clear_feature_orphan_present(sb); 6397c642dc9eSEric Sandeen } 6398c642dc9eSEric Sandeen 63994392fbc4SJan Kara error = ext4_commit_super(sb); 64006b0310fbSEric Sandeen out: 6401bb044576STheodore Ts'o if (journal) 64028e8ad8a5SJan Kara /* we rely on upper layer to stop further updates */ 6403bb044576STheodore Ts'o jbd2_journal_unlock_updates(journal); 64046b0310fbSEric Sandeen return error; 6405ac27a0ecSDave Kleikamp } 6406ac27a0ecSDave Kleikamp 6407ac27a0ecSDave Kleikamp /* 6408ac27a0ecSDave Kleikamp * Called by LVM after the snapshot is done. We need to reset the RECOVER 6409ac27a0ecSDave Kleikamp * flag here, even though the filesystem is not technically dirty yet. 
6410ac27a0ecSDave Kleikamp */ 6411c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb) 6412ac27a0ecSDave Kleikamp { 6413eb8ab444SJan Kara if (ext4_forced_shutdown(sb)) 64149ca92389STheodore Ts'o return 0; 64159ca92389STheodore Ts'o 6416c642dc9eSEric Sandeen if (EXT4_SB(sb)->s_journal) { 64179ca92389STheodore Ts'o /* Reset the needs_recovery flag before the fs is unlocked. */ 6418e2b911c5SDarrick J. Wong ext4_set_feature_journal_needs_recovery(sb); 641902f310fcSJan Kara if (ext4_has_feature_orphan_file(sb)) 642002f310fcSJan Kara ext4_set_feature_orphan_present(sb); 6421c642dc9eSEric Sandeen } 6422c642dc9eSEric Sandeen 64234392fbc4SJan Kara ext4_commit_super(sb); 6424c4be0c1dSTakashi Sato return 0; 6425ac27a0ecSDave Kleikamp } 6426ac27a0ecSDave Kleikamp 6427673c6100STheodore Ts'o /* 6428673c6100STheodore Ts'o * Structure to save mount options for ext4_remount's benefit 6429673c6100STheodore Ts'o */ 6430673c6100STheodore Ts'o struct ext4_mount_options { 6431673c6100STheodore Ts'o unsigned long s_mount_opt; 6432a2595b8aSTheodore Ts'o unsigned long s_mount_opt2; 643308cefc7aSEric W. Biederman kuid_t s_resuid; 643408cefc7aSEric W. 
Biederman kgid_t s_resgid; 6435673c6100STheodore Ts'o unsigned long s_commit_interval; 6436673c6100STheodore Ts'o u32 s_min_batch_time, s_max_batch_time; 6437673c6100STheodore Ts'o #ifdef CONFIG_QUOTA 6438673c6100STheodore Ts'o int s_jquota_fmt; 6439a2d4a646SJan Kara char *s_qf_names[EXT4_MAXQUOTAS]; 6440673c6100STheodore Ts'o #endif 6441673c6100STheodore Ts'o }; 6442673c6100STheodore Ts'o 6443960e0ab6SLukas Czerner static int __ext4_remount(struct fs_context *fc, struct super_block *sb) 6444ac27a0ecSDave Kleikamp { 64457edfd85bSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 6446617ba13bSMingming Cao struct ext4_super_block *es; 6447617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6448960e0ab6SLukas Czerner unsigned long old_sb_flags; 6449617ba13bSMingming Cao struct ext4_mount_options old_opts; 64508a266467STheodore Ts'o ext4_group_t g; 6451c5e06d10SJohann Lombardi int err = 0; 6452ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 64533bbef91bSAustin Kim int enable_quota = 0; 645403dafb5fSChen Gang int i, j; 645533458eabSTheodore Ts'o char *to_free[EXT4_MAXQUOTAS]; 6456ac27a0ecSDave Kleikamp #endif 6457b237e304SHarshad Shirwadkar 645821ac738eSChengguang Xu 6459ac27a0ecSDave Kleikamp /* Store the original options */ 6460ac27a0ecSDave Kleikamp old_sb_flags = sb->s_flags; 6461ac27a0ecSDave Kleikamp old_opts.s_mount_opt = sbi->s_mount_opt; 6462a2595b8aSTheodore Ts'o old_opts.s_mount_opt2 = sbi->s_mount_opt2; 6463ac27a0ecSDave Kleikamp old_opts.s_resuid = sbi->s_resuid; 6464ac27a0ecSDave Kleikamp old_opts.s_resgid = sbi->s_resgid; 6465ac27a0ecSDave Kleikamp old_opts.s_commit_interval = sbi->s_commit_interval; 646630773840STheodore Ts'o old_opts.s_min_batch_time = sbi->s_min_batch_time; 646730773840STheodore Ts'o old_opts.s_max_batch_time = sbi->s_max_batch_time; 6468ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6469ac27a0ecSDave Kleikamp old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 6470a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 
647103dafb5fSChen Gang if (sbi->s_qf_names[i]) { 647233458eabSTheodore Ts'o char *qf_name = get_qf_name(sb, sbi, i); 647333458eabSTheodore Ts'o 647433458eabSTheodore Ts'o old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL); 647503dafb5fSChen Gang if (!old_opts.s_qf_names[i]) { 647603dafb5fSChen Gang for (j = 0; j < i; j++) 647703dafb5fSChen Gang kfree(old_opts.s_qf_names[j]); 647803dafb5fSChen Gang return -ENOMEM; 647903dafb5fSChen Gang } 648003dafb5fSChen Gang } else 648103dafb5fSChen Gang old_opts.s_qf_names[i] = NULL; 6482ac27a0ecSDave Kleikamp #endif 6483e4e58e5dSOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) { 6484b3881f74STheodore Ts'o if (sbi->s_journal && sbi->s_journal->j_task->io_context) 64857edfd85bSLukas Czerner ctx->journal_ioprio = 6486b237e304SHarshad Shirwadkar sbi->s_journal->j_task->io_context->ioprio; 6487e4e58e5dSOjaswin Mujoo else 6488e4e58e5dSOjaswin Mujoo ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 6489e4e58e5dSOjaswin Mujoo 6490e4e58e5dSOjaswin Mujoo } 6491ac27a0ecSDave Kleikamp 64927edfd85bSLukas Czerner ext4_apply_options(fc, sb); 6493ac27a0ecSDave Kleikamp 64946b992ff2SDarrick J. Wong if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ 64956b992ff2SDarrick J. Wong test_opt(sb, JOURNAL_CHECKSUM)) { 64966b992ff2SDarrick J. Wong ext4_msg(sb, KERN_ERR, "changing journal_checksum " 64972d5b86e0SEric Sandeen "during remount not supported; ignoring"); 64982d5b86e0SEric Sandeen sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; 6499c6d3d56dSDarrick J. Wong } 6500c6d3d56dSDarrick J. 
Wong 65016ae6514bSPiotr Sarna if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 65026ae6514bSPiotr Sarna if (test_opt2(sb, EXPLICIT_DELALLOC)) { 65036ae6514bSPiotr Sarna ext4_msg(sb, KERN_ERR, "can't mount with " 65046ae6514bSPiotr Sarna "both data=journal and delalloc"); 65056ae6514bSPiotr Sarna err = -EINVAL; 65066ae6514bSPiotr Sarna goto restore_opts; 65076ae6514bSPiotr Sarna } 65086ae6514bSPiotr Sarna if (test_opt(sb, DIOREAD_NOLOCK)) { 65096ae6514bSPiotr Sarna ext4_msg(sb, KERN_ERR, "can't mount with " 65106ae6514bSPiotr Sarna "both data=journal and dioread_nolock"); 65116ae6514bSPiotr Sarna err = -EINVAL; 65126ae6514bSPiotr Sarna goto restore_opts; 65136ae6514bSPiotr Sarna } 6514ab04df78SJan Kara } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { 6515ab04df78SJan Kara if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 6516ab04df78SJan Kara ext4_msg(sb, KERN_ERR, "can't mount with " 6517ab04df78SJan Kara "journal_async_commit in data=ordered mode"); 6518ab04df78SJan Kara err = -EINVAL; 6519ab04df78SJan Kara goto restore_opts; 6520ab04df78SJan Kara } 6521923ae0ffSRoss Zwisler } 6522923ae0ffSRoss Zwisler 6523cdb7ee4cSTahsin Erdogan if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) { 6524cdb7ee4cSTahsin Erdogan ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount"); 6525cdb7ee4cSTahsin Erdogan err = -EINVAL; 6526cdb7ee4cSTahsin Erdogan goto restore_opts; 6527cdb7ee4cSTahsin Erdogan } 6528cdb7ee4cSTahsin Erdogan 652922b8d707SJan Kara if (test_opt2(sb, ABORT)) 6530124e7c61SGabriel Krisman Bertazi ext4_abort(sb, ESHUTDOWN, "Abort forced by user"); 6531ac27a0ecSDave Kleikamp 65321751e8a6SLinus Torvalds sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 65331751e8a6SLinus Torvalds (test_opt(sb, POSIX_ACL) ? 
SB_POSIXACL : 0); 6534ac27a0ecSDave Kleikamp 6535ac27a0ecSDave Kleikamp es = sbi->s_es; 6536ac27a0ecSDave Kleikamp 6537b3881f74STheodore Ts'o if (sbi->s_journal) { 6538617ba13bSMingming Cao ext4_init_journal_params(sb, sbi->s_journal); 65397edfd85bSLukas Czerner set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 6540b3881f74STheodore Ts'o } 6541ac27a0ecSDave Kleikamp 6542c92dc856SJan Kara /* Flush outstanding errors before changing fs state */ 6543bb15cea2STheodore Ts'o flush_work(&sbi->s_sb_upd_work); 6544c92dc856SJan Kara 6545960e0ab6SLukas Czerner if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { 654695257987SJan Kara if (ext4_forced_shutdown(sb)) { 6547ac27a0ecSDave Kleikamp err = -EROFS; 6548ac27a0ecSDave Kleikamp goto restore_opts; 6549ac27a0ecSDave Kleikamp } 6550ac27a0ecSDave Kleikamp 6551960e0ab6SLukas Czerner if (fc->sb_flags & SB_RDONLY) { 655238c03b34STheodore Ts'o err = sync_filesystem(sb); 655338c03b34STheodore Ts'o if (err < 0) 655438c03b34STheodore Ts'o goto restore_opts; 65550f0dd62fSChristoph Hellwig err = dquot_suspend(sb, -1); 65560f0dd62fSChristoph Hellwig if (err < 0) 6557c79d967dSChristoph Hellwig goto restore_opts; 6558c79d967dSChristoph Hellwig 6559ac27a0ecSDave Kleikamp /* 6560ac27a0ecSDave Kleikamp * First of all, the unconditional stuff we have to do 6561ac27a0ecSDave Kleikamp * to disable replay of the journal when we next remount 6562ac27a0ecSDave Kleikamp */ 65631751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 6564ac27a0ecSDave Kleikamp 6565ac27a0ecSDave Kleikamp /* 6566ac27a0ecSDave Kleikamp * OK, test if we are remounting a valid rw partition 6567ac27a0ecSDave Kleikamp * readonly, and if so set the rdonly flag and then 6568ac27a0ecSDave Kleikamp * mark the partition as valid again. 
6569ac27a0ecSDave Kleikamp */ 6570617ba13bSMingming Cao if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && 6571617ba13bSMingming Cao (sbi->s_mount_state & EXT4_VALID_FS)) 6572ac27a0ecSDave Kleikamp es->s_state = cpu_to_le16(sbi->s_mount_state); 6573ac27a0ecSDave Kleikamp 657411215630SJan Kara if (sbi->s_journal) { 657511215630SJan Kara /* 657611215630SJan Kara * We let remount-ro finish even if marking fs 657711215630SJan Kara * as clean failed... 657811215630SJan Kara */ 6579617ba13bSMingming Cao ext4_mark_recovery_complete(sb, es); 658011215630SJan Kara } 6581ac27a0ecSDave Kleikamp } else { 6582a13fb1a4SEric Sandeen /* Make sure we can mount this feature set readwrite */ 6583e2b911c5SDarrick J. Wong if (ext4_has_feature_readonly(sb) || 65842cb5cc8bSDarrick J. Wong !ext4_feature_set_ok(sb, 0)) { 6585ac27a0ecSDave Kleikamp err = -EROFS; 6586ac27a0ecSDave Kleikamp goto restore_opts; 6587ac27a0ecSDave Kleikamp } 6588ead6596bSEric Sandeen /* 65898a266467STheodore Ts'o * Make sure the group descriptor checksums 65900b8e58a1SAndreas Dilger * are sane. If they aren't, refuse to remount r/w. 65918a266467STheodore Ts'o */ 65928a266467STheodore Ts'o for (g = 0; g < sbi->s_groups_count; g++) { 65938a266467STheodore Ts'o struct ext4_group_desc *gdp = 65948a266467STheodore Ts'o ext4_get_group_desc(sb, g, NULL); 65958a266467STheodore Ts'o 6596feb0ab32SDarrick J. Wong if (!ext4_group_desc_csum_verify(sb, g, gdp)) { 6597b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, 6598b31e1552SEric Sandeen "ext4_remount: Checksum for group %u failed (%u!=%u)", 6599e2b911c5SDarrick J. Wong g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), 66008a266467STheodore Ts'o le16_to_cpu(gdp->bg_checksum)); 66016a797d27SDarrick J. 
Wong err = -EFSBADCRC; 66028a266467STheodore Ts'o goto restore_opts; 66038a266467STheodore Ts'o } 66048a266467STheodore Ts'o } 66058a266467STheodore Ts'o 66068a266467STheodore Ts'o /* 6607ead6596bSEric Sandeen * If we have an unprocessed orphan list hanging 6608ead6596bSEric Sandeen * around from a previously readonly bdev mount, 6609ead6596bSEric Sandeen * require a full umount/remount for now. 6610ead6596bSEric Sandeen */ 661102f310fcSJan Kara if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) { 6612b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, "Couldn't " 6613ead6596bSEric Sandeen "remount RDWR because of unprocessed " 6614ead6596bSEric Sandeen "orphan inode list. Please " 6615b31e1552SEric Sandeen "umount/remount instead"); 6616ead6596bSEric Sandeen err = -EINVAL; 6617ead6596bSEric Sandeen goto restore_opts; 6618ead6596bSEric Sandeen } 6619ead6596bSEric Sandeen 6620ac27a0ecSDave Kleikamp /* 6621ac27a0ecSDave Kleikamp * Mounting a RDONLY partition read-write, so reread 6622ac27a0ecSDave Kleikamp * and store the current valid flag. (It may have 6623ac27a0ecSDave Kleikamp * been changed by e2fsck since we originally mounted 6624ac27a0ecSDave Kleikamp * the partition.) 
6625ac27a0ecSDave Kleikamp */ 662611215630SJan Kara if (sbi->s_journal) { 662711215630SJan Kara err = ext4_clear_journal_err(sb, es); 662811215630SJan Kara if (err) 662911215630SJan Kara goto restore_opts; 663011215630SJan Kara } 6631c878bea3STheodore Ts'o sbi->s_mount_state = (le16_to_cpu(es->s_state) & 6632c878bea3STheodore Ts'o ~EXT4_FC_REPLAY); 6633c89128a0SJaegeuk Kim 6634c89128a0SJaegeuk Kim err = ext4_setup_super(sb, es, 0); 6635c89128a0SJaegeuk Kim if (err) 6636c89128a0SJaegeuk Kim goto restore_opts; 6637c89128a0SJaegeuk Kim 66381b292439STheodore Ts'o sb->s_flags &= ~SB_RDONLY; 66393b50d501STheodore Ts'o if (ext4_has_feature_mmp(sb)) { 66403b50d501STheodore Ts'o err = ext4_multi_mount_protect(sb, 66413b50d501STheodore Ts'o le64_to_cpu(es->s_mmp_block)); 66423b50d501STheodore Ts'o if (err) 6643c5e06d10SJohann Lombardi goto restore_opts; 6644c5e06d10SJohann Lombardi } 66453bbef91bSAustin Kim #ifdef CONFIG_QUOTA 6646c79d967dSChristoph Hellwig enable_quota = 1; 66473bbef91bSAustin Kim #endif 6648ac27a0ecSDave Kleikamp } 6649ac27a0ecSDave Kleikamp } 6650bfff6873SLukas Czerner 6651bfff6873SLukas Czerner /* 66520f5bde1dSJan Kara * Handle creation of system zone data early because it can fail. 66530f5bde1dSJan Kara * Releasing of existing data is done when we are sure remount will 66540f5bde1dSJan Kara * succeed. 
66550f5bde1dSJan Kara */ 6656dd0db94fSChunguang Xu if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) { 6657d176b1f6SJan Kara err = ext4_setup_system_zone(sb); 6658d176b1f6SJan Kara if (err) 6659d176b1f6SJan Kara goto restore_opts; 66600f5bde1dSJan Kara } 6661d176b1f6SJan Kara 6662c89128a0SJaegeuk Kim if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { 66634392fbc4SJan Kara err = ext4_commit_super(sb); 6664c89128a0SJaegeuk Kim if (err) 6665c89128a0SJaegeuk Kim goto restore_opts; 6666c89128a0SJaegeuk Kim } 66670390131bSFrank Mayhar 6668ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 66697c319d32SAditya Kali if (enable_quota) { 66707c319d32SAditya Kali if (sb_any_quota_suspended(sb)) 66710f0dd62fSChristoph Hellwig dquot_resume(sb, -1); 6672e2b911c5SDarrick J. Wong else if (ext4_has_feature_quota(sb)) { 66737c319d32SAditya Kali err = ext4_enable_quotas(sb); 667407724f98STheodore Ts'o if (err) 66757c319d32SAditya Kali goto restore_opts; 66767c319d32SAditya Kali } 66777c319d32SAditya Kali } 66784c0b4818STheodore Ts'o /* Release old quota file names */ 66794c0b4818STheodore Ts'o for (i = 0; i < EXT4_MAXQUOTAS; i++) 66804c0b4818STheodore Ts'o kfree(old_opts.s_qf_names[i]); 66817c319d32SAditya Kali #endif 6682dd0db94fSChunguang Xu if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 66830f5bde1dSJan Kara ext4_release_system_zone(sb); 6684d4c402d9SCurt Wohlgemuth 6685eb1f822cSTheodore Ts'o /* 6686eb1f822cSTheodore Ts'o * Reinitialize lazy itable initialization thread based on 6687eb1f822cSTheodore Ts'o * current settings 6688eb1f822cSTheodore Ts'o */ 6689eb1f822cSTheodore Ts'o if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) 6690eb1f822cSTheodore Ts'o ext4_unregister_li_request(sb); 6691eb1f822cSTheodore Ts'o else { 6692eb1f822cSTheodore Ts'o ext4_group_t first_not_zeroed; 6693eb1f822cSTheodore Ts'o first_not_zeroed = ext4_has_uninit_itable(sb); 6694eb1f822cSTheodore Ts'o ext4_register_li_request(sb, first_not_zeroed); 6695eb1f822cSTheodore Ts'o } 
6696eb1f822cSTheodore Ts'o 669761bb4a1cSTheodore Ts'o if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 669861bb4a1cSTheodore Ts'o ext4_stop_mmpd(sbi); 669961bb4a1cSTheodore Ts'o 6700ac27a0ecSDave Kleikamp return 0; 67010b8e58a1SAndreas Dilger 6702ac27a0ecSDave Kleikamp restore_opts: 67034c0b4818STheodore Ts'o /* 67044c0b4818STheodore Ts'o * If there was a failing r/w to ro transition, we may need to 67054c0b4818STheodore Ts'o * re-enable quota 67064c0b4818STheodore Ts'o */ 6707d5d020b3SJan Kara if (sb_rdonly(sb) && !(old_sb_flags & SB_RDONLY) && 67084c0b4818STheodore Ts'o sb_any_quota_suspended(sb)) 67094c0b4818STheodore Ts'o dquot_resume(sb, -1); 6710ac27a0ecSDave Kleikamp sb->s_flags = old_sb_flags; 6711ac27a0ecSDave Kleikamp sbi->s_mount_opt = old_opts.s_mount_opt; 6712a2595b8aSTheodore Ts'o sbi->s_mount_opt2 = old_opts.s_mount_opt2; 6713ac27a0ecSDave Kleikamp sbi->s_resuid = old_opts.s_resuid; 6714ac27a0ecSDave Kleikamp sbi->s_resgid = old_opts.s_resgid; 6715ac27a0ecSDave Kleikamp sbi->s_commit_interval = old_opts.s_commit_interval; 671630773840STheodore Ts'o sbi->s_min_batch_time = old_opts.s_min_batch_time; 671730773840STheodore Ts'o sbi->s_max_batch_time = old_opts.s_max_batch_time; 6718dd0db94fSChunguang Xu if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 67190f5bde1dSJan Kara ext4_release_system_zone(sb); 6720ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6721ac27a0ecSDave Kleikamp sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 6722a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) { 672333458eabSTheodore Ts'o to_free[i] = get_qf_name(sb, sbi, i); 672433458eabSTheodore Ts'o rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]); 6725ac27a0ecSDave Kleikamp } 672633458eabSTheodore Ts'o synchronize_rcu(); 672733458eabSTheodore Ts'o for (i = 0; i < EXT4_MAXQUOTAS; i++) 672833458eabSTheodore Ts'o kfree(to_free[i]); 6729ac27a0ecSDave Kleikamp #endif 673061bb4a1cSTheodore Ts'o if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 673161bb4a1cSTheodore Ts'o 
ext4_stop_mmpd(sbi); 6732ac27a0ecSDave Kleikamp return err; 6733ac27a0ecSDave Kleikamp } 6734ac27a0ecSDave Kleikamp 6735cebe85d5SLukas Czerner static int ext4_reconfigure(struct fs_context *fc) 67367edfd85bSLukas Czerner { 6737cebe85d5SLukas Czerner struct super_block *sb = fc->root->d_sb; 67387edfd85bSLukas Czerner int ret; 67397edfd85bSLukas Czerner 6740cebe85d5SLukas Czerner fc->s_fs_info = EXT4_SB(sb); 67417edfd85bSLukas Czerner 6742cebe85d5SLukas Czerner ret = ext4_check_opt_consistency(fc, sb); 67437edfd85bSLukas Czerner if (ret < 0) 67447edfd85bSLukas Czerner return ret; 6745cebe85d5SLukas Czerner 6746960e0ab6SLukas Czerner ret = __ext4_remount(fc, sb); 6747cebe85d5SLukas Czerner if (ret < 0) 6748cebe85d5SLukas Czerner return ret; 6749cebe85d5SLukas Czerner 67506dcc98fbSTheodore Ts'o ext4_msg(sb, KERN_INFO, "re-mounted %pU %s. Quota mode: %s.", 67516dcc98fbSTheodore Ts'o &sb->s_uuid, sb_rdonly(sb) ? "ro" : "r/w", 67526dcc98fbSTheodore Ts'o ext4_quota_mode(sb)); 6753cebe85d5SLukas Czerner 6754cebe85d5SLukas Czerner return 0; 67557edfd85bSLukas Czerner } 67567edfd85bSLukas Czerner 6757689c958cSLi Xi #ifdef CONFIG_QUOTA 6758689c958cSLi Xi static int ext4_statfs_project(struct super_block *sb, 6759689c958cSLi Xi kprojid_t projid, struct kstatfs *buf) 6760689c958cSLi Xi { 6761689c958cSLi Xi struct kqid qid; 6762689c958cSLi Xi struct dquot *dquot; 6763689c958cSLi Xi u64 limit; 6764689c958cSLi Xi u64 curblock; 6765689c958cSLi Xi 6766689c958cSLi Xi qid = make_kqid_projid(projid); 6767689c958cSLi Xi dquot = dqget(sb, qid); 6768689c958cSLi Xi if (IS_ERR(dquot)) 6769689c958cSLi Xi return PTR_ERR(dquot); 67707b9ca4c6SJan Kara spin_lock(&dquot->dq_dqb_lock); 6771689c958cSLi Xi 6772a08fe66eSChengguang Xu limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, 6773a08fe66eSChengguang Xu dquot->dq_dqb.dqb_bhardlimit); 677457c32ea4SChengguang Xu limit >>= sb->s_blocksize_bits; 677557c32ea4SChengguang Xu 6776689c958cSLi Xi if (limit && buf->f_blocks > limit) { 
6777f06925c7SKonstantin Khlebnikov curblock = (dquot->dq_dqb.dqb_curspace + 6778f06925c7SKonstantin Khlebnikov dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; 6779689c958cSLi Xi buf->f_blocks = limit; 6780689c958cSLi Xi buf->f_bfree = buf->f_bavail = 6781689c958cSLi Xi (buf->f_blocks > curblock) ? 6782689c958cSLi Xi (buf->f_blocks - curblock) : 0; 6783689c958cSLi Xi } 6784689c958cSLi Xi 6785a08fe66eSChengguang Xu limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, 6786a08fe66eSChengguang Xu dquot->dq_dqb.dqb_ihardlimit); 6787689c958cSLi Xi if (limit && buf->f_files > limit) { 6788689c958cSLi Xi buf->f_files = limit; 6789689c958cSLi Xi buf->f_ffree = 6790689c958cSLi Xi (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? 6791689c958cSLi Xi (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; 6792689c958cSLi Xi } 6793689c958cSLi Xi 67947b9ca4c6SJan Kara spin_unlock(&dquot->dq_dqb_lock); 6795689c958cSLi Xi dqput(dquot); 6796689c958cSLi Xi return 0; 6797689c958cSLi Xi } 6798689c958cSLi Xi #endif 6799689c958cSLi Xi 6800617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) 6801ac27a0ecSDave Kleikamp { 6802ac27a0ecSDave Kleikamp struct super_block *sb = dentry->d_sb; 6803617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6804617ba13bSMingming Cao struct ext4_super_block *es = sbi->s_es; 680527dd4385SLukas Czerner ext4_fsblk_t overhead = 0, resv_blocks; 6806d02a9391SKazuya Mio s64 bfree; 680727dd4385SLukas Czerner resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); 6808ac27a0ecSDave Kleikamp 6809952fc18eSTheodore Ts'o if (!test_opt(sb, MINIX_DF)) 6810952fc18eSTheodore Ts'o overhead = sbi->s_overhead; 6811ac27a0ecSDave Kleikamp 6812617ba13bSMingming Cao buf->f_type = EXT4_SUPER_MAGIC; 6813ac27a0ecSDave Kleikamp buf->f_bsize = sb->s_blocksize; 6814b72f78cbSEric Sandeen buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); 681557042651STheodore Ts'o bfree = 
percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - 681657042651STheodore Ts'o percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); 6817d02a9391SKazuya Mio /* prevent underflow in case that few free space is available */ 681857042651STheodore Ts'o buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); 681927dd4385SLukas Czerner buf->f_bavail = buf->f_bfree - 682027dd4385SLukas Czerner (ext4_r_blocks_count(es) + resv_blocks); 682127dd4385SLukas Czerner if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) 6822ac27a0ecSDave Kleikamp buf->f_bavail = 0; 6823ac27a0ecSDave Kleikamp buf->f_files = le32_to_cpu(es->s_inodes_count); 682452d9f3b4SPeter Zijlstra buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); 6825617ba13bSMingming Cao buf->f_namelen = EXT4_NAME_LEN; 68269591c3a3SAmir Goldstein buf->f_fsid = uuid_to_fsid(es->s_uuid); 68270b8e58a1SAndreas Dilger 6828689c958cSLi Xi #ifdef CONFIG_QUOTA 6829689c958cSLi Xi if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) && 6830689c958cSLi Xi sb_has_quota_limits_enabled(sb, PRJQUOTA)) 6831689c958cSLi Xi ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf); 6832689c958cSLi Xi #endif 6833ac27a0ecSDave Kleikamp return 0; 6834ac27a0ecSDave Kleikamp } 6835ac27a0ecSDave Kleikamp 6836ac27a0ecSDave Kleikamp 6837ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6838ac27a0ecSDave Kleikamp 6839bc8230eeSJan Kara /* 6840bc8230eeSJan Kara * Helper functions so that transaction is started before we acquire dqio_sem 6841bc8230eeSJan Kara * to keep correct lock ordering of transaction > dqio_sem 6842bc8230eeSJan Kara */ 6843ac27a0ecSDave Kleikamp static inline struct inode *dquot_to_inode(struct dquot *dquot) 6844ac27a0ecSDave Kleikamp { 68454c376dcaSEric W. 
Biederman return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 6846ac27a0ecSDave Kleikamp } 6847ac27a0ecSDave Kleikamp 6848617ba13bSMingming Cao static int ext4_write_dquot(struct dquot *dquot) 6849ac27a0ecSDave Kleikamp { 6850ac27a0ecSDave Kleikamp int ret, err; 6851ac27a0ecSDave Kleikamp handle_t *handle; 6852ac27a0ecSDave Kleikamp struct inode *inode; 6853ac27a0ecSDave Kleikamp 6854ac27a0ecSDave Kleikamp inode = dquot_to_inode(dquot); 68559924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 6856617ba13bSMingming Cao EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 6857ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6858ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6859ac27a0ecSDave Kleikamp ret = dquot_commit(dquot); 6860617ba13bSMingming Cao err = ext4_journal_stop(handle); 6861ac27a0ecSDave Kleikamp if (!ret) 6862ac27a0ecSDave Kleikamp ret = err; 6863ac27a0ecSDave Kleikamp return ret; 6864ac27a0ecSDave Kleikamp } 6865ac27a0ecSDave Kleikamp 6866617ba13bSMingming Cao static int ext4_acquire_dquot(struct dquot *dquot) 6867ac27a0ecSDave Kleikamp { 6868ac27a0ecSDave Kleikamp int ret, err; 6869ac27a0ecSDave Kleikamp handle_t *handle; 6870ac27a0ecSDave Kleikamp 68719924a92aSTheodore Ts'o handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6872617ba13bSMingming Cao EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 6873ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6874ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6875ac27a0ecSDave Kleikamp ret = dquot_acquire(dquot); 6876617ba13bSMingming Cao err = ext4_journal_stop(handle); 6877ac27a0ecSDave Kleikamp if (!ret) 6878ac27a0ecSDave Kleikamp ret = err; 6879ac27a0ecSDave Kleikamp return ret; 6880ac27a0ecSDave Kleikamp } 6881ac27a0ecSDave Kleikamp 6882617ba13bSMingming Cao static int ext4_release_dquot(struct dquot *dquot) 6883ac27a0ecSDave Kleikamp { 6884ac27a0ecSDave Kleikamp int ret, err; 6885ac27a0ecSDave Kleikamp handle_t *handle; 6886ac27a0ecSDave Kleikamp 68879924a92aSTheodore Ts'o handle = 
ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6888617ba13bSMingming Cao EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); 68899c3013e9SJan Kara if (IS_ERR(handle)) { 68909c3013e9SJan Kara /* Release dquot anyway to avoid endless cycle in dqput() */ 68919c3013e9SJan Kara dquot_release(dquot); 6892ac27a0ecSDave Kleikamp return PTR_ERR(handle); 68939c3013e9SJan Kara } 6894ac27a0ecSDave Kleikamp ret = dquot_release(dquot); 6895617ba13bSMingming Cao err = ext4_journal_stop(handle); 6896ac27a0ecSDave Kleikamp if (!ret) 6897ac27a0ecSDave Kleikamp ret = err; 6898ac27a0ecSDave Kleikamp return ret; 6899ac27a0ecSDave Kleikamp } 6900ac27a0ecSDave Kleikamp 6901617ba13bSMingming Cao static int ext4_mark_dquot_dirty(struct dquot *dquot) 6902ac27a0ecSDave Kleikamp { 6903262b4662SJan Kara struct super_block *sb = dquot->dq_sb; 6904262b4662SJan Kara 6905f177ee08SRoman Anufriev if (ext4_is_quota_journalled(sb)) { 6906ac27a0ecSDave Kleikamp dquot_mark_dquot_dirty(dquot); 6907617ba13bSMingming Cao return ext4_write_dquot(dquot); 6908ac27a0ecSDave Kleikamp } else { 6909ac27a0ecSDave Kleikamp return dquot_mark_dquot_dirty(dquot); 6910ac27a0ecSDave Kleikamp } 6911ac27a0ecSDave Kleikamp } 6912ac27a0ecSDave Kleikamp 6913617ba13bSMingming Cao static int ext4_write_info(struct super_block *sb, int type) 6914ac27a0ecSDave Kleikamp { 6915ac27a0ecSDave Kleikamp int ret, err; 6916ac27a0ecSDave Kleikamp handle_t *handle; 6917ac27a0ecSDave Kleikamp 6918ac27a0ecSDave Kleikamp /* Data block + inode block */ 6919f9c1f248SBaokun Li handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2); 6920ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6921ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6922ac27a0ecSDave Kleikamp ret = dquot_commit_info(sb, type); 6923617ba13bSMingming Cao err = ext4_journal_stop(handle); 6924ac27a0ecSDave Kleikamp if (!ret) 6925ac27a0ecSDave Kleikamp ret = err; 6926ac27a0ecSDave Kleikamp return ret; 6927ac27a0ecSDave Kleikamp } 6928ac27a0ecSDave Kleikamp 6929daf647d2STheodore Ts'o static 
void lockdep_set_quota_inode(struct inode *inode, int subclass) 6930daf647d2STheodore Ts'o { 6931daf647d2STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 6932daf647d2STheodore Ts'o 6933daf647d2STheodore Ts'o /* The first argument of lockdep_set_subclass has to be 6934daf647d2STheodore Ts'o * *exactly* the same as the argument to init_rwsem() --- in 6935daf647d2STheodore Ts'o * this case, in init_once() --- or lockdep gets unhappy 6936daf647d2STheodore Ts'o * because the name of the lock is set using the 6937daf647d2STheodore Ts'o * stringification of the argument to init_rwsem(). 6938daf647d2STheodore Ts'o */ 6939daf647d2STheodore Ts'o (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ 6940daf647d2STheodore Ts'o lockdep_set_subclass(&ei->i_data_sem, subclass); 6941daf647d2STheodore Ts'o } 6942daf647d2STheodore Ts'o 6943ac27a0ecSDave Kleikamp /* 6944ac27a0ecSDave Kleikamp * Standard function to be called on quota_on 6945ac27a0ecSDave Kleikamp */ 6946617ba13bSMingming Cao static int ext4_quota_on(struct super_block *sb, int type, int format_id, 69478c54ca9cSAl Viro const struct path *path) 6948ac27a0ecSDave Kleikamp { 6949ac27a0ecSDave Kleikamp int err; 6950ac27a0ecSDave Kleikamp 6951ac27a0ecSDave Kleikamp if (!test_opt(sb, QUOTA)) 6952ac27a0ecSDave Kleikamp return -EINVAL; 69530623543bSJan Kara 6954ac27a0ecSDave Kleikamp /* Quotafile not on the same filesystem? */ 6955d8c9584eSAl Viro if (path->dentry->d_sb != sb) 6956ac27a0ecSDave Kleikamp return -EXDEV; 6957e0770e91SJan Kara 6958e0770e91SJan Kara /* Quota already enabled for this file? */ 6959e0770e91SJan Kara if (IS_NOQUOTA(d_inode(path->dentry))) 6960e0770e91SJan Kara return -EBUSY; 6961e0770e91SJan Kara 69620623543bSJan Kara /* Journaling quota? */ 69630623543bSJan Kara if (EXT4_SB(sb)->s_qf_names[type]) { 69642b2d6d01STheodore Ts'o /* Quotafile not in fs root? 
*/ 6965f00c9e44SJan Kara if (path->dentry->d_parent != sb->s_root) 6966b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, 6967b31e1552SEric Sandeen "Quota file not on filesystem root. " 6968b31e1552SEric Sandeen "Journaled quota will not work"); 696991389240SJan Kara sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY; 697091389240SJan Kara } else { 697191389240SJan Kara /* 697291389240SJan Kara * Clear the flag just in case mount options changed since 697391389240SJan Kara * last time. 697491389240SJan Kara */ 697591389240SJan Kara sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY; 69760623543bSJan Kara } 69770623543bSJan Kara 6978daf647d2STheodore Ts'o lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); 6979daf647d2STheodore Ts'o err = dquot_quota_on(sb, type, format_id, path); 698015fc69bbSJan Kara if (!err) { 6981957153fcSJan Kara struct inode *inode = d_inode(path->dentry); 6982957153fcSJan Kara handle_t *handle; 6983957153fcSJan Kara 698461a92987SJan Kara /* 698561a92987SJan Kara * Set inode flags to prevent userspace from messing with quota 698661a92987SJan Kara * files. If this fails, we return success anyway since quotas 698761a92987SJan Kara * are already enabled and this is not a hard failure. 
698861a92987SJan Kara */ 6989957153fcSJan Kara inode_lock(inode); 6990957153fcSJan Kara handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 6991957153fcSJan Kara if (IS_ERR(handle)) 6992957153fcSJan Kara goto unlock_inode; 6993957153fcSJan Kara EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL; 6994957153fcSJan Kara inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, 6995957153fcSJan Kara S_NOATIME | S_IMMUTABLE); 69964209ae12SHarshad Shirwadkar err = ext4_mark_inode_dirty(handle, inode); 6997957153fcSJan Kara ext4_journal_stop(handle); 6998957153fcSJan Kara unlock_inode: 6999957153fcSJan Kara inode_unlock(inode); 700015fc69bbSJan Kara if (err) 700115fc69bbSJan Kara dquot_quota_off(sb, type); 7002957153fcSJan Kara } 700315fc69bbSJan Kara if (err) 700415fc69bbSJan Kara lockdep_set_quota_inode(path->dentry->d_inode, 700515fc69bbSJan Kara I_DATA_SEM_NORMAL); 7006daf647d2STheodore Ts'o return err; 7007ac27a0ecSDave Kleikamp } 7008ac27a0ecSDave Kleikamp 700907342ec2SBaokun Li static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) 701007342ec2SBaokun Li { 701107342ec2SBaokun Li switch (type) { 701207342ec2SBaokun Li case USRQUOTA: 701307342ec2SBaokun Li return qf_inum == EXT4_USR_QUOTA_INO; 701407342ec2SBaokun Li case GRPQUOTA: 701507342ec2SBaokun Li return qf_inum == EXT4_GRP_QUOTA_INO; 701607342ec2SBaokun Li case PRJQUOTA: 701707342ec2SBaokun Li return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; 701807342ec2SBaokun Li default: 701907342ec2SBaokun Li BUG(); 702007342ec2SBaokun Li } 702107342ec2SBaokun Li } 702207342ec2SBaokun Li 70237c319d32SAditya Kali static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 70247c319d32SAditya Kali unsigned int flags) 70257c319d32SAditya Kali { 70267c319d32SAditya Kali int err; 70277c319d32SAditya Kali struct inode *qf_inode; 7028a2d4a646SJan Kara unsigned long qf_inums[EXT4_MAXQUOTAS] = { 70297c319d32SAditya Kali le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 7030689c958cSLi Xi 
le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 7031689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 70327c319d32SAditya Kali }; 70337c319d32SAditya Kali 7034e2b911c5SDarrick J. Wong BUG_ON(!ext4_has_feature_quota(sb)); 70357c319d32SAditya Kali 70367c319d32SAditya Kali if (!qf_inums[type]) 70377c319d32SAditya Kali return -EPERM; 70387c319d32SAditya Kali 703907342ec2SBaokun Li if (!ext4_check_quota_inum(type, qf_inums[type])) { 704007342ec2SBaokun Li ext4_error(sb, "Bad quota inum: %lu, type: %d", 704107342ec2SBaokun Li qf_inums[type], type); 704207342ec2SBaokun Li return -EUCLEAN; 704307342ec2SBaokun Li } 704407342ec2SBaokun Li 70458a363970STheodore Ts'o qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); 70467c319d32SAditya Kali if (IS_ERR(qf_inode)) { 704707342ec2SBaokun Li ext4_error(sb, "Bad quota inode: %lu, type: %d", 704807342ec2SBaokun Li qf_inums[type], type); 70497c319d32SAditya Kali return PTR_ERR(qf_inode); 70507c319d32SAditya Kali } 70517c319d32SAditya Kali 7052bcb13850SJan Kara /* Don't account quota for quota files to avoid recursion */ 7053bcb13850SJan Kara qf_inode->i_flags |= S_NOQUOTA; 7054daf647d2STheodore Ts'o lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA); 70557212b95eSJan Kara err = dquot_load_quota_inode(qf_inode, type, format_id, flags); 7056daf647d2STheodore Ts'o if (err) 7057daf647d2STheodore Ts'o lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL); 705861157b24SPan Bian iput(qf_inode); 70597c319d32SAditya Kali 70607c319d32SAditya Kali return err; 70617c319d32SAditya Kali } 70627c319d32SAditya Kali 70637c319d32SAditya Kali /* Enable usage tracking for all quota types. 
*/ 706425c6d98fSJan Kara int ext4_enable_quotas(struct super_block *sb) 70657c319d32SAditya Kali { 70667c319d32SAditya Kali int type, err = 0; 7067a2d4a646SJan Kara unsigned long qf_inums[EXT4_MAXQUOTAS] = { 70687c319d32SAditya Kali le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 7069689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 7070689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 70717c319d32SAditya Kali }; 707249da9392SJan Kara bool quota_mopt[EXT4_MAXQUOTAS] = { 707349da9392SJan Kara test_opt(sb, USRQUOTA), 707449da9392SJan Kara test_opt(sb, GRPQUOTA), 707549da9392SJan Kara test_opt(sb, PRJQUOTA), 707649da9392SJan Kara }; 70777c319d32SAditya Kali 707891389240SJan Kara sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 7079a2d4a646SJan Kara for (type = 0; type < EXT4_MAXQUOTAS; type++) { 70807c319d32SAditya Kali if (qf_inums[type]) { 70817c319d32SAditya Kali err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 708249da9392SJan Kara DQUOT_USAGE_ENABLED | 708349da9392SJan Kara (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 70847c319d32SAditya Kali if (err) { 70857c319d32SAditya Kali ext4_warning(sb, 708672ba7450STheodore Ts'o "Failed to enable quota tracking " 708707342ec2SBaokun Li "(type=%d, err=%d, ino=%lu). 
" 708807342ec2SBaokun Li "Please run e2fsck to fix.", type, 708907342ec2SBaokun Li err, qf_inums[type]); 70904013d47aSJan Kara 7091f3c1c42eSBaokun Li ext4_quotas_off(sb, type); 70927c319d32SAditya Kali return err; 70937c319d32SAditya Kali } 70947c319d32SAditya Kali } 70957c319d32SAditya Kali } 70967c319d32SAditya Kali return 0; 70977c319d32SAditya Kali } 70987c319d32SAditya Kali 7099ca0e05e4SDmitry Monakhov static int ext4_quota_off(struct super_block *sb, int type) 7100ca0e05e4SDmitry Monakhov { 710121f97697SJan Kara struct inode *inode = sb_dqopt(sb)->files[type]; 710221f97697SJan Kara handle_t *handle; 7103957153fcSJan Kara int err; 710421f97697SJan Kara 710587009d86SDmitry Monakhov /* Force all delayed allocation blocks to be allocated. 710687009d86SDmitry Monakhov * Caller already holds s_umount sem */ 710787009d86SDmitry Monakhov if (test_opt(sb, DELALLOC)) 7108ca0e05e4SDmitry Monakhov sync_filesystem(sb); 7109ca0e05e4SDmitry Monakhov 7110957153fcSJan Kara if (!inode || !igrab(inode)) 71110b268590SAmir Goldstein goto out; 71120b268590SAmir Goldstein 7113957153fcSJan Kara err = dquot_quota_off(sb, type); 7114964edf66SJan Kara if (err || ext4_has_feature_quota(sb)) 7115957153fcSJan Kara goto out_put; 7116e0e985f3SJan Kara /* 7117e0e985f3SJan Kara * When the filesystem was remounted read-only first, we cannot cleanup 7118e0e985f3SJan Kara * inode flags here. Bad luck but people should be using QUOTA feature 7119e0e985f3SJan Kara * these days anyway. 7120e0e985f3SJan Kara */ 7121e0e985f3SJan Kara if (sb_rdonly(sb)) 7122e0e985f3SJan Kara goto out_put; 7123957153fcSJan Kara 7124957153fcSJan Kara inode_lock(inode); 712561a92987SJan Kara /* 712661a92987SJan Kara * Update modification times of quota files when userspace can 712761a92987SJan Kara * start looking at them. If we fail, we return success anyway since 712861a92987SJan Kara * this is not a hard failure and quotas are already disabled. 
712961a92987SJan Kara */ 71309924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 71314209ae12SHarshad Shirwadkar if (IS_ERR(handle)) { 71324209ae12SHarshad Shirwadkar err = PTR_ERR(handle); 7133957153fcSJan Kara goto out_unlock; 71344209ae12SHarshad Shirwadkar } 7135957153fcSJan Kara EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL); 7136957153fcSJan Kara inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); 71371bc33893SJeff Layton inode->i_mtime = inode_set_ctime_current(inode); 71384209ae12SHarshad Shirwadkar err = ext4_mark_inode_dirty(handle, inode); 713921f97697SJan Kara ext4_journal_stop(handle); 7140957153fcSJan Kara out_unlock: 7141957153fcSJan Kara inode_unlock(inode); 7142957153fcSJan Kara out_put: 7143964edf66SJan Kara lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL); 7144957153fcSJan Kara iput(inode); 7145957153fcSJan Kara return err; 714621f97697SJan Kara out: 7147ca0e05e4SDmitry Monakhov return dquot_quota_off(sb, type); 7148ca0e05e4SDmitry Monakhov } 7149ca0e05e4SDmitry Monakhov 7150ac27a0ecSDave Kleikamp /* Read data from quotafile - avoid pagecache and such because we cannot afford 7151ac27a0ecSDave Kleikamp * acquiring the locks... 
As quota files are never truncated and quota code 7152ac27a0ecSDave Kleikamp * itself serializes the operations (and no one else should touch the files) 7153ac27a0ecSDave Kleikamp * we don't have to be afraid of races */ 7154617ba13bSMingming Cao static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 7155ac27a0ecSDave Kleikamp size_t len, loff_t off) 7156ac27a0ecSDave Kleikamp { 7157ac27a0ecSDave Kleikamp struct inode *inode = sb_dqopt(sb)->files[type]; 7158725d26d3SAneesh Kumar K.V ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 7159ac27a0ecSDave Kleikamp int offset = off & (sb->s_blocksize - 1); 7160ac27a0ecSDave Kleikamp int tocopy; 7161ac27a0ecSDave Kleikamp size_t toread; 7162ac27a0ecSDave Kleikamp struct buffer_head *bh; 7163ac27a0ecSDave Kleikamp loff_t i_size = i_size_read(inode); 7164ac27a0ecSDave Kleikamp 7165ac27a0ecSDave Kleikamp if (off > i_size) 7166ac27a0ecSDave Kleikamp return 0; 7167ac27a0ecSDave Kleikamp if (off+len > i_size) 7168ac27a0ecSDave Kleikamp len = i_size-off; 7169ac27a0ecSDave Kleikamp toread = len; 7170ac27a0ecSDave Kleikamp while (toread > 0) { 717166267814SJiangshan Yi tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); 71721c215028STheodore Ts'o bh = ext4_bread(NULL, inode, blk, 0); 71731c215028STheodore Ts'o if (IS_ERR(bh)) 71741c215028STheodore Ts'o return PTR_ERR(bh); 7175ac27a0ecSDave Kleikamp if (!bh) /* A hole? 
*/ 7176ac27a0ecSDave Kleikamp memset(data, 0, tocopy); 7177ac27a0ecSDave Kleikamp else 7178ac27a0ecSDave Kleikamp memcpy(data, bh->b_data+offset, tocopy); 7179ac27a0ecSDave Kleikamp brelse(bh); 7180ac27a0ecSDave Kleikamp offset = 0; 7181ac27a0ecSDave Kleikamp toread -= tocopy; 7182ac27a0ecSDave Kleikamp data += tocopy; 7183ac27a0ecSDave Kleikamp blk++; 7184ac27a0ecSDave Kleikamp } 7185ac27a0ecSDave Kleikamp return len; 7186ac27a0ecSDave Kleikamp } 7187ac27a0ecSDave Kleikamp 7188ac27a0ecSDave Kleikamp /* Write to quotafile (we know the transaction is already started and has 7189ac27a0ecSDave Kleikamp * enough credits) */ 7190617ba13bSMingming Cao static ssize_t ext4_quota_write(struct super_block *sb, int type, 7191ac27a0ecSDave Kleikamp const char *data, size_t len, loff_t off) 7192ac27a0ecSDave Kleikamp { 7193ac27a0ecSDave Kleikamp struct inode *inode = sb_dqopt(sb)->files[type]; 7194725d26d3SAneesh Kumar K.V ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 71954209ae12SHarshad Shirwadkar int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1); 7196c5e298aeSTheodore Ts'o int retries = 0; 7197ac27a0ecSDave Kleikamp struct buffer_head *bh; 7198ac27a0ecSDave Kleikamp handle_t *handle = journal_current_handle(); 7199ac27a0ecSDave Kleikamp 7200380a0091SYe Bin if (!handle) { 7201b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 7202b31e1552SEric Sandeen " cancelled because transaction is not started", 72039c3013e9SJan Kara (unsigned long long)off, (unsigned long long)len); 72049c3013e9SJan Kara return -EIO; 72059c3013e9SJan Kara } 720667eeb568SDmitry Monakhov /* 720767eeb568SDmitry Monakhov * Since we account only one data block in transaction credits, 720867eeb568SDmitry Monakhov * then it is impossible to cross a block boundary. 
720967eeb568SDmitry Monakhov */ 721067eeb568SDmitry Monakhov if (sb->s_blocksize - offset < len) { 721167eeb568SDmitry Monakhov ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 721267eeb568SDmitry Monakhov " cancelled because not block aligned", 721367eeb568SDmitry Monakhov (unsigned long long)off, (unsigned long long)len); 721467eeb568SDmitry Monakhov return -EIO; 721567eeb568SDmitry Monakhov } 721667eeb568SDmitry Monakhov 7217c5e298aeSTheodore Ts'o do { 7218c5e298aeSTheodore Ts'o bh = ext4_bread(handle, inode, blk, 7219c5e298aeSTheodore Ts'o EXT4_GET_BLOCKS_CREATE | 7220c5e298aeSTheodore Ts'o EXT4_GET_BLOCKS_METADATA_NOFAIL); 722145586c70SMasahiro Yamada } while (PTR_ERR(bh) == -ENOSPC && 7222c5e298aeSTheodore Ts'o ext4_should_retry_alloc(inode->i_sb, &retries)); 72231c215028STheodore Ts'o if (IS_ERR(bh)) 72241c215028STheodore Ts'o return PTR_ERR(bh); 7225ac27a0ecSDave Kleikamp if (!bh) 7226ac27a0ecSDave Kleikamp goto out; 72275d601255Sliang xie BUFFER_TRACE(bh, "get write access"); 7228188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 7229ac27a0ecSDave Kleikamp if (err) { 7230ac27a0ecSDave Kleikamp brelse(bh); 72311c215028STheodore Ts'o return err; 7232ac27a0ecSDave Kleikamp } 7233ac27a0ecSDave Kleikamp lock_buffer(bh); 723467eeb568SDmitry Monakhov memcpy(bh->b_data+offset, data, len); 7235ac27a0ecSDave Kleikamp flush_dcache_page(bh->b_page); 7236ac27a0ecSDave Kleikamp unlock_buffer(bh); 72370390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bh); 7238ac27a0ecSDave Kleikamp brelse(bh); 7239ac27a0ecSDave Kleikamp out: 724067eeb568SDmitry Monakhov if (inode->i_size < off + len) { 724167eeb568SDmitry Monakhov i_size_write(inode, off + len); 7242617ba13bSMingming Cao EXT4_I(inode)->i_disksize = inode->i_size; 72434209ae12SHarshad Shirwadkar err2 = ext4_mark_inode_dirty(handle, inode); 72444209ae12SHarshad Shirwadkar if (unlikely(err2 && !err)) 72454209ae12SHarshad Shirwadkar err = err2; 
724621f97697SJan Kara } 72474209ae12SHarshad Shirwadkar return err ? err : len; 7248ac27a0ecSDave Kleikamp } 7249ac27a0ecSDave Kleikamp #endif 7250ac27a0ecSDave Kleikamp 7251c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) 725224b58424STheodore Ts'o static inline void register_as_ext2(void) 725324b58424STheodore Ts'o { 725424b58424STheodore Ts'o int err = register_filesystem(&ext2_fs_type); 725524b58424STheodore Ts'o if (err) 725624b58424STheodore Ts'o printk(KERN_WARNING 725724b58424STheodore Ts'o "EXT4-fs: Unable to register as ext2 (%d)\n", err); 725824b58424STheodore Ts'o } 725924b58424STheodore Ts'o 726024b58424STheodore Ts'o static inline void unregister_as_ext2(void) 726124b58424STheodore Ts'o { 726224b58424STheodore Ts'o unregister_filesystem(&ext2_fs_type); 726324b58424STheodore Ts'o } 72642035e776STheodore Ts'o 72652035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) 72662035e776STheodore Ts'o { 7267e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext2_incompat_features(sb)) 72682035e776STheodore Ts'o return 0; 7269bc98a42cSDavid Howells if (sb_rdonly(sb)) 72702035e776STheodore Ts'o return 1; 7271e2b911c5SDarrick J. 
Wong if (ext4_has_unknown_ext2_ro_compat_features(sb)) 72722035e776STheodore Ts'o return 0; 72732035e776STheodore Ts'o return 1; 72742035e776STheodore Ts'o } 727524b58424STheodore Ts'o #else 727624b58424STheodore Ts'o static inline void register_as_ext2(void) { } 727724b58424STheodore Ts'o static inline void unregister_as_ext2(void) { } 72782035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } 727924b58424STheodore Ts'o #endif 728024b58424STheodore Ts'o 728124b58424STheodore Ts'o static inline void register_as_ext3(void) 728224b58424STheodore Ts'o { 728324b58424STheodore Ts'o int err = register_filesystem(&ext3_fs_type); 728424b58424STheodore Ts'o if (err) 728524b58424STheodore Ts'o printk(KERN_WARNING 728624b58424STheodore Ts'o "EXT4-fs: Unable to register as ext3 (%d)\n", err); 728724b58424STheodore Ts'o } 728824b58424STheodore Ts'o 728924b58424STheodore Ts'o static inline void unregister_as_ext3(void) 729024b58424STheodore Ts'o { 729124b58424STheodore Ts'o unregister_filesystem(&ext3_fs_type); 729224b58424STheodore Ts'o } 72932035e776STheodore Ts'o 72942035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb) 72952035e776STheodore Ts'o { 7296e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext3_incompat_features(sb)) 72972035e776STheodore Ts'o return 0; 7298e2b911c5SDarrick J. Wong if (!ext4_has_feature_journal(sb)) 72992035e776STheodore Ts'o return 0; 7300bc98a42cSDavid Howells if (sb_rdonly(sb)) 73012035e776STheodore Ts'o return 1; 7302e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext3_ro_compat_features(sb)) 73032035e776STheodore Ts'o return 0; 73042035e776STheodore Ts'o return 1; 73052035e776STheodore Ts'o } 730624b58424STheodore Ts'o 73071489dffdSChristoph Hellwig static void ext4_kill_sb(struct super_block *sb) 73081489dffdSChristoph Hellwig { 73091489dffdSChristoph Hellwig struct ext4_sb_info *sbi = EXT4_SB(sb); 73101489dffdSChristoph Hellwig struct block_device *journal_bdev = sbi ? 
sbi->s_journal_bdev : NULL; 73111489dffdSChristoph Hellwig 73121489dffdSChristoph Hellwig kill_block_super(sb); 73131489dffdSChristoph Hellwig 73141489dffdSChristoph Hellwig if (journal_bdev) 73151489dffdSChristoph Hellwig blkdev_put(journal_bdev, sb); 73161489dffdSChristoph Hellwig } 73171489dffdSChristoph Hellwig 731803010a33STheodore Ts'o static struct file_system_type ext4_fs_type = { 7319ac27a0ecSDave Kleikamp .owner = THIS_MODULE, 732003010a33STheodore Ts'o .name = "ext4", 7321cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 7322cebe85d5SLukas Czerner .parameters = ext4_param_specs, 73231489dffdSChristoph Hellwig .kill_sb = ext4_kill_sb, 732450ec1d72SChristian Brauner .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 7325ac27a0ecSDave Kleikamp }; 73267f78e035SEric W. Biederman MODULE_ALIAS_FS("ext4"); 7327ac27a0ecSDave Kleikamp 7328e9e3bcecSEric Sandeen /* Shared across all ext4 file systems */ 7329e9e3bcecSEric Sandeen wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; 7330e9e3bcecSEric Sandeen 73315dabfc78STheodore Ts'o static int __init ext4_init_fs(void) 7332ac27a0ecSDave Kleikamp { 7333e9e3bcecSEric Sandeen int i, err; 7334c9de560dSAlex Tomas 7335e294a537STheodore Ts'o ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); 733607c0c5d8SAl Viro ext4_li_info = NULL; 733707c0c5d8SAl Viro 73389a4c8019SCarlos Maiolino /* Build-time check for flags consistency */ 733912e9b892SDmitry Monakhov ext4_check_flag_values(); 7340e9e3bcecSEric Sandeen 7341e142d052SJan Kara for (i = 0; i < EXT4_WQ_HASH_SZ; i++) 7342e9e3bcecSEric Sandeen init_waitqueue_head(&ext4__ioend_wq[i]); 7343e9e3bcecSEric Sandeen 734451865fdaSZheng Liu err = ext4_init_es(); 73456fd058f7STheodore Ts'o if (err) 73466fd058f7STheodore Ts'o return err; 734751865fdaSZheng Liu 73481dc0aa46SEric Whitney err = ext4_init_pending(); 73491dc0aa46SEric Whitney if (err) 735022cfe4b4SEric Biggers goto out7; 735122cfe4b4SEric Biggers 735222cfe4b4SEric Biggers err = 
ext4_init_post_read_processing(); 735322cfe4b4SEric Biggers if (err) 73541dc0aa46SEric Whitney goto out6; 73551dc0aa46SEric Whitney 735651865fdaSZheng Liu err = ext4_init_pageio(); 735751865fdaSZheng Liu if (err) 7358b5799018STheodore Ts'o goto out5; 735951865fdaSZheng Liu 73605dabfc78STheodore Ts'o err = ext4_init_system_zone(); 7361bd2d0210STheodore Ts'o if (err) 7362b5799018STheodore Ts'o goto out4; 7363857ac889SLukas Czerner 7364b5799018STheodore Ts'o err = ext4_init_sysfs(); 7365dd68314cSTheodore Ts'o if (err) 7366b5799018STheodore Ts'o goto out3; 7367857ac889SLukas Czerner 73685dabfc78STheodore Ts'o err = ext4_init_mballoc(); 7369ac27a0ecSDave Kleikamp if (err) 7370c9de560dSAlex Tomas goto out2; 7371ac27a0ecSDave Kleikamp err = init_inodecache(); 7372ac27a0ecSDave Kleikamp if (err) 7373ac27a0ecSDave Kleikamp goto out1; 7374aa75f4d3SHarshad Shirwadkar 7375aa75f4d3SHarshad Shirwadkar err = ext4_fc_init_dentry_cache(); 7376aa75f4d3SHarshad Shirwadkar if (err) 7377aa75f4d3SHarshad Shirwadkar goto out05; 7378aa75f4d3SHarshad Shirwadkar 737924b58424STheodore Ts'o register_as_ext3(); 73802035e776STheodore Ts'o register_as_ext2(); 738103010a33STheodore Ts'o err = register_filesystem(&ext4_fs_type); 7382ac27a0ecSDave Kleikamp if (err) 7383ac27a0ecSDave Kleikamp goto out; 7384bfff6873SLukas Czerner 7385ac27a0ecSDave Kleikamp return 0; 7386ac27a0ecSDave Kleikamp out: 738724b58424STheodore Ts'o unregister_as_ext2(); 738824b58424STheodore Ts'o unregister_as_ext3(); 7389ab047d51SSebastian Andrzej Siewior ext4_fc_destroy_dentry_cache(); 7390aa75f4d3SHarshad Shirwadkar out05: 7391ac27a0ecSDave Kleikamp destroy_inodecache(); 7392ac27a0ecSDave Kleikamp out1: 73935dabfc78STheodore Ts'o ext4_exit_mballoc(); 73949c191f70ST Makphaibulchoke out2: 7395b5799018STheodore Ts'o ext4_exit_sysfs(); 7396b5799018STheodore Ts'o out3: 7397dd68314cSTheodore Ts'o ext4_exit_system_zone(); 7398b5799018STheodore Ts'o out4: 73995dabfc78STheodore Ts'o ext4_exit_pageio(); 7400b5799018STheodore Ts'o 
out5: 740122cfe4b4SEric Biggers ext4_exit_post_read_processing(); 74021dc0aa46SEric Whitney out6: 740322cfe4b4SEric Biggers ext4_exit_pending(); 740422cfe4b4SEric Biggers out7: 740551865fdaSZheng Liu ext4_exit_es(); 740651865fdaSZheng Liu 7407ac27a0ecSDave Kleikamp return err; 7408ac27a0ecSDave Kleikamp } 7409ac27a0ecSDave Kleikamp 74105dabfc78STheodore Ts'o static void __exit ext4_exit_fs(void) 7411ac27a0ecSDave Kleikamp { 7412bfff6873SLukas Czerner ext4_destroy_lazyinit_thread(); 741324b58424STheodore Ts'o unregister_as_ext2(); 741424b58424STheodore Ts'o unregister_as_ext3(); 741503010a33STheodore Ts'o unregister_filesystem(&ext4_fs_type); 7416ab047d51SSebastian Andrzej Siewior ext4_fc_destroy_dentry_cache(); 7417ac27a0ecSDave Kleikamp destroy_inodecache(); 74185dabfc78STheodore Ts'o ext4_exit_mballoc(); 7419b5799018STheodore Ts'o ext4_exit_sysfs(); 74205dabfc78STheodore Ts'o ext4_exit_system_zone(); 74215dabfc78STheodore Ts'o ext4_exit_pageio(); 742222cfe4b4SEric Biggers ext4_exit_post_read_processing(); 7423dd12ed14SEric Sandeen ext4_exit_es(); 74241dc0aa46SEric Whitney ext4_exit_pending(); 7425ac27a0ecSDave Kleikamp } 7426ac27a0ecSDave Kleikamp 7427ac27a0ecSDave Kleikamp MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 742883982b6fSTheodore Ts'o MODULE_DESCRIPTION("Fourth Extended Filesystem"); 7429ac27a0ecSDave Kleikamp MODULE_LICENSE("GPL"); 74307ef79ad5STheodore Ts'o MODULE_SOFTDEP("pre: crc32c"); 74315dabfc78STheodore Ts'o module_init(ext4_init_fs) 74325dabfc78STheodore Ts'o module_exit(ext4_exit_fs) 7433