1f5166768STheodore Ts'o // SPDX-License-Identifier: GPL-2.0 2ac27a0ecSDave Kleikamp /* 3617ba13bSMingming Cao * linux/fs/ext4/super.c 4ac27a0ecSDave Kleikamp * 5ac27a0ecSDave Kleikamp * Copyright (C) 1992, 1993, 1994, 1995 6ac27a0ecSDave Kleikamp * Remy Card (card@masi.ibp.fr) 7ac27a0ecSDave Kleikamp * Laboratoire MASI - Institut Blaise Pascal 8ac27a0ecSDave Kleikamp * Universite Pierre et Marie Curie (Paris VI) 9ac27a0ecSDave Kleikamp * 10ac27a0ecSDave Kleikamp * from 11ac27a0ecSDave Kleikamp * 12ac27a0ecSDave Kleikamp * linux/fs/minix/inode.c 13ac27a0ecSDave Kleikamp * 14ac27a0ecSDave Kleikamp * Copyright (C) 1991, 1992 Linus Torvalds 15ac27a0ecSDave Kleikamp * 16ac27a0ecSDave Kleikamp * Big-endian to little-endian byte-swapping/bitmaps by 17ac27a0ecSDave Kleikamp * David S. Miller (davem@caip.rutgers.edu), 1995 18ac27a0ecSDave Kleikamp */ 19ac27a0ecSDave Kleikamp 20ac27a0ecSDave Kleikamp #include <linux/module.h> 21ac27a0ecSDave Kleikamp #include <linux/string.h> 22ac27a0ecSDave Kleikamp #include <linux/fs.h> 23ac27a0ecSDave Kleikamp #include <linux/time.h> 24c5ca7c76STheodore Ts'o #include <linux/vmalloc.h> 25ac27a0ecSDave Kleikamp #include <linux/slab.h> 26ac27a0ecSDave Kleikamp #include <linux/init.h> 27ac27a0ecSDave Kleikamp #include <linux/blkdev.h> 2866114cadSTejun Heo #include <linux/backing-dev.h> 29ac27a0ecSDave Kleikamp #include <linux/parser.h> 30ac27a0ecSDave Kleikamp #include <linux/buffer_head.h> 31a5694255SChristoph Hellwig #include <linux/exportfs.h> 32ac27a0ecSDave Kleikamp #include <linux/vfs.h> 33ac27a0ecSDave Kleikamp #include <linux/random.h> 34ac27a0ecSDave Kleikamp #include <linux/mount.h> 35ac27a0ecSDave Kleikamp #include <linux/namei.h> 36ac27a0ecSDave Kleikamp #include <linux/quotaops.h> 37ac27a0ecSDave Kleikamp #include <linux/seq_file.h> 383197ebdbSTheodore Ts'o #include <linux/ctype.h> 391330593eSVignesh Babu #include <linux/log2.h> 40717d50e4SAndreas Dilger #include <linux/crc16.h> 41ef510424SDan Williams #include <linux/dax.h> 427c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 43ee73f9a5SJeff Layton #include <linux/iversion.h> 44c83ad55eSGabriel Krisman Bertazi #include <linux/unicode.h> 45c6a564ffSChristoph Hellwig #include <linux/part_stat.h> 46bfff6873SLukas Czerner #include <linux/kthread.h> 47bfff6873SLukas Czerner #include <linux/freezer.h> 489a089b21SGabriel Krisman Bertazi #include <linux/fsnotify.h> 49e5a185c2SLukas Czerner #include <linux/fs_context.h> 50e5a185c2SLukas Czerner #include <linux/fs_parser.h> 51bfff6873SLukas Czerner 523dcf5451SChristoph Hellwig #include "ext4.h" 534a092d73STheodore Ts'o #include "ext4_extents.h" /* Needed for trace points definition */ 543dcf5451SChristoph Hellwig #include "ext4_jbd2.h" 55ac27a0ecSDave Kleikamp #include "xattr.h" 56ac27a0ecSDave Kleikamp #include "acl.h" 573661d286STheodore Ts'o #include "mballoc.h" 580c9ec4beSDarrick J. Wong #include "fsmap.h" 59ac27a0ecSDave Kleikamp 609bffad1eSTheodore Ts'o #define CREATE_TRACE_POINTS 619bffad1eSTheodore Ts'o #include <trace/events/ext4.h> 629bffad1eSTheodore Ts'o 630b75a840SLukas Czerner static struct ext4_lazy_init *ext4_li_info; 6459ebc7fdSZheng Yongjun static DEFINE_MUTEX(ext4_li_mtx); 65e294a537STheodore Ts'o static struct ratelimit_state ext4_mount_msg_ratelimit; 669f6200bbSTheodore Ts'o 67617ba13bSMingming Cao static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 68ac27a0ecSDave Kleikamp unsigned long journal_devnum); 692adf6da8STheodore Ts'o static int ext4_show_options(struct seq_file *seq, struct dentry *root); 702d01ddc8SJan Kara static void ext4_update_super(struct super_block *sb); 714392fbc4SJan Kara static int ext4_commit_super(struct super_block *sb); 7211215630SJan Kara static int ext4_mark_recovery_complete(struct super_block *sb, 73617ba13bSMingming Cao struct ext4_super_block *es); 7411215630SJan Kara static int ext4_clear_journal_err(struct super_block *sb, 75617ba13bSMingming Cao struct ext4_super_block *es); 76617ba13bSMingming Cao static int ext4_sync_fs(struct super_block *sb, int wait); 77617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); 78c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb); 79c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb); 802035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb); 812035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb); 82bfff6873SLukas Czerner static void ext4_destroy_lazyinit_thread(void); 83bfff6873SLukas Czerner static void ext4_unregister_li_request(struct super_block *sb); 848f1f7453SEric Sandeen static void ext4_clear_request_list(void); 85c6cb7e77SEric Whitney static struct inode *ext4_get_journal_inode(struct super_block *sb, 86c6cb7e77SEric Whitney unsigned int journal_inum); 87da812f61SLukas Czerner static int ext4_validate_options(struct fs_context *fc); 88b6bd2435SLukas Czerner static int ext4_check_opt_consistency(struct fs_context *fc, 89e6e268cbSLukas Czerner struct super_block *sb); 9085456054SEric Biggers static void ext4_apply_options(struct fs_context *fc, struct super_block *sb); 9102f960f8SLukas Czerner static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param); 92cebe85d5SLukas Czerner static int ext4_get_tree(struct fs_context *fc); 93cebe85d5SLukas Czerner static int ext4_reconfigure(struct fs_context *fc); 94cebe85d5SLukas Czerner static void ext4_fc_free(struct fs_context *fc); 95cebe85d5SLukas Czerner static int ext4_init_fs_context(struct fs_context *fc); 96cebe85d5SLukas Czerner static const struct fs_parameter_spec ext4_param_specs[]; 97ac27a0ecSDave Kleikamp 98e74031fdSJan Kara /* 99e74031fdSJan Kara * Lock ordering 100e74031fdSJan Kara * 101e74031fdSJan Kara * page fault path: 102d4f5258eSJan Kara * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start 103d4f5258eSJan Kara * -> page lock -> i_data_sem (rw) 104e74031fdSJan Kara * 105e74031fdSJan Kara * buffered write path: 106c1e8d7c6SMichel Lespinasse * sb_start_write -> i_mutex -> mmap_lock 107e74031fdSJan Kara * sb_start_write -> i_mutex -> transaction start -> page lock -> 108e74031fdSJan Kara * i_data_sem (rw) 109e74031fdSJan Kara * 110e74031fdSJan Kara * truncate: 111d4f5258eSJan Kara * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) -> 112d4f5258eSJan Kara * page lock 113d4f5258eSJan Kara * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start -> 1141d39834fSNikolay Borisov * i_data_sem (rw) 115e74031fdSJan Kara * 116e74031fdSJan Kara * direct IO: 117c1e8d7c6SMichel Lespinasse * sb_start_write -> i_mutex -> mmap_lock 1181d39834fSNikolay Borisov * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw) 119e74031fdSJan Kara * 120e74031fdSJan Kara * writepages: 121e74031fdSJan Kara * transaction start -> page lock(s) -> i_data_sem (rw) 122e74031fdSJan Kara */ 123e74031fdSJan Kara 12402f960f8SLukas Czerner static const struct fs_context_operations ext4_context_ops = { 12502f960f8SLukas Czerner .parse_param = ext4_parse_param, 126cebe85d5SLukas Czerner .get_tree = ext4_get_tree, 127cebe85d5SLukas Czerner .reconfigure = ext4_reconfigure, 128cebe85d5SLukas Czerner .free = ext4_fc_free, 12902f960f8SLukas Czerner }; 13002f960f8SLukas Czerner 13102f960f8SLukas Czerner 132c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) 1332035e776STheodore Ts'o static struct file_system_type ext2_fs_type = { 1342035e776STheodore Ts'o .owner = THIS_MODULE, 1352035e776STheodore Ts'o .name = "ext2", 136cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 137cebe85d5SLukas Czerner .parameters = ext4_param_specs, 1382035e776STheodore Ts'o .kill_sb = kill_block_super, 1392035e776STheodore Ts'o .fs_flags = FS_REQUIRES_DEV, 1402035e776STheodore Ts'o }; 1417f78e035SEric W. Biederman MODULE_ALIAS_FS("ext2"); 142fa7614ddSEric W. Biederman MODULE_ALIAS("ext2"); 1432035e776STheodore Ts'o #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) 1442035e776STheodore Ts'o #else 1452035e776STheodore Ts'o #define IS_EXT2_SB(sb) (0) 1462035e776STheodore Ts'o #endif 1472035e776STheodore Ts'o 1482035e776STheodore Ts'o 149ba69f9abSJan Kara static struct file_system_type ext3_fs_type = { 150ba69f9abSJan Kara .owner = THIS_MODULE, 151ba69f9abSJan Kara .name = "ext3", 152cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 153cebe85d5SLukas Czerner .parameters = ext4_param_specs, 154ba69f9abSJan Kara .kill_sb = kill_block_super, 155ba69f9abSJan Kara .fs_flags = FS_REQUIRES_DEV, 156ba69f9abSJan Kara }; 1577f78e035SEric W. Biederman MODULE_ALIAS_FS("ext3"); 158fa7614ddSEric W. Biederman MODULE_ALIAS("ext3"); 159ba69f9abSJan Kara #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) 160bd81d8eeSLaurent Vivier 161fa491b14Szhangyi (F) 16267c0f556SBart Van Assche static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, 163fa491b14Szhangyi (F) bh_end_io_t *end_io) 164fa491b14Szhangyi (F) { 165fb265c9cSTheodore Ts'o /* 166fa491b14Szhangyi (F) * buffer's verified bit is no longer valid after reading from 167fa491b14Szhangyi (F) * disk again due to write out error, clear it to make sure we 168fa491b14Szhangyi (F) * recheck the buffer contents. 169fa491b14Szhangyi (F) */ 170fa491b14Szhangyi (F) clear_buffer_verified(bh); 171fa491b14Szhangyi (F) 172fa491b14Szhangyi (F) bh->b_end_io = end_io ? end_io : end_buffer_read_sync; 173fa491b14Szhangyi (F) get_bh(bh); 1741420c4a5SBart Van Assche submit_bh(REQ_OP_READ | op_flags, bh); 175fa491b14Szhangyi (F) } 176fa491b14Szhangyi (F) 17767c0f556SBart Van Assche void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags, 178fa491b14Szhangyi (F) bh_end_io_t *end_io) 179fa491b14Szhangyi (F) { 180fa491b14Szhangyi (F) BUG_ON(!buffer_locked(bh)); 181fa491b14Szhangyi (F) 182fa491b14Szhangyi (F) if (ext4_buffer_uptodate(bh)) { 183fa491b14Szhangyi (F) unlock_buffer(bh); 184fa491b14Szhangyi (F) return; 185fa491b14Szhangyi (F) } 186fa491b14Szhangyi (F) __ext4_read_bh(bh, op_flags, end_io); 187fa491b14Szhangyi (F) } 188fa491b14Szhangyi (F) 18967c0f556SBart Van Assche int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, bh_end_io_t *end_io) 190fa491b14Szhangyi (F) { 191fa491b14Szhangyi (F) BUG_ON(!buffer_locked(bh)); 192fa491b14Szhangyi (F) 193fa491b14Szhangyi (F) if (ext4_buffer_uptodate(bh)) { 194fa491b14Szhangyi (F) unlock_buffer(bh); 195fa491b14Szhangyi (F) return 0; 196fa491b14Szhangyi (F) } 197fa491b14Szhangyi (F) 198fa491b14Szhangyi (F) __ext4_read_bh(bh, op_flags, end_io); 199fa491b14Szhangyi (F) 200fa491b14Szhangyi (F) wait_on_buffer(bh); 201fa491b14Szhangyi (F) if (buffer_uptodate(bh)) 202fa491b14Szhangyi (F) return 0; 203fa491b14Szhangyi (F) return -EIO; 204fa491b14Szhangyi (F) } 205fa491b14Szhangyi (F) 20667c0f556SBart Van Assche int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait) 207fa491b14Szhangyi (F) { 2080b73284cSZhang Yi lock_buffer(bh); 2090b73284cSZhang Yi if (!wait) { 210fa491b14Szhangyi (F) ext4_read_bh_nowait(bh, op_flags, NULL); 211fa491b14Szhangyi (F) return 0; 212fa491b14Szhangyi (F) } 2130b73284cSZhang Yi return ext4_read_bh(bh, op_flags, NULL); 214fa491b14Szhangyi (F) } 215fa491b14Szhangyi (F) 216fb265c9cSTheodore Ts'o /* 2178394a6abSzhangyi (F) * This works like __bread_gfp() except it uses ERR_PTR for error 218fb265c9cSTheodore Ts'o * returns. Currently with sb_bread it's impossible to distinguish 219fb265c9cSTheodore Ts'o * between ENOMEM and EIO situations (since both result in a NULL 220fb265c9cSTheodore Ts'o * return. 221fb265c9cSTheodore Ts'o */ 2228394a6abSzhangyi (F) static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb, 22367c0f556SBart Van Assche sector_t block, 22467c0f556SBart Van Assche blk_opf_t op_flags, gfp_t gfp) 225fb265c9cSTheodore Ts'o { 2262d069c08Szhangyi (F) struct buffer_head *bh; 2272d069c08Szhangyi (F) int ret; 228fb265c9cSTheodore Ts'o 2298394a6abSzhangyi (F) bh = sb_getblk_gfp(sb, block, gfp); 230fb265c9cSTheodore Ts'o if (bh == NULL) 231fb265c9cSTheodore Ts'o return ERR_PTR(-ENOMEM); 232cf2834a5STheodore Ts'o if (ext4_buffer_uptodate(bh)) 233fb265c9cSTheodore Ts'o return bh; 2342d069c08Szhangyi (F) 2352d069c08Szhangyi (F) ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true); 2362d069c08Szhangyi (F) if (ret) { 237fb265c9cSTheodore Ts'o put_bh(bh); 2382d069c08Szhangyi (F) return ERR_PTR(ret); 2392d069c08Szhangyi (F) } 2402d069c08Szhangyi (F) return bh; 241fb265c9cSTheodore Ts'o } 242fb265c9cSTheodore Ts'o 2438394a6abSzhangyi (F) struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, 24467c0f556SBart Van Assche blk_opf_t op_flags) 2458394a6abSzhangyi (F) { 2468394a6abSzhangyi (F) return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE); 2478394a6abSzhangyi (F) } 2488394a6abSzhangyi (F) 2498394a6abSzhangyi (F) struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, 2508394a6abSzhangyi (F) sector_t block) 2518394a6abSzhangyi (F) { 2528394a6abSzhangyi (F) return __ext4_sb_bread_gfp(sb, block, 0, 0); 2538394a6abSzhangyi (F) } 2548394a6abSzhangyi (F) 2555df1d412Szhangyi (F) void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) 2565df1d412Szhangyi (F) { 2575df1d412Szhangyi (F) struct buffer_head *bh = sb_getblk_gfp(sb, block, 0); 2585df1d412Szhangyi (F) 2595df1d412Szhangyi (F) if (likely(bh)) { 2600b73284cSZhang Yi if (trylock_buffer(bh)) 2610b73284cSZhang Yi ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL); 2625df1d412Szhangyi (F) brelse(bh); 2635df1d412Szhangyi (F) } 264c197855eSStephen Hemminger } 265a9c47317SDarrick J. Wong 266a9c47317SDarrick J. Wong static int ext4_verify_csum_type(struct super_block *sb, 2679aa5d32bSDmitry Monakhov struct ext4_super_block *es) 268a9c47317SDarrick J. Wong { 269a9c47317SDarrick J. Wong if (!ext4_has_feature_metadata_csum(sb)) 270a9c47317SDarrick J. Wong return 1; 271a9c47317SDarrick J. Wong 272a9c47317SDarrick J. Wong return es->s_checksum_type == EXT4_CRC32C_CHKSUM; 27306db49e6STheodore Ts'o } 274a9c47317SDarrick J. Wong 275bbc605cdSLukas Czerner __le32 ext4_superblock_csum(struct super_block *sb, 276a9c47317SDarrick J. Wong struct ext4_super_block *es) 277a9c47317SDarrick J. Wong { 278a9c47317SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(sb); 279a9c47317SDarrick J. Wong int offset = offsetof(struct ext4_super_block, s_checksum); 280a9c47317SDarrick J. Wong __u32 csum; 281a9c47317SDarrick J. Wong 282a9c47317SDarrick J. Wong csum = ext4_chksum(sbi, ~0, (char *)es, offset); 283a9c47317SDarrick J. Wong 284a9c47317SDarrick J. Wong return cpu_to_le32(csum); 285a9c47317SDarrick J. Wong } 286a9c47317SDarrick J. Wong 287a9c47317SDarrick J. Wong static int ext4_superblock_csum_verify(struct super_block *sb, 288a9c47317SDarrick J. Wong struct ext4_super_block *es) 289a9c47317SDarrick J. Wong { 290a9c47317SDarrick J. Wong if (!ext4_has_metadata_csum(sb)) 291a9c47317SDarrick J. Wong return 1; 292a9c47317SDarrick J. Wong 293a9c47317SDarrick J. Wong return es->s_checksum == ext4_superblock_csum(sb, es); 294a9c47317SDarrick J. Wong } 295a9c47317SDarrick J. Wong 296a9c47317SDarrick J. Wong void ext4_superblock_csum_set(struct super_block *sb) 297a9c47317SDarrick J. Wong { 29806db49e6STheodore Ts'o struct ext4_super_block *es = EXT4_SB(sb)->s_es; 29906db49e6STheodore Ts'o 3009aa5d32bSDmitry Monakhov if (!ext4_has_metadata_csum(sb)) 301a9c47317SDarrick J. Wong return; 302a9c47317SDarrick J. Wong 303a9c47317SDarrick J. Wong es->s_checksum = ext4_superblock_csum(sb, es); 304a9c47317SDarrick J. Wong } 305a9c47317SDarrick J. Wong 3068fadc143SAlexandre Ratchov ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, 3078fadc143SAlexandre Ratchov struct ext4_group_desc *bg) 308bd81d8eeSLaurent Vivier { 3093a14589cSAneesh Kumar K.V return le32_to_cpu(bg->bg_block_bitmap_lo) | 3108fadc143SAlexandre Ratchov (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 3118fadc143SAlexandre Ratchov (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0); 312bd81d8eeSLaurent Vivier } 313bd81d8eeSLaurent Vivier 3148fadc143SAlexandre Ratchov ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, 3158fadc143SAlexandre Ratchov struct ext4_group_desc *bg) 316bd81d8eeSLaurent Vivier { 3175272f837SAneesh Kumar K.V return le32_to_cpu(bg->bg_inode_bitmap_lo) | 3188fadc143SAlexandre Ratchov (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 3198fadc143SAlexandre Ratchov (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0); 320bd81d8eeSLaurent Vivier } 321bd81d8eeSLaurent Vivier 3228fadc143SAlexandre Ratchov ext4_fsblk_t ext4_inode_table(struct super_block *sb, 3238fadc143SAlexandre Ratchov struct ext4_group_desc *bg) 324bd81d8eeSLaurent Vivier { 3255272f837SAneesh Kumar K.V return le32_to_cpu(bg->bg_inode_table_lo) | 3268fadc143SAlexandre Ratchov (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 3278fadc143SAlexandre Ratchov (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); 328bd81d8eeSLaurent Vivier } 329bd81d8eeSLaurent Vivier 330021b65bbSTheodore Ts'o __u32 ext4_free_group_clusters(struct super_block *sb, 331560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 332560671a0SAneesh Kumar K.V { 333560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_free_blocks_count_lo) | 334560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 335560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0); 336560671a0SAneesh Kumar K.V } 337560671a0SAneesh Kumar K.V 338560671a0SAneesh Kumar K.V __u32 ext4_free_inodes_count(struct super_block *sb, 339560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 340560671a0SAneesh Kumar K.V { 341560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_free_inodes_count_lo) | 342560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 343560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); 344560671a0SAneesh Kumar K.V } 345560671a0SAneesh Kumar K.V 346560671a0SAneesh Kumar K.V __u32 ext4_used_dirs_count(struct super_block *sb, 347560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 348560671a0SAneesh Kumar K.V { 349560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_used_dirs_count_lo) | 350560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 351560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0); 352560671a0SAneesh Kumar K.V } 353560671a0SAneesh Kumar K.V 354560671a0SAneesh Kumar K.V __u32 ext4_itable_unused_count(struct super_block *sb, 355560671a0SAneesh Kumar K.V struct ext4_group_desc *bg) 356560671a0SAneesh Kumar K.V { 357560671a0SAneesh Kumar K.V return le16_to_cpu(bg->bg_itable_unused_lo) | 358560671a0SAneesh Kumar K.V (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 359560671a0SAneesh Kumar K.V (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0); 360560671a0SAneesh Kumar K.V } 361560671a0SAneesh Kumar K.V 3628fadc143SAlexandre Ratchov void ext4_block_bitmap_set(struct super_block *sb, 3638fadc143SAlexandre Ratchov struct ext4_group_desc *bg, ext4_fsblk_t blk) 364bd81d8eeSLaurent Vivier { 3653a14589cSAneesh Kumar K.V bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk); 3668fadc143SAlexandre Ratchov if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 3678fadc143SAlexandre Ratchov bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32); 368bd81d8eeSLaurent Vivier } 369bd81d8eeSLaurent Vivier 3708fadc143SAlexandre Ratchov void ext4_inode_bitmap_set(struct super_block *sb, 3718fadc143SAlexandre Ratchov struct ext4_group_desc *bg, ext4_fsblk_t blk) 372bd81d8eeSLaurent Vivier { 3735272f837SAneesh Kumar K.V bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk); 3748fadc143SAlexandre Ratchov if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 3758fadc143SAlexandre Ratchov bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32); 376bd81d8eeSLaurent Vivier } 377bd81d8eeSLaurent Vivier 3788fadc143SAlexandre Ratchov void ext4_inode_table_set(struct super_block *sb, 3798fadc143SAlexandre Ratchov struct ext4_group_desc *bg, ext4_fsblk_t blk) 380bd81d8eeSLaurent Vivier { 3815272f837SAneesh Kumar K.V bg->bg_inode_table_lo = cpu_to_le32((u32)blk); 3828fadc143SAlexandre Ratchov if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 3838fadc143SAlexandre Ratchov bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); 384bd81d8eeSLaurent Vivier } 385bd81d8eeSLaurent Vivier 386021b65bbSTheodore Ts'o void ext4_free_group_clusters_set(struct super_block *sb, 387560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 388560671a0SAneesh Kumar K.V { 389560671a0SAneesh Kumar K.V bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count); 390560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 391560671a0SAneesh Kumar K.V bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16); 392560671a0SAneesh Kumar K.V } 393560671a0SAneesh Kumar K.V 394560671a0SAneesh Kumar K.V void ext4_free_inodes_set(struct super_block *sb, 395560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 396560671a0SAneesh Kumar K.V { 397560671a0SAneesh Kumar K.V bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); 398560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 399560671a0SAneesh Kumar K.V bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); 400560671a0SAneesh Kumar K.V } 401560671a0SAneesh Kumar K.V 402560671a0SAneesh Kumar K.V void ext4_used_dirs_set(struct super_block *sb, 403560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 404560671a0SAneesh Kumar K.V { 405560671a0SAneesh Kumar K.V bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count); 406560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 407560671a0SAneesh Kumar K.V bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16); 408560671a0SAneesh Kumar K.V } 409560671a0SAneesh Kumar K.V 410560671a0SAneesh Kumar K.V void ext4_itable_unused_set(struct super_block *sb, 411560671a0SAneesh Kumar K.V struct ext4_group_desc *bg, __u32 count) 412560671a0SAneesh Kumar K.V { 413560671a0SAneesh Kumar K.V bg->bg_itable_unused_lo = cpu_to_le16((__u16)count); 414560671a0SAneesh Kumar K.V if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) 415560671a0SAneesh Kumar K.V bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); 416560671a0SAneesh Kumar K.V } 417560671a0SAneesh Kumar K.V 418c92dc856SJan Kara static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now) 4196a0678a7SArnd Bergmann { 4206a0678a7SArnd Bergmann now = clamp_val(now, 0, (1ull << 40) - 1); 4216a0678a7SArnd Bergmann 4226a0678a7SArnd Bergmann *lo = cpu_to_le32(lower_32_bits(now)); 4236a0678a7SArnd Bergmann *hi = upper_32_bits(now); 4246a0678a7SArnd Bergmann } 4256a0678a7SArnd Bergmann 4266a0678a7SArnd Bergmann static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi) 4276a0678a7SArnd Bergmann { 4286a0678a7SArnd Bergmann return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo); 4296a0678a7SArnd Bergmann } 4306a0678a7SArnd Bergmann #define ext4_update_tstamp(es, tstamp) \ 431c92dc856SJan Kara __ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \ 432c92dc856SJan Kara ktime_get_real_seconds()) 4336a0678a7SArnd Bergmann #define ext4_get_tstamp(es, tstamp) \ 4346a0678a7SArnd Bergmann __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi) 435d3d1faf6SCurt Wohlgemuth 436bdfe0cbdSTheodore Ts'o /* 437bdfe0cbdSTheodore Ts'o * The del_gendisk() function uninitializes the disk-specific data 438bdfe0cbdSTheodore Ts'o * structures, including the bdi structure, without telling anyone 439bdfe0cbdSTheodore Ts'o * else. Once this happens, any attempt to call mark_buffer_dirty() 440bdfe0cbdSTheodore Ts'o * (for example, by ext4_commit_super), will cause a kernel OOPS. 441bdfe0cbdSTheodore Ts'o * This is a kludge to prevent these oops until we can put in a proper 442bdfe0cbdSTheodore Ts'o * hook in del_gendisk() to inform the VFS and file system layers. 443bdfe0cbdSTheodore Ts'o */ 444bdfe0cbdSTheodore Ts'o static int block_device_ejected(struct super_block *sb) 445bdfe0cbdSTheodore Ts'o { 446bdfe0cbdSTheodore Ts'o struct inode *bd_inode = sb->s_bdev->bd_inode; 447bdfe0cbdSTheodore Ts'o struct backing_dev_info *bdi = inode_to_bdi(bd_inode); 448bdfe0cbdSTheodore Ts'o 449bdfe0cbdSTheodore Ts'o return bdi->dev == NULL; 450bdfe0cbdSTheodore Ts'o } 451bdfe0cbdSTheodore Ts'o 45218aadd47SBobi Jam static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) 45318aadd47SBobi Jam { 45418aadd47SBobi Jam struct super_block *sb = journal->j_private; 45518aadd47SBobi Jam struct ext4_sb_info *sbi = EXT4_SB(sb); 45618aadd47SBobi Jam int error = is_journal_aborted(journal); 4575d3ee208SDmitry Monakhov struct ext4_journal_cb_entry *jce; 45818aadd47SBobi Jam 4595d3ee208SDmitry Monakhov BUG_ON(txn->t_state == T_FINISHED); 460a0154344SDaeho Jeong 461a0154344SDaeho Jeong ext4_process_freed_data(sb, txn->t_tid); 462a0154344SDaeho Jeong 46318aadd47SBobi Jam spin_lock(&sbi->s_md_lock); 4645d3ee208SDmitry Monakhov while (!list_empty(&txn->t_private_list)) { 4655d3ee208SDmitry Monakhov jce = list_entry(txn->t_private_list.next, 4665d3ee208SDmitry Monakhov struct ext4_journal_cb_entry, jce_list); 46718aadd47SBobi Jam list_del_init(&jce->jce_list); 46818aadd47SBobi Jam spin_unlock(&sbi->s_md_lock); 46918aadd47SBobi Jam jce->jce_func(sb, jce, error); 47018aadd47SBobi Jam spin_lock(&sbi->s_md_lock); 47118aadd47SBobi Jam } 47218aadd47SBobi Jam spin_unlock(&sbi->s_md_lock); 47318aadd47SBobi Jam } 4741c13d5c0STheodore Ts'o 475afb585a9SMauricio Faria de Oliveira /* 476afb585a9SMauricio Faria de Oliveira * This writepage callback for write_cache_pages() 477afb585a9SMauricio Faria de Oliveira * takes care of a few cases after page cleaning. 478afb585a9SMauricio Faria de Oliveira * 479afb585a9SMauricio Faria de Oliveira * write_cache_pages() already checks for dirty pages 480afb585a9SMauricio Faria de Oliveira * and calls clear_page_dirty_for_io(), which we want, 481afb585a9SMauricio Faria de Oliveira * to write protect the pages. 482afb585a9SMauricio Faria de Oliveira * 483afb585a9SMauricio Faria de Oliveira * However, we may have to redirty a page (see below.) 484afb585a9SMauricio Faria de Oliveira */ 485afb585a9SMauricio Faria de Oliveira static int ext4_journalled_writepage_callback(struct page *page, 486afb585a9SMauricio Faria de Oliveira struct writeback_control *wbc, 487afb585a9SMauricio Faria de Oliveira void *data) 488afb585a9SMauricio Faria de Oliveira { 489afb585a9SMauricio Faria de Oliveira transaction_t *transaction = (transaction_t *) data; 490afb585a9SMauricio Faria de Oliveira struct buffer_head *bh, *head; 491afb585a9SMauricio Faria de Oliveira struct journal_head *jh; 492afb585a9SMauricio Faria de Oliveira 493afb585a9SMauricio Faria de Oliveira bh = head = page_buffers(page); 494afb585a9SMauricio Faria de Oliveira do { 495afb585a9SMauricio Faria de Oliveira /* 496afb585a9SMauricio Faria de Oliveira * We have to redirty a page in these cases: 497afb585a9SMauricio Faria de Oliveira * 1) If buffer is dirty, it means the page was dirty because it 498afb585a9SMauricio Faria de Oliveira * contains a buffer that needs checkpointing. So the dirty bit 499afb585a9SMauricio Faria de Oliveira * needs to be preserved so that checkpointing writes the buffer 500afb585a9SMauricio Faria de Oliveira * properly. 501afb585a9SMauricio Faria de Oliveira * 2) If buffer is not part of the committing transaction 502afb585a9SMauricio Faria de Oliveira * (we may have just accidentally come across this buffer because 503afb585a9SMauricio Faria de Oliveira * inode range tracking is not exact) or if the currently running 504afb585a9SMauricio Faria de Oliveira * transaction already contains this buffer as well, dirty bit 505afb585a9SMauricio Faria de Oliveira * needs to be preserved so that the buffer gets writeprotected 506afb585a9SMauricio Faria de Oliveira * properly on running transaction's commit. 507afb585a9SMauricio Faria de Oliveira */ 508afb585a9SMauricio Faria de Oliveira jh = bh2jh(bh); 509afb585a9SMauricio Faria de Oliveira if (buffer_dirty(bh) || 510afb585a9SMauricio Faria de Oliveira (jh && (jh->b_transaction != transaction || 511afb585a9SMauricio Faria de Oliveira jh->b_next_transaction))) { 512afb585a9SMauricio Faria de Oliveira redirty_page_for_writepage(wbc, page); 513afb585a9SMauricio Faria de Oliveira goto out; 514afb585a9SMauricio Faria de Oliveira } 515afb585a9SMauricio Faria de Oliveira } while ((bh = bh->b_this_page) != head); 516afb585a9SMauricio Faria de Oliveira 517afb585a9SMauricio Faria de Oliveira out: 518afb585a9SMauricio Faria de Oliveira return AOP_WRITEPAGE_ACTIVATE; 519afb585a9SMauricio Faria de Oliveira } 520afb585a9SMauricio Faria de Oliveira 521afb585a9SMauricio Faria de Oliveira static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode) 522afb585a9SMauricio Faria de Oliveira { 523afb585a9SMauricio Faria de Oliveira struct address_space *mapping = jinode->i_vfs_inode->i_mapping; 524afb585a9SMauricio Faria de Oliveira struct writeback_control wbc = { 525afb585a9SMauricio Faria de Oliveira .sync_mode = WB_SYNC_ALL, 526afb585a9SMauricio Faria de Oliveira .nr_to_write = LONG_MAX, 527afb585a9SMauricio Faria de Oliveira .range_start = jinode->i_dirty_start, 528afb585a9SMauricio Faria de Oliveira .range_end = jinode->i_dirty_end, 529afb585a9SMauricio Faria de Oliveira }; 530afb585a9SMauricio Faria de Oliveira 531afb585a9SMauricio Faria de Oliveira return write_cache_pages(mapping, &wbc, 532afb585a9SMauricio Faria de Oliveira ext4_journalled_writepage_callback, 533afb585a9SMauricio Faria de Oliveira jinode->i_transaction); 534afb585a9SMauricio Faria de Oliveira } 535afb585a9SMauricio Faria de Oliveira 536afb585a9SMauricio Faria de Oliveira static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode) 537afb585a9SMauricio Faria de Oliveira { 538afb585a9SMauricio Faria de Oliveira int ret; 539afb585a9SMauricio Faria de Oliveira 540afb585a9SMauricio Faria de Oliveira if (ext4_should_journal_data(jinode->i_vfs_inode)) 541afb585a9SMauricio Faria de Oliveira ret = ext4_journalled_submit_inode_data_buffers(jinode); 542afb585a9SMauricio Faria de Oliveira else 543afb585a9SMauricio Faria de Oliveira ret = jbd2_journal_submit_inode_data_buffers(jinode); 544afb585a9SMauricio Faria de Oliveira 545afb585a9SMauricio Faria de Oliveira return ret; 546afb585a9SMauricio Faria de Oliveira } 547afb585a9SMauricio Faria de Oliveira 548afb585a9SMauricio Faria de Oliveira static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode) 549afb585a9SMauricio Faria de Oliveira { 550afb585a9SMauricio Faria de Oliveira int ret = 0; 551afb585a9SMauricio Faria de Oliveira 552afb585a9SMauricio Faria de Oliveira if (!ext4_should_journal_data(jinode->i_vfs_inode)) 553afb585a9SMauricio Faria de Oliveira ret = jbd2_journal_finish_inode_data_buffers(jinode); 554afb585a9SMauricio Faria de Oliveira 555afb585a9SMauricio Faria de Oliveira return ret; 556afb585a9SMauricio Faria de Oliveira } 557afb585a9SMauricio Faria de Oliveira 5581dc1097fSJan Kara static bool system_going_down(void) 5591dc1097fSJan Kara { 5601dc1097fSJan Kara return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF 5611dc1097fSJan Kara || system_state == SYSTEM_RESTART; 5621dc1097fSJan Kara } 5631dc1097fSJan Kara 56402a7780eSJan Kara struct ext4_err_translation { 56502a7780eSJan Kara int code; 56602a7780eSJan Kara int errno; 56702a7780eSJan Kara }; 56802a7780eSJan Kara 56902a7780eSJan Kara #define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err } 57002a7780eSJan Kara 57102a7780eSJan Kara static struct ext4_err_translation err_translation[] = { 57202a7780eSJan Kara EXT4_ERR_TRANSLATE(EIO), 57302a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOMEM), 57402a7780eSJan Kara EXT4_ERR_TRANSLATE(EFSBADCRC), 57502a7780eSJan Kara EXT4_ERR_TRANSLATE(EFSCORRUPTED), 57602a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOSPC), 57702a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOKEY), 57802a7780eSJan Kara EXT4_ERR_TRANSLATE(EROFS), 57902a7780eSJan Kara EXT4_ERR_TRANSLATE(EFBIG), 58002a7780eSJan Kara EXT4_ERR_TRANSLATE(EEXIST), 58102a7780eSJan Kara EXT4_ERR_TRANSLATE(ERANGE), 58202a7780eSJan Kara EXT4_ERR_TRANSLATE(EOVERFLOW), 58302a7780eSJan Kara EXT4_ERR_TRANSLATE(EBUSY), 58402a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOTDIR), 58502a7780eSJan Kara EXT4_ERR_TRANSLATE(ENOTEMPTY), 58602a7780eSJan Kara EXT4_ERR_TRANSLATE(ESHUTDOWN), 58702a7780eSJan Kara EXT4_ERR_TRANSLATE(EFAULT), 58802a7780eSJan Kara }; 58902a7780eSJan Kara 59002a7780eSJan Kara static int ext4_errno_to_code(int errno) 59102a7780eSJan Kara { 59202a7780eSJan Kara int i; 59302a7780eSJan Kara 59402a7780eSJan Kara for (i = 0; i < ARRAY_SIZE(err_translation); i++) 59502a7780eSJan Kara if (err_translation[i].errno == errno) 59602a7780eSJan Kara return err_translation[i].code; 59702a7780eSJan Kara return EXT4_ERR_UNKNOWN; 59802a7780eSJan Kara } 59902a7780eSJan Kara 6002d01ddc8SJan Kara static void save_error_info(struct super_block *sb, int error, 60140676623SJan Kara __u32 ino, __u64 block, 60240676623SJan Kara const char *func, unsigned int line) 60340676623SJan Kara { 604c92dc856SJan Kara struct ext4_sb_info *sbi = EXT4_SB(sb); 60540676623SJan Kara 60602a7780eSJan Kara /* We default to EFSCORRUPTED error... */ 60702a7780eSJan Kara if (error == 0) 60802a7780eSJan Kara error = EFSCORRUPTED; 609c92dc856SJan Kara 610c92dc856SJan Kara spin_lock(&sbi->s_error_lock); 611c92dc856SJan Kara sbi->s_add_error_count++; 612c92dc856SJan Kara sbi->s_last_error_code = error; 613c92dc856SJan Kara sbi->s_last_error_line = line; 614c92dc856SJan Kara sbi->s_last_error_ino = ino; 615c92dc856SJan Kara sbi->s_last_error_block = block; 616c92dc856SJan Kara sbi->s_last_error_func = func; 617c92dc856SJan Kara sbi->s_last_error_time = ktime_get_real_seconds(); 618c92dc856SJan Kara if (!sbi->s_first_error_time) { 619c92dc856SJan Kara sbi->s_first_error_code = error; 620c92dc856SJan Kara sbi->s_first_error_line = line; 621c92dc856SJan Kara sbi->s_first_error_ino = ino; 622c92dc856SJan Kara sbi->s_first_error_block = block; 623c92dc856SJan Kara sbi->s_first_error_func = func; 624c92dc856SJan Kara sbi->s_first_error_time = sbi->s_last_error_time; 62540676623SJan Kara } 626c92dc856SJan Kara spin_unlock(&sbi->s_error_lock); 62740676623SJan Kara } 62840676623SJan Kara 629ac27a0ecSDave Kleikamp /* Deal with the reporting of failure conditions on a filesystem such as 630ac27a0ecSDave Kleikamp * inconsistencies detected or read IO failures. 631ac27a0ecSDave Kleikamp * 632ac27a0ecSDave Kleikamp * On ext2, we can store the error state of the filesystem in the 633617ba13bSMingming Cao * superblock. That is not possible on ext4, because we may have other 634ac27a0ecSDave Kleikamp * write ordering constraints on the superblock which prevent us from 635ac27a0ecSDave Kleikamp * writing it out straight away; and given that the journal is about to 636ac27a0ecSDave Kleikamp * be aborted, we can't rely on the current, or future, transactions to 637ac27a0ecSDave Kleikamp * write out the superblock safely. 638ac27a0ecSDave Kleikamp * 639dab291afSMingming Cao * We'll just use the jbd2_journal_abort() error code to record an error in 640d6b198bcSThadeu Lima de Souza Cascardo * the journal instead. On recovery, the journal will complain about 641ac27a0ecSDave Kleikamp * that error until we've noted it down and cleared it. 642014c9caaSJan Kara * 643014c9caaSJan Kara * If force_ro is set, we unconditionally force the filesystem into an 644014c9caaSJan Kara * ABORT|READONLY state, unless the error response on the fs has been set to 645014c9caaSJan Kara * panic in which case we take the easy way out and panic immediately. This is 646014c9caaSJan Kara * used to deal with unrecoverable failures such as journal IO errors or ENOMEM 647014c9caaSJan Kara * at a critical moment in log management. 648ac27a0ecSDave Kleikamp */ 649e789ca0cSJan Kara static void ext4_handle_error(struct super_block *sb, bool force_ro, int error, 650e789ca0cSJan Kara __u32 ino, __u64 block, 651e789ca0cSJan Kara const char *func, unsigned int line) 652ac27a0ecSDave Kleikamp { 653b08070ecSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 6542d01ddc8SJan Kara bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT); 655b08070ecSJan Kara 656e789ca0cSJan Kara EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 657327eaf73STheodore Ts'o if (test_opt(sb, WARN_ON_ERROR)) 658327eaf73STheodore Ts'o WARN_ON_ONCE(1); 659327eaf73STheodore Ts'o 6602d01ddc8SJan Kara if (!continue_fs && !sb_rdonly(sb)) { 6619b5f6c9bSHarshad Shirwadkar ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED); 662ac27a0ecSDave Kleikamp if (journal) 663dab291afSMingming Cao jbd2_journal_abort(journal, -EIO); 6642d01ddc8SJan Kara } 6652d01ddc8SJan Kara 6662d01ddc8SJan Kara if (!bdev_read_only(sb->s_bdev)) { 6672d01ddc8SJan Kara save_error_info(sb, error, ino, block, func, line); 6682d01ddc8SJan Kara /* 6692d01ddc8SJan Kara * In case the fs should keep running, we need to writeout 6702d01ddc8SJan Kara * superblock through the journal. Due to lock ordering 6712d01ddc8SJan Kara * constraints, it may not be safe to do it right here so we 6722d01ddc8SJan Kara * defer superblock flushing to a workqueue. 6732d01ddc8SJan Kara */ 674bb9464e0Syangerkun if (continue_fs && journal) 6752d01ddc8SJan Kara schedule_work(&EXT4_SB(sb)->s_error_work); 6762d01ddc8SJan Kara else 6772d01ddc8SJan Kara ext4_commit_super(sb); 6782d01ddc8SJan Kara } 6792d01ddc8SJan Kara 6801dc1097fSJan Kara /* 6811dc1097fSJan Kara * We force ERRORS_RO behavior when system is rebooting. Otherwise we 6821dc1097fSJan Kara * could panic during 'reboot -f' as the underlying device got already 6831dc1097fSJan Kara * disabled. 6841dc1097fSJan Kara */ 685014c9caaSJan Kara if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) { 686617ba13bSMingming Cao panic("EXT4-fs (device %s): panic forced after error\n", 687ac27a0ecSDave Kleikamp sb->s_id); 688ac27a0ecSDave Kleikamp } 689ac2f7ca5SYe Bin 690ac2f7ca5SYe Bin if (sb_rdonly(sb) || continue_fs) 691ac2f7ca5SYe Bin return; 692ac2f7ca5SYe Bin 693014c9caaSJan Kara ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); 694014c9caaSJan Kara /* 695014c9caaSJan Kara * Make sure updated value of ->s_mount_flags will be visible before 696014c9caaSJan Kara * ->s_flags update 697014c9caaSJan Kara */ 698014c9caaSJan Kara smp_wmb(); 699014c9caaSJan Kara sb->s_flags |= SB_RDONLY; 7004327ba52SDaeho Jeong } 701ac27a0ecSDave Kleikamp 702c92dc856SJan Kara static void flush_stashed_error_work(struct work_struct *work) 703c92dc856SJan Kara { 704c92dc856SJan Kara struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info, 705c92dc856SJan Kara s_error_work); 7062d01ddc8SJan Kara journal_t *journal = sbi->s_journal; 7072d01ddc8SJan Kara handle_t *handle; 708c92dc856SJan Kara 7092d01ddc8SJan Kara /* 7102d01ddc8SJan Kara * If the journal is still running, we have to write out superblock 7112d01ddc8SJan Kara * through the journal to avoid collisions of other journalled sb 7122d01ddc8SJan Kara * updates. 7132d01ddc8SJan Kara * 7142d01ddc8SJan Kara * We use directly jbd2 functions here to avoid recursing back into 7152d01ddc8SJan Kara * ext4 error handling code during handling of previous errors. 7162d01ddc8SJan Kara */ 7172d01ddc8SJan Kara if (!sb_rdonly(sbi->s_sb) && journal) { 718558d6450SYe Bin struct buffer_head *sbh = sbi->s_sbh; 7192d01ddc8SJan Kara handle = jbd2_journal_start(journal, 1); 7202d01ddc8SJan Kara if (IS_ERR(handle)) 7212d01ddc8SJan Kara goto write_directly; 722558d6450SYe Bin if (jbd2_journal_get_write_access(handle, sbh)) { 7232d01ddc8SJan Kara jbd2_journal_stop(handle); 7242d01ddc8SJan Kara goto write_directly; 7252d01ddc8SJan Kara } 7262d01ddc8SJan Kara ext4_update_super(sbi->s_sb); 727558d6450SYe Bin if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { 728558d6450SYe Bin ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to " 729558d6450SYe Bin "superblock detected"); 730558d6450SYe Bin clear_buffer_write_io_error(sbh); 731558d6450SYe Bin set_buffer_uptodate(sbh); 732558d6450SYe Bin } 733558d6450SYe Bin 734558d6450SYe Bin if (jbd2_journal_dirty_metadata(handle, sbh)) { 7352d01ddc8SJan Kara jbd2_journal_stop(handle); 7362d01ddc8SJan Kara goto write_directly; 7372d01ddc8SJan Kara } 7382d01ddc8SJan Kara jbd2_journal_stop(handle); 739d578b994SJonathan Davies ext4_notify_error_sysfs(sbi); 7402d01ddc8SJan Kara return; 7412d01ddc8SJan Kara } 7422d01ddc8SJan Kara write_directly: 7432d01ddc8SJan Kara /* 7442d01ddc8SJan Kara * Write through journal failed. Write sb directly to get error info 7452d01ddc8SJan Kara * out and hope for the best. 7462d01ddc8SJan Kara */ 7474392fbc4SJan Kara ext4_commit_super(sbi->s_sb); 748d578b994SJonathan Davies ext4_notify_error_sysfs(sbi); 749ac27a0ecSDave Kleikamp } 750ac27a0ecSDave Kleikamp 751efbed4dcSTheodore Ts'o #define ext4_error_ratelimit(sb) \ 752efbed4dcSTheodore Ts'o ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \ 753efbed4dcSTheodore Ts'o "EXT4-fs error") 754efbed4dcSTheodore Ts'o 75512062dddSEric Sandeen void __ext4_error(struct super_block *sb, const char *function, 756014c9caaSJan Kara unsigned int line, bool force_ro, int error, __u64 block, 75754d3adbcSTheodore Ts'o const char *fmt, ...) 758ac27a0ecSDave Kleikamp { 7590ff2ea7dSJoe Perches struct va_format vaf; 760ac27a0ecSDave Kleikamp va_list args; 761ac27a0ecSDave Kleikamp 7620db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) 7630db1ff22STheodore Ts'o return; 7640db1ff22STheodore Ts'o 765ccf0f32aSTheodore Ts'o trace_ext4_error(sb, function, line); 766efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(sb)) { 767ac27a0ecSDave Kleikamp va_start(args, fmt); 7680ff2ea7dSJoe Perches vaf.fmt = fmt; 7690ff2ea7dSJoe Perches vaf.va = &args; 770efbed4dcSTheodore Ts'o printk(KERN_CRIT 771efbed4dcSTheodore Ts'o "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", 7720ff2ea7dSJoe Perches sb->s_id, function, line, current->comm, &vaf); 773ac27a0ecSDave Kleikamp va_end(args); 774efbed4dcSTheodore Ts'o } 7759a089b21SGabriel Krisman Bertazi fsnotify_sb_error(sb, NULL, error ? error : EFSCORRUPTED); 7769a089b21SGabriel Krisman Bertazi 777e789ca0cSJan Kara ext4_handle_error(sb, force_ro, error, 0, block, function, line); 778ac27a0ecSDave Kleikamp } 779ac27a0ecSDave Kleikamp 780e7c96e8eSJoe Perches void __ext4_error_inode(struct inode *inode, const char *function, 78154d3adbcSTheodore Ts'o unsigned int line, ext4_fsblk_t block, int error, 782273df556SFrank Mayhar const char *fmt, ...) 783273df556SFrank Mayhar { 784273df556SFrank Mayhar va_list args; 785f7c21177STheodore Ts'o struct va_format vaf; 786273df556SFrank Mayhar 7870db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 7880db1ff22STheodore Ts'o return; 7890db1ff22STheodore Ts'o 790ccf0f32aSTheodore Ts'o trace_ext4_error(inode->i_sb, function, line); 791efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(inode->i_sb)) { 792273df556SFrank Mayhar va_start(args, fmt); 793f7c21177STheodore Ts'o vaf.fmt = fmt; 794f7c21177STheodore Ts'o vaf.va = &args; 795c398eda0STheodore Ts'o if (block) 796d9ee81daSJoe Perches printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " 797d9ee81daSJoe Perches "inode #%lu: block %llu: comm %s: %pV\n", 798d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 799d9ee81daSJoe Perches block, current->comm, &vaf); 800d9ee81daSJoe Perches else 801d9ee81daSJoe Perches printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " 802d9ee81daSJoe Perches "inode #%lu: comm %s: %pV\n", 803d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 804d9ee81daSJoe Perches current->comm, &vaf); 805273df556SFrank Mayhar va_end(args); 806efbed4dcSTheodore Ts'o } 8079a089b21SGabriel Krisman Bertazi fsnotify_sb_error(inode->i_sb, inode, error ? error : EFSCORRUPTED); 8089a089b21SGabriel Krisman Bertazi 809e789ca0cSJan Kara ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block, 81054d3adbcSTheodore Ts'o function, line); 811273df556SFrank Mayhar } 812273df556SFrank Mayhar 813e7c96e8eSJoe Perches void __ext4_error_file(struct file *file, const char *function, 814f7c21177STheodore Ts'o unsigned int line, ext4_fsblk_t block, 815f7c21177STheodore Ts'o const char *fmt, ...) 816273df556SFrank Mayhar { 817273df556SFrank Mayhar va_list args; 818f7c21177STheodore Ts'o struct va_format vaf; 819496ad9aaSAl Viro struct inode *inode = file_inode(file); 820273df556SFrank Mayhar char pathname[80], *path; 821273df556SFrank Mayhar 8220db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 8230db1ff22STheodore Ts'o return; 8240db1ff22STheodore Ts'o 825ccf0f32aSTheodore Ts'o trace_ext4_error(inode->i_sb, function, line); 826efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(inode->i_sb)) { 8279bf39ab2SMiklos Szeredi path = file_path(file, pathname, sizeof(pathname)); 828f9a62d09SDan Carpenter if (IS_ERR(path)) 829273df556SFrank Mayhar path = "(unknown)"; 830f7c21177STheodore Ts'o va_start(args, fmt); 831f7c21177STheodore Ts'o vaf.fmt = fmt; 832f7c21177STheodore Ts'o vaf.va = &args; 833d9ee81daSJoe Perches if (block) 834d9ee81daSJoe Perches printk(KERN_CRIT 835d9ee81daSJoe Perches "EXT4-fs error (device %s): %s:%d: inode #%lu: " 836d9ee81daSJoe Perches "block %llu: comm %s: path %s: %pV\n", 837d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 838d9ee81daSJoe Perches block, current->comm, path, &vaf); 839d9ee81daSJoe Perches else 840d9ee81daSJoe Perches printk(KERN_CRIT 841d9ee81daSJoe Perches "EXT4-fs error (device %s): %s:%d: inode #%lu: " 842d9ee81daSJoe Perches "comm %s: path %s: %pV\n", 843d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 844d9ee81daSJoe Perches current->comm, path, &vaf); 845273df556SFrank Mayhar va_end(args); 846efbed4dcSTheodore Ts'o } 8479a089b21SGabriel Krisman Bertazi fsnotify_sb_error(inode->i_sb, inode, EFSCORRUPTED); 8489a089b21SGabriel Krisman Bertazi 849e789ca0cSJan Kara ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block, 85054d3adbcSTheodore Ts'o function, line); 851273df556SFrank Mayhar } 852273df556SFrank Mayhar 853722887ddSTheodore Ts'o const char *ext4_decode_error(struct super_block *sb, int errno, 854ac27a0ecSDave Kleikamp char nbuf[16]) 855ac27a0ecSDave Kleikamp { 856ac27a0ecSDave Kleikamp char *errstr = NULL; 857ac27a0ecSDave Kleikamp 858ac27a0ecSDave Kleikamp switch (errno) { 8596a797d27SDarrick J. Wong case -EFSCORRUPTED: 8606a797d27SDarrick J. Wong errstr = "Corrupt filesystem"; 8616a797d27SDarrick J. Wong break; 8626a797d27SDarrick J. Wong case -EFSBADCRC: 8636a797d27SDarrick J. Wong errstr = "Filesystem failed CRC"; 8646a797d27SDarrick J. Wong break; 865ac27a0ecSDave Kleikamp case -EIO: 866ac27a0ecSDave Kleikamp errstr = "IO failure"; 867ac27a0ecSDave Kleikamp break; 868ac27a0ecSDave Kleikamp case -ENOMEM: 869ac27a0ecSDave Kleikamp errstr = "Out of memory"; 870ac27a0ecSDave Kleikamp break; 871ac27a0ecSDave Kleikamp case -EROFS: 87278f1ddbbSTheodore Ts'o if (!sb || (EXT4_SB(sb)->s_journal && 87378f1ddbbSTheodore Ts'o EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) 874ac27a0ecSDave Kleikamp errstr = "Journal has aborted"; 875ac27a0ecSDave Kleikamp else 876ac27a0ecSDave Kleikamp errstr = "Readonly filesystem"; 877ac27a0ecSDave Kleikamp break; 878ac27a0ecSDave Kleikamp default: 879ac27a0ecSDave Kleikamp /* If the caller passed in an extra buffer for unknown 880ac27a0ecSDave Kleikamp * errors, textualise them now. Else we just return 881ac27a0ecSDave Kleikamp * NULL. */ 882ac27a0ecSDave Kleikamp if (nbuf) { 883ac27a0ecSDave Kleikamp /* Check for truncated error codes... */ 884ac27a0ecSDave Kleikamp if (snprintf(nbuf, 16, "error %d", -errno) >= 0) 885ac27a0ecSDave Kleikamp errstr = nbuf; 886ac27a0ecSDave Kleikamp } 887ac27a0ecSDave Kleikamp break; 888ac27a0ecSDave Kleikamp } 889ac27a0ecSDave Kleikamp 890ac27a0ecSDave Kleikamp return errstr; 891ac27a0ecSDave Kleikamp } 892ac27a0ecSDave Kleikamp 893617ba13bSMingming Cao /* __ext4_std_error decodes expected errors from journaling functions 894ac27a0ecSDave Kleikamp * automatically and invokes the appropriate error response. */ 895ac27a0ecSDave Kleikamp 896c398eda0STheodore Ts'o void __ext4_std_error(struct super_block *sb, const char *function, 897c398eda0STheodore Ts'o unsigned int line, int errno) 898ac27a0ecSDave Kleikamp { 899ac27a0ecSDave Kleikamp char nbuf[16]; 900ac27a0ecSDave Kleikamp const char *errstr; 901ac27a0ecSDave Kleikamp 9020db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) 9030db1ff22STheodore Ts'o return; 9040db1ff22STheodore Ts'o 905ac27a0ecSDave Kleikamp /* Special case: if the error is EROFS, and we're not already 906ac27a0ecSDave Kleikamp * inside a transaction, then there's really no point in logging 907ac27a0ecSDave Kleikamp * an error. */ 908bc98a42cSDavid Howells if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb)) 909ac27a0ecSDave Kleikamp return; 910ac27a0ecSDave Kleikamp 911efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(sb)) { 912617ba13bSMingming Cao errstr = ext4_decode_error(sb, errno, nbuf); 913c398eda0STheodore Ts'o printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n", 914c398eda0STheodore Ts'o sb->s_id, function, line, errstr); 915efbed4dcSTheodore Ts'o } 9169a089b21SGabriel Krisman Bertazi fsnotify_sb_error(sb, NULL, errno ? errno : EFSCORRUPTED); 917ac27a0ecSDave Kleikamp 918e789ca0cSJan Kara ext4_handle_error(sb, false, -errno, 0, 0, function, line); 919ac27a0ecSDave Kleikamp } 920ac27a0ecSDave Kleikamp 921e7c96e8eSJoe Perches void __ext4_msg(struct super_block *sb, 922e7c96e8eSJoe Perches const char *prefix, const char *fmt, ...) 923b31e1552SEric Sandeen { 9240ff2ea7dSJoe Perches struct va_format vaf; 925b31e1552SEric Sandeen va_list args; 926b31e1552SEric Sandeen 927da812f61SLukas Czerner if (sb) { 9281cf006edSDmitry Monakhov atomic_inc(&EXT4_SB(sb)->s_msg_count); 929da812f61SLukas Czerner if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), 930da812f61SLukas Czerner "EXT4-fs")) 931efbed4dcSTheodore Ts'o return; 932da812f61SLukas Czerner } 933efbed4dcSTheodore Ts'o 934b31e1552SEric Sandeen va_start(args, fmt); 9350ff2ea7dSJoe Perches vaf.fmt = fmt; 9360ff2ea7dSJoe Perches vaf.va = &args; 937da812f61SLukas Czerner if (sb) 9380ff2ea7dSJoe Perches printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); 939da812f61SLukas Czerner else 940da812f61SLukas Czerner printk("%sEXT4-fs: %pV\n", prefix, &vaf); 941b31e1552SEric Sandeen va_end(args); 942b31e1552SEric Sandeen } 943b31e1552SEric Sandeen 9441cf006edSDmitry Monakhov static int ext4_warning_ratelimit(struct super_block *sb) 9451cf006edSDmitry Monakhov { 9461cf006edSDmitry Monakhov atomic_inc(&EXT4_SB(sb)->s_warning_count); 9471cf006edSDmitry Monakhov return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), 9481cf006edSDmitry Monakhov "EXT4-fs warning"); 9491cf006edSDmitry Monakhov } 950b03a2f7eSAndreas Dilger 95112062dddSEric Sandeen void __ext4_warning(struct super_block *sb, const char *function, 952c398eda0STheodore Ts'o unsigned int line, const char *fmt, ...) 953ac27a0ecSDave Kleikamp { 9540ff2ea7dSJoe Perches struct va_format vaf; 955ac27a0ecSDave Kleikamp va_list args; 956ac27a0ecSDave Kleikamp 957b03a2f7eSAndreas Dilger if (!ext4_warning_ratelimit(sb)) 958efbed4dcSTheodore Ts'o return; 959efbed4dcSTheodore Ts'o 960ac27a0ecSDave Kleikamp va_start(args, fmt); 9610ff2ea7dSJoe Perches vaf.fmt = fmt; 9620ff2ea7dSJoe Perches vaf.va = &args; 9630ff2ea7dSJoe Perches printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", 9640ff2ea7dSJoe Perches sb->s_id, function, line, &vaf); 965ac27a0ecSDave Kleikamp va_end(args); 966ac27a0ecSDave Kleikamp } 967ac27a0ecSDave Kleikamp 968b03a2f7eSAndreas Dilger void __ext4_warning_inode(const struct inode *inode, const char *function, 969b03a2f7eSAndreas Dilger unsigned int line, const char *fmt, ...) 970b03a2f7eSAndreas Dilger { 971b03a2f7eSAndreas Dilger struct va_format vaf; 972b03a2f7eSAndreas Dilger va_list args; 973b03a2f7eSAndreas Dilger 974b03a2f7eSAndreas Dilger if (!ext4_warning_ratelimit(inode->i_sb)) 975b03a2f7eSAndreas Dilger return; 976b03a2f7eSAndreas Dilger 977b03a2f7eSAndreas Dilger va_start(args, fmt); 978b03a2f7eSAndreas Dilger vaf.fmt = fmt; 979b03a2f7eSAndreas Dilger vaf.va = &args; 980b03a2f7eSAndreas Dilger printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: " 981b03a2f7eSAndreas Dilger "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, 982b03a2f7eSAndreas Dilger function, line, inode->i_ino, current->comm, &vaf); 983b03a2f7eSAndreas Dilger va_end(args); 984b03a2f7eSAndreas Dilger } 985b03a2f7eSAndreas Dilger 986e29136f8STheodore Ts'o void __ext4_grp_locked_error(const char *function, unsigned int line, 987e29136f8STheodore Ts'o struct super_block *sb, ext4_group_t grp, 988e29136f8STheodore Ts'o unsigned long ino, ext4_fsblk_t block, 989e29136f8STheodore Ts'o const char *fmt, ...) 9905d1b1b3fSAneesh Kumar K.V __releases(bitlock) 9915d1b1b3fSAneesh Kumar K.V __acquires(bitlock) 9925d1b1b3fSAneesh Kumar K.V { 9930ff2ea7dSJoe Perches struct va_format vaf; 9945d1b1b3fSAneesh Kumar K.V va_list args; 9955d1b1b3fSAneesh Kumar K.V 9960db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) 9970db1ff22STheodore Ts'o return; 9980db1ff22STheodore Ts'o 999ccf0f32aSTheodore Ts'o trace_ext4_error(sb, function, line); 1000efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(sb)) { 10015d1b1b3fSAneesh Kumar K.V va_start(args, fmt); 10020ff2ea7dSJoe Perches vaf.fmt = fmt; 10030ff2ea7dSJoe Perches vaf.va = &args; 100421149d61SRobin Dong printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", 1005e29136f8STheodore Ts'o sb->s_id, function, line, grp); 1006e29136f8STheodore Ts'o if (ino) 10070ff2ea7dSJoe Perches printk(KERN_CONT "inode %lu: ", ino); 1008e29136f8STheodore Ts'o if (block) 1009efbed4dcSTheodore Ts'o printk(KERN_CONT "block %llu:", 1010efbed4dcSTheodore Ts'o (unsigned long long) block); 10110ff2ea7dSJoe Perches printk(KERN_CONT "%pV\n", &vaf); 10125d1b1b3fSAneesh Kumar K.V va_end(args); 1013efbed4dcSTheodore Ts'o } 10145d1b1b3fSAneesh Kumar K.V 1015c92dc856SJan Kara if (test_opt(sb, ERRORS_CONT)) { 1016327eaf73STheodore Ts'o if (test_opt(sb, WARN_ON_ERROR)) 1017327eaf73STheodore Ts'o WARN_ON_ONCE(1); 1018e789ca0cSJan Kara EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 10192d01ddc8SJan Kara if (!bdev_read_only(sb->s_bdev)) { 10202d01ddc8SJan Kara save_error_info(sb, EFSCORRUPTED, ino, block, function, 10212d01ddc8SJan Kara line); 1022c92dc856SJan Kara schedule_work(&EXT4_SB(sb)->s_error_work); 10232d01ddc8SJan Kara } 10245d1b1b3fSAneesh Kumar K.V return; 10255d1b1b3fSAneesh Kumar K.V } 10265d1b1b3fSAneesh Kumar K.V ext4_unlock_group(sb, grp); 1027e789ca0cSJan Kara ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line); 10285d1b1b3fSAneesh Kumar K.V /* 10295d1b1b3fSAneesh Kumar K.V * We only get here in the ERRORS_RO case; relocking the group 10305d1b1b3fSAneesh Kumar K.V * may be dangerous, but nothing bad will happen since the 10315d1b1b3fSAneesh Kumar K.V * filesystem will have already been marked read/only and the 10325d1b1b3fSAneesh Kumar K.V * journal has been aborted. We return 1 as a hint to callers 10335d1b1b3fSAneesh Kumar K.V * who might what to use the return value from 103425985edcSLucas De Marchi * ext4_grp_locked_error() to distinguish between the 10355d1b1b3fSAneesh Kumar K.V * ERRORS_CONT and ERRORS_RO case, and perhaps return more 10365d1b1b3fSAneesh Kumar K.V * aggressively from the ext4 function in question, with a 10375d1b1b3fSAneesh Kumar K.V * more appropriate error code. 10385d1b1b3fSAneesh Kumar K.V */ 10395d1b1b3fSAneesh Kumar K.V ext4_lock_group(sb, grp); 10405d1b1b3fSAneesh Kumar K.V return; 10415d1b1b3fSAneesh Kumar K.V } 10425d1b1b3fSAneesh Kumar K.V 1043db79e6d1SWang Shilong void ext4_mark_group_bitmap_corrupted(struct super_block *sb, 1044db79e6d1SWang Shilong ext4_group_t group, 1045db79e6d1SWang Shilong unsigned int flags) 1046db79e6d1SWang Shilong { 1047db79e6d1SWang Shilong struct ext4_sb_info *sbi = EXT4_SB(sb); 1048db79e6d1SWang Shilong struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1049db79e6d1SWang Shilong struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); 10509af0b3d1SWang Shilong int ret; 1051db79e6d1SWang Shilong 10529af0b3d1SWang Shilong if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) { 10539af0b3d1SWang Shilong ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, 10549af0b3d1SWang Shilong &grp->bb_state); 10559af0b3d1SWang Shilong if (!ret) 1056db79e6d1SWang Shilong percpu_counter_sub(&sbi->s_freeclusters_counter, 1057db79e6d1SWang Shilong grp->bb_free); 1058db79e6d1SWang Shilong } 1059db79e6d1SWang Shilong 10609af0b3d1SWang Shilong if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) { 10619af0b3d1SWang Shilong ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, 10629af0b3d1SWang Shilong &grp->bb_state); 10639af0b3d1SWang Shilong if (!ret && gdp) { 1064db79e6d1SWang Shilong int count; 1065db79e6d1SWang Shilong 1066db79e6d1SWang Shilong count = ext4_free_inodes_count(sb, gdp); 1067db79e6d1SWang Shilong percpu_counter_sub(&sbi->s_freeinodes_counter, 1068db79e6d1SWang Shilong count); 1069db79e6d1SWang Shilong } 1070db79e6d1SWang Shilong } 1071db79e6d1SWang Shilong } 1072db79e6d1SWang Shilong 1073617ba13bSMingming Cao void ext4_update_dynamic_rev(struct super_block *sb) 1074ac27a0ecSDave Kleikamp { 1075617ba13bSMingming Cao struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1076ac27a0ecSDave Kleikamp 1077617ba13bSMingming Cao if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) 1078ac27a0ecSDave Kleikamp return; 1079ac27a0ecSDave Kleikamp 108012062dddSEric Sandeen ext4_warning(sb, 1081ac27a0ecSDave Kleikamp "updating to rev %d because of new feature flag, " 1082ac27a0ecSDave Kleikamp "running e2fsck is recommended", 1083617ba13bSMingming Cao EXT4_DYNAMIC_REV); 1084ac27a0ecSDave Kleikamp 1085617ba13bSMingming Cao es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); 1086617ba13bSMingming Cao es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); 1087617ba13bSMingming Cao es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); 1088ac27a0ecSDave Kleikamp /* leave es->s_feature_*compat flags alone */ 1089ac27a0ecSDave Kleikamp /* es->s_uuid will be set by e2fsck if empty */ 1090ac27a0ecSDave Kleikamp 1091ac27a0ecSDave Kleikamp /* 1092ac27a0ecSDave Kleikamp * The rest of the superblock fields should be zero, and if not it 1093ac27a0ecSDave Kleikamp * means they are likely already in use, so leave them alone. We 1094ac27a0ecSDave Kleikamp * can leave it up to e2fsck to clean up any inconsistencies there. 1095ac27a0ecSDave Kleikamp */ 1096ac27a0ecSDave Kleikamp } 1097ac27a0ecSDave Kleikamp 1098ac27a0ecSDave Kleikamp /* 1099ac27a0ecSDave Kleikamp * Open the external journal device 1100ac27a0ecSDave Kleikamp */ 1101b31e1552SEric Sandeen static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) 1102ac27a0ecSDave Kleikamp { 1103ac27a0ecSDave Kleikamp struct block_device *bdev; 1104ac27a0ecSDave Kleikamp 1105d4d77629STejun Heo bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); 1106ac27a0ecSDave Kleikamp if (IS_ERR(bdev)) 1107ac27a0ecSDave Kleikamp goto fail; 1108ac27a0ecSDave Kleikamp return bdev; 1109ac27a0ecSDave Kleikamp 1110ac27a0ecSDave Kleikamp fail: 1111ea3edd4dSChristoph Hellwig ext4_msg(sb, KERN_ERR, 1112ea3edd4dSChristoph Hellwig "failed to open journal device unknown-block(%u,%u) %ld", 1113ea3edd4dSChristoph Hellwig MAJOR(dev), MINOR(dev), PTR_ERR(bdev)); 1114ac27a0ecSDave Kleikamp return NULL; 1115ac27a0ecSDave Kleikamp } 1116ac27a0ecSDave Kleikamp 1117ac27a0ecSDave Kleikamp /* 1118ac27a0ecSDave Kleikamp * Release the journal device 1119ac27a0ecSDave Kleikamp */ 11204385bab1SAl Viro static void ext4_blkdev_put(struct block_device *bdev) 1121ac27a0ecSDave Kleikamp { 11224385bab1SAl Viro blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1123ac27a0ecSDave Kleikamp } 1124ac27a0ecSDave Kleikamp 11254385bab1SAl Viro static void ext4_blkdev_remove(struct ext4_sb_info *sbi) 1126ac27a0ecSDave Kleikamp { 1127ac27a0ecSDave Kleikamp struct block_device *bdev; 1128ee7ed3aaSChunguang Xu bdev = sbi->s_journal_bdev; 1129ac27a0ecSDave Kleikamp if (bdev) { 11304385bab1SAl Viro ext4_blkdev_put(bdev); 1131ee7ed3aaSChunguang Xu sbi->s_journal_bdev = NULL; 1132ac27a0ecSDave Kleikamp } 1133ac27a0ecSDave Kleikamp } 1134ac27a0ecSDave Kleikamp 1135ac27a0ecSDave Kleikamp static inline struct inode *orphan_list_entry(struct list_head *l) 1136ac27a0ecSDave Kleikamp { 1137617ba13bSMingming Cao return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; 1138ac27a0ecSDave Kleikamp } 1139ac27a0ecSDave Kleikamp 1140617ba13bSMingming Cao static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi) 1141ac27a0ecSDave Kleikamp { 1142ac27a0ecSDave Kleikamp struct list_head *l; 1143ac27a0ecSDave Kleikamp 1144b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "sb orphan head is %d", 1145ac27a0ecSDave Kleikamp le32_to_cpu(sbi->s_es->s_last_orphan)); 1146ac27a0ecSDave Kleikamp 1147ac27a0ecSDave Kleikamp printk(KERN_ERR "sb_info orphan list:\n"); 1148ac27a0ecSDave Kleikamp list_for_each(l, &sbi->s_orphan) { 1149ac27a0ecSDave Kleikamp struct inode *inode = orphan_list_entry(l); 1150ac27a0ecSDave Kleikamp printk(KERN_ERR " " 1151ac27a0ecSDave Kleikamp "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", 1152ac27a0ecSDave Kleikamp inode->i_sb->s_id, inode->i_ino, inode, 1153ac27a0ecSDave Kleikamp inode->i_mode, inode->i_nlink, 1154ac27a0ecSDave Kleikamp NEXT_ORPHAN(inode)); 1155ac27a0ecSDave Kleikamp } 1156ac27a0ecSDave Kleikamp } 1157ac27a0ecSDave Kleikamp 1158957153fcSJan Kara #ifdef CONFIG_QUOTA 1159957153fcSJan Kara static int ext4_quota_off(struct super_block *sb, int type); 1160957153fcSJan Kara 1161957153fcSJan Kara static inline void ext4_quota_off_umount(struct super_block *sb) 1162957153fcSJan Kara { 1163957153fcSJan Kara int type; 1164957153fcSJan Kara 1165957153fcSJan Kara /* Use our quota_off function to clear inode flags etc. */ 1166957153fcSJan Kara for (type = 0; type < EXT4_MAXQUOTAS; type++) 1167957153fcSJan Kara ext4_quota_off(sb, type); 1168957153fcSJan Kara } 116933458eabSTheodore Ts'o 117033458eabSTheodore Ts'o /* 117133458eabSTheodore Ts'o * This is a helper function which is used in the mount/remount 117233458eabSTheodore Ts'o * codepaths (which holds s_umount) to fetch the quota file name. 117333458eabSTheodore Ts'o */ 117433458eabSTheodore Ts'o static inline char *get_qf_name(struct super_block *sb, 117533458eabSTheodore Ts'o struct ext4_sb_info *sbi, 117633458eabSTheodore Ts'o int type) 117733458eabSTheodore Ts'o { 117833458eabSTheodore Ts'o return rcu_dereference_protected(sbi->s_qf_names[type], 117933458eabSTheodore Ts'o lockdep_is_held(&sb->s_umount)); 118033458eabSTheodore Ts'o } 1181957153fcSJan Kara #else 1182957153fcSJan Kara static inline void ext4_quota_off_umount(struct super_block *sb) 1183957153fcSJan Kara { 1184957153fcSJan Kara } 1185957153fcSJan Kara #endif 1186957153fcSJan Kara 1187617ba13bSMingming Cao static void ext4_put_super(struct super_block *sb) 1188ac27a0ecSDave Kleikamp { 1189617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 1190617ba13bSMingming Cao struct ext4_super_block *es = sbi->s_es; 11911d0c3924STheodore Ts'o struct buffer_head **group_desc; 11927c990728SSuraj Jitindar Singh struct flex_groups **flex_groups; 119397abd7d4STheodore Ts'o int aborted = 0; 1194ef2cabf7SHidehiro Kawai int i, err; 1195ac27a0ecSDave Kleikamp 1196b98535d0SYe Bin /* 1197b98535d0SYe Bin * Unregister sysfs before destroying jbd2 journal. 1198b98535d0SYe Bin * Since we could still access attr_journal_task attribute via sysfs 1199b98535d0SYe Bin * path which could have sbi->s_journal->j_task as NULL 1200b98535d0SYe Bin * Unregister sysfs before flush sbi->s_error_work. 1201b98535d0SYe Bin * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If 1202b98535d0SYe Bin * read metadata verify failed then will queue error work. 1203b98535d0SYe Bin * flush_stashed_error_work will call start_this_handle may trigger 1204b98535d0SYe Bin * BUG_ON. 1205b98535d0SYe Bin */ 1206b98535d0SYe Bin ext4_unregister_sysfs(sb); 1207b98535d0SYe Bin 12084808cb5bSZhang Yi if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount")) 12094808cb5bSZhang Yi ext4_msg(sb, KERN_INFO, "unmounting filesystem."); 12104808cb5bSZhang Yi 1211857ac889SLukas Czerner ext4_unregister_li_request(sb); 1212957153fcSJan Kara ext4_quota_off_umount(sb); 1213e0ccfd95SChristoph Hellwig 1214c92dc856SJan Kara flush_work(&sbi->s_error_work); 12152e8fa54eSJan Kara destroy_workqueue(sbi->rsv_conversion_wq); 121602f310fcSJan Kara ext4_release_orphan_info(sb); 12174c0425ffSMingming Cao 12180390131bSFrank Mayhar if (sbi->s_journal) { 121997abd7d4STheodore Ts'o aborted = is_journal_aborted(sbi->s_journal); 1220ef2cabf7SHidehiro Kawai err = jbd2_journal_destroy(sbi->s_journal); 122147b4a50bSJan Kara sbi->s_journal = NULL; 1222878520acSTheodore Ts'o if ((err < 0) && !aborted) { 122354d3adbcSTheodore Ts'o ext4_abort(sb, -err, "Couldn't clean up the journal"); 12240390131bSFrank Mayhar } 1225878520acSTheodore Ts'o } 1226d4edac31SJosef Bacik 1227d3922a77SZheng Liu ext4_es_unregister_shrinker(sbi); 12289105bb14SAl Viro del_timer_sync(&sbi->s_err_report); 1229d4edac31SJosef Bacik ext4_release_system_zone(sb); 1230d4edac31SJosef Bacik ext4_mb_release(sb); 1231d4edac31SJosef Bacik ext4_ext_release(sb); 1232d4edac31SJosef Bacik 1233bc98a42cSDavid Howells if (!sb_rdonly(sb) && !aborted) { 1234e2b911c5SDarrick J. Wong ext4_clear_feature_journal_needs_recovery(sb); 123502f310fcSJan Kara ext4_clear_feature_orphan_present(sb); 1236ac27a0ecSDave Kleikamp es->s_state = cpu_to_le16(sbi->s_mount_state); 1237ac27a0ecSDave Kleikamp } 1238bc98a42cSDavid Howells if (!sb_rdonly(sb)) 12394392fbc4SJan Kara ext4_commit_super(sb); 1240a8e25a83SArtem Bityutskiy 12411d0c3924STheodore Ts'o rcu_read_lock(); 12421d0c3924STheodore Ts'o group_desc = rcu_dereference(sbi->s_group_desc); 1243ac27a0ecSDave Kleikamp for (i = 0; i < sbi->s_gdb_count; i++) 12441d0c3924STheodore Ts'o brelse(group_desc[i]); 12451d0c3924STheodore Ts'o kvfree(group_desc); 12467c990728SSuraj Jitindar Singh flex_groups = rcu_dereference(sbi->s_flex_groups); 12477c990728SSuraj Jitindar Singh if (flex_groups) { 12487c990728SSuraj Jitindar Singh for (i = 0; i < sbi->s_flex_groups_allocated; i++) 12497c990728SSuraj Jitindar Singh kvfree(flex_groups[i]); 12507c990728SSuraj Jitindar Singh kvfree(flex_groups); 12517c990728SSuraj Jitindar Singh } 12521d0c3924STheodore Ts'o rcu_read_unlock(); 125357042651STheodore Ts'o percpu_counter_destroy(&sbi->s_freeclusters_counter); 1254ac27a0ecSDave Kleikamp percpu_counter_destroy(&sbi->s_freeinodes_counter); 1255ac27a0ecSDave Kleikamp percpu_counter_destroy(&sbi->s_dirs_counter); 125657042651STheodore Ts'o percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 1257efc61345SEric Whitney percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); 1258bbd55937SEric Biggers percpu_free_rwsem(&sbi->s_writepages_rwsem); 1259ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 1260a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 126133458eabSTheodore Ts'o kfree(get_qf_name(sb, sbi, i)); 1262ac27a0ecSDave Kleikamp #endif 1263ac27a0ecSDave Kleikamp 1264ac27a0ecSDave Kleikamp /* Debugging code just in case the in-memory inode orphan list 1265ac27a0ecSDave Kleikamp * isn't empty. The on-disk one can be non-empty if we've 1266ac27a0ecSDave Kleikamp * detected an error and taken the fs readonly, but the 1267ac27a0ecSDave Kleikamp * in-memory list had better be clean by this point. */ 1268ac27a0ecSDave Kleikamp if (!list_empty(&sbi->s_orphan)) 1269ac27a0ecSDave Kleikamp dump_orphan_list(sb, sbi); 1270837c23fbSChunguang Xu ASSERT(list_empty(&sbi->s_orphan)); 1271ac27a0ecSDave Kleikamp 127289d96a6fSTheodore Ts'o sync_blockdev(sb->s_bdev); 1273f98393a6SPeter Zijlstra invalidate_bdev(sb->s_bdev); 1274ee7ed3aaSChunguang Xu if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) { 1275ac27a0ecSDave Kleikamp /* 1276ac27a0ecSDave Kleikamp * Invalidate the journal device's buffers. We don't want them 1277ac27a0ecSDave Kleikamp * floating about in memory - the physical journal device may 1278ac27a0ecSDave Kleikamp * hotswapped, and it breaks the `ro-after' testing code. 1279ac27a0ecSDave Kleikamp */ 1280ee7ed3aaSChunguang Xu sync_blockdev(sbi->s_journal_bdev); 1281ee7ed3aaSChunguang Xu invalidate_bdev(sbi->s_journal_bdev); 1282617ba13bSMingming Cao ext4_blkdev_remove(sbi); 1283ac27a0ecSDave Kleikamp } 128450c15df6SChengguang Xu 1285dec214d0STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_inode_cache); 1286dec214d0STahsin Erdogan sbi->s_ea_inode_cache = NULL; 128750c15df6SChengguang Xu 128847387409STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 128947387409STahsin Erdogan sbi->s_ea_block_cache = NULL; 129050c15df6SChengguang Xu 1291618f0031SPavel Skripkin ext4_stop_mmpd(sbi); 1292618f0031SPavel Skripkin 12939060dd2cSEric Sandeen brelse(sbi->s_sbh); 1294ac27a0ecSDave Kleikamp sb->s_fs_info = NULL; 12953197ebdbSTheodore Ts'o /* 12963197ebdbSTheodore Ts'o * Now that we are completely done shutting down the 12973197ebdbSTheodore Ts'o * superblock, we need to actually destroy the kobject. 12983197ebdbSTheodore Ts'o */ 12993197ebdbSTheodore Ts'o kobject_put(&sbi->s_kobj); 13003197ebdbSTheodore Ts'o wait_for_completion(&sbi->s_kobj_unregister); 13010441984aSDarrick J. Wong if (sbi->s_chksum_driver) 13020441984aSDarrick J. Wong crypto_free_shash(sbi->s_chksum_driver); 1303705895b6SPekka Enberg kfree(sbi->s_blockgroup_lock); 13048012b866SShiyang Ruan fs_put_dax(sbi->s_daxdev, NULL); 1305ac4acb1fSEric Biggers fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); 13065298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 1307f8f4acb6SDaniel Rosenberg utf8_unload(sb->s_encoding); 1308c83ad55eSGabriel Krisman Bertazi #endif 1309ac27a0ecSDave Kleikamp kfree(sbi); 1310ac27a0ecSDave Kleikamp } 1311ac27a0ecSDave Kleikamp 1312e18b890bSChristoph Lameter static struct kmem_cache *ext4_inode_cachep; 1313ac27a0ecSDave Kleikamp 1314ac27a0ecSDave Kleikamp /* 1315ac27a0ecSDave Kleikamp * Called inside transaction, so use GFP_NOFS 1316ac27a0ecSDave Kleikamp */ 1317617ba13bSMingming Cao static struct inode *ext4_alloc_inode(struct super_block *sb) 1318ac27a0ecSDave Kleikamp { 1319617ba13bSMingming Cao struct ext4_inode_info *ei; 1320ac27a0ecSDave Kleikamp 1321fd60b288SMuchun Song ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS); 1322ac27a0ecSDave Kleikamp if (!ei) 1323ac27a0ecSDave Kleikamp return NULL; 13240b8e58a1SAndreas Dilger 1325ee73f9a5SJeff Layton inode_set_iversion(&ei->vfs_inode, 1); 1326202ee5dfSTheodore Ts'o spin_lock_init(&ei->i_raw_lock); 1327c9de560dSAlex Tomas INIT_LIST_HEAD(&ei->i_prealloc_list); 132827bc446eSbrookxu atomic_set(&ei->i_prealloc_active, 0); 1329c9de560dSAlex Tomas spin_lock_init(&ei->i_prealloc_lock); 13309a26b661SZheng Liu ext4_es_init_tree(&ei->i_es_tree); 13319a26b661SZheng Liu rwlock_init(&ei->i_es_lock); 1332edaa53caSZheng Liu INIT_LIST_HEAD(&ei->i_es_list); 1333eb68d0e2SZheng Liu ei->i_es_all_nr = 0; 1334edaa53caSZheng Liu ei->i_es_shk_nr = 0; 1335dd475925SJan Kara ei->i_es_shrink_lblk = 0; 1336d2a17637SMingming Cao ei->i_reserved_data_blocks = 0; 1337d2a17637SMingming Cao spin_lock_init(&(ei->i_block_reservation_lock)); 13381dc0aa46SEric Whitney ext4_init_pending_tree(&ei->i_pending_tree); 1339a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 1340a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 134196c7e0d9SJan Kara memset(&ei->i_dquot, 0, sizeof(ei->i_dquot)); 1342a9e7f447SDmitry Monakhov #endif 13438aefcd55STheodore Ts'o ei->jinode = NULL; 13442e8fa54eSJan Kara INIT_LIST_HEAD(&ei->i_rsv_conversion_list); 1345744692dcSJiaying Zhang spin_lock_init(&ei->i_completed_io_lock); 1346b436b9beSJan Kara ei->i_sync_tid = 0; 1347b436b9beSJan Kara ei->i_datasync_tid = 0; 1348e27f41e1SDmitry Monakhov atomic_set(&ei->i_unwritten, 0); 13492e8fa54eSJan Kara INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); 1350aa75f4d3SHarshad Shirwadkar ext4_fc_init_inode(&ei->vfs_inode); 1351aa75f4d3SHarshad Shirwadkar mutex_init(&ei->i_fc_lock); 1352ac27a0ecSDave Kleikamp return &ei->vfs_inode; 1353ac27a0ecSDave Kleikamp } 1354ac27a0ecSDave Kleikamp 13557ff9c073STheodore Ts'o static int ext4_drop_inode(struct inode *inode) 13567ff9c073STheodore Ts'o { 13577ff9c073STheodore Ts'o int drop = generic_drop_inode(inode); 13587ff9c073STheodore Ts'o 135929b3692eSEric Biggers if (!drop) 136029b3692eSEric Biggers drop = fscrypt_drop_inode(inode); 136129b3692eSEric Biggers 13627ff9c073STheodore Ts'o trace_ext4_drop_inode(inode, drop); 13637ff9c073STheodore Ts'o return drop; 13647ff9c073STheodore Ts'o } 13657ff9c073STheodore Ts'o 136694053139SAl Viro static void ext4_free_in_core_inode(struct inode *inode) 1367fa0d7e3dSNick Piggin { 13682c58d548SEric Biggers fscrypt_free_inode(inode); 1369aa75f4d3SHarshad Shirwadkar if (!list_empty(&(EXT4_I(inode)->i_fc_list))) { 1370aa75f4d3SHarshad Shirwadkar pr_warn("%s: inode %ld still in fc list", 1371aa75f4d3SHarshad Shirwadkar __func__, inode->i_ino); 1372aa75f4d3SHarshad Shirwadkar } 1373fa0d7e3dSNick Piggin kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); 1374fa0d7e3dSNick Piggin } 1375fa0d7e3dSNick Piggin 1376617ba13bSMingming Cao static void ext4_destroy_inode(struct inode *inode) 1377ac27a0ecSDave Kleikamp { 13789f7dd93dSVasily Averin if (!list_empty(&(EXT4_I(inode)->i_orphan))) { 1379b31e1552SEric Sandeen ext4_msg(inode->i_sb, KERN_ERR, 1380b31e1552SEric Sandeen "Inode %lu (%p): orphan list check failed!", 1381b31e1552SEric Sandeen inode->i_ino, EXT4_I(inode)); 13829f7dd93dSVasily Averin print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, 13839f7dd93dSVasily Averin EXT4_I(inode), sizeof(struct ext4_inode_info), 13849f7dd93dSVasily Averin true); 13859f7dd93dSVasily Averin dump_stack(); 13869f7dd93dSVasily Averin } 13876fed8395SJeffle Xu 13886fed8395SJeffle Xu if (EXT4_I(inode)->i_reserved_data_blocks) 13896fed8395SJeffle Xu ext4_msg(inode->i_sb, KERN_ERR, 13906fed8395SJeffle Xu "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!", 13916fed8395SJeffle Xu inode->i_ino, EXT4_I(inode), 13926fed8395SJeffle Xu EXT4_I(inode)->i_reserved_data_blocks); 1393ac27a0ecSDave Kleikamp } 1394ac27a0ecSDave Kleikamp 139551cc5068SAlexey Dobriyan static void init_once(void *foo) 1396ac27a0ecSDave Kleikamp { 1397c30365b9SYu Zhe struct ext4_inode_info *ei = foo; 1398ac27a0ecSDave Kleikamp 1399ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 1400ac27a0ecSDave Kleikamp init_rwsem(&ei->xattr_sem); 14010e855ac8SAneesh Kumar K.V init_rwsem(&ei->i_data_sem); 1402ac27a0ecSDave Kleikamp inode_init_once(&ei->vfs_inode); 1403aa75f4d3SHarshad Shirwadkar ext4_fc_init_inode(&ei->vfs_inode); 1404ac27a0ecSDave Kleikamp } 1405ac27a0ecSDave Kleikamp 1406e67bc2b3SFabian Frederick static int __init init_inodecache(void) 1407ac27a0ecSDave Kleikamp { 1408f8dd7c70SDavid Windsor ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache", 1409f8dd7c70SDavid Windsor sizeof(struct ext4_inode_info), 0, 1410f8dd7c70SDavid Windsor (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| 1411f8dd7c70SDavid Windsor SLAB_ACCOUNT), 1412f8dd7c70SDavid Windsor offsetof(struct ext4_inode_info, i_data), 1413f8dd7c70SDavid Windsor sizeof_field(struct ext4_inode_info, i_data), 141420c2df83SPaul Mundt init_once); 1415617ba13bSMingming Cao if (ext4_inode_cachep == NULL) 1416ac27a0ecSDave Kleikamp return -ENOMEM; 1417ac27a0ecSDave Kleikamp return 0; 1418ac27a0ecSDave Kleikamp } 1419ac27a0ecSDave Kleikamp 1420ac27a0ecSDave Kleikamp static void destroy_inodecache(void) 1421ac27a0ecSDave Kleikamp { 14228c0a8537SKirill A. Shutemov /* 14238c0a8537SKirill A. Shutemov * Make sure all delayed rcu free inodes are flushed before we 14248c0a8537SKirill A. Shutemov * destroy cache. 14258c0a8537SKirill A. Shutemov */ 14268c0a8537SKirill A. Shutemov rcu_barrier(); 1427617ba13bSMingming Cao kmem_cache_destroy(ext4_inode_cachep); 1428ac27a0ecSDave Kleikamp } 1429ac27a0ecSDave Kleikamp 14300930fcc1SAl Viro void ext4_clear_inode(struct inode *inode) 1431ac27a0ecSDave Kleikamp { 1432aa75f4d3SHarshad Shirwadkar ext4_fc_del(inode); 14330930fcc1SAl Viro invalidate_inode_buffers(inode); 1434dbd5768fSJan Kara clear_inode(inode); 143527bc446eSbrookxu ext4_discard_preallocations(inode, 0); 143651865fdaSZheng Liu ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); 1437f4c2d372SJan Kara dquot_drop(inode); 14388aefcd55STheodore Ts'o if (EXT4_I(inode)->jinode) { 14398aefcd55STheodore Ts'o jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), 14408aefcd55STheodore Ts'o EXT4_I(inode)->jinode); 14418aefcd55STheodore Ts'o jbd2_free_inode(EXT4_I(inode)->jinode); 14428aefcd55STheodore Ts'o EXT4_I(inode)->jinode = NULL; 14438aefcd55STheodore Ts'o } 14443d204e24SEric Biggers fscrypt_put_encryption_info(inode); 1445c93d8f88SEric Biggers fsverity_cleanup_inode(inode); 1446ac27a0ecSDave Kleikamp } 1447ac27a0ecSDave Kleikamp 14481b961ac0SChristoph Hellwig static struct inode *ext4_nfs_get_inode(struct super_block *sb, 14491b961ac0SChristoph Hellwig u64 ino, u32 generation) 1450ac27a0ecSDave Kleikamp { 1451ac27a0ecSDave Kleikamp struct inode *inode; 1452ac27a0ecSDave Kleikamp 14538a363970STheodore Ts'o /* 1454ac27a0ecSDave Kleikamp * Currently we don't know the generation for parent directory, so 1455ac27a0ecSDave Kleikamp * a generation of 0 means "accept any" 1456ac27a0ecSDave Kleikamp */ 14578a363970STheodore Ts'o inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE); 14581d1fe1eeSDavid Howells if (IS_ERR(inode)) 14591d1fe1eeSDavid Howells return ERR_CAST(inode); 14601d1fe1eeSDavid Howells if (generation && inode->i_generation != generation) { 1461ac27a0ecSDave Kleikamp iput(inode); 1462ac27a0ecSDave Kleikamp return ERR_PTR(-ESTALE); 1463ac27a0ecSDave Kleikamp } 14641b961ac0SChristoph Hellwig 14651b961ac0SChristoph Hellwig return inode; 1466ac27a0ecSDave Kleikamp } 14671b961ac0SChristoph Hellwig 14681b961ac0SChristoph Hellwig static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid, 14691b961ac0SChristoph Hellwig int fh_len, int fh_type) 14701b961ac0SChristoph Hellwig { 14711b961ac0SChristoph Hellwig return generic_fh_to_dentry(sb, fid, fh_len, fh_type, 14721b961ac0SChristoph Hellwig ext4_nfs_get_inode); 14731b961ac0SChristoph Hellwig } 14741b961ac0SChristoph Hellwig 14751b961ac0SChristoph Hellwig static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid, 14761b961ac0SChristoph Hellwig int fh_len, int fh_type) 14771b961ac0SChristoph Hellwig { 14781b961ac0SChristoph Hellwig return generic_fh_to_parent(sb, fid, fh_len, fh_type, 14791b961ac0SChristoph Hellwig ext4_nfs_get_inode); 1480ac27a0ecSDave Kleikamp } 1481ac27a0ecSDave Kleikamp 1482fde87268STheodore Ts'o static int ext4_nfs_commit_metadata(struct inode *inode) 1483fde87268STheodore Ts'o { 1484fde87268STheodore Ts'o struct writeback_control wbc = { 1485fde87268STheodore Ts'o .sync_mode = WB_SYNC_ALL 1486fde87268STheodore Ts'o }; 1487fde87268STheodore Ts'o 1488fde87268STheodore Ts'o trace_ext4_nfs_commit_metadata(inode); 1489fde87268STheodore Ts'o return ext4_write_inode(inode, &wbc); 1490fde87268STheodore Ts'o } 1491fde87268STheodore Ts'o 1492ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 1493d6006186SEric Biggers static const char * const quotatypes[] = INITQFNAMES; 1494689c958cSLi Xi #define QTYPE2NAME(t) (quotatypes[t]) 1495ac27a0ecSDave Kleikamp 1496617ba13bSMingming Cao static int ext4_write_dquot(struct dquot *dquot); 1497617ba13bSMingming Cao static int ext4_acquire_dquot(struct dquot *dquot); 1498617ba13bSMingming Cao static int ext4_release_dquot(struct dquot *dquot); 1499617ba13bSMingming Cao static int ext4_mark_dquot_dirty(struct dquot *dquot); 1500617ba13bSMingming Cao static int ext4_write_info(struct super_block *sb, int type); 15016f28e087SJan Kara static int ext4_quota_on(struct super_block *sb, int type, int format_id, 15028c54ca9cSAl Viro const struct path *path); 1503617ba13bSMingming Cao static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 1504ac27a0ecSDave Kleikamp size_t len, loff_t off); 1505617ba13bSMingming Cao static ssize_t ext4_quota_write(struct super_block *sb, int type, 1506ac27a0ecSDave Kleikamp const char *data, size_t len, loff_t off); 15077c319d32SAditya Kali static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 15087c319d32SAditya Kali unsigned int flags); 1509ac27a0ecSDave Kleikamp 151096c7e0d9SJan Kara static struct dquot **ext4_get_dquots(struct inode *inode) 151196c7e0d9SJan Kara { 151296c7e0d9SJan Kara return EXT4_I(inode)->i_dquot; 151396c7e0d9SJan Kara } 151496c7e0d9SJan Kara 151561e225dcSAlexey Dobriyan static const struct dquot_operations ext4_quota_operations = { 151660e58e0fSMingming Cao .get_reserved_space = ext4_get_reserved_space, 1517617ba13bSMingming Cao .write_dquot = ext4_write_dquot, 1518617ba13bSMingming Cao .acquire_dquot = ext4_acquire_dquot, 1519617ba13bSMingming Cao .release_dquot = ext4_release_dquot, 1520617ba13bSMingming Cao .mark_dirty = ext4_mark_dquot_dirty, 1521a5b5ee32SJan Kara .write_info = ext4_write_info, 1522a5b5ee32SJan Kara .alloc_dquot = dquot_alloc, 1523a5b5ee32SJan Kara .destroy_dquot = dquot_destroy, 1524040cb378SLi Xi .get_projid = ext4_get_projid, 15257a9ca53aSTahsin Erdogan .get_inode_usage = ext4_get_inode_usage, 1526ebc11f7bSChengguang Xu .get_next_id = dquot_get_next_id, 1527ac27a0ecSDave Kleikamp }; 1528ac27a0ecSDave Kleikamp 15290d54b217SAlexey Dobriyan static const struct quotactl_ops ext4_qctl_operations = { 1530617ba13bSMingming Cao .quota_on = ext4_quota_on, 1531ca0e05e4SDmitry Monakhov .quota_off = ext4_quota_off, 1532287a8095SChristoph Hellwig .quota_sync = dquot_quota_sync, 15330a240339SJan Kara .get_state = dquot_get_state, 1534287a8095SChristoph Hellwig .set_info = dquot_set_dqinfo, 1535287a8095SChristoph Hellwig .get_dqblk = dquot_get_dqblk, 15366332b9b5SEric Sandeen .set_dqblk = dquot_set_dqblk, 15376332b9b5SEric Sandeen .get_nextdqblk = dquot_get_next_dqblk, 1538ac27a0ecSDave Kleikamp }; 1539ac27a0ecSDave Kleikamp #endif 1540ac27a0ecSDave Kleikamp 1541ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations ext4_sops = { 1542617ba13bSMingming Cao .alloc_inode = ext4_alloc_inode, 154394053139SAl Viro .free_inode = ext4_free_in_core_inode, 1544617ba13bSMingming Cao .destroy_inode = ext4_destroy_inode, 1545617ba13bSMingming Cao .write_inode = ext4_write_inode, 1546617ba13bSMingming Cao .dirty_inode = ext4_dirty_inode, 15477ff9c073STheodore Ts'o .drop_inode = ext4_drop_inode, 15480930fcc1SAl Viro .evict_inode = ext4_evict_inode, 1549617ba13bSMingming Cao .put_super = ext4_put_super, 1550617ba13bSMingming Cao .sync_fs = ext4_sync_fs, 1551c4be0c1dSTakashi Sato .freeze_fs = ext4_freeze, 1552c4be0c1dSTakashi Sato .unfreeze_fs = ext4_unfreeze, 1553617ba13bSMingming Cao .statfs = ext4_statfs, 1554617ba13bSMingming Cao .show_options = ext4_show_options, 1555ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 1556617ba13bSMingming Cao .quota_read = ext4_quota_read, 1557617ba13bSMingming Cao .quota_write = ext4_quota_write, 155896c7e0d9SJan Kara .get_dquots = ext4_get_dquots, 1559ac27a0ecSDave Kleikamp #endif 1560ac27a0ecSDave Kleikamp }; 1561ac27a0ecSDave Kleikamp 156239655164SChristoph Hellwig static const struct export_operations ext4_export_ops = { 15631b961ac0SChristoph Hellwig .fh_to_dentry = ext4_fh_to_dentry, 15641b961ac0SChristoph Hellwig .fh_to_parent = ext4_fh_to_parent, 1565617ba13bSMingming Cao .get_parent = ext4_get_parent, 1566fde87268STheodore Ts'o .commit_metadata = ext4_nfs_commit_metadata, 1567ac27a0ecSDave Kleikamp }; 1568ac27a0ecSDave Kleikamp 1569ac27a0ecSDave Kleikamp enum { 1570ac27a0ecSDave Kleikamp Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, 1571ba2e524dSLukas Czerner Opt_resgid, Opt_resuid, Opt_sb, 157272578c33STheodore Ts'o Opt_nouid32, Opt_debug, Opt_removed, 15732d544ec9SYang Xu Opt_user_xattr, Opt_acl, 157472578c33STheodore Ts'o Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, 1575ad4eec61SEric Sandeen Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev, 1576ad4eec61SEric Sandeen Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, 1577ac27a0ecSDave Kleikamp Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 15786ddb2447STheodore Ts'o Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, 15794f74d15fSEric Biggers Opt_inlinecrypt, 1580ba2e524dSLukas Czerner Opt_usrjquota, Opt_grpjquota, Opt_quota, 1581ee4a3fcdSTheodore Ts'o Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, 15821ff20307SJeff Layton Opt_usrquota, Opt_grpquota, Opt_prjquota, 15839cb20f94SIra Weiny Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never, 1584327eaf73STheodore Ts'o Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error, 15854437992bSLukas Czerner Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize, 15861449032bSTheodore Ts'o Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, 15875328e635SEric Sandeen Opt_inode_readahead_blks, Opt_journal_ioprio, 1588744692dcSJiaying Zhang Opt_dioread_nolock, Opt_dioread_lock, 1589fc6cb1cdSTheodore Ts'o Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, 1590cdb7ee4cSTahsin Erdogan Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, 159121175ca4SHarshad Shirwadkar Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan, 1592e5a185c2SLukas Czerner Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type, 15938016e29fSHarshad Shirwadkar #ifdef CONFIG_EXT4_DEBUG 159499c880deSHarshad Shirwadkar Opt_fc_debug_max_replay, Opt_fc_debug_force 15958016e29fSHarshad Shirwadkar #endif 1596ac27a0ecSDave Kleikamp }; 1597ac27a0ecSDave Kleikamp 1598e5a185c2SLukas Czerner static const struct constant_table ext4_param_errors[] = { 1599ba2e524dSLukas Czerner {"continue", EXT4_MOUNT_ERRORS_CONT}, 1600ba2e524dSLukas Czerner {"panic", EXT4_MOUNT_ERRORS_PANIC}, 1601ba2e524dSLukas Czerner {"remount-ro", EXT4_MOUNT_ERRORS_RO}, 1602e5a185c2SLukas Czerner {} 1603e5a185c2SLukas Czerner }; 1604e5a185c2SLukas Czerner 1605e5a185c2SLukas Czerner static const struct constant_table ext4_param_data[] = { 1606ba2e524dSLukas Czerner {"journal", EXT4_MOUNT_JOURNAL_DATA}, 1607ba2e524dSLukas Czerner {"ordered", EXT4_MOUNT_ORDERED_DATA}, 1608ba2e524dSLukas Czerner {"writeback", EXT4_MOUNT_WRITEBACK_DATA}, 1609e5a185c2SLukas Czerner {} 1610e5a185c2SLukas Czerner }; 1611e5a185c2SLukas Czerner 1612e5a185c2SLukas Czerner static const struct constant_table ext4_param_data_err[] = { 1613e5a185c2SLukas Czerner {"abort", Opt_data_err_abort}, 1614e5a185c2SLukas Czerner {"ignore", Opt_data_err_ignore}, 1615e5a185c2SLukas Czerner {} 1616e5a185c2SLukas Czerner }; 1617e5a185c2SLukas Czerner 1618e5a185c2SLukas Czerner static const struct constant_table ext4_param_jqfmt[] = { 1619ba2e524dSLukas Czerner {"vfsold", QFMT_VFS_OLD}, 1620ba2e524dSLukas Czerner {"vfsv0", QFMT_VFS_V0}, 1621ba2e524dSLukas Czerner {"vfsv1", QFMT_VFS_V1}, 1622e5a185c2SLukas Czerner {} 1623e5a185c2SLukas Czerner }; 1624e5a185c2SLukas Czerner 1625e5a185c2SLukas Czerner static const struct constant_table ext4_param_dax[] = { 1626e5a185c2SLukas Czerner {"always", Opt_dax_always}, 1627e5a185c2SLukas Czerner {"inode", Opt_dax_inode}, 1628e5a185c2SLukas Czerner {"never", Opt_dax_never}, 1629e5a185c2SLukas Czerner {} 1630e5a185c2SLukas Czerner }; 1631e5a185c2SLukas Czerner 1632e5a185c2SLukas Czerner /* String parameter that allows empty argument */ 1633e5a185c2SLukas Czerner #define fsparam_string_empty(NAME, OPT) \ 1634e5a185c2SLukas Czerner __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL) 1635e5a185c2SLukas Czerner 1636e5a185c2SLukas Czerner /* 1637e5a185c2SLukas Czerner * Mount option specification 1638e5a185c2SLukas Czerner * We don't use fsparam_flag_no because of the way we set the 1639e5a185c2SLukas Czerner * options and the way we show them in _ext4_show_options(). To 1640e5a185c2SLukas Czerner * keep the changes to a minimum, let's keep the negative options 1641e5a185c2SLukas Czerner * separate for now. 1642e5a185c2SLukas Czerner */ 1643e5a185c2SLukas Czerner static const struct fs_parameter_spec ext4_param_specs[] = { 1644e5a185c2SLukas Czerner fsparam_flag ("bsddf", Opt_bsd_df), 1645e5a185c2SLukas Czerner fsparam_flag ("minixdf", Opt_minix_df), 1646e5a185c2SLukas Czerner fsparam_flag ("grpid", Opt_grpid), 1647e5a185c2SLukas Czerner fsparam_flag ("bsdgroups", Opt_grpid), 1648e5a185c2SLukas Czerner fsparam_flag ("nogrpid", Opt_nogrpid), 1649e5a185c2SLukas Czerner fsparam_flag ("sysvgroups", Opt_nogrpid), 1650e5a185c2SLukas Czerner fsparam_u32 ("resgid", Opt_resgid), 1651e5a185c2SLukas Czerner fsparam_u32 ("resuid", Opt_resuid), 1652e5a185c2SLukas Czerner fsparam_u32 ("sb", Opt_sb), 1653e5a185c2SLukas Czerner fsparam_enum ("errors", Opt_errors, ext4_param_errors), 1654e5a185c2SLukas Czerner fsparam_flag ("nouid32", Opt_nouid32), 1655e5a185c2SLukas Czerner fsparam_flag ("debug", Opt_debug), 1656e5a185c2SLukas Czerner fsparam_flag ("oldalloc", Opt_removed), 1657e5a185c2SLukas Czerner fsparam_flag ("orlov", Opt_removed), 1658e5a185c2SLukas Czerner fsparam_flag ("user_xattr", Opt_user_xattr), 1659e5a185c2SLukas Czerner fsparam_flag ("acl", Opt_acl), 1660e5a185c2SLukas Czerner fsparam_flag ("norecovery", Opt_noload), 1661e5a185c2SLukas Czerner fsparam_flag ("noload", Opt_noload), 1662e5a185c2SLukas Czerner fsparam_flag ("bh", Opt_removed), 1663e5a185c2SLukas Czerner fsparam_flag ("nobh", Opt_removed), 1664e5a185c2SLukas Czerner fsparam_u32 ("commit", Opt_commit), 1665e5a185c2SLukas Czerner fsparam_u32 ("min_batch_time", Opt_min_batch_time), 1666e5a185c2SLukas Czerner fsparam_u32 ("max_batch_time", Opt_max_batch_time), 1667e5a185c2SLukas Czerner fsparam_u32 ("journal_dev", Opt_journal_dev), 1668e5a185c2SLukas Czerner fsparam_bdev ("journal_path", Opt_journal_path), 1669e5a185c2SLukas Czerner fsparam_flag ("journal_checksum", Opt_journal_checksum), 1670e5a185c2SLukas Czerner fsparam_flag ("nojournal_checksum", Opt_nojournal_checksum), 1671e5a185c2SLukas Czerner fsparam_flag ("journal_async_commit",Opt_journal_async_commit), 1672e5a185c2SLukas Czerner fsparam_flag ("abort", Opt_abort), 1673e5a185c2SLukas Czerner fsparam_enum ("data", Opt_data, ext4_param_data), 1674e5a185c2SLukas Czerner fsparam_enum ("data_err", Opt_data_err, 1675e5a185c2SLukas Czerner ext4_param_data_err), 1676e5a185c2SLukas Czerner fsparam_string_empty 1677e5a185c2SLukas Czerner ("usrjquota", Opt_usrjquota), 1678e5a185c2SLukas Czerner fsparam_string_empty 1679e5a185c2SLukas Czerner ("grpjquota", Opt_grpjquota), 1680e5a185c2SLukas Czerner fsparam_enum ("jqfmt", Opt_jqfmt, ext4_param_jqfmt), 1681e5a185c2SLukas Czerner fsparam_flag ("grpquota", Opt_grpquota), 1682e5a185c2SLukas Czerner fsparam_flag ("quota", Opt_quota), 1683e5a185c2SLukas Czerner fsparam_flag ("noquota", Opt_noquota), 1684e5a185c2SLukas Czerner fsparam_flag ("usrquota", Opt_usrquota), 1685e5a185c2SLukas Czerner fsparam_flag ("prjquota", Opt_prjquota), 1686e5a185c2SLukas Czerner fsparam_flag ("barrier", Opt_barrier), 1687e5a185c2SLukas Czerner fsparam_u32 ("barrier", Opt_barrier), 1688e5a185c2SLukas Czerner fsparam_flag ("nobarrier", Opt_nobarrier), 16891ff20307SJeff Layton fsparam_flag ("i_version", Opt_removed), 1690e5a185c2SLukas Czerner fsparam_flag ("dax", Opt_dax), 1691e5a185c2SLukas Czerner fsparam_enum ("dax", Opt_dax_type, ext4_param_dax), 1692e5a185c2SLukas Czerner fsparam_u32 ("stripe", Opt_stripe), 1693e5a185c2SLukas Czerner fsparam_flag ("delalloc", Opt_delalloc), 1694e5a185c2SLukas Czerner fsparam_flag ("nodelalloc", Opt_nodelalloc), 1695e5a185c2SLukas Czerner fsparam_flag ("warn_on_error", Opt_warn_on_error), 1696e5a185c2SLukas Czerner fsparam_flag ("nowarn_on_error", Opt_nowarn_on_error), 1697e5a185c2SLukas Czerner fsparam_u32 ("debug_want_extra_isize", 1698e5a185c2SLukas Czerner Opt_debug_want_extra_isize), 1699e5a185c2SLukas Czerner fsparam_flag ("mblk_io_submit", Opt_removed), 1700e5a185c2SLukas Czerner fsparam_flag ("nomblk_io_submit", Opt_removed), 1701e5a185c2SLukas Czerner fsparam_flag ("block_validity", Opt_block_validity), 1702e5a185c2SLukas Czerner fsparam_flag ("noblock_validity", Opt_noblock_validity), 1703e5a185c2SLukas Czerner fsparam_u32 ("inode_readahead_blks", 1704e5a185c2SLukas Czerner Opt_inode_readahead_blks), 1705e5a185c2SLukas Czerner fsparam_u32 ("journal_ioprio", Opt_journal_ioprio), 1706e5a185c2SLukas Czerner fsparam_u32 ("auto_da_alloc", Opt_auto_da_alloc), 1707e5a185c2SLukas Czerner fsparam_flag ("auto_da_alloc", Opt_auto_da_alloc), 1708e5a185c2SLukas Czerner fsparam_flag ("noauto_da_alloc", Opt_noauto_da_alloc), 1709e5a185c2SLukas Czerner fsparam_flag ("dioread_nolock", Opt_dioread_nolock), 1710e5a185c2SLukas Czerner fsparam_flag ("nodioread_nolock", Opt_dioread_lock), 1711e5a185c2SLukas Czerner fsparam_flag ("dioread_lock", Opt_dioread_lock), 1712e5a185c2SLukas Czerner fsparam_flag ("discard", Opt_discard), 1713e5a185c2SLukas Czerner fsparam_flag ("nodiscard", Opt_nodiscard), 1714e5a185c2SLukas Czerner fsparam_u32 ("init_itable", Opt_init_itable), 1715e5a185c2SLukas Czerner fsparam_flag ("init_itable", Opt_init_itable), 1716e5a185c2SLukas Czerner fsparam_flag ("noinit_itable", Opt_noinit_itable), 1717e5a185c2SLukas Czerner #ifdef CONFIG_EXT4_DEBUG 1718e5a185c2SLukas Czerner fsparam_flag ("fc_debug_force", Opt_fc_debug_force), 1719e5a185c2SLukas Czerner fsparam_u32 ("fc_debug_max_replay", Opt_fc_debug_max_replay), 1720e5a185c2SLukas Czerner #endif 1721e5a185c2SLukas Czerner fsparam_u32 ("max_dir_size_kb", Opt_max_dir_size_kb), 1722e5a185c2SLukas Czerner fsparam_flag ("test_dummy_encryption", 1723e5a185c2SLukas Czerner Opt_test_dummy_encryption), 1724e5a185c2SLukas Czerner fsparam_string ("test_dummy_encryption", 1725e5a185c2SLukas Czerner Opt_test_dummy_encryption), 1726e5a185c2SLukas Czerner fsparam_flag ("inlinecrypt", Opt_inlinecrypt), 1727e5a185c2SLukas Czerner fsparam_flag ("nombcache", Opt_nombcache), 1728e5a185c2SLukas Czerner fsparam_flag ("no_mbcache", Opt_nombcache), /* for backward compatibility */ 1729e5a185c2SLukas Czerner fsparam_flag ("prefetch_block_bitmaps", 1730e5a185c2SLukas Czerner Opt_removed), 1731e5a185c2SLukas Czerner fsparam_flag ("no_prefetch_block_bitmaps", 1732e5a185c2SLukas Czerner Opt_no_prefetch_block_bitmaps), 1733e5a185c2SLukas Czerner fsparam_s32 ("mb_optimize_scan", Opt_mb_optimize_scan), 1734e5a185c2SLukas Czerner fsparam_string ("check", Opt_removed), /* mount option from ext2/3 */ 1735e5a185c2SLukas Czerner fsparam_flag ("nocheck", Opt_removed), /* mount option from ext2/3 */ 1736e5a185c2SLukas Czerner fsparam_flag ("reservation", Opt_removed), /* mount option from ext2/3 */ 1737e5a185c2SLukas Czerner fsparam_flag ("noreservation", Opt_removed), /* mount option from ext2/3 */ 1738e5a185c2SLukas Czerner fsparam_u32 ("journal", Opt_removed), /* mount option from ext2/3 */ 1739e5a185c2SLukas Czerner {} 1740e5a185c2SLukas Czerner }; 1741e5a185c2SLukas Czerner 1742b3881f74STheodore Ts'o #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) 1743196e402aSHarshad Shirwadkar 174426092bf5STheodore Ts'o #define MOPT_SET 0x0001 174526092bf5STheodore Ts'o #define MOPT_CLEAR 0x0002 174626092bf5STheodore Ts'o #define MOPT_NOSUPPORT 0x0004 174726092bf5STheodore Ts'o #define MOPT_EXPLICIT 0x0008 174826092bf5STheodore Ts'o #ifdef CONFIG_QUOTA 174926092bf5STheodore Ts'o #define MOPT_Q 0 1750ba2e524dSLukas Czerner #define MOPT_QFMT 0x0010 175126092bf5STheodore Ts'o #else 175226092bf5STheodore Ts'o #define MOPT_Q MOPT_NOSUPPORT 175326092bf5STheodore Ts'o #define MOPT_QFMT MOPT_NOSUPPORT 175426092bf5STheodore Ts'o #endif 1755ba2e524dSLukas Czerner #define MOPT_NO_EXT2 0x0020 1756ba2e524dSLukas Czerner #define MOPT_NO_EXT3 0x0040 17578dc0aa8cSTheodore Ts'o #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) 1758ba2e524dSLukas Czerner #define MOPT_SKIP 0x0080 1759ba2e524dSLukas Czerner #define MOPT_2 0x0100 176026092bf5STheodore Ts'o 176126092bf5STheodore Ts'o static const struct mount_opts { 176226092bf5STheodore Ts'o int token; 176326092bf5STheodore Ts'o int mount_opt; 176426092bf5STheodore Ts'o int flags; 176526092bf5STheodore Ts'o } ext4_mount_opts[] = { 176626092bf5STheodore Ts'o {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET}, 176726092bf5STheodore Ts'o {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR}, 176826092bf5STheodore Ts'o {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET}, 176926092bf5STheodore Ts'o {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR}, 177026092bf5STheodore Ts'o {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET}, 177126092bf5STheodore Ts'o {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR}, 17728dc0aa8cSTheodore Ts'o {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, 17738dc0aa8cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_SET}, 17748dc0aa8cSTheodore Ts'o {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, 17758dc0aa8cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_CLEAR}, 177626092bf5STheodore Ts'o {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, 177726092bf5STheodore Ts'o {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, 17788dc0aa8cSTheodore Ts'o {Opt_delalloc, EXT4_MOUNT_DELALLOC, 17798dc0aa8cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 17808dc0aa8cSTheodore Ts'o {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, 178159d9fa5cSTheodore Ts'o MOPT_EXT4_ONLY | MOPT_CLEAR}, 1782327eaf73STheodore Ts'o {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET}, 1783327eaf73STheodore Ts'o {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR}, 1784cb8435dcSEric Biggers {Opt_commit, 0, MOPT_NO_EXT2}, 1785c6d3d56dSDarrick J. Wong {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1786c6d3d56dSDarrick J. Wong MOPT_EXT4_ONLY | MOPT_CLEAR}, 17878dc0aa8cSTheodore Ts'o {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 17881e381f60SDmitry Monakhov MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 178926092bf5STheodore Ts'o {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | 17908dc0aa8cSTheodore Ts'o EXT4_MOUNT_JOURNAL_CHECKSUM), 17911e381f60SDmitry Monakhov MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 17928dc0aa8cSTheodore Ts'o {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, 1793ba2e524dSLukas Czerner {Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2}, 179426092bf5STheodore Ts'o {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, 179526092bf5STheodore Ts'o {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, 179626092bf5STheodore Ts'o {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, 179726092bf5STheodore Ts'o {Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, 179826092bf5STheodore Ts'o {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, 1799ba2e524dSLukas Czerner {Opt_dax_type, 0, MOPT_EXT4_ONLY}, 1800ba2e524dSLukas Czerner {Opt_journal_dev, 0, MOPT_NO_EXT2}, 1801ba2e524dSLukas Czerner {Opt_journal_path, 0, MOPT_NO_EXT2}, 1802ba2e524dSLukas Czerner {Opt_journal_ioprio, 0, MOPT_NO_EXT2}, 1803ba2e524dSLukas Czerner {Opt_data, 0, MOPT_NO_EXT2}, 180426092bf5STheodore Ts'o {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, 180526092bf5STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL 180626092bf5STheodore Ts'o {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, 180726092bf5STheodore Ts'o #else 180826092bf5STheodore Ts'o {Opt_acl, 0, MOPT_NOSUPPORT}, 180926092bf5STheodore Ts'o #endif 181026092bf5STheodore Ts'o {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, 181126092bf5STheodore Ts'o {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, 181226092bf5STheodore Ts'o {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, 181326092bf5STheodore Ts'o {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, 181426092bf5STheodore Ts'o MOPT_SET | MOPT_Q}, 181526092bf5STheodore Ts'o {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, 181626092bf5STheodore Ts'o MOPT_SET | MOPT_Q}, 181749da9392SJan Kara {Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA, 181849da9392SJan Kara MOPT_SET | MOPT_Q}, 181926092bf5STheodore Ts'o {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 182049da9392SJan Kara EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), 182149da9392SJan Kara MOPT_CLEAR | MOPT_Q}, 1822ba2e524dSLukas Czerner {Opt_usrjquota, 0, MOPT_Q}, 1823ba2e524dSLukas Czerner {Opt_grpjquota, 0, MOPT_Q}, 1824ba2e524dSLukas Czerner {Opt_jqfmt, 0, MOPT_QFMT}, 1825cdb7ee4cSTahsin Erdogan {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, 182621175ca4SHarshad Shirwadkar {Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS, 18273d392b26STheodore Ts'o MOPT_SET}, 182899c880deSHarshad Shirwadkar #ifdef CONFIG_EXT4_DEBUG 18290f0672ffSHarshad Shirwadkar {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, 18300f0672ffSHarshad Shirwadkar MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY}, 18318016e29fSHarshad Shirwadkar #endif 183226092bf5STheodore Ts'o {Opt_err, 0, 0} 183326092bf5STheodore Ts'o }; 183426092bf5STheodore Ts'o 18355298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 1836c83ad55eSGabriel Krisman Bertazi static const struct ext4_sb_encodings { 1837c83ad55eSGabriel Krisman Bertazi __u16 magic; 1838c83ad55eSGabriel Krisman Bertazi char *name; 183949bd03ccSChristoph Hellwig unsigned int version; 1840c83ad55eSGabriel Krisman Bertazi } ext4_sb_encoding_map[] = { 184149bd03ccSChristoph Hellwig {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)}, 1842c83ad55eSGabriel Krisman Bertazi }; 1843c83ad55eSGabriel Krisman Bertazi 1844aa8bf298SChristoph Hellwig static const struct ext4_sb_encodings * 1845aa8bf298SChristoph Hellwig ext4_sb_read_encoding(const struct ext4_super_block *es) 1846c83ad55eSGabriel Krisman Bertazi { 1847c83ad55eSGabriel Krisman Bertazi __u16 magic = le16_to_cpu(es->s_encoding); 1848c83ad55eSGabriel Krisman Bertazi int i; 1849c83ad55eSGabriel Krisman Bertazi 1850c83ad55eSGabriel Krisman Bertazi for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++) 1851c83ad55eSGabriel Krisman Bertazi if (magic == ext4_sb_encoding_map[i].magic) 1852aa8bf298SChristoph Hellwig return &ext4_sb_encoding_map[i]; 1853c83ad55eSGabriel Krisman Bertazi 1854aa8bf298SChristoph Hellwig return NULL; 1855c83ad55eSGabriel Krisman Bertazi } 1856c83ad55eSGabriel Krisman Bertazi #endif 1857c83ad55eSGabriel Krisman Bertazi 18586e47a3ccSLukas Czerner #define EXT4_SPEC_JQUOTA (1 << 0) 18596e47a3ccSLukas Czerner #define EXT4_SPEC_JQFMT (1 << 1) 18606e47a3ccSLukas Czerner #define EXT4_SPEC_DATAJ (1 << 2) 18616e47a3ccSLukas Czerner #define EXT4_SPEC_SB_BLOCK (1 << 3) 18626e47a3ccSLukas Czerner #define EXT4_SPEC_JOURNAL_DEV (1 << 4) 18636e47a3ccSLukas Czerner #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5) 18646e47a3ccSLukas Czerner #define EXT4_SPEC_s_want_extra_isize (1 << 7) 18656e47a3ccSLukas Czerner #define EXT4_SPEC_s_max_batch_time (1 << 8) 18666e47a3ccSLukas Czerner #define EXT4_SPEC_s_min_batch_time (1 << 9) 18676e47a3ccSLukas Czerner #define EXT4_SPEC_s_inode_readahead_blks (1 << 10) 18686e47a3ccSLukas Czerner #define EXT4_SPEC_s_li_wait_mult (1 << 11) 18696e47a3ccSLukas Czerner #define EXT4_SPEC_s_max_dir_size_kb (1 << 12) 18706e47a3ccSLukas Czerner #define EXT4_SPEC_s_stripe (1 << 13) 18716e47a3ccSLukas Czerner #define EXT4_SPEC_s_resuid (1 << 14) 18726e47a3ccSLukas Czerner #define EXT4_SPEC_s_resgid (1 << 15) 18736e47a3ccSLukas Czerner #define EXT4_SPEC_s_commit_interval (1 << 16) 18746e47a3ccSLukas Czerner #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17) 18757edfd85bSLukas Czerner #define EXT4_SPEC_s_sb_block (1 << 18) 187627b38686SOjaswin Mujoo #define EXT4_SPEC_mb_optimize_scan (1 << 19) 18776e47a3ccSLukas Czerner 1878461c3af0SLukas Czerner struct ext4_fs_context { 1879e6e268cbSLukas Czerner char *s_qf_names[EXT4_MAXQUOTAS]; 188085456054SEric Biggers struct fscrypt_dummy_policy dummy_enc_policy; 1881e6e268cbSLukas Czerner int s_jquota_fmt; /* Format of quota to use */ 18826e47a3ccSLukas Czerner #ifdef CONFIG_EXT4_DEBUG 18836e47a3ccSLukas Czerner int s_fc_debug_max_replay; 18846e47a3ccSLukas Czerner #endif 18856e47a3ccSLukas Czerner unsigned short qname_spec; 18866e47a3ccSLukas Czerner unsigned long vals_s_flags; /* Bits to set in s_flags */ 18876e47a3ccSLukas Czerner unsigned long mask_s_flags; /* Bits changed in s_flags */ 18886e47a3ccSLukas Czerner unsigned long journal_devnum; 18896e47a3ccSLukas Czerner unsigned long s_commit_interval; 18906e47a3ccSLukas Czerner unsigned long s_stripe; 18916e47a3ccSLukas Czerner unsigned int s_inode_readahead_blks; 18926e47a3ccSLukas Czerner unsigned int s_want_extra_isize; 18936e47a3ccSLukas Czerner unsigned int s_li_wait_mult; 18946e47a3ccSLukas Czerner unsigned int s_max_dir_size_kb; 18956e47a3ccSLukas Czerner unsigned int journal_ioprio; 18966e47a3ccSLukas Czerner unsigned int vals_s_mount_opt; 18976e47a3ccSLukas Czerner unsigned int mask_s_mount_opt; 18986e47a3ccSLukas Czerner unsigned int vals_s_mount_opt2; 18996e47a3ccSLukas Czerner unsigned int mask_s_mount_opt2; 1900e3952fccSLukas Czerner unsigned long vals_s_mount_flags; 1901e3952fccSLukas Czerner unsigned long mask_s_mount_flags; 1902b6bd2435SLukas Czerner unsigned int opt_flags; /* MOPT flags */ 19036e47a3ccSLukas Czerner unsigned int spec; 19046e47a3ccSLukas Czerner u32 s_max_batch_time; 19056e47a3ccSLukas Czerner u32 s_min_batch_time; 19066e47a3ccSLukas Czerner kuid_t s_resuid; 19076e47a3ccSLukas Czerner kgid_t s_resgid; 19087edfd85bSLukas Czerner ext4_fsblk_t s_sb_block; 1909b237e304SHarshad Shirwadkar }; 1910b237e304SHarshad Shirwadkar 1911cebe85d5SLukas Czerner static void ext4_fc_free(struct fs_context *fc) 1912cebe85d5SLukas Czerner { 1913cebe85d5SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 1914cebe85d5SLukas Czerner int i; 1915cebe85d5SLukas Czerner 1916cebe85d5SLukas Czerner if (!ctx) 1917cebe85d5SLukas Czerner return; 1918cebe85d5SLukas Czerner 1919cebe85d5SLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) 1920cebe85d5SLukas Czerner kfree(ctx->s_qf_names[i]); 1921cebe85d5SLukas Czerner 192285456054SEric Biggers fscrypt_free_dummy_policy(&ctx->dummy_enc_policy); 1923cebe85d5SLukas Czerner kfree(ctx); 1924cebe85d5SLukas Czerner } 1925cebe85d5SLukas Czerner 1926cebe85d5SLukas Czerner int ext4_init_fs_context(struct fs_context *fc) 1927cebe85d5SLukas Czerner { 1928da9e4802SDan Carpenter struct ext4_fs_context *ctx; 1929cebe85d5SLukas Czerner 1930cebe85d5SLukas Czerner ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 1931cebe85d5SLukas Czerner if (!ctx) 1932cebe85d5SLukas Czerner return -ENOMEM; 1933cebe85d5SLukas Czerner 1934cebe85d5SLukas Czerner fc->fs_private = ctx; 1935cebe85d5SLukas Czerner fc->ops = &ext4_context_ops; 1936cebe85d5SLukas Czerner 1937cebe85d5SLukas Czerner return 0; 1938cebe85d5SLukas Czerner } 1939cebe85d5SLukas Czerner 1940e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 1941e6e268cbSLukas Czerner /* 1942e6e268cbSLukas Czerner * Note the name of the specified quota file. 1943e6e268cbSLukas Czerner */ 1944e6e268cbSLukas Czerner static int note_qf_name(struct fs_context *fc, int qtype, 1945e6e268cbSLukas Czerner struct fs_parameter *param) 1946e6e268cbSLukas Czerner { 1947e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 1948e6e268cbSLukas Czerner char *qname; 1949e6e268cbSLukas Czerner 1950e6e268cbSLukas Czerner if (param->size < 1) { 1951e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "Missing quota name"); 1952e6e268cbSLukas Czerner return -EINVAL; 1953e6e268cbSLukas Czerner } 1954e6e268cbSLukas Czerner if (strchr(param->string, '/')) { 1955e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 1956e6e268cbSLukas Czerner "quotafile must be on filesystem root"); 1957e6e268cbSLukas Czerner return -EINVAL; 1958e6e268cbSLukas Czerner } 1959e6e268cbSLukas Czerner if (ctx->s_qf_names[qtype]) { 1960e6e268cbSLukas Czerner if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) { 1961e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 1962e6e268cbSLukas Czerner "%s quota file already specified", 1963e6e268cbSLukas Czerner QTYPE2NAME(qtype)); 1964e6e268cbSLukas Czerner return -EINVAL; 1965e6e268cbSLukas Czerner } 1966e6e268cbSLukas Czerner return 0; 1967e6e268cbSLukas Czerner } 1968e6e268cbSLukas Czerner 1969e6e268cbSLukas Czerner qname = kmemdup_nul(param->string, param->size, GFP_KERNEL); 1970e6e268cbSLukas Czerner if (!qname) { 1971e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 1972e6e268cbSLukas Czerner "Not enough memory for storing quotafile name"); 1973e6e268cbSLukas Czerner return -ENOMEM; 1974e6e268cbSLukas Czerner } 1975e6e268cbSLukas Czerner ctx->s_qf_names[qtype] = qname; 1976e6e268cbSLukas Czerner ctx->qname_spec |= 1 << qtype; 19776e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JQUOTA; 1978e6e268cbSLukas Czerner return 0; 1979e6e268cbSLukas Czerner } 1980e6e268cbSLukas Czerner 1981e6e268cbSLukas Czerner /* 1982e6e268cbSLukas Czerner * Clear the name of the specified quota file. 1983e6e268cbSLukas Czerner */ 1984e6e268cbSLukas Czerner static int unnote_qf_name(struct fs_context *fc, int qtype) 1985e6e268cbSLukas Czerner { 1986e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 1987e6e268cbSLukas Czerner 1988e6e268cbSLukas Czerner if (ctx->s_qf_names[qtype]) 1989e6e268cbSLukas Czerner kfree(ctx->s_qf_names[qtype]); 1990e6e268cbSLukas Czerner 1991e6e268cbSLukas Czerner ctx->s_qf_names[qtype] = NULL; 1992e6e268cbSLukas Czerner ctx->qname_spec |= 1 << qtype; 19936e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JQUOTA; 1994e6e268cbSLukas Czerner return 0; 1995e6e268cbSLukas Czerner } 1996e6e268cbSLukas Czerner #endif 1997e6e268cbSLukas Czerner 199885456054SEric Biggers static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param, 199985456054SEric Biggers struct ext4_fs_context *ctx) 200085456054SEric Biggers { 200185456054SEric Biggers int err; 200285456054SEric Biggers 200385456054SEric Biggers if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { 200485456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 200585456054SEric Biggers "test_dummy_encryption option not supported"); 200685456054SEric Biggers return -EINVAL; 200785456054SEric Biggers } 200885456054SEric Biggers err = fscrypt_parse_test_dummy_encryption(param, 200985456054SEric Biggers &ctx->dummy_enc_policy); 201085456054SEric Biggers if (err == -EINVAL) { 201185456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 201285456054SEric Biggers "Value of option \"%s\" is unrecognized", param->key); 201385456054SEric Biggers } else if (err == -EEXIST) { 201485456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 201585456054SEric Biggers "Conflicting test_dummy_encryption options"); 201685456054SEric Biggers return -EINVAL; 201785456054SEric Biggers } 201885456054SEric Biggers return err; 201985456054SEric Biggers } 202085456054SEric Biggers 20216e47a3ccSLukas Czerner #define EXT4_SET_CTX(name) \ 20224c246728SLukas Czerner static inline void ctx_set_##name(struct ext4_fs_context *ctx, \ 20234c246728SLukas Czerner unsigned long flag) \ 20246e47a3ccSLukas Czerner { \ 20256e47a3ccSLukas Czerner ctx->mask_s_##name |= flag; \ 20266e47a3ccSLukas Czerner ctx->vals_s_##name |= flag; \ 2027e3952fccSLukas Czerner } 2028e3952fccSLukas Czerner 2029e3952fccSLukas Czerner #define EXT4_CLEAR_CTX(name) \ 20304c246728SLukas Czerner static inline void ctx_clear_##name(struct ext4_fs_context *ctx, \ 20314c246728SLukas Czerner unsigned long flag) \ 20326e47a3ccSLukas Czerner { \ 20336e47a3ccSLukas Czerner ctx->mask_s_##name |= flag; \ 20346e47a3ccSLukas Czerner ctx->vals_s_##name &= ~flag; \ 2035e3952fccSLukas Czerner } 2036e3952fccSLukas Czerner 2037e3952fccSLukas Czerner #define EXT4_TEST_CTX(name) \ 20384c246728SLukas Czerner static inline unsigned long \ 20394c246728SLukas Czerner ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 20406e47a3ccSLukas Czerner { \ 20414c246728SLukas Czerner return (ctx->vals_s_##name & flag); \ 2042e3952fccSLukas Czerner } 20436e47a3ccSLukas Czerner 2044e3952fccSLukas Czerner EXT4_SET_CTX(flags); /* set only */ 20456e47a3ccSLukas Czerner EXT4_SET_CTX(mount_opt); 2046e3952fccSLukas Czerner EXT4_CLEAR_CTX(mount_opt); 2047e3952fccSLukas Czerner EXT4_TEST_CTX(mount_opt); 20486e47a3ccSLukas Czerner EXT4_SET_CTX(mount_opt2); 2049e3952fccSLukas Czerner EXT4_CLEAR_CTX(mount_opt2); 2050e3952fccSLukas Czerner EXT4_TEST_CTX(mount_opt2); 2051e3952fccSLukas Czerner 2052e3952fccSLukas Czerner static inline void ctx_set_mount_flag(struct ext4_fs_context *ctx, int bit) 2053e3952fccSLukas Czerner { 2054e3952fccSLukas Czerner set_bit(bit, &ctx->mask_s_mount_flags); 2055e3952fccSLukas Czerner set_bit(bit, &ctx->vals_s_mount_flags); 2056e3952fccSLukas Czerner } 20576e47a3ccSLukas Czerner 205802f960f8SLukas Czerner static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) 205926092bf5STheodore Ts'o { 2060461c3af0SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2061461c3af0SLukas Czerner struct fs_parse_result result; 206226092bf5STheodore Ts'o const struct mount_opts *m; 2063461c3af0SLukas Czerner int is_remount; 206408cefc7aSEric W. Biederman kuid_t uid; 206508cefc7aSEric W. Biederman kgid_t gid; 2066461c3af0SLukas Czerner int token; 2067461c3af0SLukas Czerner 2068461c3af0SLukas Czerner token = fs_parse(fc, ext4_param_specs, param, &result); 2069461c3af0SLukas Czerner if (token < 0) 2070461c3af0SLukas Czerner return token; 2071461c3af0SLukas Czerner is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 207226092bf5STheodore Ts'o 2073ba2e524dSLukas Czerner for (m = ext4_mount_opts; m->token != Opt_err; m++) 2074ba2e524dSLukas Czerner if (token == m->token) 2075ba2e524dSLukas Czerner break; 2076ba2e524dSLukas Czerner 2077ba2e524dSLukas Czerner ctx->opt_flags |= m->flags; 2078ba2e524dSLukas Czerner 2079ba2e524dSLukas Czerner if (m->flags & MOPT_EXPLICIT) { 2080ba2e524dSLukas Czerner if (m->mount_opt & EXT4_MOUNT_DELALLOC) { 2081ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC); 2082ba2e524dSLukas Czerner } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { 2083ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, 2084ba2e524dSLukas Czerner EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM); 2085ba2e524dSLukas Czerner } else 2086ba2e524dSLukas Czerner return -EINVAL; 2087ba2e524dSLukas Czerner } 2088ba2e524dSLukas Czerner 2089ba2e524dSLukas Czerner if (m->flags & MOPT_NOSUPPORT) { 2090ba2e524dSLukas Czerner ext4_msg(NULL, KERN_ERR, "%s option not supported", 2091ba2e524dSLukas Czerner param->key); 2092ba2e524dSLukas Czerner return 0; 2093ba2e524dSLukas Czerner } 2094ba2e524dSLukas Czerner 2095ba2e524dSLukas Czerner switch (token) { 209657f73c2cSTheodore Ts'o #ifdef CONFIG_QUOTA 2097ba2e524dSLukas Czerner case Opt_usrjquota: 2098461c3af0SLukas Czerner if (!*param->string) 2099e6e268cbSLukas Czerner return unnote_qf_name(fc, USRQUOTA); 2100461c3af0SLukas Czerner else 2101e6e268cbSLukas Czerner return note_qf_name(fc, USRQUOTA, param); 2102ba2e524dSLukas Czerner case Opt_grpjquota: 2103461c3af0SLukas Czerner if (!*param->string) 2104e6e268cbSLukas Czerner return unnote_qf_name(fc, GRPQUOTA); 2105461c3af0SLukas Czerner else 2106e6e268cbSLukas Czerner return note_qf_name(fc, GRPQUOTA, param); 210757f73c2cSTheodore Ts'o #endif 210826092bf5STheodore Ts'o case Opt_sb: 21097edfd85bSLukas Czerner if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 21107edfd85bSLukas Czerner ext4_msg(NULL, KERN_WARNING, 21117edfd85bSLukas Czerner "Ignoring %s option on remount", param->key); 21127edfd85bSLukas Czerner } else { 21137edfd85bSLukas Czerner ctx->s_sb_block = result.uint_32; 21147edfd85bSLukas Czerner ctx->spec |= EXT4_SPEC_s_sb_block; 21157edfd85bSLukas Czerner } 211602f960f8SLukas Czerner return 0; 211726092bf5STheodore Ts'o case Opt_removed: 2118da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option", 2119461c3af0SLukas Czerner param->key); 212002f960f8SLukas Czerner return 0; 212126092bf5STheodore Ts'o case Opt_abort: 2122e3952fccSLukas Czerner ctx_set_mount_flag(ctx, EXT4_MF_FS_ABORTED); 212302f960f8SLukas Czerner return 0; 21244f74d15fSEric Biggers case Opt_inlinecrypt: 21254f74d15fSEric Biggers #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT 21266e47a3ccSLukas Czerner ctx_set_flags(ctx, SB_INLINECRYPT); 21274f74d15fSEric Biggers #else 2128da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "inline encryption not supported"); 21294f74d15fSEric Biggers #endif 213002f960f8SLukas Czerner return 0; 2131461c3af0SLukas Czerner case Opt_errors: 21326e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK); 2133ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, result.uint_32); 2134ba2e524dSLukas Czerner return 0; 2135ba2e524dSLukas Czerner #ifdef CONFIG_QUOTA 2136ba2e524dSLukas Czerner case Opt_jqfmt: 2137ba2e524dSLukas Czerner ctx->s_jquota_fmt = result.uint_32; 2138ba2e524dSLukas Czerner ctx->spec |= EXT4_SPEC_JQFMT; 2139ba2e524dSLukas Czerner return 0; 2140ba2e524dSLukas Czerner #endif 2141ba2e524dSLukas Czerner case Opt_data: 2142ba2e524dSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2143ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, result.uint_32); 2144ba2e524dSLukas Czerner ctx->spec |= EXT4_SPEC_DATAJ; 2145ba2e524dSLukas Czerner return 0; 2146ba2e524dSLukas Czerner case Opt_commit: 2147461c3af0SLukas Czerner if (result.uint_32 == 0) 21486e47a3ccSLukas Czerner ctx->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE; 2149461c3af0SLukas Czerner else if (result.uint_32 > INT_MAX / HZ) { 2150da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 21519ba55543Szhangyi (F) "Invalid commit interval %d, " 21529ba55543Szhangyi (F) "must be smaller than %d", 2153461c3af0SLukas Czerner result.uint_32, INT_MAX / HZ); 2154da812f61SLukas Czerner return -EINVAL; 21559ba55543Szhangyi (F) } 21566e47a3ccSLukas Czerner ctx->s_commit_interval = HZ * result.uint_32; 21576e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_commit_interval; 2158ba2e524dSLukas Czerner return 0; 2159ba2e524dSLukas Czerner case Opt_debug_want_extra_isize: 21606e47a3ccSLukas Czerner if ((result.uint_32 & 1) || (result.uint_32 < 4)) { 2161da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2162461c3af0SLukas Czerner "Invalid want_extra_isize %d", result.uint_32); 2163da812f61SLukas Czerner return -EINVAL; 21649803387cSTheodore Ts'o } 21656e47a3ccSLukas Czerner ctx->s_want_extra_isize = result.uint_32; 21666e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_want_extra_isize; 2167ba2e524dSLukas Czerner return 0; 2168ba2e524dSLukas Czerner case Opt_max_batch_time: 21696e47a3ccSLukas Czerner ctx->s_max_batch_time = result.uint_32; 21706e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_max_batch_time; 2171ba2e524dSLukas Czerner return 0; 2172ba2e524dSLukas Czerner case Opt_min_batch_time: 21736e47a3ccSLukas Czerner ctx->s_min_batch_time = result.uint_32; 21746e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_min_batch_time; 2175ba2e524dSLukas Czerner return 0; 2176ba2e524dSLukas Czerner case Opt_inode_readahead_blks: 2177461c3af0SLukas Czerner if (result.uint_32 && 2178461c3af0SLukas Czerner (result.uint_32 > (1 << 30) || 2179461c3af0SLukas Czerner !is_power_of_2(result.uint_32))) { 2180da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2181e33e60eaSJan Kara "EXT4-fs: inode_readahead_blks must be " 2182e33e60eaSJan Kara "0 or a power of 2 smaller than 2^31"); 2183da812f61SLukas Czerner return -EINVAL; 218426092bf5STheodore Ts'o } 21856e47a3ccSLukas Czerner ctx->s_inode_readahead_blks = result.uint_32; 21866e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_inode_readahead_blks; 2187ba2e524dSLukas Czerner return 0; 2188ba2e524dSLukas Czerner case Opt_init_itable: 21896e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE); 21906e47a3ccSLukas Czerner ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 2191461c3af0SLukas Czerner if (param->type == fs_value_is_string) 21926e47a3ccSLukas Czerner ctx->s_li_wait_mult = result.uint_32; 21936e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_li_wait_mult; 2194ba2e524dSLukas Czerner return 0; 2195ba2e524dSLukas Czerner case Opt_max_dir_size_kb: 21966e47a3ccSLukas Czerner ctx->s_max_dir_size_kb = result.uint_32; 21976e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_max_dir_size_kb; 2198ba2e524dSLukas Czerner return 0; 21998016e29fSHarshad Shirwadkar #ifdef CONFIG_EXT4_DEBUG 2200ba2e524dSLukas Czerner case Opt_fc_debug_max_replay: 22016e47a3ccSLukas Czerner ctx->s_fc_debug_max_replay = result.uint_32; 22026e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay; 2203ba2e524dSLukas Czerner return 0; 22048016e29fSHarshad Shirwadkar #endif 2205ba2e524dSLukas Czerner case Opt_stripe: 22066e47a3ccSLukas Czerner ctx->s_stripe = result.uint_32; 22076e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_stripe; 2208ba2e524dSLukas Czerner return 0; 2209ba2e524dSLukas Czerner case Opt_resuid: 2210461c3af0SLukas Czerner uid = make_kuid(current_user_ns(), result.uint_32); 22110efb3b23SJan Kara if (!uid_valid(uid)) { 2212da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid uid value %d", 2213461c3af0SLukas Czerner result.uint_32); 2214da812f61SLukas Czerner return -EINVAL; 22150efb3b23SJan Kara } 22166e47a3ccSLukas Czerner ctx->s_resuid = uid; 22176e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_resuid; 2218ba2e524dSLukas Czerner return 0; 2219ba2e524dSLukas Czerner case Opt_resgid: 2220461c3af0SLukas Czerner gid = make_kgid(current_user_ns(), result.uint_32); 22210efb3b23SJan Kara if (!gid_valid(gid)) { 2222da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid gid value %d", 2223461c3af0SLukas Czerner result.uint_32); 2224da812f61SLukas Czerner return -EINVAL; 22250efb3b23SJan Kara } 22266e47a3ccSLukas Czerner ctx->s_resgid = gid; 22276e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_resgid; 2228ba2e524dSLukas Czerner return 0; 2229ba2e524dSLukas Czerner case Opt_journal_dev: 22300efb3b23SJan Kara if (is_remount) { 2231da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 22320efb3b23SJan Kara "Cannot specify journal on remount"); 2233da812f61SLukas Czerner return -EINVAL; 22340efb3b23SJan Kara } 2235461c3af0SLukas Czerner ctx->journal_devnum = result.uint_32; 22366e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2237ba2e524dSLukas Czerner return 0; 2238ba2e524dSLukas Czerner case Opt_journal_path: 2239ba2e524dSLukas Czerner { 2240ad4eec61SEric Sandeen struct inode *journal_inode; 2241ad4eec61SEric Sandeen struct path path; 2242ad4eec61SEric Sandeen int error; 2243ad4eec61SEric Sandeen 2244ad4eec61SEric Sandeen if (is_remount) { 2245da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2246ad4eec61SEric Sandeen "Cannot specify journal on remount"); 2247da812f61SLukas Czerner return -EINVAL; 2248ad4eec61SEric Sandeen } 2249ad4eec61SEric Sandeen 2250e3ea75eeSLukas Czerner error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path); 2251ad4eec61SEric Sandeen if (error) { 2252da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "error: could not find " 2253461c3af0SLukas Czerner "journal device path"); 2254da812f61SLukas Czerner return -EINVAL; 2255ad4eec61SEric Sandeen } 2256ad4eec61SEric Sandeen 22572b0143b5SDavid Howells journal_inode = d_inode(path.dentry); 2258461c3af0SLukas Czerner ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev); 22596e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2260ad4eec61SEric Sandeen path_put(&path); 2261ba2e524dSLukas Czerner return 0; 2262ba2e524dSLukas Czerner } 2263ba2e524dSLukas Czerner case Opt_journal_ioprio: 2264461c3af0SLukas Czerner if (result.uint_32 > 7) { 2265da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority" 22660efb3b23SJan Kara " (must be 0-7)"); 2267da812f61SLukas Czerner return -EINVAL; 22680efb3b23SJan Kara } 2269461c3af0SLukas Czerner ctx->journal_ioprio = 2270461c3af0SLukas Czerner IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32); 22716e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO; 2272ba2e524dSLukas Czerner return 0; 2273ba2e524dSLukas Czerner case Opt_test_dummy_encryption: 227485456054SEric Biggers return ext4_parse_test_dummy_encryption(param, ctx); 2275ba2e524dSLukas Czerner case Opt_dax: 2276ba2e524dSLukas Czerner case Opt_dax_type: 2277ef83b6e8SDan Williams #ifdef CONFIG_FS_DAX 2278ba2e524dSLukas Czerner { 2279ba2e524dSLukas Czerner int type = (token == Opt_dax) ? 2280ba2e524dSLukas Czerner Opt_dax : result.uint_32; 2281ba2e524dSLukas Czerner 2282ba2e524dSLukas Czerner switch (type) { 22839cb20f94SIra Weiny case Opt_dax: 22849cb20f94SIra Weiny case Opt_dax_always: 2285ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 22866e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 22879cb20f94SIra Weiny break; 22889cb20f94SIra Weiny case Opt_dax_never: 2289ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 22906e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 22919cb20f94SIra Weiny break; 22929cb20f94SIra Weiny case Opt_dax_inode: 22936e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 22946e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 22959cb20f94SIra Weiny /* Strictly for printing options */ 2296ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE); 22979cb20f94SIra Weiny break; 22989cb20f94SIra Weiny } 2299ba2e524dSLukas Czerner return 0; 2300ba2e524dSLukas Czerner } 2301ef83b6e8SDan Williams #else 2302da812f61SLukas Czerner ext4_msg(NULL, KERN_INFO, "dax option not supported"); 2303da812f61SLukas Czerner return -EINVAL; 2304923ae0ffSRoss Zwisler #endif 2305ba2e524dSLukas Czerner case Opt_data_err: 2306ba2e524dSLukas Czerner if (result.uint_32 == Opt_data_err_abort) 23076e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, m->mount_opt); 2308ba2e524dSLukas Czerner else if (result.uint_32 == Opt_data_err_ignore) 23096e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, m->mount_opt); 2310ba2e524dSLukas Czerner return 0; 2311ba2e524dSLukas Czerner case Opt_mb_optimize_scan: 231227b38686SOjaswin Mujoo if (result.int_32 == 1) { 231327b38686SOjaswin Mujoo ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 231427b38686SOjaswin Mujoo ctx->spec |= EXT4_SPEC_mb_optimize_scan; 231527b38686SOjaswin Mujoo } else if (result.int_32 == 0) { 231627b38686SOjaswin Mujoo ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 231727b38686SOjaswin Mujoo ctx->spec |= EXT4_SPEC_mb_optimize_scan; 231827b38686SOjaswin Mujoo } else { 2319da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, 2320196e402aSHarshad Shirwadkar "mb_optimize_scan should be set to 0 or 1."); 2321da812f61SLukas Czerner return -EINVAL; 2322196e402aSHarshad Shirwadkar } 2323ba2e524dSLukas Czerner return 0; 2324ba2e524dSLukas Czerner } 2325ba2e524dSLukas Czerner 2326ba2e524dSLukas Czerner /* 2327ba2e524dSLukas Czerner * At this point we should only be getting options requiring MOPT_SET, 2328ba2e524dSLukas Czerner * or MOPT_CLEAR. Anything else is a bug 2329ba2e524dSLukas Czerner */ 2330ba2e524dSLukas Czerner if (m->token == Opt_err) { 2331ba2e524dSLukas Czerner ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s", 2332ba2e524dSLukas Czerner param->key); 2333ba2e524dSLukas Czerner WARN_ON(1); 2334ba2e524dSLukas Czerner return -EINVAL; 2335ba2e524dSLukas Czerner } 2336ba2e524dSLukas Czerner 2337ba2e524dSLukas Czerner else { 2338461c3af0SLukas Czerner unsigned int set = 0; 2339461c3af0SLukas Czerner 2340461c3af0SLukas Czerner if ((param->type == fs_value_is_flag) || 2341461c3af0SLukas Czerner result.uint_32 > 0) 2342461c3af0SLukas Czerner set = 1; 2343461c3af0SLukas Czerner 234426092bf5STheodore Ts'o if (m->flags & MOPT_CLEAR) 2345461c3af0SLukas Czerner set = !set; 234626092bf5STheodore Ts'o else if (unlikely(!(m->flags & MOPT_SET))) { 2347da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, 2348461c3af0SLukas Czerner "buggy handling of option %s", 2349461c3af0SLukas Czerner param->key); 235026092bf5STheodore Ts'o WARN_ON(1); 2351da812f61SLukas Czerner return -EINVAL; 235226092bf5STheodore Ts'o } 2353995a3ed6SHarshad Shirwadkar if (m->flags & MOPT_2) { 2354461c3af0SLukas Czerner if (set != 0) 23556e47a3ccSLukas Czerner ctx_set_mount_opt2(ctx, m->mount_opt); 2356995a3ed6SHarshad Shirwadkar else 23576e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, m->mount_opt); 2358995a3ed6SHarshad Shirwadkar } else { 2359461c3af0SLukas Czerner if (set != 0) 23606e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, m->mount_opt); 236126092bf5STheodore Ts'o else 23626e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, m->mount_opt); 236326092bf5STheodore Ts'o } 2364995a3ed6SHarshad Shirwadkar } 2365ba2e524dSLukas Czerner 236602f960f8SLukas Czerner return 0; 236726092bf5STheodore Ts'o } 236826092bf5STheodore Ts'o 23697edfd85bSLukas Czerner static int parse_options(struct fs_context *fc, char *options) 2370ac27a0ecSDave Kleikamp { 2371461c3af0SLukas Czerner struct fs_parameter param; 2372461c3af0SLukas Czerner int ret; 2373461c3af0SLukas Czerner char *key; 2374ac27a0ecSDave Kleikamp 2375ac27a0ecSDave Kleikamp if (!options) 23767edfd85bSLukas Czerner return 0; 2377461c3af0SLukas Czerner 2378461c3af0SLukas Czerner while ((key = strsep(&options, ",")) != NULL) { 2379461c3af0SLukas Czerner if (*key) { 2380461c3af0SLukas Czerner size_t v_len = 0; 2381461c3af0SLukas Czerner char *value = strchr(key, '='); 2382461c3af0SLukas Czerner 2383461c3af0SLukas Czerner param.type = fs_value_is_flag; 2384461c3af0SLukas Czerner param.string = NULL; 2385461c3af0SLukas Czerner 2386461c3af0SLukas Czerner if (value) { 2387461c3af0SLukas Czerner if (value == key) 2388ac27a0ecSDave Kleikamp continue; 2389461c3af0SLukas Czerner 2390461c3af0SLukas Czerner *value++ = 0; 2391461c3af0SLukas Czerner v_len = strlen(value); 2392461c3af0SLukas Czerner param.string = kmemdup_nul(value, v_len, 2393461c3af0SLukas Czerner GFP_KERNEL); 2394461c3af0SLukas Czerner if (!param.string) 23957edfd85bSLukas Czerner return -ENOMEM; 2396461c3af0SLukas Czerner param.type = fs_value_is_string; 2397461c3af0SLukas Czerner } 2398461c3af0SLukas Czerner 2399461c3af0SLukas Czerner param.key = key; 2400461c3af0SLukas Czerner param.size = v_len; 2401461c3af0SLukas Czerner 240202f960f8SLukas Czerner ret = ext4_parse_param(fc, ¶m); 2403461c3af0SLukas Czerner if (param.string) 2404461c3af0SLukas Czerner kfree(param.string); 2405461c3af0SLukas Czerner if (ret < 0) 24067edfd85bSLukas Czerner return ret; 2407ac27a0ecSDave Kleikamp } 2408461c3af0SLukas Czerner } 2409461c3af0SLukas Czerner 24107edfd85bSLukas Czerner ret = ext4_validate_options(fc); 2411da812f61SLukas Czerner if (ret < 0) 24127edfd85bSLukas Czerner return ret; 24137edfd85bSLukas Czerner 24147edfd85bSLukas Czerner return 0; 24157edfd85bSLukas Czerner } 24167edfd85bSLukas Czerner 24177edfd85bSLukas Czerner static int parse_apply_sb_mount_options(struct super_block *sb, 24187edfd85bSLukas Czerner struct ext4_fs_context *m_ctx) 24197edfd85bSLukas Czerner { 24207edfd85bSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 24217edfd85bSLukas Czerner char *s_mount_opts = NULL; 24227edfd85bSLukas Czerner struct ext4_fs_context *s_ctx = NULL; 24237edfd85bSLukas Czerner struct fs_context *fc = NULL; 24247edfd85bSLukas Czerner int ret = -ENOMEM; 24257edfd85bSLukas Czerner 24267edfd85bSLukas Czerner if (!sbi->s_es->s_mount_opts[0]) 2427da812f61SLukas Czerner return 0; 2428da812f61SLukas Czerner 24297edfd85bSLukas Czerner s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, 24307edfd85bSLukas Czerner sizeof(sbi->s_es->s_mount_opts), 24317edfd85bSLukas Czerner GFP_KERNEL); 24327edfd85bSLukas Czerner if (!s_mount_opts) 24337edfd85bSLukas Czerner return ret; 2434e6e268cbSLukas Czerner 24357edfd85bSLukas Czerner fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); 24367edfd85bSLukas Czerner if (!fc) 24377edfd85bSLukas Czerner goto out_free; 2438e6e268cbSLukas Czerner 24397edfd85bSLukas Czerner s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 24407edfd85bSLukas Czerner if (!s_ctx) 24417edfd85bSLukas Czerner goto out_free; 24427edfd85bSLukas Czerner 24437edfd85bSLukas Czerner fc->fs_private = s_ctx; 24447edfd85bSLukas Czerner fc->s_fs_info = sbi; 24457edfd85bSLukas Czerner 24467edfd85bSLukas Czerner ret = parse_options(fc, s_mount_opts); 24477edfd85bSLukas Czerner if (ret < 0) 24487edfd85bSLukas Czerner goto parse_failed; 24497edfd85bSLukas Czerner 24507edfd85bSLukas Czerner ret = ext4_check_opt_consistency(fc, sb); 24517edfd85bSLukas Czerner if (ret < 0) { 24527edfd85bSLukas Czerner parse_failed: 24537edfd85bSLukas Czerner ext4_msg(sb, KERN_WARNING, 24547edfd85bSLukas Czerner "failed to parse options in superblock: %s", 24557edfd85bSLukas Czerner s_mount_opts); 24567edfd85bSLukas Czerner ret = 0; 24577edfd85bSLukas Czerner goto out_free; 24587edfd85bSLukas Czerner } 24597edfd85bSLukas Czerner 24607edfd85bSLukas Czerner if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV) 24617edfd85bSLukas Czerner m_ctx->journal_devnum = s_ctx->journal_devnum; 24627edfd85bSLukas Czerner if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO) 24637edfd85bSLukas Czerner m_ctx->journal_ioprio = s_ctx->journal_ioprio; 24647edfd85bSLukas Czerner 246585456054SEric Biggers ext4_apply_options(fc, sb); 246685456054SEric Biggers ret = 0; 24677edfd85bSLukas Czerner 24687edfd85bSLukas Czerner out_free: 2469c069db76SEric Biggers if (fc) { 2470c069db76SEric Biggers ext4_fc_free(fc); 24717edfd85bSLukas Czerner kfree(fc); 2472c069db76SEric Biggers } 24737edfd85bSLukas Czerner kfree(s_mount_opts); 24747edfd85bSLukas Czerner return ret; 24754c94bff9SLukas Czerner } 24764c94bff9SLukas Czerner 2477e6e268cbSLukas Czerner static void ext4_apply_quota_options(struct fs_context *fc, 2478e6e268cbSLukas Czerner struct super_block *sb) 2479e6e268cbSLukas Czerner { 2480e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 24816e47a3ccSLukas Czerner bool quota_feature = ext4_has_feature_quota(sb); 2482e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2483e6e268cbSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 2484e6e268cbSLukas Czerner char *qname; 2485e6e268cbSLukas Czerner int i; 2486e6e268cbSLukas Czerner 24876e47a3ccSLukas Czerner if (quota_feature) 24886e47a3ccSLukas Czerner return; 24896e47a3ccSLukas Czerner 24906e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQUOTA) { 2491e6e268cbSLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2492e6e268cbSLukas Czerner if (!(ctx->qname_spec & (1 << i))) 2493e6e268cbSLukas Czerner continue; 24946e47a3ccSLukas Czerner 2495e6e268cbSLukas Czerner qname = ctx->s_qf_names[i]; /* May be NULL */ 24964c1bd5a9SLukas Czerner if (qname) 24974c1bd5a9SLukas Czerner set_opt(sb, QUOTA); 2498e6e268cbSLukas Czerner ctx->s_qf_names[i] = NULL; 249913b215a9SLukas Czerner qname = rcu_replace_pointer(sbi->s_qf_names[i], qname, 250013b215a9SLukas Czerner lockdep_is_held(&sb->s_umount)); 250113b215a9SLukas Czerner if (qname) 250213b215a9SLukas Czerner kfree_rcu(qname); 2503e6e268cbSLukas Czerner } 25046e47a3ccSLukas Czerner } 25056e47a3ccSLukas Czerner 25066e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQFMT) 25076e47a3ccSLukas Czerner sbi->s_jquota_fmt = ctx->s_jquota_fmt; 2508e6e268cbSLukas Czerner #endif 2509e6e268cbSLukas Czerner } 2510e6e268cbSLukas Czerner 2511e6e268cbSLukas Czerner /* 2512e6e268cbSLukas Czerner * Check quota settings consistency. 2513e6e268cbSLukas Czerner */ 2514e6e268cbSLukas Czerner static int ext4_check_quota_consistency(struct fs_context *fc, 2515e6e268cbSLukas Czerner struct super_block *sb) 2516e6e268cbSLukas Czerner { 2517e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 2518e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2519e6e268cbSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 2520e6e268cbSLukas Czerner bool quota_feature = ext4_has_feature_quota(sb); 2521e6e268cbSLukas Czerner bool quota_loaded = sb_any_quota_loaded(sb); 25226e47a3ccSLukas Czerner bool usr_qf_name, grp_qf_name, usrquota, grpquota; 25236e47a3ccSLukas Czerner int quota_flags, i; 2524e6e268cbSLukas Czerner 25256e47a3ccSLukas Czerner /* 25266e47a3ccSLukas Czerner * We do the test below only for project quotas. 'usrquota' and 25276e47a3ccSLukas Czerner * 'grpquota' mount options are allowed even without quota feature 25286e47a3ccSLukas Czerner * to support legacy quotas in quota files. 25296e47a3ccSLukas Czerner */ 25306e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) && 25316e47a3ccSLukas Czerner !ext4_has_feature_project(sb)) { 25326e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. " 25336e47a3ccSLukas Czerner "Cannot enable project quota enforcement."); 25346e47a3ccSLukas Czerner return -EINVAL; 25356e47a3ccSLukas Czerner } 25366e47a3ccSLukas Czerner 25376e47a3ccSLukas Czerner quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 25386e47a3ccSLukas Czerner EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA; 25396e47a3ccSLukas Czerner if (quota_loaded && 25406e47a3ccSLukas Czerner ctx->mask_s_mount_opt & quota_flags && 25416e47a3ccSLukas Czerner !ctx_test_mount_opt(ctx, quota_flags)) 25426e47a3ccSLukas Czerner goto err_quota_change; 25436e47a3ccSLukas Czerner 25446e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQUOTA) { 2545e6e268cbSLukas Czerner 2546e6e268cbSLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2547e6e268cbSLukas Czerner if (!(ctx->qname_spec & (1 << i))) 2548e6e268cbSLukas Czerner continue; 2549e6e268cbSLukas Czerner 25506e47a3ccSLukas Czerner if (quota_loaded && 25516e47a3ccSLukas Czerner !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i]) 2552e6e268cbSLukas Czerner goto err_jquota_change; 2553e6e268cbSLukas Czerner 2554e6e268cbSLukas Czerner if (sbi->s_qf_names[i] && ctx->s_qf_names[i] && 255513b215a9SLukas Czerner strcmp(get_qf_name(sb, sbi, i), 2556e6e268cbSLukas Czerner ctx->s_qf_names[i]) != 0) 2557e6e268cbSLukas Czerner goto err_jquota_specified; 2558e6e268cbSLukas Czerner } 25596e47a3ccSLukas Czerner 25606e47a3ccSLukas Czerner if (quota_feature) { 25616e47a3ccSLukas Czerner ext4_msg(NULL, KERN_INFO, 25626e47a3ccSLukas Czerner "Journaled quota options ignored when " 25636e47a3ccSLukas Czerner "QUOTA feature is enabled"); 25646e47a3ccSLukas Czerner return 0; 25656e47a3ccSLukas Czerner } 2566e6e268cbSLukas Czerner } 2567e6e268cbSLukas Czerner 25686e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQFMT) { 2569e6e268cbSLukas Czerner if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded) 25706e47a3ccSLukas Czerner goto err_jquota_change; 2571e6e268cbSLukas Czerner if (quota_feature) { 2572e6e268cbSLukas Czerner ext4_msg(NULL, KERN_INFO, "Quota format mount options " 2573e6e268cbSLukas Czerner "ignored when QUOTA feature is enabled"); 2574e6e268cbSLukas Czerner return 0; 2575e6e268cbSLukas Czerner } 2576e6e268cbSLukas Czerner } 25776e47a3ccSLukas Czerner 25786e47a3ccSLukas Czerner /* Make sure we don't mix old and new quota format */ 25796e47a3ccSLukas Czerner usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) || 25806e47a3ccSLukas Czerner ctx->s_qf_names[USRQUOTA]); 25816e47a3ccSLukas Czerner grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) || 25826e47a3ccSLukas Czerner ctx->s_qf_names[GRPQUOTA]); 25836e47a3ccSLukas Czerner 25846e47a3ccSLukas Czerner usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 25856e47a3ccSLukas Czerner test_opt(sb, USRQUOTA)); 25866e47a3ccSLukas Czerner 25876e47a3ccSLukas Czerner grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) || 25886e47a3ccSLukas Czerner test_opt(sb, GRPQUOTA)); 25896e47a3ccSLukas Czerner 25906e47a3ccSLukas Czerner if (usr_qf_name) { 25916e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 25926e47a3ccSLukas Czerner usrquota = false; 25936e47a3ccSLukas Czerner } 25946e47a3ccSLukas Czerner if (grp_qf_name) { 25956e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 25966e47a3ccSLukas Czerner grpquota = false; 25976e47a3ccSLukas Czerner } 25986e47a3ccSLukas Czerner 25996e47a3ccSLukas Czerner if (usr_qf_name || grp_qf_name) { 26006e47a3ccSLukas Czerner if (usrquota || grpquota) { 26016e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "old and new quota " 26026e47a3ccSLukas Czerner "format mixing"); 26036e47a3ccSLukas Czerner return -EINVAL; 26046e47a3ccSLukas Czerner } 26056e47a3ccSLukas Czerner 26066e47a3ccSLukas Czerner if (!(ctx->spec & EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) { 26076e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "journaled quota format " 26086e47a3ccSLukas Czerner "not specified"); 26096e47a3ccSLukas Czerner return -EINVAL; 26106e47a3ccSLukas Czerner } 26116e47a3ccSLukas Czerner } 26126e47a3ccSLukas Czerner 2613e6e268cbSLukas Czerner return 0; 2614e6e268cbSLukas Czerner 2615e6e268cbSLukas Czerner err_quota_change: 2616e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 2617e6e268cbSLukas Czerner "Cannot change quota options when quota turned on"); 2618e6e268cbSLukas Czerner return -EINVAL; 2619e6e268cbSLukas Czerner err_jquota_change: 2620e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota " 2621e6e268cbSLukas Czerner "options when quota turned on"); 2622e6e268cbSLukas Czerner return -EINVAL; 2623e6e268cbSLukas Czerner err_jquota_specified: 2624e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "%s quota file already specified", 2625e6e268cbSLukas Czerner QTYPE2NAME(i)); 2626e6e268cbSLukas Czerner return -EINVAL; 2627e6e268cbSLukas Czerner #else 2628e6e268cbSLukas Czerner return 0; 2629e6e268cbSLukas Czerner #endif 2630e6e268cbSLukas Czerner } 2631e6e268cbSLukas Czerner 26325f41fdaeSEric Biggers static int ext4_check_test_dummy_encryption(const struct fs_context *fc, 26335f41fdaeSEric Biggers struct super_block *sb) 26345f41fdaeSEric Biggers { 26355f41fdaeSEric Biggers const struct ext4_fs_context *ctx = fc->fs_private; 26365f41fdaeSEric Biggers const struct ext4_sb_info *sbi = EXT4_SB(sb); 263785456054SEric Biggers int err; 26385f41fdaeSEric Biggers 263985456054SEric Biggers if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy)) 26405f41fdaeSEric Biggers return 0; 26415f41fdaeSEric Biggers 26425f41fdaeSEric Biggers if (!ext4_has_feature_encrypt(sb)) { 26435f41fdaeSEric Biggers ext4_msg(NULL, KERN_WARNING, 26445f41fdaeSEric Biggers "test_dummy_encryption requires encrypt feature"); 26455f41fdaeSEric Biggers return -EINVAL; 26465f41fdaeSEric Biggers } 26475f41fdaeSEric Biggers /* 26485f41fdaeSEric Biggers * This mount option is just for testing, and it's not worthwhile to 26495f41fdaeSEric Biggers * implement the extra complexity (e.g. RCU protection) that would be 26505f41fdaeSEric Biggers * needed to allow it to be set or changed during remount. We do allow 26515f41fdaeSEric Biggers * it to be specified during remount, but only if there is no change. 26525f41fdaeSEric Biggers */ 265385456054SEric Biggers if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 265485456054SEric Biggers if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 265585456054SEric Biggers &ctx->dummy_enc_policy)) 265685456054SEric Biggers return 0; 26575f41fdaeSEric Biggers ext4_msg(NULL, KERN_WARNING, 265885456054SEric Biggers "Can't set or change test_dummy_encryption on remount"); 26595f41fdaeSEric Biggers return -EINVAL; 26605f41fdaeSEric Biggers } 266185456054SEric Biggers /* Also make sure s_mount_opts didn't contain a conflicting value. */ 266285456054SEric Biggers if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) { 266385456054SEric Biggers if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 266485456054SEric Biggers &ctx->dummy_enc_policy)) 26655f41fdaeSEric Biggers return 0; 266685456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 266785456054SEric Biggers "Conflicting test_dummy_encryption options"); 266885456054SEric Biggers return -EINVAL; 266985456054SEric Biggers } 267085456054SEric Biggers /* 267185456054SEric Biggers * fscrypt_add_test_dummy_key() technically changes the super_block, so 267285456054SEric Biggers * technically it should be delayed until ext4_apply_options() like the 267385456054SEric Biggers * other changes. But since we never get here for remounts (see above), 267485456054SEric Biggers * and this is the last chance to report errors, we do it here. 267585456054SEric Biggers */ 267685456054SEric Biggers err = fscrypt_add_test_dummy_key(sb, &ctx->dummy_enc_policy); 267785456054SEric Biggers if (err) 267885456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 267985456054SEric Biggers "Error adding test dummy encryption key [%d]", err); 268085456054SEric Biggers return err; 268185456054SEric Biggers } 268285456054SEric Biggers 268385456054SEric Biggers static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx, 268485456054SEric Biggers struct super_block *sb) 268585456054SEric Biggers { 268685456054SEric Biggers if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) || 268785456054SEric Biggers /* if already set, it was already verified to be the same */ 268885456054SEric Biggers fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy)) 268985456054SEric Biggers return; 269085456054SEric Biggers EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy; 269185456054SEric Biggers memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy)); 269285456054SEric Biggers ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); 26935f41fdaeSEric Biggers } 26945f41fdaeSEric Biggers 2695b6bd2435SLukas Czerner static int ext4_check_opt_consistency(struct fs_context *fc, 2696b6bd2435SLukas Czerner struct super_block *sb) 2697b6bd2435SLukas Czerner { 2698b6bd2435SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 26996e47a3ccSLukas Czerner struct ext4_sb_info *sbi = fc->s_fs_info; 27006e47a3ccSLukas Czerner int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 27015f41fdaeSEric Biggers int err; 2702b6bd2435SLukas Czerner 2703b6bd2435SLukas Czerner if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { 2704b6bd2435SLukas Czerner ext4_msg(NULL, KERN_ERR, 2705b6bd2435SLukas Czerner "Mount option(s) incompatible with ext2"); 2706b6bd2435SLukas Czerner return -EINVAL; 2707b6bd2435SLukas Czerner } 2708b6bd2435SLukas Czerner if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { 2709b6bd2435SLukas Czerner ext4_msg(NULL, KERN_ERR, 2710b6bd2435SLukas Czerner "Mount option(s) incompatible with ext3"); 2711b6bd2435SLukas Czerner return -EINVAL; 2712b6bd2435SLukas Czerner } 2713b6bd2435SLukas Czerner 27146e47a3ccSLukas Czerner if (ctx->s_want_extra_isize > 27156e47a3ccSLukas Czerner (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) { 27166e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, 27176e47a3ccSLukas Czerner "Invalid want_extra_isize %d", 27186e47a3ccSLukas Czerner ctx->s_want_extra_isize); 27196e47a3ccSLukas Czerner return -EINVAL; 27206e47a3ccSLukas Czerner } 27216e47a3ccSLukas Czerner 27226e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DIOREAD_NOLOCK)) { 27236e47a3ccSLukas Czerner int blocksize = 27246e47a3ccSLukas Czerner BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 27256e47a3ccSLukas Czerner if (blocksize < PAGE_SIZE) 27266e47a3ccSLukas Czerner ext4_msg(NULL, KERN_WARNING, "Warning: mounting with an " 27276e47a3ccSLukas Czerner "experimental mount option 'dioread_nolock' " 27286e47a3ccSLukas Czerner "for blocksize < PAGE_SIZE"); 27296e47a3ccSLukas Czerner } 27306e47a3ccSLukas Czerner 27315f41fdaeSEric Biggers err = ext4_check_test_dummy_encryption(fc, sb); 27325f41fdaeSEric Biggers if (err) 27335f41fdaeSEric Biggers return err; 27346e47a3ccSLukas Czerner 27356e47a3ccSLukas Czerner if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) { 27366e47a3ccSLukas Czerner if (!sbi->s_journal) { 27376e47a3ccSLukas Czerner ext4_msg(NULL, KERN_WARNING, 27386e47a3ccSLukas Czerner "Remounting file system with no journal " 27396e47a3ccSLukas Czerner "so ignoring journalled data option"); 27406e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 27414c246728SLukas Czerner } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) != 27424c246728SLukas Czerner test_opt(sb, DATA_FLAGS)) { 27436e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "Cannot change data mode " 27446e47a3ccSLukas Czerner "on remount"); 27456e47a3ccSLukas Czerner return -EINVAL; 27466e47a3ccSLukas Czerner } 27476e47a3ccSLukas Czerner } 27486e47a3ccSLukas Czerner 27496e47a3ccSLukas Czerner if (is_remount) { 27506e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 27516e47a3ccSLukas Czerner (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 27526e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "can't mount with " 27536e47a3ccSLukas Czerner "both data=journal and dax"); 27546e47a3ccSLukas Czerner return -EINVAL; 27556e47a3ccSLukas Czerner } 27566e47a3ccSLukas Czerner 27576e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 27586e47a3ccSLukas Czerner (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 27596e47a3ccSLukas Czerner (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) { 27606e47a3ccSLukas Czerner fail_dax_change_remount: 27616e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "can't change " 27626e47a3ccSLukas Czerner "dax mount option while remounting"); 27636e47a3ccSLukas Czerner return -EINVAL; 27646e47a3ccSLukas Czerner } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) && 27656e47a3ccSLukas Czerner (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 27666e47a3ccSLukas Czerner (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) { 27676e47a3ccSLukas Czerner goto fail_dax_change_remount; 27686e47a3ccSLukas Czerner } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) && 27696e47a3ccSLukas Czerner ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 27706e47a3ccSLukas Czerner (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 27716e47a3ccSLukas Czerner !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) { 27726e47a3ccSLukas Czerner goto fail_dax_change_remount; 27736e47a3ccSLukas Czerner } 27746e47a3ccSLukas Czerner } 27756e47a3ccSLukas Czerner 2776b6bd2435SLukas Czerner return ext4_check_quota_consistency(fc, sb); 2777b6bd2435SLukas Czerner } 2778b6bd2435SLukas Czerner 277985456054SEric Biggers static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) 27806e47a3ccSLukas Czerner { 27816e47a3ccSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 27826e47a3ccSLukas Czerner struct ext4_sb_info *sbi = fc->s_fs_info; 27836e47a3ccSLukas Czerner 27846e47a3ccSLukas Czerner sbi->s_mount_opt &= ~ctx->mask_s_mount_opt; 27856e47a3ccSLukas Czerner sbi->s_mount_opt |= ctx->vals_s_mount_opt; 27866e47a3ccSLukas Czerner sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2; 27876e47a3ccSLukas Czerner sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2; 27886e47a3ccSLukas Czerner sbi->s_mount_flags &= ~ctx->mask_s_mount_flags; 27896e47a3ccSLukas Czerner sbi->s_mount_flags |= ctx->vals_s_mount_flags; 27906e47a3ccSLukas Czerner sb->s_flags &= ~ctx->mask_s_flags; 27916e47a3ccSLukas Czerner sb->s_flags |= ctx->vals_s_flags; 27926e47a3ccSLukas Czerner 27936e47a3ccSLukas Czerner #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X = ctx->X; }) 27946e47a3ccSLukas Czerner APPLY(s_commit_interval); 27956e47a3ccSLukas Czerner APPLY(s_stripe); 27966e47a3ccSLukas Czerner APPLY(s_max_batch_time); 27976e47a3ccSLukas Czerner APPLY(s_min_batch_time); 27986e47a3ccSLukas Czerner APPLY(s_want_extra_isize); 27996e47a3ccSLukas Czerner APPLY(s_inode_readahead_blks); 28006e47a3ccSLukas Czerner APPLY(s_max_dir_size_kb); 28016e47a3ccSLukas Czerner APPLY(s_li_wait_mult); 28026e47a3ccSLukas Czerner APPLY(s_resgid); 28036e47a3ccSLukas Czerner APPLY(s_resuid); 28046e47a3ccSLukas Czerner 28056e47a3ccSLukas Czerner #ifdef CONFIG_EXT4_DEBUG 28066e47a3ccSLukas Czerner APPLY(s_fc_debug_max_replay); 28076e47a3ccSLukas Czerner #endif 28086e47a3ccSLukas Czerner 28096e47a3ccSLukas Czerner ext4_apply_quota_options(fc, sb); 281085456054SEric Biggers ext4_apply_test_dummy_encryption(ctx, sb); 28116e47a3ccSLukas Czerner } 28126e47a3ccSLukas Czerner 28136e47a3ccSLukas Czerner 2814da812f61SLukas Czerner static int ext4_validate_options(struct fs_context *fc) 28154c94bff9SLukas Czerner { 2816ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 28176e47a3ccSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 28184c94bff9SLukas Czerner char *usr_qf_name, *grp_qf_name; 28196e47a3ccSLukas Czerner 28206e47a3ccSLukas Czerner usr_qf_name = ctx->s_qf_names[USRQUOTA]; 28216e47a3ccSLukas Czerner grp_qf_name = ctx->s_qf_names[GRPQUOTA]; 28226e47a3ccSLukas Czerner 282333458eabSTheodore Ts'o if (usr_qf_name || grp_qf_name) { 28246e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name) 28256e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2826ac27a0ecSDave Kleikamp 28276e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name) 28286e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2829ac27a0ecSDave Kleikamp 28306e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 28316e47a3ccSLukas Czerner ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA)) { 2832da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "old and new quota " 2833b31e1552SEric Sandeen "format mixing"); 2834da812f61SLukas Czerner return -EINVAL; 2835ac27a0ecSDave Kleikamp } 2836ac27a0ecSDave Kleikamp } 2837ac27a0ecSDave Kleikamp #endif 28386e47a3ccSLukas Czerner return 1; 2839ac27a0ecSDave Kleikamp } 2840ac27a0ecSDave Kleikamp 28412adf6da8STheodore Ts'o static inline void ext4_show_quota_options(struct seq_file *seq, 28422adf6da8STheodore Ts'o struct super_block *sb) 28432adf6da8STheodore Ts'o { 28442adf6da8STheodore Ts'o #if defined(CONFIG_QUOTA) 28452adf6da8STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 284633458eabSTheodore Ts'o char *usr_qf_name, *grp_qf_name; 28472adf6da8STheodore Ts'o 28482adf6da8STheodore Ts'o if (sbi->s_jquota_fmt) { 28492adf6da8STheodore Ts'o char *fmtname = ""; 28502adf6da8STheodore Ts'o 28512adf6da8STheodore Ts'o switch (sbi->s_jquota_fmt) { 28522adf6da8STheodore Ts'o case QFMT_VFS_OLD: 28532adf6da8STheodore Ts'o fmtname = "vfsold"; 28542adf6da8STheodore Ts'o break; 28552adf6da8STheodore Ts'o case QFMT_VFS_V0: 28562adf6da8STheodore Ts'o fmtname = "vfsv0"; 28572adf6da8STheodore Ts'o break; 28582adf6da8STheodore Ts'o case QFMT_VFS_V1: 28592adf6da8STheodore Ts'o fmtname = "vfsv1"; 28602adf6da8STheodore Ts'o break; 28612adf6da8STheodore Ts'o } 28622adf6da8STheodore Ts'o seq_printf(seq, ",jqfmt=%s", fmtname); 28632adf6da8STheodore Ts'o } 28642adf6da8STheodore Ts'o 286533458eabSTheodore Ts'o rcu_read_lock(); 286633458eabSTheodore Ts'o usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]); 286733458eabSTheodore Ts'o grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]); 286833458eabSTheodore Ts'o if (usr_qf_name) 286933458eabSTheodore Ts'o seq_show_option(seq, "usrjquota", usr_qf_name); 287033458eabSTheodore Ts'o if (grp_qf_name) 287133458eabSTheodore Ts'o seq_show_option(seq, "grpjquota", grp_qf_name); 287233458eabSTheodore Ts'o rcu_read_unlock(); 28732adf6da8STheodore Ts'o #endif 28742adf6da8STheodore Ts'o } 28752adf6da8STheodore Ts'o 28765a916be1STheodore Ts'o static const char *token2str(int token) 28775a916be1STheodore Ts'o { 287897d8a670SLukas Czerner const struct fs_parameter_spec *spec; 28795a916be1STheodore Ts'o 288097d8a670SLukas Czerner for (spec = ext4_param_specs; spec->name != NULL; spec++) 288197d8a670SLukas Czerner if (spec->opt == token && !spec->type) 28825a916be1STheodore Ts'o break; 288397d8a670SLukas Czerner return spec->name; 28845a916be1STheodore Ts'o } 28855a916be1STheodore Ts'o 28862adf6da8STheodore Ts'o /* 28872adf6da8STheodore Ts'o * Show an option if 28882adf6da8STheodore Ts'o * - it's set to a non-default value OR 28892adf6da8STheodore Ts'o * - if the per-sb default is different from the global default 28902adf6da8STheodore Ts'o */ 289166acdcf4STheodore Ts'o static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, 289266acdcf4STheodore Ts'o int nodefs) 28932adf6da8STheodore Ts'o { 28942adf6da8STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 28952adf6da8STheodore Ts'o struct ext4_super_block *es = sbi->s_es; 289668afa7e0STyson Nottingham int def_errors, def_mount_opt = sbi->s_def_mount_opt; 28975a916be1STheodore Ts'o const struct mount_opts *m; 289866acdcf4STheodore Ts'o char sep = nodefs ? '\n' : ','; 28992adf6da8STheodore Ts'o 290066acdcf4STheodore Ts'o #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) 290166acdcf4STheodore Ts'o #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) 29022adf6da8STheodore Ts'o 29032adf6da8STheodore Ts'o if (sbi->s_sb_block != 1) 29045a916be1STheodore Ts'o SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); 29055a916be1STheodore Ts'o 29065a916be1STheodore Ts'o for (m = ext4_mount_opts; m->token != Opt_err; m++) { 29075a916be1STheodore Ts'o int want_set = m->flags & MOPT_SET; 29085a916be1STheodore Ts'o if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || 2909ba2e524dSLukas Czerner m->flags & MOPT_SKIP) 29105a916be1STheodore Ts'o continue; 291168afa7e0STyson Nottingham if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) 29125a916be1STheodore Ts'o continue; /* skip if same as the default */ 29135a916be1STheodore Ts'o if ((want_set && 29145a916be1STheodore Ts'o (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || 29155a916be1STheodore Ts'o (!want_set && (sbi->s_mount_opt & m->mount_opt))) 29165a916be1STheodore Ts'o continue; /* select Opt_noFoo vs Opt_Foo */ 29175a916be1STheodore Ts'o SEQ_OPTS_PRINT("%s", token2str(m->token)); 29185a916be1STheodore Ts'o } 29195a916be1STheodore Ts'o 292008cefc7aSEric W. Biederman if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || 29215a916be1STheodore Ts'o le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) 292208cefc7aSEric W. Biederman SEQ_OPTS_PRINT("resuid=%u", 292308cefc7aSEric W. Biederman from_kuid_munged(&init_user_ns, sbi->s_resuid)); 292408cefc7aSEric W. Biederman if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || 29255a916be1STheodore Ts'o le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) 292608cefc7aSEric W. Biederman SEQ_OPTS_PRINT("resgid=%u", 292708cefc7aSEric W. Biederman from_kgid_munged(&init_user_ns, sbi->s_resgid)); 292866acdcf4STheodore Ts'o def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors); 29295a916be1STheodore Ts'o if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) 29305a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=remount-ro"); 29312adf6da8STheodore Ts'o if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) 29325a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=continue"); 29332adf6da8STheodore Ts'o if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) 29345a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=panic"); 293566acdcf4STheodore Ts'o if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) 29365a916be1STheodore Ts'o SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); 293766acdcf4STheodore Ts'o if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) 29385a916be1STheodore Ts'o SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 293966acdcf4STheodore Ts'o if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 29405a916be1STheodore Ts'o SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 294166acdcf4STheodore Ts'o if (nodefs || sbi->s_stripe) 29425a916be1STheodore Ts'o SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 294368afa7e0STyson Nottingham if (nodefs || EXT4_MOUNT_DATA_FLAGS & 294468afa7e0STyson Nottingham (sbi->s_mount_opt ^ def_mount_opt)) { 29452adf6da8STheodore Ts'o if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 29465a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=journal"); 29472adf6da8STheodore Ts'o else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 29485a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=ordered"); 29492adf6da8STheodore Ts'o else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 29505a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=writeback"); 29515a916be1STheodore Ts'o } 295266acdcf4STheodore Ts'o if (nodefs || 295366acdcf4STheodore Ts'o sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) 29545a916be1STheodore Ts'o SEQ_OPTS_PRINT("inode_readahead_blks=%u", 29552adf6da8STheodore Ts'o sbi->s_inode_readahead_blks); 29562adf6da8STheodore Ts'o 2957ceec0376STyson Nottingham if (test_opt(sb, INIT_INODE_TABLE) && (nodefs || 295866acdcf4STheodore Ts'o (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) 29595a916be1STheodore Ts'o SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); 2960df981d03STheodore Ts'o if (nodefs || sbi->s_max_dir_size_kb) 2961df981d03STheodore Ts'o SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); 29627915a861SAles Novak if (test_opt(sb, DATA_ERR_ABORT)) 29637915a861SAles Novak SEQ_OPTS_PUTS("data_err=abort"); 2964ed318a6cSEric Biggers 2965ed318a6cSEric Biggers fscrypt_show_test_dummy_encryption(seq, sep, sb); 29662adf6da8STheodore Ts'o 29674f74d15fSEric Biggers if (sb->s_flags & SB_INLINECRYPT) 29684f74d15fSEric Biggers SEQ_OPTS_PUTS("inlinecrypt"); 29694f74d15fSEric Biggers 29709cb20f94SIra Weiny if (test_opt(sb, DAX_ALWAYS)) { 29719cb20f94SIra Weiny if (IS_EXT2_SB(sb)) 29729cb20f94SIra Weiny SEQ_OPTS_PUTS("dax"); 29739cb20f94SIra Weiny else 29749cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=always"); 29759cb20f94SIra Weiny } else if (test_opt2(sb, DAX_NEVER)) { 29769cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=never"); 29779cb20f94SIra Weiny } else if (test_opt2(sb, DAX_INODE)) { 29789cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=inode"); 29799cb20f94SIra Weiny } 29803fa5d23eSOjaswin Mujoo 29813fa5d23eSOjaswin Mujoo if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 29823fa5d23eSOjaswin Mujoo !test_opt2(sb, MB_OPTIMIZE_SCAN)) { 29833fa5d23eSOjaswin Mujoo SEQ_OPTS_PUTS("mb_optimize_scan=0"); 29843fa5d23eSOjaswin Mujoo } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 29853fa5d23eSOjaswin Mujoo test_opt2(sb, MB_OPTIMIZE_SCAN)) { 29863fa5d23eSOjaswin Mujoo SEQ_OPTS_PUTS("mb_optimize_scan=1"); 29873fa5d23eSOjaswin Mujoo } 29883fa5d23eSOjaswin Mujoo 29892adf6da8STheodore Ts'o ext4_show_quota_options(seq, sb); 29902adf6da8STheodore Ts'o return 0; 29912adf6da8STheodore Ts'o } 29922adf6da8STheodore Ts'o 299366acdcf4STheodore Ts'o static int ext4_show_options(struct seq_file *seq, struct dentry *root) 299466acdcf4STheodore Ts'o { 299566acdcf4STheodore Ts'o return _ext4_show_options(seq, root->d_sb, 0); 299666acdcf4STheodore Ts'o } 299766acdcf4STheodore Ts'o 2998ebd173beSTheodore Ts'o int ext4_seq_options_show(struct seq_file *seq, void *offset) 299966acdcf4STheodore Ts'o { 300066acdcf4STheodore Ts'o struct super_block *sb = seq->private; 300166acdcf4STheodore Ts'o int rc; 300266acdcf4STheodore Ts'o 3003bc98a42cSDavid Howells seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw"); 300466acdcf4STheodore Ts'o rc = _ext4_show_options(seq, sb, 1); 300566acdcf4STheodore Ts'o seq_puts(seq, "\n"); 300666acdcf4STheodore Ts'o return rc; 300766acdcf4STheodore Ts'o } 300866acdcf4STheodore Ts'o 3009617ba13bSMingming Cao static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, 3010ac27a0ecSDave Kleikamp int read_only) 3011ac27a0ecSDave Kleikamp { 3012617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 3013c89128a0SJaegeuk Kim int err = 0; 3014ac27a0ecSDave Kleikamp 3015617ba13bSMingming Cao if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { 3016b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "revision level too high, " 3017b31e1552SEric Sandeen "forcing read-only mode"); 3018c89128a0SJaegeuk Kim err = -EROFS; 30195adaccacSyangerkun goto done; 3020ac27a0ecSDave Kleikamp } 3021ac27a0ecSDave Kleikamp if (read_only) 3022281b5995STheodore Ts'o goto done; 3023617ba13bSMingming Cao if (!(sbi->s_mount_state & EXT4_VALID_FS)) 3024b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " 3025b31e1552SEric Sandeen "running e2fsck is recommended"); 3026c8b459f4SLukas Czerner else if (sbi->s_mount_state & EXT4_ERROR_FS) 3027b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, 3028b31e1552SEric Sandeen "warning: mounting fs with errors, " 3029b31e1552SEric Sandeen "running e2fsck is recommended"); 3030ed3ce80aSTao Ma else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && 3031ac27a0ecSDave Kleikamp le16_to_cpu(es->s_mnt_count) >= 3032ac27a0ecSDave Kleikamp (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) 3033b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, 3034b31e1552SEric Sandeen "warning: maximal mount count reached, " 3035b31e1552SEric Sandeen "running e2fsck is recommended"); 3036ac27a0ecSDave Kleikamp else if (le32_to_cpu(es->s_checkinterval) && 30376a0678a7SArnd Bergmann (ext4_get_tstamp(es, s_lastcheck) + 30386a0678a7SArnd Bergmann le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds())) 3039b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, 3040b31e1552SEric Sandeen "warning: checktime reached, " 3041b31e1552SEric Sandeen "running e2fsck is recommended"); 30420390131bSFrank Mayhar if (!sbi->s_journal) 3043216c34b2SMarcin Slusarz es->s_state &= cpu_to_le16(~EXT4_VALID_FS); 3044ac27a0ecSDave Kleikamp if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) 3045617ba13bSMingming Cao es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); 3046e8546d06SMarcin Slusarz le16_add_cpu(&es->s_mnt_count, 1); 30476a0678a7SArnd Bergmann ext4_update_tstamp(es, s_mtime); 304802f310fcSJan Kara if (sbi->s_journal) { 3049e2b911c5SDarrick J. Wong ext4_set_feature_journal_needs_recovery(sb); 305002f310fcSJan Kara if (ext4_has_feature_orphan_file(sb)) 305102f310fcSJan Kara ext4_set_feature_orphan_present(sb); 305202f310fcSJan Kara } 3053ac27a0ecSDave Kleikamp 30544392fbc4SJan Kara err = ext4_commit_super(sb); 3055281b5995STheodore Ts'o done: 3056ac27a0ecSDave Kleikamp if (test_opt(sb, DEBUG)) 3057a9df9a49STheodore Ts'o printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " 3058a2595b8aSTheodore Ts'o "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", 3059ac27a0ecSDave Kleikamp sb->s_blocksize, 3060ac27a0ecSDave Kleikamp sbi->s_groups_count, 3061617ba13bSMingming Cao EXT4_BLOCKS_PER_GROUP(sb), 3062617ba13bSMingming Cao EXT4_INODES_PER_GROUP(sb), 3063a2595b8aSTheodore Ts'o sbi->s_mount_opt, sbi->s_mount_opt2); 3064c89128a0SJaegeuk Kim return err; 3065ac27a0ecSDave Kleikamp } 3066ac27a0ecSDave Kleikamp 3067117fff10STheodore Ts'o int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) 3068117fff10STheodore Ts'o { 3069117fff10STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 30707c990728SSuraj Jitindar Singh struct flex_groups **old_groups, **new_groups; 307137b0b6b8SDan Carpenter int size, i, j; 3072117fff10STheodore Ts'o 3073117fff10STheodore Ts'o if (!sbi->s_log_groups_per_flex) 3074117fff10STheodore Ts'o return 0; 3075117fff10STheodore Ts'o 3076117fff10STheodore Ts'o size = ext4_flex_group(sbi, ngroup - 1) + 1; 3077117fff10STheodore Ts'o if (size <= sbi->s_flex_groups_allocated) 3078117fff10STheodore Ts'o return 0; 3079117fff10STheodore Ts'o 30807c990728SSuraj Jitindar Singh new_groups = kvzalloc(roundup_pow_of_two(size * 30817c990728SSuraj Jitindar Singh sizeof(*sbi->s_flex_groups)), GFP_KERNEL); 3082117fff10STheodore Ts'o if (!new_groups) { 30837c990728SSuraj Jitindar Singh ext4_msg(sb, KERN_ERR, 30847c990728SSuraj Jitindar Singh "not enough memory for %d flex group pointers", size); 3085117fff10STheodore Ts'o return -ENOMEM; 3086117fff10STheodore Ts'o } 30877c990728SSuraj Jitindar Singh for (i = sbi->s_flex_groups_allocated; i < size; i++) { 30887c990728SSuraj Jitindar Singh new_groups[i] = kvzalloc(roundup_pow_of_two( 30897c990728SSuraj Jitindar Singh sizeof(struct flex_groups)), 30907c990728SSuraj Jitindar Singh GFP_KERNEL); 30917c990728SSuraj Jitindar Singh if (!new_groups[i]) { 309237b0b6b8SDan Carpenter for (j = sbi->s_flex_groups_allocated; j < i; j++) 309337b0b6b8SDan Carpenter kvfree(new_groups[j]); 30947c990728SSuraj Jitindar Singh kvfree(new_groups); 30957c990728SSuraj Jitindar Singh ext4_msg(sb, KERN_ERR, 30967c990728SSuraj Jitindar Singh "not enough memory for %d flex groups", size); 30977c990728SSuraj Jitindar Singh return -ENOMEM; 3098117fff10STheodore Ts'o } 30997c990728SSuraj Jitindar Singh } 31007c990728SSuraj Jitindar Singh rcu_read_lock(); 31017c990728SSuraj Jitindar Singh old_groups = rcu_dereference(sbi->s_flex_groups); 31027c990728SSuraj Jitindar Singh if (old_groups) 31037c990728SSuraj Jitindar Singh memcpy(new_groups, old_groups, 31047c990728SSuraj Jitindar Singh (sbi->s_flex_groups_allocated * 31057c990728SSuraj Jitindar Singh sizeof(struct flex_groups *))); 31067c990728SSuraj Jitindar Singh rcu_read_unlock(); 31077c990728SSuraj Jitindar Singh rcu_assign_pointer(sbi->s_flex_groups, new_groups); 31087c990728SSuraj Jitindar Singh sbi->s_flex_groups_allocated = size; 31097c990728SSuraj Jitindar Singh if (old_groups) 31107c990728SSuraj Jitindar Singh ext4_kvfree_array_rcu(old_groups); 3111117fff10STheodore Ts'o return 0; 3112117fff10STheodore Ts'o } 3113117fff10STheodore Ts'o 3114772cb7c8SJose R. Santos static int ext4_fill_flex_info(struct super_block *sb) 3115772cb7c8SJose R. Santos { 3116772cb7c8SJose R. Santos struct ext4_sb_info *sbi = EXT4_SB(sb); 3117772cb7c8SJose R. Santos struct ext4_group_desc *gdp = NULL; 31187c990728SSuraj Jitindar Singh struct flex_groups *fg; 3119772cb7c8SJose R. Santos ext4_group_t flex_group; 3120117fff10STheodore Ts'o int i, err; 3121772cb7c8SJose R. Santos 3122503358aeSTheodore Ts'o sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; 3123d50f2ab6SXi Wang if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) { 3124772cb7c8SJose R. Santos sbi->s_log_groups_per_flex = 0; 3125772cb7c8SJose R. Santos return 1; 3126772cb7c8SJose R. Santos } 3127772cb7c8SJose R. Santos 3128117fff10STheodore Ts'o err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); 3129117fff10STheodore Ts'o if (err) 3130772cb7c8SJose R. Santos goto failed; 3131772cb7c8SJose R. Santos 3132772cb7c8SJose R. Santos for (i = 0; i < sbi->s_groups_count; i++) { 313388b6edd1STheodore Ts'o gdp = ext4_get_group_desc(sb, i, NULL); 3134772cb7c8SJose R. Santos 3135772cb7c8SJose R. Santos flex_group = ext4_flex_group(sbi, i); 31367c990728SSuraj Jitindar Singh fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); 31377c990728SSuraj Jitindar Singh atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes); 313890ba983fSTheodore Ts'o atomic64_add(ext4_free_group_clusters(sb, gdp), 31397c990728SSuraj Jitindar Singh &fg->free_clusters); 31407c990728SSuraj Jitindar Singh atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs); 3141772cb7c8SJose R. Santos } 3142772cb7c8SJose R. Santos 3143772cb7c8SJose R. Santos return 1; 3144772cb7c8SJose R. Santos failed: 3145772cb7c8SJose R. Santos return 0; 3146772cb7c8SJose R. Santos } 3147772cb7c8SJose R. Santos 3148e2b911c5SDarrick J. Wong static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group, 3149717d50e4SAndreas Dilger struct ext4_group_desc *gdp) 3150717d50e4SAndreas Dilger { 3151b47820edSDaeho Jeong int offset = offsetof(struct ext4_group_desc, bg_checksum); 3152717d50e4SAndreas Dilger __u16 crc = 0; 3153717d50e4SAndreas Dilger __le32 le_group = cpu_to_le32(block_group); 3154e2b911c5SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(sb); 3155717d50e4SAndreas Dilger 31569aa5d32bSDmitry Monakhov if (ext4_has_metadata_csum(sbi->s_sb)) { 3157feb0ab32SDarrick J. Wong /* Use new metadata_csum algorithm */ 3158feb0ab32SDarrick J. Wong __u32 csum32; 3159b47820edSDaeho Jeong __u16 dummy_csum = 0; 3160feb0ab32SDarrick J. Wong 3161feb0ab32SDarrick J. Wong csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, 3162feb0ab32SDarrick J. Wong sizeof(le_group)); 3163b47820edSDaeho Jeong csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset); 3164b47820edSDaeho Jeong csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum, 3165b47820edSDaeho Jeong sizeof(dummy_csum)); 3166b47820edSDaeho Jeong offset += sizeof(dummy_csum); 3167b47820edSDaeho Jeong if (offset < sbi->s_desc_size) 3168b47820edSDaeho Jeong csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset, 3169b47820edSDaeho Jeong sbi->s_desc_size - offset); 3170feb0ab32SDarrick J. Wong 3171feb0ab32SDarrick J. Wong crc = csum32 & 0xFFFF; 3172feb0ab32SDarrick J. Wong goto out; 3173feb0ab32SDarrick J. Wong } 3174feb0ab32SDarrick J. Wong 3175feb0ab32SDarrick J. Wong /* old crc16 code */ 3176e2b911c5SDarrick J. Wong if (!ext4_has_feature_gdt_csum(sb)) 3177813d32f9SDarrick J. Wong return 0; 3178813d32f9SDarrick J. Wong 3179717d50e4SAndreas Dilger crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 3180717d50e4SAndreas Dilger crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); 3181717d50e4SAndreas Dilger crc = crc16(crc, (__u8 *)gdp, offset); 3182717d50e4SAndreas Dilger offset += sizeof(gdp->bg_checksum); /* skip checksum */ 3183717d50e4SAndreas Dilger /* for checksum of struct ext4_group_desc do the rest...*/ 3184e2b911c5SDarrick J. Wong if (ext4_has_feature_64bit(sb) && 3185717d50e4SAndreas Dilger offset < le16_to_cpu(sbi->s_es->s_desc_size)) 3186717d50e4SAndreas Dilger crc = crc16(crc, (__u8 *)gdp + offset, 3187717d50e4SAndreas Dilger le16_to_cpu(sbi->s_es->s_desc_size) - 3188717d50e4SAndreas Dilger offset); 3189717d50e4SAndreas Dilger 3190feb0ab32SDarrick J. Wong out: 3191717d50e4SAndreas Dilger return cpu_to_le16(crc); 3192717d50e4SAndreas Dilger } 3193717d50e4SAndreas Dilger 3194feb0ab32SDarrick J. Wong int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, 3195717d50e4SAndreas Dilger struct ext4_group_desc *gdp) 3196717d50e4SAndreas Dilger { 3197feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb) && 3198e2b911c5SDarrick J. Wong (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp))) 3199717d50e4SAndreas Dilger return 0; 3200717d50e4SAndreas Dilger 3201717d50e4SAndreas Dilger return 1; 3202717d50e4SAndreas Dilger } 3203717d50e4SAndreas Dilger 3204feb0ab32SDarrick J. Wong void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, 3205feb0ab32SDarrick J. Wong struct ext4_group_desc *gdp) 3206feb0ab32SDarrick J. Wong { 3207feb0ab32SDarrick J. Wong if (!ext4_has_group_desc_csum(sb)) 3208feb0ab32SDarrick J. Wong return; 3209e2b911c5SDarrick J. Wong gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp); 3210feb0ab32SDarrick J. Wong } 3211feb0ab32SDarrick J. Wong 3212ac27a0ecSDave Kleikamp /* Called at mount-time, super-block is locked */ 3213bfff6873SLukas Czerner static int ext4_check_descriptors(struct super_block *sb, 3214829fa70dSTheodore Ts'o ext4_fsblk_t sb_block, 3215bfff6873SLukas Czerner ext4_group_t *first_not_zeroed) 3216ac27a0ecSDave Kleikamp { 3217617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 3218617ba13bSMingming Cao ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); 3219617ba13bSMingming Cao ext4_fsblk_t last_block; 322044de022cSTheodore Ts'o ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0); 3221bd81d8eeSLaurent Vivier ext4_fsblk_t block_bitmap; 3222bd81d8eeSLaurent Vivier ext4_fsblk_t inode_bitmap; 3223bd81d8eeSLaurent Vivier ext4_fsblk_t inode_table; 3224ce421581SJose R. Santos int flexbg_flag = 0; 3225bfff6873SLukas Czerner ext4_group_t i, grp = sbi->s_groups_count; 3226ac27a0ecSDave Kleikamp 3227e2b911c5SDarrick J. Wong if (ext4_has_feature_flex_bg(sb)) 3228ce421581SJose R. Santos flexbg_flag = 1; 3229ce421581SJose R. Santos 3230617ba13bSMingming Cao ext4_debug("Checking group descriptors"); 3231ac27a0ecSDave Kleikamp 3232197cd65aSAkinobu Mita for (i = 0; i < sbi->s_groups_count; i++) { 3233197cd65aSAkinobu Mita struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); 3234197cd65aSAkinobu Mita 3235ce421581SJose R. Santos if (i == sbi->s_groups_count - 1 || flexbg_flag) 3236bd81d8eeSLaurent Vivier last_block = ext4_blocks_count(sbi->s_es) - 1; 3237ac27a0ecSDave Kleikamp else 3238ac27a0ecSDave Kleikamp last_block = first_block + 3239617ba13bSMingming Cao (EXT4_BLOCKS_PER_GROUP(sb) - 1); 3240ac27a0ecSDave Kleikamp 3241bfff6873SLukas Czerner if ((grp == sbi->s_groups_count) && 3242bfff6873SLukas Czerner !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3243bfff6873SLukas Czerner grp = i; 3244bfff6873SLukas Czerner 32458fadc143SAlexandre Ratchov block_bitmap = ext4_block_bitmap(sb, gdp); 3246829fa70dSTheodore Ts'o if (block_bitmap == sb_block) { 3247829fa70dSTheodore Ts'o ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3248829fa70dSTheodore Ts'o "Block bitmap for group %u overlaps " 3249829fa70dSTheodore Ts'o "superblock", i); 325018db4b4eSTheodore Ts'o if (!sb_rdonly(sb)) 325118db4b4eSTheodore Ts'o return 0; 3252829fa70dSTheodore Ts'o } 325377260807STheodore Ts'o if (block_bitmap >= sb_block + 1 && 325477260807STheodore Ts'o block_bitmap <= last_bg_block) { 325577260807STheodore Ts'o ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 325677260807STheodore Ts'o "Block bitmap for group %u overlaps " 325777260807STheodore Ts'o "block group descriptors", i); 325877260807STheodore Ts'o if (!sb_rdonly(sb)) 325977260807STheodore Ts'o return 0; 326077260807STheodore Ts'o } 32612b2d6d01STheodore Ts'o if (block_bitmap < first_block || block_bitmap > last_block) { 3262b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3263a9df9a49STheodore Ts'o "Block bitmap for group %u not in group " 3264b31e1552SEric Sandeen "(block %llu)!", i, block_bitmap); 3265ac27a0ecSDave Kleikamp return 0; 3266ac27a0ecSDave Kleikamp } 32678fadc143SAlexandre Ratchov inode_bitmap = ext4_inode_bitmap(sb, gdp); 3268829fa70dSTheodore Ts'o if (inode_bitmap == sb_block) { 3269829fa70dSTheodore Ts'o ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3270829fa70dSTheodore Ts'o "Inode bitmap for group %u overlaps " 3271829fa70dSTheodore Ts'o "superblock", i); 327218db4b4eSTheodore Ts'o if (!sb_rdonly(sb)) 327318db4b4eSTheodore Ts'o return 0; 3274829fa70dSTheodore Ts'o } 327577260807STheodore Ts'o if (inode_bitmap >= sb_block + 1 && 327677260807STheodore Ts'o inode_bitmap <= last_bg_block) { 327777260807STheodore Ts'o ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 327877260807STheodore Ts'o "Inode bitmap for group %u overlaps " 327977260807STheodore Ts'o "block group descriptors", i); 328077260807STheodore Ts'o if (!sb_rdonly(sb)) 328177260807STheodore Ts'o return 0; 328277260807STheodore Ts'o } 32832b2d6d01STheodore Ts'o if (inode_bitmap < first_block || inode_bitmap > last_block) { 3284b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3285a9df9a49STheodore Ts'o "Inode bitmap for group %u not in group " 3286b31e1552SEric Sandeen "(block %llu)!", i, inode_bitmap); 3287ac27a0ecSDave Kleikamp return 0; 3288ac27a0ecSDave Kleikamp } 32898fadc143SAlexandre Ratchov inode_table = ext4_inode_table(sb, gdp); 3290829fa70dSTheodore Ts'o if (inode_table == sb_block) { 3291829fa70dSTheodore Ts'o ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3292829fa70dSTheodore Ts'o "Inode table for group %u overlaps " 3293829fa70dSTheodore Ts'o "superblock", i); 329418db4b4eSTheodore Ts'o if (!sb_rdonly(sb)) 329518db4b4eSTheodore Ts'o return 0; 3296829fa70dSTheodore Ts'o } 329777260807STheodore Ts'o if (inode_table >= sb_block + 1 && 329877260807STheodore Ts'o inode_table <= last_bg_block) { 329977260807STheodore Ts'o ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 330077260807STheodore Ts'o "Inode table for group %u overlaps " 330177260807STheodore Ts'o "block group descriptors", i); 330277260807STheodore Ts'o if (!sb_rdonly(sb)) 330377260807STheodore Ts'o return 0; 330477260807STheodore Ts'o } 3305bd81d8eeSLaurent Vivier if (inode_table < first_block || 33062b2d6d01STheodore Ts'o inode_table + sbi->s_itb_per_group - 1 > last_block) { 3307b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3308a9df9a49STheodore Ts'o "Inode table for group %u not in group " 3309b31e1552SEric Sandeen "(block %llu)!", i, inode_table); 3310ac27a0ecSDave Kleikamp return 0; 3311ac27a0ecSDave Kleikamp } 3312955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, i); 3313feb0ab32SDarrick J. Wong if (!ext4_group_desc_csum_verify(sb, i, gdp)) { 3314b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 3315b31e1552SEric Sandeen "Checksum for group %u failed (%u!=%u)", 3316e2b911c5SDarrick J. Wong i, le16_to_cpu(ext4_group_desc_csum(sb, i, 3317fd2d4291SAvantika Mathur gdp)), le16_to_cpu(gdp->bg_checksum)); 3318bc98a42cSDavid Howells if (!sb_rdonly(sb)) { 3319955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, i); 3320717d50e4SAndreas Dilger return 0; 3321717d50e4SAndreas Dilger } 33227ee1ec4cSLi Zefan } 3323955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, i); 3324ce421581SJose R. Santos if (!flexbg_flag) 3325617ba13bSMingming Cao first_block += EXT4_BLOCKS_PER_GROUP(sb); 3326ac27a0ecSDave Kleikamp } 3327bfff6873SLukas Czerner if (NULL != first_not_zeroed) 3328bfff6873SLukas Czerner *first_not_zeroed = grp; 3329ac27a0ecSDave Kleikamp return 1; 3330ac27a0ecSDave Kleikamp } 3331ac27a0ecSDave Kleikamp 3332cd2291a4SEric Sandeen /* 3333cd2291a4SEric Sandeen * Maximal extent format file size. 3334cd2291a4SEric Sandeen * Resulting logical blkno at s_maxbytes must fit in our on-disk 3335cd2291a4SEric Sandeen * extent format containers, within a sector_t, and within i_blocks 3336cd2291a4SEric Sandeen * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, 3337cd2291a4SEric Sandeen * so that won't be a limiting factor. 3338cd2291a4SEric Sandeen * 3339f17722f9SLukas Czerner * However there is other limiting factor. We do store extents in the form 3340f17722f9SLukas Czerner * of starting block and length, hence the resulting length of the extent 3341f17722f9SLukas Czerner * covering maximum file size must fit into on-disk format containers as 3342f17722f9SLukas Czerner * well. Given that length is always by 1 unit bigger than max unit (because 3343f17722f9SLukas Czerner * we count 0 as well) we have to lower the s_maxbytes by one fs block. 3344f17722f9SLukas Czerner * 3345cd2291a4SEric Sandeen * Note, this does *not* consider any metadata overhead for vfs i_blocks. 3346cd2291a4SEric Sandeen */ 3347f287a1a5STheodore Ts'o static loff_t ext4_max_size(int blkbits, int has_huge_files) 3348cd2291a4SEric Sandeen { 3349cd2291a4SEric Sandeen loff_t res; 3350cd2291a4SEric Sandeen loff_t upper_limit = MAX_LFS_FILESIZE; 3351cd2291a4SEric Sandeen 335272deb455SChristoph Hellwig BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64)); 335372deb455SChristoph Hellwig 335472deb455SChristoph Hellwig if (!has_huge_files) { 3355cd2291a4SEric Sandeen upper_limit = (1LL << 32) - 1; 3356cd2291a4SEric Sandeen 3357cd2291a4SEric Sandeen /* total blocks in file system block size */ 3358cd2291a4SEric Sandeen upper_limit >>= (blkbits - 9); 3359cd2291a4SEric Sandeen upper_limit <<= blkbits; 3360cd2291a4SEric Sandeen } 3361cd2291a4SEric Sandeen 3362f17722f9SLukas Czerner /* 3363f17722f9SLukas Czerner * 32-bit extent-start container, ee_block. We lower the maxbytes 3364f17722f9SLukas Czerner * by one fs block, so ee_len can cover the extent of maximum file 3365f17722f9SLukas Czerner * size 3366f17722f9SLukas Czerner */ 3367f17722f9SLukas Czerner res = (1LL << 32) - 1; 3368cd2291a4SEric Sandeen res <<= blkbits; 3369cd2291a4SEric Sandeen 3370cd2291a4SEric Sandeen /* Sanity check against vm- & vfs- imposed limits */ 3371cd2291a4SEric Sandeen if (res > upper_limit) 3372cd2291a4SEric Sandeen res = upper_limit; 3373cd2291a4SEric Sandeen 3374cd2291a4SEric Sandeen return res; 3375cd2291a4SEric Sandeen } 3376ac27a0ecSDave Kleikamp 3377ac27a0ecSDave Kleikamp /* 3378cd2291a4SEric Sandeen * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect 33790fc1b451SAneesh Kumar K.V * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks. 33800fc1b451SAneesh Kumar K.V * We need to be 1 filesystem block less than the 2^48 sector limit. 3381ac27a0ecSDave Kleikamp */ 3382f287a1a5STheodore Ts'o static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) 3383ac27a0ecSDave Kleikamp { 33845c93e8ecSZhang Yi loff_t upper_limit, res = EXT4_NDIR_BLOCKS; 33850fc1b451SAneesh Kumar K.V int meta_blocks; 33865c93e8ecSZhang Yi unsigned int ppb = 1 << (bits - 2); 338775ca6ad4SRitesh Harjani 338875ca6ad4SRitesh Harjani /* 338975ca6ad4SRitesh Harjani * This is calculated to be the largest file size for a dense, block 33900b8e58a1SAndreas Dilger * mapped file such that the file's total number of 512-byte sectors, 33910b8e58a1SAndreas Dilger * including data and all indirect blocks, does not exceed (2^48 - 1). 33920b8e58a1SAndreas Dilger * 33930b8e58a1SAndreas Dilger * __u32 i_blocks_lo and _u16 i_blocks_high represent the total 33940b8e58a1SAndreas Dilger * number of 512-byte sectors of the file. 33950fc1b451SAneesh Kumar K.V */ 339672deb455SChristoph Hellwig if (!has_huge_files) { 33970fc1b451SAneesh Kumar K.V /* 339872deb455SChristoph Hellwig * !has_huge_files or implies that the inode i_block field 339972deb455SChristoph Hellwig * represents total file blocks in 2^32 512-byte sectors == 340072deb455SChristoph Hellwig * size of vfs inode i_blocks * 8 34010fc1b451SAneesh Kumar K.V */ 34020fc1b451SAneesh Kumar K.V upper_limit = (1LL << 32) - 1; 34030fc1b451SAneesh Kumar K.V 34040fc1b451SAneesh Kumar K.V /* total blocks in file system block size */ 34050fc1b451SAneesh Kumar K.V upper_limit >>= (bits - 9); 34060fc1b451SAneesh Kumar K.V 34070fc1b451SAneesh Kumar K.V } else { 34088180a562SAneesh Kumar K.V /* 34098180a562SAneesh Kumar K.V * We use 48 bit ext4_inode i_blocks 34108180a562SAneesh Kumar K.V * With EXT4_HUGE_FILE_FL set the i_blocks 34118180a562SAneesh Kumar K.V * represent total number of blocks in 34128180a562SAneesh Kumar K.V * file system block size 34138180a562SAneesh Kumar K.V */ 34140fc1b451SAneesh Kumar K.V upper_limit = (1LL << 48) - 1; 34150fc1b451SAneesh Kumar K.V 34160fc1b451SAneesh Kumar K.V } 34170fc1b451SAneesh Kumar K.V 34185c93e8ecSZhang Yi /* Compute how many blocks we can address by block tree */ 34195c93e8ecSZhang Yi res += ppb; 34205c93e8ecSZhang Yi res += ppb * ppb; 34215c93e8ecSZhang Yi res += ((loff_t)ppb) * ppb * ppb; 34225c93e8ecSZhang Yi /* Compute how many metadata blocks are needed */ 34235c93e8ecSZhang Yi meta_blocks = 1; 34245c93e8ecSZhang Yi meta_blocks += 1 + ppb; 34255c93e8ecSZhang Yi meta_blocks += 1 + ppb + ppb * ppb; 34265c93e8ecSZhang Yi /* Does block tree limit file size? */ 34275c93e8ecSZhang Yi if (res + meta_blocks <= upper_limit) 34285c93e8ecSZhang Yi goto check_lfs; 34295c93e8ecSZhang Yi 34305c93e8ecSZhang Yi res = upper_limit; 34315c93e8ecSZhang Yi /* How many metadata blocks are needed for addressing upper_limit? */ 34325c93e8ecSZhang Yi upper_limit -= EXT4_NDIR_BLOCKS; 34330fc1b451SAneesh Kumar K.V /* indirect blocks */ 34340fc1b451SAneesh Kumar K.V meta_blocks = 1; 34355c93e8ecSZhang Yi upper_limit -= ppb; 34360fc1b451SAneesh Kumar K.V /* double indirect blocks */ 34375c93e8ecSZhang Yi if (upper_limit < ppb * ppb) { 34385c93e8ecSZhang Yi meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb); 34395c93e8ecSZhang Yi res -= meta_blocks; 34405c93e8ecSZhang Yi goto check_lfs; 34415c93e8ecSZhang Yi } 34425c93e8ecSZhang Yi meta_blocks += 1 + ppb; 34435c93e8ecSZhang Yi upper_limit -= ppb * ppb; 34445c93e8ecSZhang Yi /* tripple indirect blocks for the rest */ 34455c93e8ecSZhang Yi meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) + 34465c93e8ecSZhang Yi DIV_ROUND_UP_ULL(upper_limit, ppb*ppb); 34475c93e8ecSZhang Yi res -= meta_blocks; 34485c93e8ecSZhang Yi check_lfs: 3449ac27a0ecSDave Kleikamp res <<= bits; 34500fc1b451SAneesh Kumar K.V if (res > MAX_LFS_FILESIZE) 34510fc1b451SAneesh Kumar K.V res = MAX_LFS_FILESIZE; 34520fc1b451SAneesh Kumar K.V 34535c93e8ecSZhang Yi return res; 3454ac27a0ecSDave Kleikamp } 3455ac27a0ecSDave Kleikamp 3456617ba13bSMingming Cao static ext4_fsblk_t descriptor_loc(struct super_block *sb, 345770bbb3e0SAndrew Morton ext4_fsblk_t logical_sb_block, int nr) 3458ac27a0ecSDave Kleikamp { 3459617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 3460fd2d4291SAvantika Mathur ext4_group_t bg, first_meta_bg; 3461ac27a0ecSDave Kleikamp int has_super = 0; 3462ac27a0ecSDave Kleikamp 3463ac27a0ecSDave Kleikamp first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); 3464ac27a0ecSDave Kleikamp 3465e2b911c5SDarrick J. Wong if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg) 346670bbb3e0SAndrew Morton return logical_sb_block + nr + 1; 3467ac27a0ecSDave Kleikamp bg = sbi->s_desc_per_block * nr; 3468617ba13bSMingming Cao if (ext4_bg_has_super(sb, bg)) 3469ac27a0ecSDave Kleikamp has_super = 1; 34700b8e58a1SAndreas Dilger 3471bd63f6b0SDarrick J. Wong /* 3472bd63f6b0SDarrick J. Wong * If we have a meta_bg fs with 1k blocks, group 0's GDT is at 3473bd63f6b0SDarrick J. Wong * block 2, not 1. If s_first_data_block == 0 (bigalloc is enabled 3474bd63f6b0SDarrick J. Wong * on modern mke2fs or blksize > 1k on older mke2fs) then we must 3475bd63f6b0SDarrick J. Wong * compensate. 3476bd63f6b0SDarrick J. Wong */ 3477bd63f6b0SDarrick J. Wong if (sb->s_blocksize == 1024 && nr == 0 && 347849598e04SJun Piao le32_to_cpu(sbi->s_es->s_first_data_block) == 0) 3479bd63f6b0SDarrick J. Wong has_super++; 3480bd63f6b0SDarrick J. Wong 3481617ba13bSMingming Cao return (has_super + ext4_group_first_block_no(sb, bg)); 3482ac27a0ecSDave Kleikamp } 3483ac27a0ecSDave Kleikamp 3484c9de560dSAlex Tomas /** 3485c9de560dSAlex Tomas * ext4_get_stripe_size: Get the stripe size. 3486c9de560dSAlex Tomas * @sbi: In memory super block info 3487c9de560dSAlex Tomas * 3488c9de560dSAlex Tomas * If we have specified it via mount option, then 3489c9de560dSAlex Tomas * use the mount option value. If the value specified at mount time is 3490c9de560dSAlex Tomas * greater than the blocks per group use the super block value. 3491c9de560dSAlex Tomas * If the super block value is greater than blocks per group return 0. 3492c9de560dSAlex Tomas * Allocator needs it be less than blocks per group. 3493c9de560dSAlex Tomas * 3494c9de560dSAlex Tomas */ 3495c9de560dSAlex Tomas static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) 3496c9de560dSAlex Tomas { 3497c9de560dSAlex Tomas unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride); 3498c9de560dSAlex Tomas unsigned long stripe_width = 3499c9de560dSAlex Tomas le32_to_cpu(sbi->s_es->s_raid_stripe_width); 35003eb08658SDan Ehrenberg int ret; 3501c9de560dSAlex Tomas 3502c9de560dSAlex Tomas if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) 35033eb08658SDan Ehrenberg ret = sbi->s_stripe; 35045469d7c3SJan Kara else if (stripe_width && stripe_width <= sbi->s_blocks_per_group) 35053eb08658SDan Ehrenberg ret = stripe_width; 35065469d7c3SJan Kara else if (stride && stride <= sbi->s_blocks_per_group) 35073eb08658SDan Ehrenberg ret = stride; 35083eb08658SDan Ehrenberg else 35093eb08658SDan Ehrenberg ret = 0; 3510c9de560dSAlex Tomas 35113eb08658SDan Ehrenberg /* 35123eb08658SDan Ehrenberg * If the stripe width is 1, this makes no sense and 35133eb08658SDan Ehrenberg * we set it to 0 to turn off stripe handling code. 35143eb08658SDan Ehrenberg */ 35153eb08658SDan Ehrenberg if (ret <= 1) 35163eb08658SDan Ehrenberg ret = 0; 3517c9de560dSAlex Tomas 35183eb08658SDan Ehrenberg return ret; 3519c9de560dSAlex Tomas } 3520ac27a0ecSDave Kleikamp 3521a13fb1a4SEric Sandeen /* 3522a13fb1a4SEric Sandeen * Check whether this filesystem can be mounted based on 3523a13fb1a4SEric Sandeen * the features present and the RDONLY/RDWR mount requested. 3524a13fb1a4SEric Sandeen * Returns 1 if this filesystem can be mounted as requested, 3525a13fb1a4SEric Sandeen * 0 if it cannot be. 3526a13fb1a4SEric Sandeen */ 352725c6d98fSJan Kara int ext4_feature_set_ok(struct super_block *sb, int readonly) 3528a13fb1a4SEric Sandeen { 3529e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext4_incompat_features(sb)) { 3530a13fb1a4SEric Sandeen ext4_msg(sb, KERN_ERR, 3531a13fb1a4SEric Sandeen "Couldn't mount because of " 3532a13fb1a4SEric Sandeen "unsupported optional features (%x)", 3533a13fb1a4SEric Sandeen (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & 3534a13fb1a4SEric Sandeen ~EXT4_FEATURE_INCOMPAT_SUPP)); 3535a13fb1a4SEric Sandeen return 0; 3536a13fb1a4SEric Sandeen } 3537a13fb1a4SEric Sandeen 35385298d4bfSChristoph Hellwig #if !IS_ENABLED(CONFIG_UNICODE) 3539c83ad55eSGabriel Krisman Bertazi if (ext4_has_feature_casefold(sb)) { 3540c83ad55eSGabriel Krisman Bertazi ext4_msg(sb, KERN_ERR, 3541c83ad55eSGabriel Krisman Bertazi "Filesystem with casefold feature cannot be " 3542c83ad55eSGabriel Krisman Bertazi "mounted without CONFIG_UNICODE"); 3543c83ad55eSGabriel Krisman Bertazi return 0; 3544c83ad55eSGabriel Krisman Bertazi } 3545c83ad55eSGabriel Krisman Bertazi #endif 3546c83ad55eSGabriel Krisman Bertazi 3547a13fb1a4SEric Sandeen if (readonly) 3548a13fb1a4SEric Sandeen return 1; 3549a13fb1a4SEric Sandeen 3550e2b911c5SDarrick J. Wong if (ext4_has_feature_readonly(sb)) { 35512cb5cc8bSDarrick J. Wong ext4_msg(sb, KERN_INFO, "filesystem is read-only"); 35521751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 35532cb5cc8bSDarrick J. Wong return 1; 35542cb5cc8bSDarrick J. Wong } 35552cb5cc8bSDarrick J. Wong 3556a13fb1a4SEric Sandeen /* Check that feature set is OK for a read-write mount */ 3557e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext4_ro_compat_features(sb)) { 3558a13fb1a4SEric Sandeen ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " 3559a13fb1a4SEric Sandeen "unsupported optional features (%x)", 3560a13fb1a4SEric Sandeen (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & 3561a13fb1a4SEric Sandeen ~EXT4_FEATURE_RO_COMPAT_SUPP)); 3562a13fb1a4SEric Sandeen return 0; 3563a13fb1a4SEric Sandeen } 3564e2b911c5SDarrick J. Wong if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) { 3565bab08ab9STheodore Ts'o ext4_msg(sb, KERN_ERR, 3566bab08ab9STheodore Ts'o "Can't support bigalloc feature without " 3567bab08ab9STheodore Ts'o "extents feature\n"); 3568bab08ab9STheodore Ts'o return 0; 3569bab08ab9STheodore Ts'o } 35707c319d32SAditya Kali 35719db176bcSJan Kara #if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2) 3572d65d87a0STheodore Ts'o if (!readonly && (ext4_has_feature_quota(sb) || 3573d65d87a0STheodore Ts'o ext4_has_feature_project(sb))) { 35747c319d32SAditya Kali ext4_msg(sb, KERN_ERR, 3575d65d87a0STheodore Ts'o "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2"); 3576689c958cSLi Xi return 0; 3577689c958cSLi Xi } 35787c319d32SAditya Kali #endif /* CONFIG_QUOTA */ 3579a13fb1a4SEric Sandeen return 1; 3580a13fb1a4SEric Sandeen } 3581a13fb1a4SEric Sandeen 358266e61a9eSTheodore Ts'o /* 358366e61a9eSTheodore Ts'o * This function is called once a day if we have errors logged 358466e61a9eSTheodore Ts'o * on the file system 358566e61a9eSTheodore Ts'o */ 3586235699a8SKees Cook static void print_daily_error_info(struct timer_list *t) 358766e61a9eSTheodore Ts'o { 3588235699a8SKees Cook struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report); 3589235699a8SKees Cook struct super_block *sb = sbi->s_sb; 3590235699a8SKees Cook struct ext4_super_block *es = sbi->s_es; 359166e61a9eSTheodore Ts'o 359266e61a9eSTheodore Ts'o if (es->s_error_count) 3593ae0f78deSTheodore Ts'o /* fsck newer than v1.41.13 is needed to clean this condition. */ 3594ae0f78deSTheodore Ts'o ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", 359566e61a9eSTheodore Ts'o le32_to_cpu(es->s_error_count)); 359666e61a9eSTheodore Ts'o if (es->s_first_error_time) { 35976a0678a7SArnd Bergmann printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d", 35986a0678a7SArnd Bergmann sb->s_id, 35996a0678a7SArnd Bergmann ext4_get_tstamp(es, s_first_error_time), 360066e61a9eSTheodore Ts'o (int) sizeof(es->s_first_error_func), 360166e61a9eSTheodore Ts'o es->s_first_error_func, 360266e61a9eSTheodore Ts'o le32_to_cpu(es->s_first_error_line)); 360366e61a9eSTheodore Ts'o if (es->s_first_error_ino) 3604651e1c3bSJoe Perches printk(KERN_CONT ": inode %u", 360566e61a9eSTheodore Ts'o le32_to_cpu(es->s_first_error_ino)); 360666e61a9eSTheodore Ts'o if (es->s_first_error_block) 3607651e1c3bSJoe Perches printk(KERN_CONT ": block %llu", (unsigned long long) 360866e61a9eSTheodore Ts'o le64_to_cpu(es->s_first_error_block)); 3609651e1c3bSJoe Perches printk(KERN_CONT "\n"); 361066e61a9eSTheodore Ts'o } 361166e61a9eSTheodore Ts'o if (es->s_last_error_time) { 36126a0678a7SArnd Bergmann printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d", 36136a0678a7SArnd Bergmann sb->s_id, 36146a0678a7SArnd Bergmann ext4_get_tstamp(es, s_last_error_time), 361566e61a9eSTheodore Ts'o (int) sizeof(es->s_last_error_func), 361666e61a9eSTheodore Ts'o es->s_last_error_func, 361766e61a9eSTheodore Ts'o le32_to_cpu(es->s_last_error_line)); 361866e61a9eSTheodore Ts'o if (es->s_last_error_ino) 3619651e1c3bSJoe Perches printk(KERN_CONT ": inode %u", 362066e61a9eSTheodore Ts'o le32_to_cpu(es->s_last_error_ino)); 362166e61a9eSTheodore Ts'o if (es->s_last_error_block) 3622651e1c3bSJoe Perches printk(KERN_CONT ": block %llu", (unsigned long long) 362366e61a9eSTheodore Ts'o le64_to_cpu(es->s_last_error_block)); 3624651e1c3bSJoe Perches printk(KERN_CONT "\n"); 362566e61a9eSTheodore Ts'o } 362666e61a9eSTheodore Ts'o mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ 362766e61a9eSTheodore Ts'o } 362866e61a9eSTheodore Ts'o 3629bfff6873SLukas Czerner /* Find next suitable group and run ext4_init_inode_table */ 3630bfff6873SLukas Czerner static int ext4_run_li_request(struct ext4_li_request *elr) 3631bfff6873SLukas Czerner { 3632bfff6873SLukas Czerner struct ext4_group_desc *gdp = NULL; 36333d392b26STheodore Ts'o struct super_block *sb = elr->lr_super; 36343d392b26STheodore Ts'o ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 36353d392b26STheodore Ts'o ext4_group_t group = elr->lr_next_group; 36363d392b26STheodore Ts'o unsigned int prefetch_ios = 0; 3637bfff6873SLukas Czerner int ret = 0; 363839fec688SShaoying Xu u64 start_time; 3639bfff6873SLukas Czerner 36403d392b26STheodore Ts'o if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) { 36413d392b26STheodore Ts'o elr->lr_next_group = ext4_mb_prefetch(sb, group, 36423d392b26STheodore Ts'o EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios); 36433d392b26STheodore Ts'o if (prefetch_ios) 36443d392b26STheodore Ts'o ext4_mb_prefetch_fini(sb, elr->lr_next_group, 36453d392b26STheodore Ts'o prefetch_ios); 36463d392b26STheodore Ts'o trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, 36473d392b26STheodore Ts'o prefetch_ios); 36483d392b26STheodore Ts'o if (group >= elr->lr_next_group) { 36493d392b26STheodore Ts'o ret = 1; 36503d392b26STheodore Ts'o if (elr->lr_first_not_zeroed != ngroups && 36513d392b26STheodore Ts'o !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) { 36523d392b26STheodore Ts'o elr->lr_next_group = elr->lr_first_not_zeroed; 36533d392b26STheodore Ts'o elr->lr_mode = EXT4_LI_MODE_ITABLE; 36543d392b26STheodore Ts'o ret = 0; 36553d392b26STheodore Ts'o } 36563d392b26STheodore Ts'o } 36573d392b26STheodore Ts'o return ret; 36583d392b26STheodore Ts'o } 3659bfff6873SLukas Czerner 36603d392b26STheodore Ts'o for (; group < ngroups; group++) { 3661bfff6873SLukas Czerner gdp = ext4_get_group_desc(sb, group, NULL); 3662bfff6873SLukas Czerner if (!gdp) { 3663bfff6873SLukas Czerner ret = 1; 3664bfff6873SLukas Czerner break; 3665bfff6873SLukas Czerner } 3666bfff6873SLukas Czerner 3667bfff6873SLukas Czerner if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3668bfff6873SLukas Czerner break; 3669bfff6873SLukas Czerner } 3670bfff6873SLukas Czerner 36717f511862STheodore Ts'o if (group >= ngroups) 3672bfff6873SLukas Czerner ret = 1; 3673bfff6873SLukas Czerner 3674bfff6873SLukas Czerner if (!ret) { 367539fec688SShaoying Xu start_time = ktime_get_real_ns(); 3676bfff6873SLukas Czerner ret = ext4_init_inode_table(sb, group, 3677bfff6873SLukas Czerner elr->lr_timeout ? 0 : 1); 36783d392b26STheodore Ts'o trace_ext4_lazy_itable_init(sb, group); 3679bfff6873SLukas Czerner if (elr->lr_timeout == 0) { 368039fec688SShaoying Xu elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) * 368139fec688SShaoying Xu EXT4_SB(elr->lr_super)->s_li_wait_mult); 3682bfff6873SLukas Czerner } 3683bfff6873SLukas Czerner elr->lr_next_sched = jiffies + elr->lr_timeout; 3684bfff6873SLukas Czerner elr->lr_next_group = group + 1; 3685bfff6873SLukas Czerner } 3686bfff6873SLukas Czerner return ret; 3687bfff6873SLukas Czerner } 3688bfff6873SLukas Czerner 3689bfff6873SLukas Czerner /* 3690bfff6873SLukas Czerner * Remove lr_request from the list_request and free the 36914ed5c033SLukas Czerner * request structure. Should be called with li_list_mtx held 3692bfff6873SLukas Czerner */ 3693bfff6873SLukas Czerner static void ext4_remove_li_request(struct ext4_li_request *elr) 3694bfff6873SLukas Czerner { 3695bfff6873SLukas Czerner if (!elr) 3696bfff6873SLukas Czerner return; 3697bfff6873SLukas Czerner 3698bfff6873SLukas Czerner list_del(&elr->lr_request); 36993d392b26STheodore Ts'o EXT4_SB(elr->lr_super)->s_li_request = NULL; 3700bfff6873SLukas Czerner kfree(elr); 3701bfff6873SLukas Czerner } 3702bfff6873SLukas Czerner 3703bfff6873SLukas Czerner static void ext4_unregister_li_request(struct super_block *sb) 3704bfff6873SLukas Czerner { 37051bb933fbSLukas Czerner mutex_lock(&ext4_li_mtx); 37061bb933fbSLukas Czerner if (!ext4_li_info) { 37071bb933fbSLukas Czerner mutex_unlock(&ext4_li_mtx); 3708bfff6873SLukas Czerner return; 37091bb933fbSLukas Czerner } 3710bfff6873SLukas Czerner 3711bfff6873SLukas Czerner mutex_lock(&ext4_li_info->li_list_mtx); 37121bb933fbSLukas Czerner ext4_remove_li_request(EXT4_SB(sb)->s_li_request); 3713bfff6873SLukas Czerner mutex_unlock(&ext4_li_info->li_list_mtx); 37141bb933fbSLukas Czerner mutex_unlock(&ext4_li_mtx); 3715bfff6873SLukas Czerner } 3716bfff6873SLukas Czerner 37178f1f7453SEric Sandeen static struct task_struct *ext4_lazyinit_task; 37188f1f7453SEric Sandeen 3719bfff6873SLukas Czerner /* 3720bfff6873SLukas Czerner * This is the function where ext4lazyinit thread lives. It walks 3721bfff6873SLukas Czerner * through the request list searching for next scheduled filesystem. 3722bfff6873SLukas Czerner * When such a fs is found, run the lazy initialization request 3723bfff6873SLukas Czerner * (ext4_rn_li_request) and keep track of the time spend in this 3724bfff6873SLukas Czerner * function. Based on that time we compute next schedule time of 3725bfff6873SLukas Czerner * the request. When walking through the list is complete, compute 3726bfff6873SLukas Czerner * next waking time and put itself into sleep. 3727bfff6873SLukas Czerner */ 3728bfff6873SLukas Czerner static int ext4_lazyinit_thread(void *arg) 3729bfff6873SLukas Czerner { 3730c30365b9SYu Zhe struct ext4_lazy_init *eli = arg; 3731bfff6873SLukas Czerner struct list_head *pos, *n; 3732bfff6873SLukas Czerner struct ext4_li_request *elr; 37334ed5c033SLukas Czerner unsigned long next_wakeup, cur; 3734bfff6873SLukas Czerner 3735bfff6873SLukas Czerner BUG_ON(NULL == eli); 37363b575495SLalith Rajendran set_freezable(); 3737bfff6873SLukas Czerner 3738bfff6873SLukas Czerner cont_thread: 3739bfff6873SLukas Czerner while (true) { 3740bfff6873SLukas Czerner next_wakeup = MAX_JIFFY_OFFSET; 3741bfff6873SLukas Czerner 3742bfff6873SLukas Czerner mutex_lock(&eli->li_list_mtx); 3743bfff6873SLukas Czerner if (list_empty(&eli->li_request_list)) { 3744bfff6873SLukas Czerner mutex_unlock(&eli->li_list_mtx); 3745bfff6873SLukas Czerner goto exit_thread; 3746bfff6873SLukas Czerner } 3747bfff6873SLukas Czerner list_for_each_safe(pos, n, &eli->li_request_list) { 3748e22834f0SDmitry Monakhov int err = 0; 3749e22834f0SDmitry Monakhov int progress = 0; 3750bfff6873SLukas Czerner elr = list_entry(pos, struct ext4_li_request, 3751bfff6873SLukas Czerner lr_request); 3752bfff6873SLukas Czerner 3753e22834f0SDmitry Monakhov if (time_before(jiffies, elr->lr_next_sched)) { 3754e22834f0SDmitry Monakhov if (time_before(elr->lr_next_sched, next_wakeup)) 3755e22834f0SDmitry Monakhov next_wakeup = elr->lr_next_sched; 3756e22834f0SDmitry Monakhov continue; 3757e22834f0SDmitry Monakhov } 3758e22834f0SDmitry Monakhov if (down_read_trylock(&elr->lr_super->s_umount)) { 3759e22834f0SDmitry Monakhov if (sb_start_write_trylock(elr->lr_super)) { 3760e22834f0SDmitry Monakhov progress = 1; 3761e22834f0SDmitry Monakhov /* 3762e22834f0SDmitry Monakhov * We hold sb->s_umount, sb can not 3763e22834f0SDmitry Monakhov * be removed from the list, it is 3764e22834f0SDmitry Monakhov * now safe to drop li_list_mtx 3765e22834f0SDmitry Monakhov */ 3766e22834f0SDmitry Monakhov mutex_unlock(&eli->li_list_mtx); 3767e22834f0SDmitry Monakhov err = ext4_run_li_request(elr); 3768e22834f0SDmitry Monakhov sb_end_write(elr->lr_super); 3769e22834f0SDmitry Monakhov mutex_lock(&eli->li_list_mtx); 3770e22834f0SDmitry Monakhov n = pos->next; 3771e22834f0SDmitry Monakhov } 3772e22834f0SDmitry Monakhov up_read((&elr->lr_super->s_umount)); 3773e22834f0SDmitry Monakhov } 3774b2c78cd0STheodore Ts'o /* error, remove the lazy_init job */ 3775e22834f0SDmitry Monakhov if (err) { 3776bfff6873SLukas Czerner ext4_remove_li_request(elr); 3777bfff6873SLukas Czerner continue; 3778bfff6873SLukas Czerner } 3779e22834f0SDmitry Monakhov if (!progress) { 3780e22834f0SDmitry Monakhov elr->lr_next_sched = jiffies + 378181895a65SJason A. Donenfeld prandom_u32_max(EXT4_DEF_LI_MAX_START_DELAY * HZ); 3782b2c78cd0STheodore Ts'o } 3783bfff6873SLukas Czerner if (time_before(elr->lr_next_sched, next_wakeup)) 3784bfff6873SLukas Czerner next_wakeup = elr->lr_next_sched; 3785bfff6873SLukas Czerner } 3786bfff6873SLukas Czerner mutex_unlock(&eli->li_list_mtx); 3787bfff6873SLukas Czerner 3788a0acae0eSTejun Heo try_to_freeze(); 3789bfff6873SLukas Czerner 37904ed5c033SLukas Czerner cur = jiffies; 37914ed5c033SLukas Czerner if ((time_after_eq(cur, next_wakeup)) || 3792f4245bd4SLukas Czerner (MAX_JIFFY_OFFSET == next_wakeup)) { 3793bfff6873SLukas Czerner cond_resched(); 3794bfff6873SLukas Czerner continue; 3795bfff6873SLukas Czerner } 3796bfff6873SLukas Czerner 37974ed5c033SLukas Czerner schedule_timeout_interruptible(next_wakeup - cur); 37984ed5c033SLukas Czerner 37998f1f7453SEric Sandeen if (kthread_should_stop()) { 38008f1f7453SEric Sandeen ext4_clear_request_list(); 38018f1f7453SEric Sandeen goto exit_thread; 38028f1f7453SEric Sandeen } 3803bfff6873SLukas Czerner } 3804bfff6873SLukas Czerner 3805bfff6873SLukas Czerner exit_thread: 3806bfff6873SLukas Czerner /* 3807bfff6873SLukas Czerner * It looks like the request list is empty, but we need 3808bfff6873SLukas Czerner * to check it under the li_list_mtx lock, to prevent any 3809bfff6873SLukas Czerner * additions into it, and of course we should lock ext4_li_mtx 3810bfff6873SLukas Czerner * to atomically free the list and ext4_li_info, because at 3811bfff6873SLukas Czerner * this point another ext4 filesystem could be registering 3812bfff6873SLukas Czerner * new one. 3813bfff6873SLukas Czerner */ 3814bfff6873SLukas Czerner mutex_lock(&ext4_li_mtx); 3815bfff6873SLukas Czerner mutex_lock(&eli->li_list_mtx); 3816bfff6873SLukas Czerner if (!list_empty(&eli->li_request_list)) { 3817bfff6873SLukas Czerner mutex_unlock(&eli->li_list_mtx); 3818bfff6873SLukas Czerner mutex_unlock(&ext4_li_mtx); 3819bfff6873SLukas Czerner goto cont_thread; 3820bfff6873SLukas Czerner } 3821bfff6873SLukas Czerner mutex_unlock(&eli->li_list_mtx); 3822bfff6873SLukas Czerner kfree(ext4_li_info); 3823bfff6873SLukas Czerner ext4_li_info = NULL; 3824bfff6873SLukas Czerner mutex_unlock(&ext4_li_mtx); 3825bfff6873SLukas Czerner 3826bfff6873SLukas Czerner return 0; 3827bfff6873SLukas Czerner } 3828bfff6873SLukas Czerner 3829bfff6873SLukas Czerner static void ext4_clear_request_list(void) 3830bfff6873SLukas Czerner { 3831bfff6873SLukas Czerner struct list_head *pos, *n; 3832bfff6873SLukas Czerner struct ext4_li_request *elr; 3833bfff6873SLukas Czerner 3834bfff6873SLukas Czerner mutex_lock(&ext4_li_info->li_list_mtx); 3835bfff6873SLukas Czerner list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { 3836bfff6873SLukas Czerner elr = list_entry(pos, struct ext4_li_request, 3837bfff6873SLukas Czerner lr_request); 3838bfff6873SLukas Czerner ext4_remove_li_request(elr); 3839bfff6873SLukas Czerner } 3840bfff6873SLukas Czerner mutex_unlock(&ext4_li_info->li_list_mtx); 3841bfff6873SLukas Czerner } 3842bfff6873SLukas Czerner 3843bfff6873SLukas Czerner static int ext4_run_lazyinit_thread(void) 3844bfff6873SLukas Czerner { 38458f1f7453SEric Sandeen ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, 38468f1f7453SEric Sandeen ext4_li_info, "ext4lazyinit"); 38478f1f7453SEric Sandeen if (IS_ERR(ext4_lazyinit_task)) { 38488f1f7453SEric Sandeen int err = PTR_ERR(ext4_lazyinit_task); 3849bfff6873SLukas Czerner ext4_clear_request_list(); 3850bfff6873SLukas Czerner kfree(ext4_li_info); 3851bfff6873SLukas Czerner ext4_li_info = NULL; 385292b97816STheodore Ts'o printk(KERN_CRIT "EXT4-fs: error %d creating inode table " 3853bfff6873SLukas Czerner "initialization thread\n", 3854bfff6873SLukas Czerner err); 3855bfff6873SLukas Czerner return err; 3856bfff6873SLukas Czerner } 3857bfff6873SLukas Czerner ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; 3858bfff6873SLukas Czerner return 0; 3859bfff6873SLukas Czerner } 3860bfff6873SLukas Czerner 3861bfff6873SLukas Czerner /* 3862bfff6873SLukas Czerner * Check whether it make sense to run itable init. thread or not. 3863bfff6873SLukas Czerner * If there is at least one uninitialized inode table, return 3864bfff6873SLukas Czerner * corresponding group number, else the loop goes through all 3865bfff6873SLukas Czerner * groups and return total number of groups. 3866bfff6873SLukas Czerner */ 3867bfff6873SLukas Czerner static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) 3868bfff6873SLukas Czerner { 3869bfff6873SLukas Czerner ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; 3870bfff6873SLukas Czerner struct ext4_group_desc *gdp = NULL; 3871bfff6873SLukas Czerner 38728844618dSTheodore Ts'o if (!ext4_has_group_desc_csum(sb)) 38738844618dSTheodore Ts'o return ngroups; 38748844618dSTheodore Ts'o 3875bfff6873SLukas Czerner for (group = 0; group < ngroups; group++) { 3876bfff6873SLukas Czerner gdp = ext4_get_group_desc(sb, group, NULL); 3877bfff6873SLukas Czerner if (!gdp) 3878bfff6873SLukas Czerner continue; 3879bfff6873SLukas Czerner 388050122847STheodore Ts'o if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3881bfff6873SLukas Czerner break; 3882bfff6873SLukas Czerner } 3883bfff6873SLukas Czerner 3884bfff6873SLukas Czerner return group; 3885bfff6873SLukas Czerner } 3886bfff6873SLukas Czerner 3887bfff6873SLukas Czerner static int ext4_li_info_new(void) 3888bfff6873SLukas Czerner { 3889bfff6873SLukas Czerner struct ext4_lazy_init *eli = NULL; 3890bfff6873SLukas Czerner 3891bfff6873SLukas Czerner eli = kzalloc(sizeof(*eli), GFP_KERNEL); 3892bfff6873SLukas Czerner if (!eli) 3893bfff6873SLukas Czerner return -ENOMEM; 3894bfff6873SLukas Czerner 3895bfff6873SLukas Czerner INIT_LIST_HEAD(&eli->li_request_list); 3896bfff6873SLukas Czerner mutex_init(&eli->li_list_mtx); 3897bfff6873SLukas Czerner 3898bfff6873SLukas Czerner eli->li_state |= EXT4_LAZYINIT_QUIT; 3899bfff6873SLukas Czerner 3900bfff6873SLukas Czerner ext4_li_info = eli; 3901bfff6873SLukas Czerner 3902bfff6873SLukas Czerner return 0; 3903bfff6873SLukas Czerner } 3904bfff6873SLukas Czerner 3905bfff6873SLukas Czerner static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, 3906bfff6873SLukas Czerner ext4_group_t start) 3907bfff6873SLukas Czerner { 3908bfff6873SLukas Czerner struct ext4_li_request *elr; 3909bfff6873SLukas Czerner 3910bfff6873SLukas Czerner elr = kzalloc(sizeof(*elr), GFP_KERNEL); 3911bfff6873SLukas Czerner if (!elr) 3912bfff6873SLukas Czerner return NULL; 3913bfff6873SLukas Czerner 3914bfff6873SLukas Czerner elr->lr_super = sb; 39153d392b26STheodore Ts'o elr->lr_first_not_zeroed = start; 391621175ca4SHarshad Shirwadkar if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) { 39173d392b26STheodore Ts'o elr->lr_mode = EXT4_LI_MODE_ITABLE; 3918bfff6873SLukas Czerner elr->lr_next_group = start; 391921175ca4SHarshad Shirwadkar } else { 392021175ca4SHarshad Shirwadkar elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP; 39213d392b26STheodore Ts'o } 3922bfff6873SLukas Czerner 3923bfff6873SLukas Czerner /* 3924bfff6873SLukas Czerner * Randomize first schedule time of the request to 3925bfff6873SLukas Czerner * spread the inode table initialization requests 3926bfff6873SLukas Czerner * better. 3927bfff6873SLukas Czerner */ 392881895a65SJason A. Donenfeld elr->lr_next_sched = jiffies + prandom_u32_max( 392981895a65SJason A. Donenfeld EXT4_DEF_LI_MAX_START_DELAY * HZ); 3930bfff6873SLukas Czerner return elr; 3931bfff6873SLukas Czerner } 3932bfff6873SLukas Czerner 39337f511862STheodore Ts'o int ext4_register_li_request(struct super_block *sb, 3934bfff6873SLukas Czerner ext4_group_t first_not_zeroed) 3935bfff6873SLukas Czerner { 3936bfff6873SLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 39377f511862STheodore Ts'o struct ext4_li_request *elr = NULL; 393849598e04SJun Piao ext4_group_t ngroups = sbi->s_groups_count; 39396c5a6cb9SAndrew Morton int ret = 0; 3940bfff6873SLukas Czerner 39417f511862STheodore Ts'o mutex_lock(&ext4_li_mtx); 394251ce6511SLukas Czerner if (sbi->s_li_request != NULL) { 394351ce6511SLukas Czerner /* 394451ce6511SLukas Czerner * Reset timeout so it can be computed again, because 394551ce6511SLukas Czerner * s_li_wait_mult might have changed. 394651ce6511SLukas Czerner */ 394751ce6511SLukas Czerner sbi->s_li_request->lr_timeout = 0; 39487f511862STheodore Ts'o goto out; 394951ce6511SLukas Czerner } 3950bfff6873SLukas Czerner 3951426d15adSJosh Triplett if (sb_rdonly(sb) || 3952426d15adSJosh Triplett (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) && 3953426d15adSJosh Triplett (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE)))) 39547f511862STheodore Ts'o goto out; 3955bfff6873SLukas Czerner 3956bfff6873SLukas Czerner elr = ext4_li_request_new(sb, first_not_zeroed); 39577f511862STheodore Ts'o if (!elr) { 39587f511862STheodore Ts'o ret = -ENOMEM; 39597f511862STheodore Ts'o goto out; 39607f511862STheodore Ts'o } 3961bfff6873SLukas Czerner 3962bfff6873SLukas Czerner if (NULL == ext4_li_info) { 3963bfff6873SLukas Czerner ret = ext4_li_info_new(); 3964bfff6873SLukas Czerner if (ret) 3965bfff6873SLukas Czerner goto out; 3966bfff6873SLukas Czerner } 3967bfff6873SLukas Czerner 3968bfff6873SLukas Czerner mutex_lock(&ext4_li_info->li_list_mtx); 3969bfff6873SLukas Czerner list_add(&elr->lr_request, &ext4_li_info->li_request_list); 3970bfff6873SLukas Czerner mutex_unlock(&ext4_li_info->li_list_mtx); 3971bfff6873SLukas Czerner 3972bfff6873SLukas Czerner sbi->s_li_request = elr; 397346e4690bSTao Ma /* 397446e4690bSTao Ma * set elr to NULL here since it has been inserted to 397546e4690bSTao Ma * the request_list and the removal and free of it is 397646e4690bSTao Ma * handled by ext4_clear_request_list from now on. 397746e4690bSTao Ma */ 397846e4690bSTao Ma elr = NULL; 3979bfff6873SLukas Czerner 3980bfff6873SLukas Czerner if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { 3981bfff6873SLukas Czerner ret = ext4_run_lazyinit_thread(); 3982bfff6873SLukas Czerner if (ret) 3983bfff6873SLukas Czerner goto out; 3984bfff6873SLukas Czerner } 3985bfff6873SLukas Czerner out: 3986bfff6873SLukas Czerner mutex_unlock(&ext4_li_mtx); 3987beed5ecbSNicolas Kaiser if (ret) 3988bfff6873SLukas Czerner kfree(elr); 3989bfff6873SLukas Czerner return ret; 3990bfff6873SLukas Czerner } 3991bfff6873SLukas Czerner 3992bfff6873SLukas Czerner /* 3993bfff6873SLukas Czerner * We do not need to lock anything since this is called on 3994bfff6873SLukas Czerner * module unload. 3995bfff6873SLukas Czerner */ 3996bfff6873SLukas Czerner static void ext4_destroy_lazyinit_thread(void) 3997bfff6873SLukas Czerner { 3998bfff6873SLukas Czerner /* 3999bfff6873SLukas Czerner * If thread exited earlier 4000bfff6873SLukas Czerner * there's nothing to be done. 4001bfff6873SLukas Czerner */ 40028f1f7453SEric Sandeen if (!ext4_li_info || !ext4_lazyinit_task) 4003bfff6873SLukas Czerner return; 4004bfff6873SLukas Czerner 40058f1f7453SEric Sandeen kthread_stop(ext4_lazyinit_task); 4006bfff6873SLukas Czerner } 4007bfff6873SLukas Czerner 400825ed6e8aSDarrick J. Wong static int set_journal_csum_feature_set(struct super_block *sb) 400925ed6e8aSDarrick J. Wong { 401025ed6e8aSDarrick J. Wong int ret = 1; 401125ed6e8aSDarrick J. Wong int compat, incompat; 401225ed6e8aSDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(sb); 401325ed6e8aSDarrick J. Wong 40149aa5d32bSDmitry Monakhov if (ext4_has_metadata_csum(sb)) { 4015db9ee220SDarrick J. Wong /* journal checksum v3 */ 401625ed6e8aSDarrick J. Wong compat = 0; 4017db9ee220SDarrick J. Wong incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; 401825ed6e8aSDarrick J. Wong } else { 401925ed6e8aSDarrick J. Wong /* journal checksum v1 */ 402025ed6e8aSDarrick J. Wong compat = JBD2_FEATURE_COMPAT_CHECKSUM; 402125ed6e8aSDarrick J. Wong incompat = 0; 402225ed6e8aSDarrick J. Wong } 402325ed6e8aSDarrick J. Wong 4024feb8c6d3SDarrick J. Wong jbd2_journal_clear_features(sbi->s_journal, 4025feb8c6d3SDarrick J. Wong JBD2_FEATURE_COMPAT_CHECKSUM, 0, 4026feb8c6d3SDarrick J. Wong JBD2_FEATURE_INCOMPAT_CSUM_V3 | 4027feb8c6d3SDarrick J. Wong JBD2_FEATURE_INCOMPAT_CSUM_V2); 402825ed6e8aSDarrick J. Wong if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 402925ed6e8aSDarrick J. Wong ret = jbd2_journal_set_features(sbi->s_journal, 403025ed6e8aSDarrick J. Wong compat, 0, 403125ed6e8aSDarrick J. Wong JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | 403225ed6e8aSDarrick J. Wong incompat); 403325ed6e8aSDarrick J. Wong } else if (test_opt(sb, JOURNAL_CHECKSUM)) { 403425ed6e8aSDarrick J. Wong ret = jbd2_journal_set_features(sbi->s_journal, 403525ed6e8aSDarrick J. Wong compat, 0, 403625ed6e8aSDarrick J. Wong incompat); 403725ed6e8aSDarrick J. Wong jbd2_journal_clear_features(sbi->s_journal, 0, 0, 403825ed6e8aSDarrick J. Wong JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 403925ed6e8aSDarrick J. Wong } else { 4040feb8c6d3SDarrick J. Wong jbd2_journal_clear_features(sbi->s_journal, 0, 0, 4041feb8c6d3SDarrick J. Wong JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 404225ed6e8aSDarrick J. Wong } 404325ed6e8aSDarrick J. Wong 404425ed6e8aSDarrick J. Wong return ret; 404525ed6e8aSDarrick J. Wong } 404625ed6e8aSDarrick J. Wong 4047952fc18eSTheodore Ts'o /* 4048952fc18eSTheodore Ts'o * Note: calculating the overhead so we can be compatible with 4049952fc18eSTheodore Ts'o * historical BSD practice is quite difficult in the face of 4050952fc18eSTheodore Ts'o * clusters/bigalloc. This is because multiple metadata blocks from 4051952fc18eSTheodore Ts'o * different block group can end up in the same allocation cluster. 4052952fc18eSTheodore Ts'o * Calculating the exact overhead in the face of clustered allocation 4053952fc18eSTheodore Ts'o * requires either O(all block bitmaps) in memory or O(number of block 4054952fc18eSTheodore Ts'o * groups**2) in time. We will still calculate the superblock for 4055952fc18eSTheodore Ts'o * older file systems --- and if we come across with a bigalloc file 4056952fc18eSTheodore Ts'o * system with zero in s_overhead_clusters the estimate will be close to 4057952fc18eSTheodore Ts'o * correct especially for very large cluster sizes --- but for newer 4058952fc18eSTheodore Ts'o * file systems, it's better to calculate this figure once at mkfs 4059952fc18eSTheodore Ts'o * time, and store it in the superblock. If the superblock value is 4060952fc18eSTheodore Ts'o * present (even for non-bigalloc file systems), we will use it. 4061952fc18eSTheodore Ts'o */ 4062952fc18eSTheodore Ts'o static int count_overhead(struct super_block *sb, ext4_group_t grp, 4063952fc18eSTheodore Ts'o char *buf) 4064952fc18eSTheodore Ts'o { 4065952fc18eSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 4066952fc18eSTheodore Ts'o struct ext4_group_desc *gdp; 4067952fc18eSTheodore Ts'o ext4_fsblk_t first_block, last_block, b; 4068952fc18eSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4069952fc18eSTheodore Ts'o int s, j, count = 0; 407010b01ee9STheodore Ts'o int has_super = ext4_bg_has_super(sb, grp); 4071952fc18eSTheodore Ts'o 4072e2b911c5SDarrick J. Wong if (!ext4_has_feature_bigalloc(sb)) 407310b01ee9STheodore Ts'o return (has_super + ext4_bg_num_gdb(sb, grp) + 407410b01ee9STheodore Ts'o (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) + 40750548bbb8STheodore Ts'o sbi->s_itb_per_group + 2); 40760548bbb8STheodore Ts'o 4077952fc18eSTheodore Ts'o first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + 4078952fc18eSTheodore Ts'o (grp * EXT4_BLOCKS_PER_GROUP(sb)); 4079952fc18eSTheodore Ts'o last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; 4080952fc18eSTheodore Ts'o for (i = 0; i < ngroups; i++) { 4081952fc18eSTheodore Ts'o gdp = ext4_get_group_desc(sb, i, NULL); 4082952fc18eSTheodore Ts'o b = ext4_block_bitmap(sb, gdp); 4083952fc18eSTheodore Ts'o if (b >= first_block && b <= last_block) { 4084952fc18eSTheodore Ts'o ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); 4085952fc18eSTheodore Ts'o count++; 4086952fc18eSTheodore Ts'o } 4087952fc18eSTheodore Ts'o b = ext4_inode_bitmap(sb, gdp); 4088952fc18eSTheodore Ts'o if (b >= first_block && b <= last_block) { 4089952fc18eSTheodore Ts'o ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); 4090952fc18eSTheodore Ts'o count++; 4091952fc18eSTheodore Ts'o } 4092952fc18eSTheodore Ts'o b = ext4_inode_table(sb, gdp); 4093952fc18eSTheodore Ts'o if (b >= first_block && b + sbi->s_itb_per_group <= last_block) 4094952fc18eSTheodore Ts'o for (j = 0; j < sbi->s_itb_per_group; j++, b++) { 4095952fc18eSTheodore Ts'o int c = EXT4_B2C(sbi, b - first_block); 4096952fc18eSTheodore Ts'o ext4_set_bit(c, buf); 4097952fc18eSTheodore Ts'o count++; 4098952fc18eSTheodore Ts'o } 4099952fc18eSTheodore Ts'o if (i != grp) 4100952fc18eSTheodore Ts'o continue; 4101952fc18eSTheodore Ts'o s = 0; 4102952fc18eSTheodore Ts'o if (ext4_bg_has_super(sb, grp)) { 4103952fc18eSTheodore Ts'o ext4_set_bit(s++, buf); 4104952fc18eSTheodore Ts'o count++; 4105952fc18eSTheodore Ts'o } 4106c48ae41bSTheodore Ts'o j = ext4_bg_num_gdb(sb, grp); 4107c48ae41bSTheodore Ts'o if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { 4108c48ae41bSTheodore Ts'o ext4_error(sb, "Invalid number of block group " 4109c48ae41bSTheodore Ts'o "descriptor blocks: %d", j); 4110c48ae41bSTheodore Ts'o j = EXT4_BLOCKS_PER_GROUP(sb) - s; 4111952fc18eSTheodore Ts'o } 4112c48ae41bSTheodore Ts'o count += j; 4113c48ae41bSTheodore Ts'o for (; j > 0; j--) 4114c48ae41bSTheodore Ts'o ext4_set_bit(EXT4_B2C(sbi, s++), buf); 4115952fc18eSTheodore Ts'o } 4116952fc18eSTheodore Ts'o if (!count) 4117952fc18eSTheodore Ts'o return 0; 4118952fc18eSTheodore Ts'o return EXT4_CLUSTERS_PER_GROUP(sb) - 4119952fc18eSTheodore Ts'o ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8); 4120952fc18eSTheodore Ts'o } 4121952fc18eSTheodore Ts'o 4122952fc18eSTheodore Ts'o /* 4123952fc18eSTheodore Ts'o * Compute the overhead and stash it in sbi->s_overhead 4124952fc18eSTheodore Ts'o */ 4125952fc18eSTheodore Ts'o int ext4_calculate_overhead(struct super_block *sb) 4126952fc18eSTheodore Ts'o { 4127952fc18eSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 4128952fc18eSTheodore Ts'o struct ext4_super_block *es = sbi->s_es; 41293c816dedSEric Whitney struct inode *j_inode; 41303c816dedSEric Whitney unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum); 4131952fc18eSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4132952fc18eSTheodore Ts'o ext4_fsblk_t overhead = 0; 41334fdb5543SDmitry Monakhov char *buf = (char *) get_zeroed_page(GFP_NOFS); 4134952fc18eSTheodore Ts'o 4135952fc18eSTheodore Ts'o if (!buf) 4136952fc18eSTheodore Ts'o return -ENOMEM; 4137952fc18eSTheodore Ts'o 4138952fc18eSTheodore Ts'o /* 4139952fc18eSTheodore Ts'o * Compute the overhead (FS structures). This is constant 4140952fc18eSTheodore Ts'o * for a given filesystem unless the number of block groups 4141952fc18eSTheodore Ts'o * changes so we cache the previous value until it does. 4142952fc18eSTheodore Ts'o */ 4143952fc18eSTheodore Ts'o 4144952fc18eSTheodore Ts'o /* 4145952fc18eSTheodore Ts'o * All of the blocks before first_data_block are overhead 4146952fc18eSTheodore Ts'o */ 4147952fc18eSTheodore Ts'o overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block)); 4148952fc18eSTheodore Ts'o 4149952fc18eSTheodore Ts'o /* 4150952fc18eSTheodore Ts'o * Add the overhead found in each block group 4151952fc18eSTheodore Ts'o */ 4152952fc18eSTheodore Ts'o for (i = 0; i < ngroups; i++) { 4153952fc18eSTheodore Ts'o int blks; 4154952fc18eSTheodore Ts'o 4155952fc18eSTheodore Ts'o blks = count_overhead(sb, i, buf); 4156952fc18eSTheodore Ts'o overhead += blks; 4157952fc18eSTheodore Ts'o if (blks) 4158952fc18eSTheodore Ts'o memset(buf, 0, PAGE_SIZE); 4159952fc18eSTheodore Ts'o cond_resched(); 4160952fc18eSTheodore Ts'o } 41613c816dedSEric Whitney 41623c816dedSEric Whitney /* 41633c816dedSEric Whitney * Add the internal journal blocks whether the journal has been 41643c816dedSEric Whitney * loaded or not 41653c816dedSEric Whitney */ 4166ee7ed3aaSChunguang Xu if (sbi->s_journal && !sbi->s_journal_bdev) 4167ede7dc7fSHarshad Shirwadkar overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len); 4168f1eec3b0SRitesh Harjani else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) { 4169f1eec3b0SRitesh Harjani /* j_inum for internal journal is non-zero */ 41703c816dedSEric Whitney j_inode = ext4_get_journal_inode(sb, j_inum); 41713c816dedSEric Whitney if (j_inode) { 41723c816dedSEric Whitney j_blocks = j_inode->i_size >> sb->s_blocksize_bits; 41733c816dedSEric Whitney overhead += EXT4_NUM_B2C(sbi, j_blocks); 41743c816dedSEric Whitney iput(j_inode); 41753c816dedSEric Whitney } else { 41763c816dedSEric Whitney ext4_msg(sb, KERN_ERR, "can't get journal size"); 41773c816dedSEric Whitney } 41783c816dedSEric Whitney } 4179952fc18eSTheodore Ts'o sbi->s_overhead = overhead; 4180952fc18eSTheodore Ts'o smp_wmb(); 4181952fc18eSTheodore Ts'o free_page((unsigned long) buf); 4182952fc18eSTheodore Ts'o return 0; 4183952fc18eSTheodore Ts'o } 4184952fc18eSTheodore Ts'o 4185b5799018STheodore Ts'o static void ext4_set_resv_clusters(struct super_block *sb) 418627dd4385SLukas Czerner { 418727dd4385SLukas Czerner ext4_fsblk_t resv_clusters; 4188b5799018STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 418927dd4385SLukas Czerner 419027dd4385SLukas Czerner /* 419130fac0f7SJan Kara * There's no need to reserve anything when we aren't using extents. 419230fac0f7SJan Kara * The space estimates are exact, there are no unwritten extents, 419330fac0f7SJan Kara * hole punching doesn't need new metadata... This is needed especially 419430fac0f7SJan Kara * to keep ext2/3 backward compatibility. 419530fac0f7SJan Kara */ 4196e2b911c5SDarrick J. Wong if (!ext4_has_feature_extents(sb)) 4197b5799018STheodore Ts'o return; 419830fac0f7SJan Kara /* 419927dd4385SLukas Czerner * By default we reserve 2% or 4096 clusters, whichever is smaller. 420027dd4385SLukas Czerner * This should cover the situations where we can not afford to run 420127dd4385SLukas Czerner * out of space like for example punch hole, or converting 4202556615dcSLukas Czerner * unwritten extents in delalloc path. In most cases such 420327dd4385SLukas Czerner * allocation would require 1, or 2 blocks, higher numbers are 420427dd4385SLukas Czerner * very rare. 420527dd4385SLukas Czerner */ 4206b5799018STheodore Ts'o resv_clusters = (ext4_blocks_count(sbi->s_es) >> 4207b5799018STheodore Ts'o sbi->s_cluster_bits); 420827dd4385SLukas Czerner 420927dd4385SLukas Czerner do_div(resv_clusters, 50); 421027dd4385SLukas Czerner resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); 421127dd4385SLukas Czerner 4212b5799018STheodore Ts'o atomic64_set(&sbi->s_resv_clusters, resv_clusters); 421327dd4385SLukas Czerner } 421427dd4385SLukas Czerner 4215ca9b404fSRoman Anufriev static const char *ext4_quota_mode(struct super_block *sb) 4216ca9b404fSRoman Anufriev { 4217ca9b404fSRoman Anufriev #ifdef CONFIG_QUOTA 4218ca9b404fSRoman Anufriev if (!ext4_quota_capable(sb)) 4219ca9b404fSRoman Anufriev return "none"; 4220ca9b404fSRoman Anufriev 4221ca9b404fSRoman Anufriev if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb)) 4222ca9b404fSRoman Anufriev return "journalled"; 4223ca9b404fSRoman Anufriev else 4224ca9b404fSRoman Anufriev return "writeback"; 4225ca9b404fSRoman Anufriev #else 4226ca9b404fSRoman Anufriev return "disabled"; 4227ca9b404fSRoman Anufriev #endif 4228ca9b404fSRoman Anufriev } 4229ca9b404fSRoman Anufriev 4230188c299eSJan Kara static void ext4_setup_csum_trigger(struct super_block *sb, 4231188c299eSJan Kara enum ext4_journal_trigger_type type, 4232188c299eSJan Kara void (*trigger)( 4233188c299eSJan Kara struct jbd2_buffer_trigger_type *type, 4234188c299eSJan Kara struct buffer_head *bh, 4235188c299eSJan Kara void *mapped_data, 4236188c299eSJan Kara size_t size)) 4237188c299eSJan Kara { 4238188c299eSJan Kara struct ext4_sb_info *sbi = EXT4_SB(sb); 4239188c299eSJan Kara 4240188c299eSJan Kara sbi->s_journal_triggers[type].sb = sb; 4241188c299eSJan Kara sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger; 4242188c299eSJan Kara } 4243188c299eSJan Kara 42447edfd85bSLukas Czerner static void ext4_free_sbi(struct ext4_sb_info *sbi) 4245ac27a0ecSDave Kleikamp { 42467edfd85bSLukas Czerner if (!sbi) 42477edfd85bSLukas Czerner return; 42487edfd85bSLukas Czerner 42497edfd85bSLukas Czerner kfree(sbi->s_blockgroup_lock); 42508012b866SShiyang Ruan fs_put_dax(sbi->s_daxdev, NULL); 42517edfd85bSLukas Czerner kfree(sbi); 42527edfd85bSLukas Czerner } 42537edfd85bSLukas Czerner 42547edfd85bSLukas Czerner static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb) 42557edfd85bSLukas Czerner { 42567edfd85bSLukas Czerner struct ext4_sb_info *sbi; 42577edfd85bSLukas Czerner 42587edfd85bSLukas Czerner sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 42597edfd85bSLukas Czerner if (!sbi) 42607edfd85bSLukas Czerner return NULL; 42617edfd85bSLukas Czerner 42628012b866SShiyang Ruan sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off, 42638012b866SShiyang Ruan NULL, NULL); 42647edfd85bSLukas Czerner 42657edfd85bSLukas Czerner sbi->s_blockgroup_lock = 42667edfd85bSLukas Czerner kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); 42677edfd85bSLukas Czerner 42687edfd85bSLukas Czerner if (!sbi->s_blockgroup_lock) 42697edfd85bSLukas Czerner goto err_out; 42707edfd85bSLukas Czerner 42717edfd85bSLukas Czerner sb->s_fs_info = sbi; 42727edfd85bSLukas Czerner sbi->s_sb = sb; 42737edfd85bSLukas Czerner return sbi; 42747edfd85bSLukas Czerner err_out: 42758012b866SShiyang Ruan fs_put_dax(sbi->s_daxdev, NULL); 42767edfd85bSLukas Czerner kfree(sbi); 42777edfd85bSLukas Czerner return NULL; 42787edfd85bSLukas Czerner } 42797edfd85bSLukas Czerner 42805f6d662dSJason Yan static void ext4_set_def_opts(struct super_block *sb, 42815f6d662dSJason Yan struct ext4_super_block *es) 42825f6d662dSJason Yan { 42835f6d662dSJason Yan unsigned long def_mount_opts; 42845f6d662dSJason Yan 42855f6d662dSJason Yan /* Set defaults before we parse the mount options */ 42865f6d662dSJason Yan def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 42875f6d662dSJason Yan set_opt(sb, INIT_INODE_TABLE); 42885f6d662dSJason Yan if (def_mount_opts & EXT4_DEFM_DEBUG) 42895f6d662dSJason Yan set_opt(sb, DEBUG); 42905f6d662dSJason Yan if (def_mount_opts & EXT4_DEFM_BSDGROUPS) 42915f6d662dSJason Yan set_opt(sb, GRPID); 42925f6d662dSJason Yan if (def_mount_opts & EXT4_DEFM_UID16) 42935f6d662dSJason Yan set_opt(sb, NO_UID32); 42945f6d662dSJason Yan /* xattr user namespace & acls are now defaulted on */ 42955f6d662dSJason Yan set_opt(sb, XATTR_USER); 42965f6d662dSJason Yan #ifdef CONFIG_EXT4_FS_POSIX_ACL 42975f6d662dSJason Yan set_opt(sb, POSIX_ACL); 42985f6d662dSJason Yan #endif 42995f6d662dSJason Yan if (ext4_has_feature_fast_commit(sb)) 43005f6d662dSJason Yan set_opt2(sb, JOURNAL_FAST_COMMIT); 43015f6d662dSJason Yan /* don't forget to enable journal_csum when metadata_csum is enabled. */ 43025f6d662dSJason Yan if (ext4_has_metadata_csum(sb)) 43035f6d662dSJason Yan set_opt(sb, JOURNAL_CHECKSUM); 43045f6d662dSJason Yan 43055f6d662dSJason Yan if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 43065f6d662dSJason Yan set_opt(sb, JOURNAL_DATA); 43075f6d662dSJason Yan else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 43085f6d662dSJason Yan set_opt(sb, ORDERED_DATA); 43095f6d662dSJason Yan else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 43105f6d662dSJason Yan set_opt(sb, WRITEBACK_DATA); 43115f6d662dSJason Yan 43125f6d662dSJason Yan if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC) 43135f6d662dSJason Yan set_opt(sb, ERRORS_PANIC); 43145f6d662dSJason Yan else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE) 43155f6d662dSJason Yan set_opt(sb, ERRORS_CONT); 43165f6d662dSJason Yan else 43175f6d662dSJason Yan set_opt(sb, ERRORS_RO); 43185f6d662dSJason Yan /* block_validity enabled by default; disable with noblock_validity */ 43195f6d662dSJason Yan set_opt(sb, BLOCK_VALIDITY); 43205f6d662dSJason Yan if (def_mount_opts & EXT4_DEFM_DISCARD) 43215f6d662dSJason Yan set_opt(sb, DISCARD); 43225f6d662dSJason Yan 43235f6d662dSJason Yan if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) 43245f6d662dSJason Yan set_opt(sb, BARRIER); 43255f6d662dSJason Yan 43265f6d662dSJason Yan /* 43275f6d662dSJason Yan * enable delayed allocation by default 43285f6d662dSJason Yan * Use -o nodelalloc to turn it off 43295f6d662dSJason Yan */ 43305f6d662dSJason Yan if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && 43315f6d662dSJason Yan ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 43325f6d662dSJason Yan set_opt(sb, DELALLOC); 43333df11e27SJason Yan 43343df11e27SJason Yan if (sb->s_blocksize == PAGE_SIZE) 43353df11e27SJason Yan set_opt(sb, DIOREAD_NOLOCK); 43365f6d662dSJason Yan } 43375f6d662dSJason Yan 4338c8267c51SJason Yan static int ext4_handle_clustersize(struct super_block *sb) 43394a8557b0SJason Yan { 43404a8557b0SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 43414a8557b0SJason Yan struct ext4_super_block *es = sbi->s_es; 43424a8557b0SJason Yan int clustersize; 43434a8557b0SJason Yan 43444a8557b0SJason Yan /* Handle clustersize */ 43454a8557b0SJason Yan clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); 43464a8557b0SJason Yan if (ext4_has_feature_bigalloc(sb)) { 4347c8267c51SJason Yan if (clustersize < sb->s_blocksize) { 43484a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 43494a8557b0SJason Yan "cluster size (%d) smaller than " 4350c8267c51SJason Yan "block size (%lu)", clustersize, sb->s_blocksize); 43514a8557b0SJason Yan return -EINVAL; 43524a8557b0SJason Yan } 43534a8557b0SJason Yan sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - 43544a8557b0SJason Yan le32_to_cpu(es->s_log_block_size); 43554a8557b0SJason Yan sbi->s_clusters_per_group = 43564a8557b0SJason Yan le32_to_cpu(es->s_clusters_per_group); 4357c8267c51SJason Yan if (sbi->s_clusters_per_group > sb->s_blocksize * 8) { 43584a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 43594a8557b0SJason Yan "#clusters per group too big: %lu", 43604a8557b0SJason Yan sbi->s_clusters_per_group); 43614a8557b0SJason Yan return -EINVAL; 43624a8557b0SJason Yan } 43634a8557b0SJason Yan if (sbi->s_blocks_per_group != 4364c8267c51SJason Yan (sbi->s_clusters_per_group * (clustersize / sb->s_blocksize))) { 43654a8557b0SJason Yan ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " 43664a8557b0SJason Yan "clusters per group (%lu) inconsistent", 43674a8557b0SJason Yan sbi->s_blocks_per_group, 43684a8557b0SJason Yan sbi->s_clusters_per_group); 43694a8557b0SJason Yan return -EINVAL; 43704a8557b0SJason Yan } 43714a8557b0SJason Yan } else { 4372c8267c51SJason Yan if (clustersize != sb->s_blocksize) { 43734a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 43744a8557b0SJason Yan "fragment/cluster size (%d) != " 4375c8267c51SJason Yan "block size (%lu)", clustersize, sb->s_blocksize); 43764a8557b0SJason Yan return -EINVAL; 43774a8557b0SJason Yan } 4378c8267c51SJason Yan if (sbi->s_blocks_per_group > sb->s_blocksize * 8) { 43794a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 43804a8557b0SJason Yan "#blocks per group too big: %lu", 43814a8557b0SJason Yan sbi->s_blocks_per_group); 43824a8557b0SJason Yan return -EINVAL; 43834a8557b0SJason Yan } 43844a8557b0SJason Yan sbi->s_clusters_per_group = sbi->s_blocks_per_group; 43854a8557b0SJason Yan sbi->s_cluster_bits = 0; 43864a8557b0SJason Yan } 4387c8267c51SJason Yan sbi->s_cluster_ratio = clustersize / sb->s_blocksize; 43884a8557b0SJason Yan 43894a8557b0SJason Yan /* Do we have standard group size of clustersize * 8 blocks ? */ 43904a8557b0SJason Yan if (sbi->s_blocks_per_group == clustersize << 3) 43914a8557b0SJason Yan set_opt2(sb, STD_GROUP_SIZE); 43924a8557b0SJason Yan 43934a8557b0SJason Yan return 0; 43944a8557b0SJason Yan } 43954a8557b0SJason Yan 4396f7314a67SJason Yan static void ext4_fast_commit_init(struct super_block *sb) 4397f7314a67SJason Yan { 4398f7314a67SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4399f7314a67SJason Yan 4400f7314a67SJason Yan /* Initialize fast commit stuff */ 4401f7314a67SJason Yan atomic_set(&sbi->s_fc_subtid, 0); 4402f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]); 4403f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]); 4404f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]); 4405f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]); 4406f7314a67SJason Yan sbi->s_fc_bytes = 0; 4407f7314a67SJason Yan ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); 4408f7314a67SJason Yan sbi->s_fc_ineligible_tid = 0; 4409f7314a67SJason Yan spin_lock_init(&sbi->s_fc_lock); 4410f7314a67SJason Yan memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); 4411f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions = NULL; 4412f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_size = 0; 4413f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_used = 0; 4414f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_valid = 0; 4415f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes = NULL; 4416f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes_size = 0; 4417f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes_used = 0; 4418f7314a67SJason Yan } 4419f7314a67SJason Yan 44200e495f7cSJason Yan static int ext4_inode_info_init(struct super_block *sb, 4421c8267c51SJason Yan struct ext4_super_block *es) 44220e495f7cSJason Yan { 44230e495f7cSJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 44240e495f7cSJason Yan 44250e495f7cSJason Yan if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { 44260e495f7cSJason Yan sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; 44270e495f7cSJason Yan sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; 44280e495f7cSJason Yan } else { 44290e495f7cSJason Yan sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 44300e495f7cSJason Yan sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 44310e495f7cSJason Yan if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { 44320e495f7cSJason Yan ext4_msg(sb, KERN_ERR, "invalid first ino: %u", 44330e495f7cSJason Yan sbi->s_first_ino); 44340e495f7cSJason Yan return -EINVAL; 44350e495f7cSJason Yan } 44360e495f7cSJason Yan if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 44370e495f7cSJason Yan (!is_power_of_2(sbi->s_inode_size)) || 4438c8267c51SJason Yan (sbi->s_inode_size > sb->s_blocksize)) { 44390e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 44400e495f7cSJason Yan "unsupported inode size: %d", 44410e495f7cSJason Yan sbi->s_inode_size); 4442c8267c51SJason Yan ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize); 44430e495f7cSJason Yan return -EINVAL; 44440e495f7cSJason Yan } 44450e495f7cSJason Yan /* 44460e495f7cSJason Yan * i_atime_extra is the last extra field available for 44470e495f7cSJason Yan * [acm]times in struct ext4_inode. Checking for that 44480e495f7cSJason Yan * field should suffice to ensure we have extra space 44490e495f7cSJason Yan * for all three. 44500e495f7cSJason Yan */ 44510e495f7cSJason Yan if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + 44520e495f7cSJason Yan sizeof(((struct ext4_inode *)0)->i_atime_extra)) { 44530e495f7cSJason Yan sb->s_time_gran = 1; 44540e495f7cSJason Yan sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; 44550e495f7cSJason Yan } else { 44560e495f7cSJason Yan sb->s_time_gran = NSEC_PER_SEC; 44570e495f7cSJason Yan sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; 44580e495f7cSJason Yan } 44590e495f7cSJason Yan sb->s_time_min = EXT4_TIMESTAMP_MIN; 44600e495f7cSJason Yan } 44610e495f7cSJason Yan 44620e495f7cSJason Yan if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 44630e495f7cSJason Yan sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 44640e495f7cSJason Yan EXT4_GOOD_OLD_INODE_SIZE; 44650e495f7cSJason Yan if (ext4_has_feature_extra_isize(sb)) { 44660e495f7cSJason Yan unsigned v, max = (sbi->s_inode_size - 44670e495f7cSJason Yan EXT4_GOOD_OLD_INODE_SIZE); 44680e495f7cSJason Yan 44690e495f7cSJason Yan v = le16_to_cpu(es->s_want_extra_isize); 44700e495f7cSJason Yan if (v > max) { 44710e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 44720e495f7cSJason Yan "bad s_want_extra_isize: %d", v); 44730e495f7cSJason Yan return -EINVAL; 44740e495f7cSJason Yan } 44750e495f7cSJason Yan if (sbi->s_want_extra_isize < v) 44760e495f7cSJason Yan sbi->s_want_extra_isize = v; 44770e495f7cSJason Yan 44780e495f7cSJason Yan v = le16_to_cpu(es->s_min_extra_isize); 44790e495f7cSJason Yan if (v > max) { 44800e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 44810e495f7cSJason Yan "bad s_min_extra_isize: %d", v); 44820e495f7cSJason Yan return -EINVAL; 44830e495f7cSJason Yan } 44840e495f7cSJason Yan if (sbi->s_want_extra_isize < v) 44850e495f7cSJason Yan sbi->s_want_extra_isize = v; 44860e495f7cSJason Yan } 44870e495f7cSJason Yan } 44880e495f7cSJason Yan 44890e495f7cSJason Yan return 0; 44900e495f7cSJason Yan } 44910e495f7cSJason Yan 449239c135b0SJason Yan #if IS_ENABLED(CONFIG_UNICODE) 449339c135b0SJason Yan static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 449439c135b0SJason Yan { 449539c135b0SJason Yan const struct ext4_sb_encodings *encoding_info; 449639c135b0SJason Yan struct unicode_map *encoding; 449739c135b0SJason Yan __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags); 449839c135b0SJason Yan 449939c135b0SJason Yan if (!ext4_has_feature_casefold(sb) || sb->s_encoding) 450039c135b0SJason Yan return 0; 450139c135b0SJason Yan 450239c135b0SJason Yan encoding_info = ext4_sb_read_encoding(es); 450339c135b0SJason Yan if (!encoding_info) { 450439c135b0SJason Yan ext4_msg(sb, KERN_ERR, 450539c135b0SJason Yan "Encoding requested by superblock is unknown"); 450639c135b0SJason Yan return -EINVAL; 450739c135b0SJason Yan } 450839c135b0SJason Yan 450939c135b0SJason Yan encoding = utf8_load(encoding_info->version); 451039c135b0SJason Yan if (IS_ERR(encoding)) { 451139c135b0SJason Yan ext4_msg(sb, KERN_ERR, 451239c135b0SJason Yan "can't mount with superblock charset: %s-%u.%u.%u " 451339c135b0SJason Yan "not supported by the kernel. flags: 0x%x.", 451439c135b0SJason Yan encoding_info->name, 451539c135b0SJason Yan unicode_major(encoding_info->version), 451639c135b0SJason Yan unicode_minor(encoding_info->version), 451739c135b0SJason Yan unicode_rev(encoding_info->version), 451839c135b0SJason Yan encoding_flags); 451939c135b0SJason Yan return -EINVAL; 452039c135b0SJason Yan } 452139c135b0SJason Yan ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: " 452239c135b0SJason Yan "%s-%u.%u.%u with flags 0x%hx", encoding_info->name, 452339c135b0SJason Yan unicode_major(encoding_info->version), 452439c135b0SJason Yan unicode_minor(encoding_info->version), 452539c135b0SJason Yan unicode_rev(encoding_info->version), 452639c135b0SJason Yan encoding_flags); 452739c135b0SJason Yan 452839c135b0SJason Yan sb->s_encoding = encoding; 452939c135b0SJason Yan sb->s_encoding_flags = encoding_flags; 453039c135b0SJason Yan 453139c135b0SJason Yan return 0; 453239c135b0SJason Yan } 453339c135b0SJason Yan #else 453439c135b0SJason Yan static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 453539c135b0SJason Yan { 453639c135b0SJason Yan return 0; 453739c135b0SJason Yan } 453839c135b0SJason Yan #endif 453939c135b0SJason Yan 4540b26458d1SJason Yan static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es) 4541b26458d1SJason Yan { 4542b26458d1SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4543b26458d1SJason Yan 4544b26458d1SJason Yan /* Warn if metadata_csum and gdt_csum are both set. */ 4545b26458d1SJason Yan if (ext4_has_feature_metadata_csum(sb) && 4546b26458d1SJason Yan ext4_has_feature_gdt_csum(sb)) 4547b26458d1SJason Yan ext4_warning(sb, "metadata_csum and uninit_bg are " 4548b26458d1SJason Yan "redundant flags; please run fsck."); 4549b26458d1SJason Yan 4550b26458d1SJason Yan /* Check for a known checksum algorithm */ 4551b26458d1SJason Yan if (!ext4_verify_csum_type(sb, es)) { 4552b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4553b26458d1SJason Yan "unknown checksum algorithm."); 4554b26458d1SJason Yan return -EINVAL; 4555b26458d1SJason Yan } 4556b26458d1SJason Yan ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, 4557b26458d1SJason Yan ext4_orphan_file_block_trigger); 4558b26458d1SJason Yan 4559b26458d1SJason Yan /* Load the checksum driver */ 4560b26458d1SJason Yan sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); 4561b26458d1SJason Yan if (IS_ERR(sbi->s_chksum_driver)) { 4562b26458d1SJason Yan int ret = PTR_ERR(sbi->s_chksum_driver); 4563b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); 4564b26458d1SJason Yan sbi->s_chksum_driver = NULL; 4565b26458d1SJason Yan return ret; 4566b26458d1SJason Yan } 4567b26458d1SJason Yan 4568b26458d1SJason Yan /* Check superblock checksum */ 4569b26458d1SJason Yan if (!ext4_superblock_csum_verify(sb, es)) { 4570b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4571b26458d1SJason Yan "invalid superblock checksum. Run e2fsck?"); 4572b26458d1SJason Yan return -EFSBADCRC; 4573b26458d1SJason Yan } 4574b26458d1SJason Yan 4575b26458d1SJason Yan /* Precompute checksum seed for all metadata */ 4576b26458d1SJason Yan if (ext4_has_feature_csum_seed(sb)) 4577b26458d1SJason Yan sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); 4578b26458d1SJason Yan else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb)) 4579b26458d1SJason Yan sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, 4580b26458d1SJason Yan sizeof(es->s_uuid)); 4581b26458d1SJason Yan return 0; 4582b26458d1SJason Yan } 4583b26458d1SJason Yan 4584d7f3542bSJason Yan static int ext4_check_feature_compatibility(struct super_block *sb, 4585d7f3542bSJason Yan struct ext4_super_block *es, 4586d7f3542bSJason Yan int silent) 4587d7f3542bSJason Yan { 4588d7f3542bSJason Yan if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 4589d7f3542bSJason Yan (ext4_has_compat_features(sb) || 4590d7f3542bSJason Yan ext4_has_ro_compat_features(sb) || 4591d7f3542bSJason Yan ext4_has_incompat_features(sb))) 4592d7f3542bSJason Yan ext4_msg(sb, KERN_WARNING, 4593d7f3542bSJason Yan "feature flags set on rev 0 fs, " 4594d7f3542bSJason Yan "running e2fsck is recommended"); 4595d7f3542bSJason Yan 4596d7f3542bSJason Yan if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { 4597d7f3542bSJason Yan set_opt2(sb, HURD_COMPAT); 4598d7f3542bSJason Yan if (ext4_has_feature_64bit(sb)) { 4599d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, 4600d7f3542bSJason Yan "The Hurd can't support 64-bit file systems"); 4601d7f3542bSJason Yan return -EINVAL; 4602d7f3542bSJason Yan } 4603d7f3542bSJason Yan 4604d7f3542bSJason Yan /* 4605d7f3542bSJason Yan * ea_inode feature uses l_i_version field which is not 4606d7f3542bSJason Yan * available in HURD_COMPAT mode. 4607d7f3542bSJason Yan */ 4608d7f3542bSJason Yan if (ext4_has_feature_ea_inode(sb)) { 4609d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, 4610d7f3542bSJason Yan "ea_inode feature is not supported for Hurd"); 4611d7f3542bSJason Yan return -EINVAL; 4612d7f3542bSJason Yan } 4613d7f3542bSJason Yan } 4614d7f3542bSJason Yan 4615d7f3542bSJason Yan if (IS_EXT2_SB(sb)) { 4616d7f3542bSJason Yan if (ext2_feature_set_ok(sb)) 4617d7f3542bSJason Yan ext4_msg(sb, KERN_INFO, "mounting ext2 file system " 4618d7f3542bSJason Yan "using the ext4 subsystem"); 4619d7f3542bSJason Yan else { 4620d7f3542bSJason Yan /* 4621d7f3542bSJason Yan * If we're probing be silent, if this looks like 4622d7f3542bSJason Yan * it's actually an ext[34] filesystem. 4623d7f3542bSJason Yan */ 4624d7f3542bSJason Yan if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4625d7f3542bSJason Yan return -EINVAL; 4626d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " 4627d7f3542bSJason Yan "to feature incompatibilities"); 4628d7f3542bSJason Yan return -EINVAL; 4629d7f3542bSJason Yan } 4630d7f3542bSJason Yan } 4631d7f3542bSJason Yan 4632d7f3542bSJason Yan if (IS_EXT3_SB(sb)) { 4633d7f3542bSJason Yan if (ext3_feature_set_ok(sb)) 4634d7f3542bSJason Yan ext4_msg(sb, KERN_INFO, "mounting ext3 file system " 4635d7f3542bSJason Yan "using the ext4 subsystem"); 4636d7f3542bSJason Yan else { 4637d7f3542bSJason Yan /* 4638d7f3542bSJason Yan * If we're probing be silent, if this looks like 4639d7f3542bSJason Yan * it's actually an ext4 filesystem. 4640d7f3542bSJason Yan */ 4641d7f3542bSJason Yan if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4642d7f3542bSJason Yan return -EINVAL; 4643d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " 4644d7f3542bSJason Yan "to feature incompatibilities"); 4645d7f3542bSJason Yan return -EINVAL; 4646d7f3542bSJason Yan } 4647d7f3542bSJason Yan } 4648d7f3542bSJason Yan 4649d7f3542bSJason Yan /* 4650d7f3542bSJason Yan * Check feature flags regardless of the revision level, since we 4651d7f3542bSJason Yan * previously didn't change the revision level when setting the flags, 4652d7f3542bSJason Yan * so there is a chance incompat flags are set on a rev 0 filesystem. 4653d7f3542bSJason Yan */ 4654d7f3542bSJason Yan if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) 4655d7f3542bSJason Yan return -EINVAL; 4656d7f3542bSJason Yan 4657d7f3542bSJason Yan return 0; 4658d7f3542bSJason Yan } 4659d7f3542bSJason Yan 4660bc62dbf9SJason Yan static int ext4_geometry_check(struct super_block *sb, 4661bc62dbf9SJason Yan struct ext4_super_block *es) 4662bc62dbf9SJason Yan { 4663bc62dbf9SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4664bc62dbf9SJason Yan __u64 blocks_count; 4665bc62dbf9SJason Yan 4666bc62dbf9SJason Yan /* check blocks count against device size */ 4667bc62dbf9SJason Yan blocks_count = sb_bdev_nr_blocks(sb); 4668bc62dbf9SJason Yan if (blocks_count && ext4_blocks_count(es) > blocks_count) { 4669bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " 4670bc62dbf9SJason Yan "exceeds size of device (%llu blocks)", 4671bc62dbf9SJason Yan ext4_blocks_count(es), blocks_count); 4672bc62dbf9SJason Yan return -EINVAL; 4673bc62dbf9SJason Yan } 4674bc62dbf9SJason Yan 4675bc62dbf9SJason Yan /* 4676bc62dbf9SJason Yan * It makes no sense for the first data block to be beyond the end 4677bc62dbf9SJason Yan * of the filesystem. 4678bc62dbf9SJason Yan */ 4679bc62dbf9SJason Yan if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { 4680bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4681bc62dbf9SJason Yan "block %u is beyond end of filesystem (%llu)", 4682bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block), 4683bc62dbf9SJason Yan ext4_blocks_count(es)); 4684bc62dbf9SJason Yan return -EINVAL; 4685bc62dbf9SJason Yan } 4686bc62dbf9SJason Yan if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && 4687bc62dbf9SJason Yan (sbi->s_cluster_ratio == 1)) { 4688bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4689bc62dbf9SJason Yan "block is 0 with a 1k block and cluster size"); 4690bc62dbf9SJason Yan return -EINVAL; 4691bc62dbf9SJason Yan } 4692bc62dbf9SJason Yan 4693bc62dbf9SJason Yan blocks_count = (ext4_blocks_count(es) - 4694bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block) + 4695bc62dbf9SJason Yan EXT4_BLOCKS_PER_GROUP(sb) - 1); 4696bc62dbf9SJason Yan do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 4697bc62dbf9SJason Yan if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { 4698bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " 4699bc62dbf9SJason Yan "(block count %llu, first data block %u, " 4700bc62dbf9SJason Yan "blocks per group %lu)", blocks_count, 4701bc62dbf9SJason Yan ext4_blocks_count(es), 4702bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block), 4703bc62dbf9SJason Yan EXT4_BLOCKS_PER_GROUP(sb)); 4704bc62dbf9SJason Yan return -EINVAL; 4705bc62dbf9SJason Yan } 4706bc62dbf9SJason Yan sbi->s_groups_count = blocks_count; 4707bc62dbf9SJason Yan sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 4708bc62dbf9SJason Yan (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 4709bc62dbf9SJason Yan if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != 4710bc62dbf9SJason Yan le32_to_cpu(es->s_inodes_count)) { 4711bc62dbf9SJason Yan ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu", 4712bc62dbf9SJason Yan le32_to_cpu(es->s_inodes_count), 4713bc62dbf9SJason Yan ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); 4714bc62dbf9SJason Yan return -EINVAL; 4715bc62dbf9SJason Yan } 4716bc62dbf9SJason Yan 4717bc62dbf9SJason Yan return 0; 4718bc62dbf9SJason Yan } 4719bc62dbf9SJason Yan 4720a4e6a511SJason Yan static void ext4_group_desc_free(struct ext4_sb_info *sbi) 4721a4e6a511SJason Yan { 4722a4e6a511SJason Yan struct buffer_head **group_desc; 4723a4e6a511SJason Yan int i; 4724a4e6a511SJason Yan 4725a4e6a511SJason Yan rcu_read_lock(); 4726a4e6a511SJason Yan group_desc = rcu_dereference(sbi->s_group_desc); 4727a4e6a511SJason Yan for (i = 0; i < sbi->s_gdb_count; i++) 4728a4e6a511SJason Yan brelse(group_desc[i]); 4729a4e6a511SJason Yan kvfree(group_desc); 4730a4e6a511SJason Yan rcu_read_unlock(); 4731a4e6a511SJason Yan } 4732a4e6a511SJason Yan 4733a4e6a511SJason Yan static int ext4_group_desc_init(struct super_block *sb, 4734a4e6a511SJason Yan struct ext4_super_block *es, 4735a4e6a511SJason Yan ext4_fsblk_t logical_sb_block, 4736a4e6a511SJason Yan ext4_group_t *first_not_zeroed) 4737a4e6a511SJason Yan { 4738a4e6a511SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4739a4e6a511SJason Yan unsigned int db_count; 4740a4e6a511SJason Yan ext4_fsblk_t block; 4741a4e6a511SJason Yan int ret; 4742a4e6a511SJason Yan int i; 4743a4e6a511SJason Yan 4744a4e6a511SJason Yan db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 4745a4e6a511SJason Yan EXT4_DESC_PER_BLOCK(sb); 4746a4e6a511SJason Yan if (ext4_has_feature_meta_bg(sb)) { 4747a4e6a511SJason Yan if (le32_to_cpu(es->s_first_meta_bg) > db_count) { 4748a4e6a511SJason Yan ext4_msg(sb, KERN_WARNING, 4749a4e6a511SJason Yan "first meta block group too large: %u " 4750a4e6a511SJason Yan "(group descriptor block count %u)", 4751a4e6a511SJason Yan le32_to_cpu(es->s_first_meta_bg), db_count); 4752a4e6a511SJason Yan return -EINVAL; 4753a4e6a511SJason Yan } 4754a4e6a511SJason Yan } 4755a4e6a511SJason Yan rcu_assign_pointer(sbi->s_group_desc, 4756a4e6a511SJason Yan kvmalloc_array(db_count, 4757a4e6a511SJason Yan sizeof(struct buffer_head *), 4758a4e6a511SJason Yan GFP_KERNEL)); 4759a4e6a511SJason Yan if (sbi->s_group_desc == NULL) { 4760a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, "not enough memory"); 4761a4e6a511SJason Yan return -ENOMEM; 4762a4e6a511SJason Yan } 4763a4e6a511SJason Yan 4764a4e6a511SJason Yan bgl_lock_init(sbi->s_blockgroup_lock); 4765a4e6a511SJason Yan 4766a4e6a511SJason Yan /* Pre-read the descriptors into the buffer cache */ 4767a4e6a511SJason Yan for (i = 0; i < db_count; i++) { 4768a4e6a511SJason Yan block = descriptor_loc(sb, logical_sb_block, i); 4769a4e6a511SJason Yan ext4_sb_breadahead_unmovable(sb, block); 4770a4e6a511SJason Yan } 4771a4e6a511SJason Yan 4772a4e6a511SJason Yan for (i = 0; i < db_count; i++) { 4773a4e6a511SJason Yan struct buffer_head *bh; 4774a4e6a511SJason Yan 4775a4e6a511SJason Yan block = descriptor_loc(sb, logical_sb_block, i); 4776a4e6a511SJason Yan bh = ext4_sb_bread_unmovable(sb, block); 4777a4e6a511SJason Yan if (IS_ERR(bh)) { 4778a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, 4779a4e6a511SJason Yan "can't read group descriptor %d", i); 4780a4e6a511SJason Yan sbi->s_gdb_count = i; 4781a4e6a511SJason Yan ret = PTR_ERR(bh); 4782a4e6a511SJason Yan goto out; 4783a4e6a511SJason Yan } 4784a4e6a511SJason Yan rcu_read_lock(); 4785a4e6a511SJason Yan rcu_dereference(sbi->s_group_desc)[i] = bh; 4786a4e6a511SJason Yan rcu_read_unlock(); 4787a4e6a511SJason Yan } 4788a4e6a511SJason Yan sbi->s_gdb_count = db_count; 4789a4e6a511SJason Yan if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) { 4790a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4791a4e6a511SJason Yan ret = -EFSCORRUPTED; 4792a4e6a511SJason Yan goto out; 4793a4e6a511SJason Yan } 4794a4e6a511SJason Yan return 0; 4795a4e6a511SJason Yan out: 4796a4e6a511SJason Yan ext4_group_desc_free(sbi); 4797a4e6a511SJason Yan return ret; 4798a4e6a511SJason Yan } 4799a4e6a511SJason Yan 48009c1dd22dSJason Yan static int ext4_load_and_init_journal(struct super_block *sb, 48019c1dd22dSJason Yan struct ext4_super_block *es, 48029c1dd22dSJason Yan struct ext4_fs_context *ctx) 48039c1dd22dSJason Yan { 48049c1dd22dSJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 48059c1dd22dSJason Yan int err; 48069c1dd22dSJason Yan 48079c1dd22dSJason Yan err = ext4_load_journal(sb, es, ctx->journal_devnum); 48089c1dd22dSJason Yan if (err) 48099c1dd22dSJason Yan return err; 48109c1dd22dSJason Yan 48119c1dd22dSJason Yan if (ext4_has_feature_64bit(sb) && 48129c1dd22dSJason Yan !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 48139c1dd22dSJason Yan JBD2_FEATURE_INCOMPAT_64BIT)) { 48149c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 48159c1dd22dSJason Yan goto out; 48169c1dd22dSJason Yan } 48179c1dd22dSJason Yan 48189c1dd22dSJason Yan if (!set_journal_csum_feature_set(sb)) { 48199c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " 48209c1dd22dSJason Yan "feature set"); 48219c1dd22dSJason Yan goto out; 48229c1dd22dSJason Yan } 48239c1dd22dSJason Yan 48249c1dd22dSJason Yan if (test_opt2(sb, JOURNAL_FAST_COMMIT) && 48259c1dd22dSJason Yan !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 48269c1dd22dSJason Yan JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) { 48279c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, 48289c1dd22dSJason Yan "Failed to set fast commit journal feature"); 48299c1dd22dSJason Yan goto out; 48309c1dd22dSJason Yan } 48319c1dd22dSJason Yan 48329c1dd22dSJason Yan /* We have now updated the journal if required, so we can 48339c1dd22dSJason Yan * validate the data journaling mode. */ 48349c1dd22dSJason Yan switch (test_opt(sb, DATA_FLAGS)) { 48359c1dd22dSJason Yan case 0: 48369c1dd22dSJason Yan /* No mode set, assume a default based on the journal 48379c1dd22dSJason Yan * capabilities: ORDERED_DATA if the journal can 48389c1dd22dSJason Yan * cope, else JOURNAL_DATA 48399c1dd22dSJason Yan */ 48409c1dd22dSJason Yan if (jbd2_journal_check_available_features 48419c1dd22dSJason Yan (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 48429c1dd22dSJason Yan set_opt(sb, ORDERED_DATA); 48439c1dd22dSJason Yan sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA; 48449c1dd22dSJason Yan } else { 48459c1dd22dSJason Yan set_opt(sb, JOURNAL_DATA); 48469c1dd22dSJason Yan sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; 48479c1dd22dSJason Yan } 48489c1dd22dSJason Yan break; 48499c1dd22dSJason Yan 48509c1dd22dSJason Yan case EXT4_MOUNT_ORDERED_DATA: 48519c1dd22dSJason Yan case EXT4_MOUNT_WRITEBACK_DATA: 48529c1dd22dSJason Yan if (!jbd2_journal_check_available_features 48539c1dd22dSJason Yan (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 48549c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Journal does not support " 48559c1dd22dSJason Yan "requested data journaling mode"); 48569c1dd22dSJason Yan goto out; 48579c1dd22dSJason Yan } 48589c1dd22dSJason Yan break; 48599c1dd22dSJason Yan default: 48609c1dd22dSJason Yan break; 48619c1dd22dSJason Yan } 48629c1dd22dSJason Yan 48639c1dd22dSJason Yan if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && 48649c1dd22dSJason Yan test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 48659c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "can't mount with " 48669c1dd22dSJason Yan "journal_async_commit in data=ordered mode"); 48679c1dd22dSJason Yan goto out; 48689c1dd22dSJason Yan } 48699c1dd22dSJason Yan 48709c1dd22dSJason Yan set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 48719c1dd22dSJason Yan 48729c1dd22dSJason Yan sbi->s_journal->j_submit_inode_data_buffers = 48739c1dd22dSJason Yan ext4_journal_submit_inode_data_buffers; 48749c1dd22dSJason Yan sbi->s_journal->j_finish_inode_data_buffers = 48759c1dd22dSJason Yan ext4_journal_finish_inode_data_buffers; 48769c1dd22dSJason Yan 48779c1dd22dSJason Yan return 0; 48789c1dd22dSJason Yan 48799c1dd22dSJason Yan out: 48809c1dd22dSJason Yan /* flush s_error_work before journal destroy. */ 48819c1dd22dSJason Yan flush_work(&sbi->s_error_work); 48829c1dd22dSJason Yan jbd2_journal_destroy(sbi->s_journal); 48839c1dd22dSJason Yan sbi->s_journal = NULL; 48849f2a1d9fSJason Yan return -EINVAL; 48859c1dd22dSJason Yan } 48869c1dd22dSJason Yan 4887a5991e53SJason Yan static int ext4_journal_data_mode_check(struct super_block *sb) 4888a5991e53SJason Yan { 4889a5991e53SJason Yan if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4890a5991e53SJason Yan printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with " 4891a5991e53SJason Yan "data=journal disables delayed allocation, " 4892a5991e53SJason Yan "dioread_nolock, O_DIRECT and fast_commit support!\n"); 4893a5991e53SJason Yan /* can't mount with both data=journal and dioread_nolock. */ 4894a5991e53SJason Yan clear_opt(sb, DIOREAD_NOLOCK); 4895a5991e53SJason Yan clear_opt2(sb, JOURNAL_FAST_COMMIT); 4896a5991e53SJason Yan if (test_opt2(sb, EXPLICIT_DELALLOC)) { 4897a5991e53SJason Yan ext4_msg(sb, KERN_ERR, "can't mount with " 4898a5991e53SJason Yan "both data=journal and delalloc"); 4899a5991e53SJason Yan return -EINVAL; 4900a5991e53SJason Yan } 4901a5991e53SJason Yan if (test_opt(sb, DAX_ALWAYS)) { 4902a5991e53SJason Yan ext4_msg(sb, KERN_ERR, "can't mount with " 4903a5991e53SJason Yan "both data=journal and dax"); 4904a5991e53SJason Yan return -EINVAL; 4905a5991e53SJason Yan } 4906a5991e53SJason Yan if (ext4_has_feature_encrypt(sb)) { 4907a5991e53SJason Yan ext4_msg(sb, KERN_WARNING, 4908a5991e53SJason Yan "encrypted files will use data=ordered " 4909a5991e53SJason Yan "instead of data journaling mode"); 4910a5991e53SJason Yan } 4911a5991e53SJason Yan if (test_opt(sb, DELALLOC)) 4912a5991e53SJason Yan clear_opt(sb, DELALLOC); 4913a5991e53SJason Yan } else { 4914a5991e53SJason Yan sb->s_iflags |= SB_I_CGROUPWB; 4915a5991e53SJason Yan } 4916a5991e53SJason Yan 4917a5991e53SJason Yan return 0; 4918a5991e53SJason Yan } 4919a5991e53SJason Yan 4920a7a79c29SJason Yan static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb, 4921a7a79c29SJason Yan int silent) 4922a7a79c29SJason Yan { 4923a7a79c29SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4924a7a79c29SJason Yan struct ext4_super_block *es; 4925a7a79c29SJason Yan ext4_fsblk_t logical_sb_block; 4926a7a79c29SJason Yan unsigned long offset = 0; 4927a7a79c29SJason Yan struct buffer_head *bh; 4928a7a79c29SJason Yan int ret = -EINVAL; 4929a7a79c29SJason Yan int blocksize; 4930a7a79c29SJason Yan 4931a7a79c29SJason Yan blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); 4932a7a79c29SJason Yan if (!blocksize) { 4933a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, "unable to set blocksize"); 4934a7a79c29SJason Yan return -EINVAL; 4935a7a79c29SJason Yan } 4936a7a79c29SJason Yan 4937a7a79c29SJason Yan /* 4938a7a79c29SJason Yan * The ext4 superblock will not be buffer aligned for other than 1kB 4939a7a79c29SJason Yan * block sizes. We need to calculate the offset from buffer start. 4940a7a79c29SJason Yan */ 4941a7a79c29SJason Yan if (blocksize != EXT4_MIN_BLOCK_SIZE) { 4942a7a79c29SJason Yan logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE; 4943a7a79c29SJason Yan offset = do_div(logical_sb_block, blocksize); 4944a7a79c29SJason Yan } else { 4945a7a79c29SJason Yan logical_sb_block = sbi->s_sb_block; 4946a7a79c29SJason Yan } 4947a7a79c29SJason Yan 4948a7a79c29SJason Yan bh = ext4_sb_bread_unmovable(sb, logical_sb_block); 4949a7a79c29SJason Yan if (IS_ERR(bh)) { 4950a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, "unable to read superblock"); 4951a7a79c29SJason Yan return PTR_ERR(bh); 4952a7a79c29SJason Yan } 4953a7a79c29SJason Yan /* 4954a7a79c29SJason Yan * Note: s_es must be initialized as soon as possible because 4955a7a79c29SJason Yan * some ext4 macro-instructions depend on its value 4956a7a79c29SJason Yan */ 4957a7a79c29SJason Yan es = (struct ext4_super_block *) (bh->b_data + offset); 4958a7a79c29SJason Yan sbi->s_es = es; 4959a7a79c29SJason Yan sb->s_magic = le16_to_cpu(es->s_magic); 4960a7a79c29SJason Yan if (sb->s_magic != EXT4_SUPER_MAGIC) { 4961a7a79c29SJason Yan if (!silent) 4962a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 4963a7a79c29SJason Yan goto out; 4964a7a79c29SJason Yan } 4965a7a79c29SJason Yan 4966a7a79c29SJason Yan if (le32_to_cpu(es->s_log_block_size) > 4967a7a79c29SJason Yan (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 4968a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, 4969a7a79c29SJason Yan "Invalid log block size: %u", 4970a7a79c29SJason Yan le32_to_cpu(es->s_log_block_size)); 4971a7a79c29SJason Yan goto out; 4972a7a79c29SJason Yan } 4973a7a79c29SJason Yan if (le32_to_cpu(es->s_log_cluster_size) > 4974a7a79c29SJason Yan (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { 4975a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, 4976a7a79c29SJason Yan "Invalid log cluster size: %u", 4977a7a79c29SJason Yan le32_to_cpu(es->s_log_cluster_size)); 4978a7a79c29SJason Yan goto out; 4979a7a79c29SJason Yan } 4980a7a79c29SJason Yan 4981a7a79c29SJason Yan blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); 4982a7a79c29SJason Yan 4983a7a79c29SJason Yan /* 4984a7a79c29SJason Yan * If the default block size is not the same as the real block size, 4985a7a79c29SJason Yan * we need to reload it. 4986a7a79c29SJason Yan */ 4987a7a79c29SJason Yan if (sb->s_blocksize == blocksize) { 4988a7a79c29SJason Yan *lsb = logical_sb_block; 4989a7a79c29SJason Yan sbi->s_sbh = bh; 4990a7a79c29SJason Yan return 0; 4991a7a79c29SJason Yan } 4992a7a79c29SJason Yan 4993a7a79c29SJason Yan /* 4994a7a79c29SJason Yan * bh must be released before kill_bdev(), otherwise 4995a7a79c29SJason Yan * it won't be freed and its page also. kill_bdev() 4996a7a79c29SJason Yan * is called by sb_set_blocksize(). 4997a7a79c29SJason Yan */ 4998a7a79c29SJason Yan brelse(bh); 4999a7a79c29SJason Yan /* Validate the filesystem blocksize */ 5000a7a79c29SJason Yan if (!sb_set_blocksize(sb, blocksize)) { 5001a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, "bad block size %d", 5002a7a79c29SJason Yan blocksize); 5003a7a79c29SJason Yan bh = NULL; 5004a7a79c29SJason Yan goto out; 5005a7a79c29SJason Yan } 5006a7a79c29SJason Yan 5007a7a79c29SJason Yan logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE; 5008a7a79c29SJason Yan offset = do_div(logical_sb_block, blocksize); 5009a7a79c29SJason Yan bh = ext4_sb_bread_unmovable(sb, logical_sb_block); 5010a7a79c29SJason Yan if (IS_ERR(bh)) { 5011a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); 5012a7a79c29SJason Yan ret = PTR_ERR(bh); 5013a7a79c29SJason Yan bh = NULL; 5014a7a79c29SJason Yan goto out; 5015a7a79c29SJason Yan } 5016a7a79c29SJason Yan es = (struct ext4_super_block *)(bh->b_data + offset); 5017a7a79c29SJason Yan sbi->s_es = es; 5018a7a79c29SJason Yan if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { 5019a7a79c29SJason Yan ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); 5020a7a79c29SJason Yan goto out; 5021a7a79c29SJason Yan } 5022a7a79c29SJason Yan *lsb = logical_sb_block; 5023a7a79c29SJason Yan sbi->s_sbh = bh; 5024a7a79c29SJason Yan return 0; 5025a7a79c29SJason Yan out: 5026a7a79c29SJason Yan brelse(bh); 5027a7a79c29SJason Yan return ret; 5028a7a79c29SJason Yan } 5029a7a79c29SJason Yan 5030960e0ab6SLukas Czerner static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) 50317edfd85bSLukas Czerner { 5032617ba13bSMingming Cao struct ext4_super_block *es = NULL; 50337edfd85bSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 50347c990728SSuraj Jitindar Singh struct flex_groups **flex_groups; 5035617ba13bSMingming Cao ext4_fsblk_t block; 503670bbb3e0SAndrew Morton ext4_fsblk_t logical_sb_block; 5037ac27a0ecSDave Kleikamp struct inode *root; 5038dcc7dae3SCyrill Gorcunov int ret = -ENOMEM; 50394ec11028STheodore Ts'o unsigned int i; 5040ef5fd681SKaixu Xia int needs_recovery, has_huge_files; 504107aa2ea1SLukas Czerner int err = 0; 5042bfff6873SLukas Czerner ext4_group_t first_not_zeroed; 50437edfd85bSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 5044960e0ab6SLukas Czerner int silent = fc->sb_flags & SB_SILENT; 5045b237e304SHarshad Shirwadkar 5046b237e304SHarshad Shirwadkar /* Set defaults for the variables that will be set during parsing */ 5047e4e58e5dSOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) 50487edfd85bSLukas Czerner ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 5049ac27a0ecSDave Kleikamp 5050240799cdSTheodore Ts'o sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; 5051f613dfcbSTheodore Ts'o sbi->s_sectors_written_start = 50528446fe92SChristoph Hellwig part_stat_read(sb->s_bdev, sectors[STAT_WRITE]); 5053ac27a0ecSDave Kleikamp 505407aa2ea1SLukas Czerner /* -EINVAL is default */ 5055dcc7dae3SCyrill Gorcunov ret = -EINVAL; 5056a7a79c29SJason Yan err = ext4_load_super(sb, &logical_sb_block, silent); 5057a7a79c29SJason Yan if (err) 5058ac27a0ecSDave Kleikamp goto out_fail; 5059ac27a0ecSDave Kleikamp 5060a7a79c29SJason Yan es = sbi->s_es; 5061afc32f7eSTheodore Ts'o sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 5062ac27a0ecSDave Kleikamp 5063b26458d1SJason Yan err = ext4_init_metadata_csum(sb, es); 5064b26458d1SJason Yan if (err) 5065a5fc5119SJason Yan goto failed_mount; 5066a9c47317SDarrick J. Wong 50675f6d662dSJason Yan ext4_set_def_opts(sb, es); 5068ac27a0ecSDave Kleikamp 506908cefc7aSEric W. Biederman sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); 507008cefc7aSEric W. Biederman sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); 507130773840STheodore Ts'o sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 507230773840STheodore Ts'o sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 507330773840STheodore Ts'o sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 5074ac27a0ecSDave Kleikamp 507551ce6511SLukas Czerner /* 507651ce6511SLukas Czerner * set default s_li_wait_mult for lazyinit, for the case there is 507751ce6511SLukas Czerner * no mount option specified. 507851ce6511SLukas Czerner */ 507951ce6511SLukas Czerner sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 508051ce6511SLukas Czerner 5081c8267c51SJason Yan if (ext4_inode_info_init(sb, es)) 50829803387cSTheodore Ts'o goto failed_mount; 50839803387cSTheodore Ts'o 50847edfd85bSLukas Czerner err = parse_apply_sb_mount_options(sb, ctx); 50857edfd85bSLukas Czerner if (err < 0) 50865aee0f8aSTheodore Ts'o goto failed_mount; 50877edfd85bSLukas Czerner 50885a916be1STheodore Ts'o sbi->s_def_mount_opt = sbi->s_mount_opt; 50897edfd85bSLukas Czerner 50907edfd85bSLukas Czerner err = ext4_check_opt_consistency(fc, sb); 50917edfd85bSLukas Czerner if (err < 0) 50927edfd85bSLukas Czerner goto failed_mount; 50937edfd85bSLukas Czerner 509485456054SEric Biggers ext4_apply_options(fc, sb); 5095ac27a0ecSDave Kleikamp 509639c135b0SJason Yan if (ext4_encoding_init(sb, es)) 5097c83ad55eSGabriel Krisman Bertazi goto failed_mount; 5098c83ad55eSGabriel Krisman Bertazi 5099a5991e53SJason Yan if (ext4_journal_data_mode_check(sb)) 510056889787STheodore Ts'o goto failed_mount; 510156889787STheodore Ts'o 51021751e8a6SLinus Torvalds sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 51031751e8a6SLinus Torvalds (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 5104ac27a0ecSDave Kleikamp 51051ff20307SJeff Layton /* i_version is always enabled now */ 51061ff20307SJeff Layton sb->s_flags |= SB_I_VERSION; 51071ff20307SJeff Layton 5108d7f3542bSJason Yan if (ext4_check_feature_compatibility(sb, es, silent)) 5109ac27a0ecSDave Kleikamp goto failed_mount; 5110a13fb1a4SEric Sandeen 5111c8267c51SJason Yan if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) { 51125b9554dcSTheodore Ts'o ext4_msg(sb, KERN_ERR, 51135b9554dcSTheodore Ts'o "Number of reserved GDT blocks insanely large: %d", 51145b9554dcSTheodore Ts'o le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); 51155b9554dcSTheodore Ts'o goto failed_mount; 51165b9554dcSTheodore Ts'o } 51175b9554dcSTheodore Ts'o 511889b93a7bSChristoph Hellwig if (sbi->s_daxdev) { 5119c8267c51SJason Yan if (sb->s_blocksize == PAGE_SIZE) 5120a8ab6d38SIra Weiny set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); 51217b0800d0SChristoph Hellwig else 51227b0800d0SChristoph Hellwig ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n"); 51237b0800d0SChristoph Hellwig } 5124a8ab6d38SIra Weiny 5125fc626fe3SIra Weiny if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { 5126559db4c6SRoss Zwisler if (ext4_has_feature_inline_data(sb)) { 5127559db4c6SRoss Zwisler ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" 5128559db4c6SRoss Zwisler " that may contain inline data"); 5129361d24d4SEric Sandeen goto failed_mount; 5130559db4c6SRoss Zwisler } 5131a8ab6d38SIra Weiny if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { 513224f3478dSDan Williams ext4_msg(sb, KERN_ERR, 5133361d24d4SEric Sandeen "DAX unsupported by block device."); 5134361d24d4SEric Sandeen goto failed_mount; 513524f3478dSDan Williams } 5136923ae0ffSRoss Zwisler } 5137923ae0ffSRoss Zwisler 5138e2b911c5SDarrick J. Wong if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { 51396ddb2447STheodore Ts'o ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", 51406ddb2447STheodore Ts'o es->s_encryption_level); 51416ddb2447STheodore Ts'o goto failed_mount; 51426ddb2447STheodore Ts'o } 51436ddb2447STheodore Ts'o 5144e2b911c5SDarrick J. Wong has_huge_files = ext4_has_feature_huge_file(sb); 5145f287a1a5STheodore Ts'o sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, 5146f287a1a5STheodore Ts'o has_huge_files); 5147f287a1a5STheodore Ts'o sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); 5148ac27a0ecSDave Kleikamp 51490d1ee42fSAlexandre Ratchov sbi->s_desc_size = le16_to_cpu(es->s_desc_size); 5150e2b911c5SDarrick J. Wong if (ext4_has_feature_64bit(sb)) { 51518fadc143SAlexandre Ratchov if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || 51520d1ee42fSAlexandre Ratchov sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 5153d8ea6cf8Svignesh babu !is_power_of_2(sbi->s_desc_size)) { 5154b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, 5155b31e1552SEric Sandeen "unsupported descriptor size %lu", 51560d1ee42fSAlexandre Ratchov sbi->s_desc_size); 51570d1ee42fSAlexandre Ratchov goto failed_mount; 51580d1ee42fSAlexandre Ratchov } 51590d1ee42fSAlexandre Ratchov } else 51600d1ee42fSAlexandre Ratchov sbi->s_desc_size = EXT4_MIN_DESC_SIZE; 51610b8e58a1SAndreas Dilger 5162ac27a0ecSDave Kleikamp sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); 5163ac27a0ecSDave Kleikamp sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); 51640b8e58a1SAndreas Dilger 5165c8267c51SJason Yan sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb); 5166a5fc5119SJason Yan if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) { 5167a5fc5119SJason Yan if (!silent) 5168a5fc5119SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 5169a5fc5119SJason Yan goto failed_mount; 5170a5fc5119SJason Yan } 5171cd6bb35bSTheodore Ts'o if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || 5172c8267c51SJason Yan sbi->s_inodes_per_group > sb->s_blocksize * 8) { 5173cd6bb35bSTheodore Ts'o ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", 5174b9c538daSJosh Triplett sbi->s_inodes_per_group); 5175cd6bb35bSTheodore Ts'o goto failed_mount; 5176cd6bb35bSTheodore Ts'o } 5177ac27a0ecSDave Kleikamp sbi->s_itb_per_group = sbi->s_inodes_per_group / 5178ac27a0ecSDave Kleikamp sbi->s_inodes_per_block; 5179c8267c51SJason Yan sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb); 5180c878bea3STheodore Ts'o sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY; 5181e57aa839SFengguang Wu sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); 5182e57aa839SFengguang Wu sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); 51830b8e58a1SAndreas Dilger 5184ac27a0ecSDave Kleikamp for (i = 0; i < 4; i++) 5185ac27a0ecSDave Kleikamp sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 5186ac27a0ecSDave Kleikamp sbi->s_def_hash_version = es->s_def_hash_version; 5187e2b911c5SDarrick J. Wong if (ext4_has_feature_dir_index(sb)) { 5188f99b2589STheodore Ts'o i = le32_to_cpu(es->s_flags); 5189f99b2589STheodore Ts'o if (i & EXT2_FLAGS_UNSIGNED_HASH) 5190f99b2589STheodore Ts'o sbi->s_hash_unsigned = 3; 5191f99b2589STheodore Ts'o else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 5192f99b2589STheodore Ts'o #ifdef __CHAR_UNSIGNED__ 5193bc98a42cSDavid Howells if (!sb_rdonly(sb)) 519423301410STheodore Ts'o es->s_flags |= 519523301410STheodore Ts'o cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 5196f99b2589STheodore Ts'o sbi->s_hash_unsigned = 3; 5197f99b2589STheodore Ts'o #else 5198bc98a42cSDavid Howells if (!sb_rdonly(sb)) 519923301410STheodore Ts'o es->s_flags |= 520023301410STheodore Ts'o cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 5201f99b2589STheodore Ts'o #endif 5202f99b2589STheodore Ts'o } 520323301410STheodore Ts'o } 5204ac27a0ecSDave Kleikamp 5205c8267c51SJason Yan if (ext4_handle_clustersize(sb)) 5206281b5995STheodore Ts'o goto failed_mount; 5207960fd856STheodore Ts'o 5208bf43d84bSEric Sandeen /* 5209bf43d84bSEric Sandeen * Test whether we have more sectors than will fit in sector_t, 5210bf43d84bSEric Sandeen * and whether the max offset is addressable by the page cache. 5211bf43d84bSEric Sandeen */ 52125a9ae68aSDarrick J. Wong err = generic_check_addressable(sb->s_blocksize_bits, 521330ca22c7SPatrick J. LoPresti ext4_blocks_count(es)); 52145a9ae68aSDarrick J. Wong if (err) { 5215b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "filesystem" 5216bf43d84bSEric Sandeen " too large to mount safely on this system"); 5217ac27a0ecSDave Kleikamp goto failed_mount; 5218ac27a0ecSDave Kleikamp } 5219ac27a0ecSDave Kleikamp 5220bc62dbf9SJason Yan if (ext4_geometry_check(sb, es)) 52210f2ddca6SFrom: Thiemo Nagel goto failed_mount; 52220f2ddca6SFrom: Thiemo Nagel 5223a4e6a511SJason Yan err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed); 5224a4e6a511SJason Yan if (err) 52253a4b77cdSEryu Guan goto failed_mount; 5226772cb7c8SJose R. Santos 5227235699a8SKees Cook timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 5228c92dc856SJan Kara spin_lock_init(&sbi->s_error_lock); 5229c92dc856SJan Kara INIT_WORK(&sbi->s_error_work, flush_stashed_error_work); 523004496411STao Ma 5231a75ae78fSDmitry Monakhov /* Register extent status tree shrinker */ 5232eb68d0e2SZheng Liu if (ext4_es_register_shrinker(sbi)) 5233ce7e010aSTheodore Ts'o goto failed_mount3; 5234ce7e010aSTheodore Ts'o 5235c9de560dSAlex Tomas sbi->s_stripe = ext4_get_stripe_size(sbi); 523667a5da56SZheng Liu sbi->s_extent_max_zeroout_kb = 32; 5237c9de560dSAlex Tomas 5238f9ae9cf5STheodore Ts'o /* 5239f9ae9cf5STheodore Ts'o * set up enough so that it can read an inode 5240f9ae9cf5STheodore Ts'o */ 5241f9ae9cf5STheodore Ts'o sb->s_op = &ext4_sops; 5242617ba13bSMingming Cao sb->s_export_op = &ext4_export_ops; 5243617ba13bSMingming Cao sb->s_xattr = ext4_xattr_handlers; 5244643fa961SChandan Rajendra #ifdef CONFIG_FS_ENCRYPTION 5245a7550b30SJaegeuk Kim sb->s_cop = &ext4_cryptops; 5246ffcc4182SEric Biggers #endif 5247c93d8f88SEric Biggers #ifdef CONFIG_FS_VERITY 5248c93d8f88SEric Biggers sb->s_vop = &ext4_verityops; 5249c93d8f88SEric Biggers #endif 5250ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 5251617ba13bSMingming Cao sb->dq_op = &ext4_quota_operations; 5252e2b911c5SDarrick J. Wong if (ext4_has_feature_quota(sb)) 52531fa5efe3SJan Kara sb->s_qcop = &dquot_quotactl_sysfile_ops; 5254262b4662SJan Kara else 5255262b4662SJan Kara sb->s_qcop = &ext4_qctl_operations; 5256689c958cSLi Xi sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 5257ac27a0ecSDave Kleikamp #endif 525885787090SChristoph Hellwig memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); 5259f2fa2ffcSAneesh Kumar K.V 5260ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ 52613b9d4ed2STheodore Ts'o mutex_init(&sbi->s_orphan_lock); 5262ac27a0ecSDave Kleikamp 5263f7314a67SJason Yan ext4_fast_commit_init(sb); 5264aa75f4d3SHarshad Shirwadkar 5265ac27a0ecSDave Kleikamp sb->s_root = NULL; 5266ac27a0ecSDave Kleikamp 5267ac27a0ecSDave Kleikamp needs_recovery = (es->s_last_orphan != 0 || 526802f310fcSJan Kara ext4_has_feature_orphan_present(sb) || 5269e2b911c5SDarrick J. Wong ext4_has_feature_journal_needs_recovery(sb)); 5270ac27a0ecSDave Kleikamp 5271bc98a42cSDavid Howells if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) 5272c5e06d10SJohann Lombardi if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) 527350460fe8SDarrick J. Wong goto failed_mount3a; 5274c5e06d10SJohann Lombardi 5275ac27a0ecSDave Kleikamp /* 5276ac27a0ecSDave Kleikamp * The first inode we look at is the journal inode. Don't try 5277ac27a0ecSDave Kleikamp * root first: it may be modified in the journal! 5278ac27a0ecSDave Kleikamp */ 5279e2b911c5SDarrick J. Wong if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { 52809c1dd22dSJason Yan err = ext4_load_and_init_journal(sb, es, ctx); 52814753d8a2STheodore Ts'o if (err) 528250460fe8SDarrick J. Wong goto failed_mount3a; 5283bc98a42cSDavid Howells } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && 5284e2b911c5SDarrick J. Wong ext4_has_feature_journal_needs_recovery(sb)) { 5285b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "required journal recovery " 5286b31e1552SEric Sandeen "suppressed and not mounted read-only"); 528743bd6f1bSJason Yan goto failed_mount3a; 5288ac27a0ecSDave Kleikamp } else { 52891e381f60SDmitry Monakhov /* Nojournal mode, all journal mount options are illegal */ 52901e381f60SDmitry Monakhov if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { 52911e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 52921e381f60SDmitry Monakhov "journal_checksum, fs mounted w/o journal"); 529343bd6f1bSJason Yan goto failed_mount3a; 52941e381f60SDmitry Monakhov } 52951e381f60SDmitry Monakhov if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 52961e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 52971e381f60SDmitry Monakhov "journal_async_commit, fs mounted w/o journal"); 529843bd6f1bSJason Yan goto failed_mount3a; 52991e381f60SDmitry Monakhov } 53001e381f60SDmitry Monakhov if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { 53011e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 53021e381f60SDmitry Monakhov "commit=%lu, fs mounted w/o journal", 53031e381f60SDmitry Monakhov sbi->s_commit_interval / HZ); 530443bd6f1bSJason Yan goto failed_mount3a; 53051e381f60SDmitry Monakhov } 53061e381f60SDmitry Monakhov if (EXT4_MOUNT_DATA_FLAGS & 53071e381f60SDmitry Monakhov (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 53081e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 53091e381f60SDmitry Monakhov "data=, fs mounted w/o journal"); 531043bd6f1bSJason Yan goto failed_mount3a; 53111e381f60SDmitry Monakhov } 531250b29d8fSDebabrata Banerjee sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; 53131e381f60SDmitry Monakhov clear_opt(sb, JOURNAL_CHECKSUM); 5314fd8c37ecSTheodore Ts'o clear_opt(sb, DATA_FLAGS); 5315995a3ed6SHarshad Shirwadkar clear_opt2(sb, JOURNAL_FAST_COMMIT); 53160390131bSFrank Mayhar sbi->s_journal = NULL; 53170390131bSFrank Mayhar needs_recovery = 0; 5318ac27a0ecSDave Kleikamp } 5319ac27a0ecSDave Kleikamp 5320cdb7ee4cSTahsin Erdogan if (!test_opt(sb, NO_MBCACHE)) { 532147387409STahsin Erdogan sbi->s_ea_block_cache = ext4_xattr_create_cache(); 532247387409STahsin Erdogan if (!sbi->s_ea_block_cache) { 5323cdb7ee4cSTahsin Erdogan ext4_msg(sb, KERN_ERR, 5324cdb7ee4cSTahsin Erdogan "Failed to create ea_block_cache"); 53259c191f70ST Makphaibulchoke goto failed_mount_wq; 53269c191f70ST Makphaibulchoke } 53279c191f70ST Makphaibulchoke 5328dec214d0STahsin Erdogan if (ext4_has_feature_ea_inode(sb)) { 5329dec214d0STahsin Erdogan sbi->s_ea_inode_cache = ext4_xattr_create_cache(); 5330dec214d0STahsin Erdogan if (!sbi->s_ea_inode_cache) { 5331dec214d0STahsin Erdogan ext4_msg(sb, KERN_ERR, 5332dec214d0STahsin Erdogan "Failed to create ea_inode_cache"); 5333dec214d0STahsin Erdogan goto failed_mount_wq; 5334dec214d0STahsin Erdogan } 5335dec214d0STahsin Erdogan } 5336cdb7ee4cSTahsin Erdogan } 5337dec214d0STahsin Erdogan 5338c8267c51SJason Yan if (ext4_has_feature_verity(sb) && sb->s_blocksize != PAGE_SIZE) { 5339c93d8f88SEric Biggers ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity"); 5340c93d8f88SEric Biggers goto failed_mount_wq; 5341c93d8f88SEric Biggers } 5342c93d8f88SEric Biggers 5343fd89d5f2STejun Heo /* 5344952fc18eSTheodore Ts'o * Get the # of file system overhead blocks from the 5345952fc18eSTheodore Ts'o * superblock if present. 5346952fc18eSTheodore Ts'o */ 5347952fc18eSTheodore Ts'o sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); 534885d825dbSTheodore Ts'o /* ignore the precalculated value if it is ridiculous */ 534985d825dbSTheodore Ts'o if (sbi->s_overhead > ext4_blocks_count(es)) 535085d825dbSTheodore Ts'o sbi->s_overhead = 0; 535185d825dbSTheodore Ts'o /* 535285d825dbSTheodore Ts'o * If the bigalloc feature is not enabled recalculating the 535385d825dbSTheodore Ts'o * overhead doesn't take long, so we might as well just redo 535485d825dbSTheodore Ts'o * it to make sure we are using the correct value. 535585d825dbSTheodore Ts'o */ 535685d825dbSTheodore Ts'o if (!ext4_has_feature_bigalloc(sb)) 535785d825dbSTheodore Ts'o sbi->s_overhead = 0; 535885d825dbSTheodore Ts'o if (sbi->s_overhead == 0) { 535907aa2ea1SLukas Czerner err = ext4_calculate_overhead(sb); 536007aa2ea1SLukas Czerner if (err) 5361952fc18eSTheodore Ts'o goto failed_mount_wq; 5362952fc18eSTheodore Ts'o } 5363952fc18eSTheodore Ts'o 5364952fc18eSTheodore Ts'o /* 5365fd89d5f2STejun Heo * The maximum number of concurrent works can be high and 5366fd89d5f2STejun Heo * concurrency isn't really necessary. Limit it to 1. 5367fd89d5f2STejun Heo */ 53682e8fa54eSJan Kara EXT4_SB(sb)->rsv_conversion_wq = 53692e8fa54eSJan Kara alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 53702e8fa54eSJan Kara if (!EXT4_SB(sb)->rsv_conversion_wq) { 53712e8fa54eSJan Kara printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); 537207aa2ea1SLukas Czerner ret = -ENOMEM; 53732e8fa54eSJan Kara goto failed_mount4; 53742e8fa54eSJan Kara } 53752e8fa54eSJan Kara 5376ac27a0ecSDave Kleikamp /* 5377dab291afSMingming Cao * The jbd2_journal_load will have done any necessary log recovery, 5378ac27a0ecSDave Kleikamp * so we can safely mount the rest of the filesystem now. 5379ac27a0ecSDave Kleikamp */ 5380ac27a0ecSDave Kleikamp 53818a363970STheodore Ts'o root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL); 53821d1fe1eeSDavid Howells if (IS_ERR(root)) { 5383b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "get root inode failed"); 53841d1fe1eeSDavid Howells ret = PTR_ERR(root); 538532a9bb57SManish Katiyar root = NULL; 5386ac27a0ecSDave Kleikamp goto failed_mount4; 5387ac27a0ecSDave Kleikamp } 5388ac27a0ecSDave Kleikamp if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 5389b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 539094bf608aSAl Viro iput(root); 5391ac27a0ecSDave Kleikamp goto failed_mount4; 5392ac27a0ecSDave Kleikamp } 5393b886ee3eSGabriel Krisman Bertazi 539448fde701SAl Viro sb->s_root = d_make_root(root); 53951d1fe1eeSDavid Howells if (!sb->s_root) { 5396b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "get root dentry failed"); 53971d1fe1eeSDavid Howells ret = -ENOMEM; 53981d1fe1eeSDavid Howells goto failed_mount4; 53991d1fe1eeSDavid Howells } 5400ac27a0ecSDave Kleikamp 5401c89128a0SJaegeuk Kim ret = ext4_setup_super(sb, es, sb_rdonly(sb)); 5402c89128a0SJaegeuk Kim if (ret == -EROFS) { 54031751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 5404c89128a0SJaegeuk Kim ret = 0; 5405c89128a0SJaegeuk Kim } else if (ret) 5406c89128a0SJaegeuk Kim goto failed_mount4a; 5407ef7f3835SKalpak Shah 5408b5799018STheodore Ts'o ext4_set_resv_clusters(sb); 540927dd4385SLukas Czerner 54100f5bde1dSJan Kara if (test_opt(sb, BLOCK_VALIDITY)) { 54116fd058f7STheodore Ts'o err = ext4_setup_system_zone(sb); 54126fd058f7STheodore Ts'o if (err) { 5413b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "failed to initialize system " 5414fbe845ddSCurt Wohlgemuth "zone (%d)", err); 5415f9ae9cf5STheodore Ts'o goto failed_mount4a; 5416f9ae9cf5STheodore Ts'o } 54170f5bde1dSJan Kara } 54188016e29fSHarshad Shirwadkar ext4_fc_replay_cleanup(sb); 5419f9ae9cf5STheodore Ts'o 5420f9ae9cf5STheodore Ts'o ext4_ext_init(sb); 5421196e402aSHarshad Shirwadkar 5422196e402aSHarshad Shirwadkar /* 5423196e402aSHarshad Shirwadkar * Enable optimize_scan if number of groups is > threshold. This can be 5424196e402aSHarshad Shirwadkar * turned off by passing "mb_optimize_scan=0". This can also be 5425196e402aSHarshad Shirwadkar * turned on forcefully by passing "mb_optimize_scan=1". 5426196e402aSHarshad Shirwadkar */ 542727b38686SOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) { 542827b38686SOjaswin Mujoo if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD) 5429196e402aSHarshad Shirwadkar set_opt2(sb, MB_OPTIMIZE_SCAN); 543027b38686SOjaswin Mujoo else 5431196e402aSHarshad Shirwadkar clear_opt2(sb, MB_OPTIMIZE_SCAN); 543227b38686SOjaswin Mujoo } 5433196e402aSHarshad Shirwadkar 5434f9ae9cf5STheodore Ts'o err = ext4_mb_init(sb); 5435f9ae9cf5STheodore Ts'o if (err) { 5436f9ae9cf5STheodore Ts'o ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 5437f9ae9cf5STheodore Ts'o err); 5438dcf2d804STao Ma goto failed_mount5; 5439c2774d84SAneesh Kumar K.V } 5440c2774d84SAneesh Kumar K.V 5441027f14f5STheodore Ts'o /* 5442027f14f5STheodore Ts'o * We can only set up the journal commit callback once 5443027f14f5STheodore Ts'o * mballoc is initialized 5444027f14f5STheodore Ts'o */ 5445027f14f5STheodore Ts'o if (sbi->s_journal) 5446027f14f5STheodore Ts'o sbi->s_journal->j_commit_callback = 5447027f14f5STheodore Ts'o ext4_journal_commit_callback; 5448027f14f5STheodore Ts'o 5449d5e03cbbSTheodore Ts'o block = ext4_count_free_clusters(sb); 5450d5e03cbbSTheodore Ts'o ext4_free_blocks_count_set(sbi->s_es, 5451d5e03cbbSTheodore Ts'o EXT4_C2B(sbi, block)); 5452908c7f19STejun Heo err = percpu_counter_init(&sbi->s_freeclusters_counter, block, 5453908c7f19STejun Heo GFP_KERNEL); 5454d5e03cbbSTheodore Ts'o if (!err) { 5455d5e03cbbSTheodore Ts'o unsigned long freei = ext4_count_free_inodes(sb); 5456d5e03cbbSTheodore Ts'o sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); 5457908c7f19STejun Heo err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, 5458908c7f19STejun Heo GFP_KERNEL); 5459d5e03cbbSTheodore Ts'o } 5460d5e03cbbSTheodore Ts'o if (!err) 5461d5e03cbbSTheodore Ts'o err = percpu_counter_init(&sbi->s_dirs_counter, 5462908c7f19STejun Heo ext4_count_dirs(sb), GFP_KERNEL); 5463d5e03cbbSTheodore Ts'o if (!err) 5464908c7f19STejun Heo err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, 5465908c7f19STejun Heo GFP_KERNEL); 5466c8585c6fSDaeho Jeong if (!err) 5467efc61345SEric Whitney err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0, 5468efc61345SEric Whitney GFP_KERNEL); 5469efc61345SEric Whitney if (!err) 5470bbd55937SEric Biggers err = percpu_init_rwsem(&sbi->s_writepages_rwsem); 5471c8585c6fSDaeho Jeong 5472d5e03cbbSTheodore Ts'o if (err) { 5473d5e03cbbSTheodore Ts'o ext4_msg(sb, KERN_ERR, "insufficient memory"); 5474d5e03cbbSTheodore Ts'o goto failed_mount6; 5475d5e03cbbSTheodore Ts'o } 5476d5e03cbbSTheodore Ts'o 5477e2b911c5SDarrick J. Wong if (ext4_has_feature_flex_bg(sb)) 5478d5e03cbbSTheodore Ts'o if (!ext4_fill_flex_info(sb)) { 5479d5e03cbbSTheodore Ts'o ext4_msg(sb, KERN_ERR, 5480d5e03cbbSTheodore Ts'o "unable to initialize " 5481d5e03cbbSTheodore Ts'o "flex_bg meta info!"); 54828f6840c4SYang Yingliang ret = -ENOMEM; 5483d5e03cbbSTheodore Ts'o goto failed_mount6; 5484d5e03cbbSTheodore Ts'o } 5485d5e03cbbSTheodore Ts'o 5486bfff6873SLukas Czerner err = ext4_register_li_request(sb, first_not_zeroed); 5487bfff6873SLukas Czerner if (err) 5488dcf2d804STao Ma goto failed_mount6; 5489bfff6873SLukas Czerner 5490b5799018STheodore Ts'o err = ext4_register_sysfs(sb); 5491dcf2d804STao Ma if (err) 5492dcf2d804STao Ma goto failed_mount7; 54933197ebdbSTheodore Ts'o 549402f310fcSJan Kara err = ext4_init_orphan_info(sb); 549502f310fcSJan Kara if (err) 549602f310fcSJan Kara goto failed_mount8; 54979b2ff357SJan Kara #ifdef CONFIG_QUOTA 54989b2ff357SJan Kara /* Enable quota usage during mount. */ 5499bc98a42cSDavid Howells if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) { 55009b2ff357SJan Kara err = ext4_enable_quotas(sb); 55019b2ff357SJan Kara if (err) 550202f310fcSJan Kara goto failed_mount9; 55039b2ff357SJan Kara } 55049b2ff357SJan Kara #endif /* CONFIG_QUOTA */ 55059b2ff357SJan Kara 5506bc71726cSzhangyi (F) /* 5507bc71726cSzhangyi (F) * Save the original bdev mapping's wb_err value which could be 5508bc71726cSzhangyi (F) * used to detect the metadata async write error. 5509bc71726cSzhangyi (F) */ 5510bc71726cSzhangyi (F) spin_lock_init(&sbi->s_bdev_wb_lock); 5511bc71726cSzhangyi (F) errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, 5512bc71726cSzhangyi (F) &sbi->s_bdev_wb_err); 5513bc71726cSzhangyi (F) sb->s_bdev->bd_super = sb; 5514617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 5515617ba13bSMingming Cao ext4_orphan_cleanup(sb, es); 5516617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 55179b6641ddSYe Bin /* 55189b6641ddSYe Bin * Update the checksum after updating free space/inode counters and 55199b6641ddSYe Bin * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect 55209b6641ddSYe Bin * checksum in the buffer cache until it is written out and 55219b6641ddSYe Bin * e2fsprogs programs trying to open a file system immediately 55229b6641ddSYe Bin * after it is mounted can fail. 55239b6641ddSYe Bin */ 55249b6641ddSYe Bin ext4_superblock_csum_set(sb); 55250390131bSFrank Mayhar if (needs_recovery) { 5526b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "recovery complete"); 552711215630SJan Kara err = ext4_mark_recovery_complete(sb, es); 552811215630SJan Kara if (err) 552902f310fcSJan Kara goto failed_mount9; 55300390131bSFrank Mayhar } 55310390131bSFrank Mayhar 553270200574SChristoph Hellwig if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) 553379add3a3SLukas Czerner ext4_msg(sb, KERN_WARNING, 553470200574SChristoph Hellwig "mounting with \"discard\" option, but the device does not support discard"); 553579add3a3SLukas Czerner 553666e61a9eSTheodore Ts'o if (es->s_error_count) 553766e61a9eSTheodore Ts'o mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 5538ac27a0ecSDave Kleikamp 5539efbed4dcSTheodore Ts'o /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ 5540efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); 5541efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); 5542efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); 55431cf006edSDmitry Monakhov atomic_set(&sbi->s_warning_count, 0); 55441cf006edSDmitry Monakhov atomic_set(&sbi->s_msg_count, 0); 5545efbed4dcSTheodore Ts'o 5546ac27a0ecSDave Kleikamp return 0; 5547ac27a0ecSDave Kleikamp 554802f310fcSJan Kara failed_mount9: 554902f310fcSJan Kara ext4_release_orphan_info(sb); 555072ba7450STheodore Ts'o failed_mount8: 5551ebd173beSTheodore Ts'o ext4_unregister_sysfs(sb); 5552cb8d53d2SEric Biggers kobject_put(&sbi->s_kobj); 5553dcf2d804STao Ma failed_mount7: 5554dcf2d804STao Ma ext4_unregister_li_request(sb); 5555dcf2d804STao Ma failed_mount6: 5556f9ae9cf5STheodore Ts'o ext4_mb_release(sb); 55577c990728SSuraj Jitindar Singh rcu_read_lock(); 55587c990728SSuraj Jitindar Singh flex_groups = rcu_dereference(sbi->s_flex_groups); 55597c990728SSuraj Jitindar Singh if (flex_groups) { 55607c990728SSuraj Jitindar Singh for (i = 0; i < sbi->s_flex_groups_allocated; i++) 55617c990728SSuraj Jitindar Singh kvfree(flex_groups[i]); 55627c990728SSuraj Jitindar Singh kvfree(flex_groups); 55637c990728SSuraj Jitindar Singh } 55647c990728SSuraj Jitindar Singh rcu_read_unlock(); 5565d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_freeclusters_counter); 5566d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_freeinodes_counter); 5567d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_dirs_counter); 5568d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 5569efc61345SEric Whitney percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); 5570bbd55937SEric Biggers percpu_free_rwsem(&sbi->s_writepages_rwsem); 557100764937SAzat Khuzhin failed_mount5: 5572f9ae9cf5STheodore Ts'o ext4_ext_release(sb); 5573f9ae9cf5STheodore Ts'o ext4_release_system_zone(sb); 5574f9ae9cf5STheodore Ts'o failed_mount4a: 557594bf608aSAl Viro dput(sb->s_root); 557632a9bb57SManish Katiyar sb->s_root = NULL; 557794bf608aSAl Viro failed_mount4: 5578b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "mount failed"); 55792e8fa54eSJan Kara if (EXT4_SB(sb)->rsv_conversion_wq) 55802e8fa54eSJan Kara destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); 55814c0425ffSMingming Cao failed_mount_wq: 5582dec214d0STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_inode_cache); 5583dec214d0STahsin Erdogan sbi->s_ea_inode_cache = NULL; 558450c15df6SChengguang Xu 558547387409STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 558647387409STahsin Erdogan sbi->s_ea_block_cache = NULL; 558750c15df6SChengguang Xu 55880390131bSFrank Mayhar if (sbi->s_journal) { 5589bb9464e0Syangerkun /* flush s_error_work before journal destroy. */ 5590bb9464e0Syangerkun flush_work(&sbi->s_error_work); 5591dab291afSMingming Cao jbd2_journal_destroy(sbi->s_journal); 559247b4a50bSJan Kara sbi->s_journal = NULL; 55930390131bSFrank Mayhar } 559450460fe8SDarrick J. Wong failed_mount3a: 5595d3922a77SZheng Liu ext4_es_unregister_shrinker(sbi); 5596eb68d0e2SZheng Liu failed_mount3: 5597bb9464e0Syangerkun /* flush s_error_work before sbi destroy */ 5598c92dc856SJan Kara flush_work(&sbi->s_error_work); 55992a4ae3bcSJan Kara del_timer_sync(&sbi->s_err_report); 5600618f0031SPavel Skripkin ext4_stop_mmpd(sbi); 5601a4e6a511SJason Yan ext4_group_desc_free(sbi); 5602ac27a0ecSDave Kleikamp failed_mount: 56030441984aSDarrick J. Wong if (sbi->s_chksum_driver) 56040441984aSDarrick J. Wong crypto_free_shash(sbi->s_chksum_driver); 5605c83ad55eSGabriel Krisman Bertazi 56065298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 5607f8f4acb6SDaniel Rosenberg utf8_unload(sb->s_encoding); 5608c83ad55eSGabriel Krisman Bertazi #endif 5609c83ad55eSGabriel Krisman Bertazi 5610ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 5611a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 56120ba33facSTheodore Ts'o kfree(get_qf_name(sb, sbi, i)); 5613ac27a0ecSDave Kleikamp #endif 5614ac4acb1fSEric Biggers fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); 5615afd09b61SAlexey Makhalov /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */ 5616a7a79c29SJason Yan brelse(sbi->s_sbh); 5617afd09b61SAlexey Makhalov ext4_blkdev_remove(sbi); 5618ac27a0ecSDave Kleikamp out_fail: 5619ac27a0ecSDave Kleikamp sb->s_fs_info = NULL; 562007aa2ea1SLukas Czerner return err ? err : ret; 5621ac27a0ecSDave Kleikamp } 5622ac27a0ecSDave Kleikamp 5623cebe85d5SLukas Czerner static int ext4_fill_super(struct super_block *sb, struct fs_context *fc) 56247edfd85bSLukas Czerner { 5625cebe85d5SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 56267edfd85bSLukas Czerner struct ext4_sb_info *sbi; 56277edfd85bSLukas Czerner const char *descr; 5628cebe85d5SLukas Czerner int ret; 56297edfd85bSLukas Czerner 5630cebe85d5SLukas Czerner sbi = ext4_alloc_sbi(sb); 5631cebe85d5SLukas Czerner if (!sbi) 56327c268d4cSLukas Czerner return -ENOMEM; 5633cebe85d5SLukas Czerner 5634cebe85d5SLukas Czerner fc->s_fs_info = sbi; 56357edfd85bSLukas Czerner 56367edfd85bSLukas Czerner /* Cleanup superblock name */ 56377edfd85bSLukas Czerner strreplace(sb->s_id, '/', '!'); 56387edfd85bSLukas Czerner 56397edfd85bSLukas Czerner sbi->s_sb_block = 1; /* Default super block location */ 5640cebe85d5SLukas Czerner if (ctx->spec & EXT4_SPEC_s_sb_block) 5641cebe85d5SLukas Czerner sbi->s_sb_block = ctx->s_sb_block; 56427edfd85bSLukas Czerner 5643960e0ab6SLukas Czerner ret = __ext4_fill_super(fc, sb); 56447edfd85bSLukas Czerner if (ret < 0) 56457edfd85bSLukas Czerner goto free_sbi; 56467edfd85bSLukas Czerner 5647cebe85d5SLukas Czerner if (sbi->s_journal) { 56487edfd85bSLukas Czerner if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 56497edfd85bSLukas Czerner descr = " journalled data mode"; 56507edfd85bSLukas Czerner else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 56517edfd85bSLukas Czerner descr = " ordered data mode"; 56527edfd85bSLukas Czerner else 56537edfd85bSLukas Czerner descr = " writeback data mode"; 56547edfd85bSLukas Czerner } else 56557edfd85bSLukas Czerner descr = "out journal"; 56567edfd85bSLukas Czerner 56577edfd85bSLukas Czerner if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) 56587edfd85bSLukas Czerner ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " 5659cebe85d5SLukas Czerner "Quota mode: %s.", descr, ext4_quota_mode(sb)); 56607edfd85bSLukas Czerner 5661eb705421STheodore Ts'o /* Update the s_overhead_clusters if necessary */ 5662827891a3STheodore Ts'o ext4_update_overhead(sb, false); 56637edfd85bSLukas Czerner return 0; 5664cebe85d5SLukas Czerner 56657edfd85bSLukas Czerner free_sbi: 56667edfd85bSLukas Czerner ext4_free_sbi(sbi); 5667cebe85d5SLukas Czerner fc->s_fs_info = NULL; 56687edfd85bSLukas Czerner return ret; 56697edfd85bSLukas Czerner } 56707edfd85bSLukas Czerner 5671cebe85d5SLukas Czerner static int ext4_get_tree(struct fs_context *fc) 5672cebe85d5SLukas Czerner { 5673cebe85d5SLukas Czerner return get_tree_bdev(fc, ext4_fill_super); 5674cebe85d5SLukas Czerner } 5675cebe85d5SLukas Czerner 5676ac27a0ecSDave Kleikamp /* 5677ac27a0ecSDave Kleikamp * Setup any per-fs journal parameters now. We'll do this both on 5678ac27a0ecSDave Kleikamp * initial mount, once the journal has been initialised but before we've 5679ac27a0ecSDave Kleikamp * done any recovery; and again on any subsequent remount. 5680ac27a0ecSDave Kleikamp */ 5681617ba13bSMingming Cao static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) 5682ac27a0ecSDave Kleikamp { 5683617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 5684ac27a0ecSDave Kleikamp 5685ac27a0ecSDave Kleikamp journal->j_commit_interval = sbi->s_commit_interval; 568630773840STheodore Ts'o journal->j_min_batch_time = sbi->s_min_batch_time; 568730773840STheodore Ts'o journal->j_max_batch_time = sbi->s_max_batch_time; 56886866d7b3SHarshad Shirwadkar ext4_fc_init(sb, journal); 5689ac27a0ecSDave Kleikamp 5690a931da6aSTheodore Ts'o write_lock(&journal->j_state_lock); 5691ac27a0ecSDave Kleikamp if (test_opt(sb, BARRIER)) 5692dab291afSMingming Cao journal->j_flags |= JBD2_BARRIER; 5693ac27a0ecSDave Kleikamp else 5694dab291afSMingming Cao journal->j_flags &= ~JBD2_BARRIER; 56955bf5683aSHidehiro Kawai if (test_opt(sb, DATA_ERR_ABORT)) 56965bf5683aSHidehiro Kawai journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; 56975bf5683aSHidehiro Kawai else 56985bf5683aSHidehiro Kawai journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; 5699a931da6aSTheodore Ts'o write_unlock(&journal->j_state_lock); 5700ac27a0ecSDave Kleikamp } 5701ac27a0ecSDave Kleikamp 5702c6cb7e77SEric Whitney static struct inode *ext4_get_journal_inode(struct super_block *sb, 5703ac27a0ecSDave Kleikamp unsigned int journal_inum) 5704ac27a0ecSDave Kleikamp { 5705ac27a0ecSDave Kleikamp struct inode *journal_inode; 5706ac27a0ecSDave Kleikamp 5707c6cb7e77SEric Whitney /* 5708c6cb7e77SEric Whitney * Test for the existence of a valid inode on disk. Bad things 5709c6cb7e77SEric Whitney * happen if we iget() an unused inode, as the subsequent iput() 5710c6cb7e77SEric Whitney * will try to delete it. 5711c6cb7e77SEric Whitney */ 57128a363970STheodore Ts'o journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL); 57131d1fe1eeSDavid Howells if (IS_ERR(journal_inode)) { 5714b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "no journal found"); 5715ac27a0ecSDave Kleikamp return NULL; 5716ac27a0ecSDave Kleikamp } 5717ac27a0ecSDave Kleikamp if (!journal_inode->i_nlink) { 5718ac27a0ecSDave Kleikamp make_bad_inode(journal_inode); 5719ac27a0ecSDave Kleikamp iput(journal_inode); 5720b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "journal inode is deleted"); 5721ac27a0ecSDave Kleikamp return NULL; 5722ac27a0ecSDave Kleikamp } 5723ac27a0ecSDave Kleikamp 57244978c659SJan Kara ext4_debug("Journal inode found at %p: %lld bytes\n", 5725ac27a0ecSDave Kleikamp journal_inode, journal_inode->i_size); 5726*105c78e1SEric Biggers if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) { 5727b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "invalid journal inode"); 5728ac27a0ecSDave Kleikamp iput(journal_inode); 5729ac27a0ecSDave Kleikamp return NULL; 5730ac27a0ecSDave Kleikamp } 5731c6cb7e77SEric Whitney return journal_inode; 5732c6cb7e77SEric Whitney } 5733c6cb7e77SEric Whitney 5734c6cb7e77SEric Whitney static journal_t *ext4_get_journal(struct super_block *sb, 5735c6cb7e77SEric Whitney unsigned int journal_inum) 5736c6cb7e77SEric Whitney { 5737c6cb7e77SEric Whitney struct inode *journal_inode; 5738c6cb7e77SEric Whitney journal_t *journal; 5739c6cb7e77SEric Whitney 574011215630SJan Kara if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 574111215630SJan Kara return NULL; 5742c6cb7e77SEric Whitney 5743c6cb7e77SEric Whitney journal_inode = ext4_get_journal_inode(sb, journal_inum); 5744c6cb7e77SEric Whitney if (!journal_inode) 5745c6cb7e77SEric Whitney return NULL; 5746ac27a0ecSDave Kleikamp 5747dab291afSMingming Cao journal = jbd2_journal_init_inode(journal_inode); 5748ac27a0ecSDave Kleikamp if (!journal) { 5749b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "Could not load journal inode"); 5750ac27a0ecSDave Kleikamp iput(journal_inode); 5751ac27a0ecSDave Kleikamp return NULL; 5752ac27a0ecSDave Kleikamp } 5753ac27a0ecSDave Kleikamp journal->j_private = sb; 5754617ba13bSMingming Cao ext4_init_journal_params(sb, journal); 5755ac27a0ecSDave Kleikamp return journal; 5756ac27a0ecSDave Kleikamp } 5757ac27a0ecSDave Kleikamp 5758617ba13bSMingming Cao static journal_t *ext4_get_dev_journal(struct super_block *sb, 5759ac27a0ecSDave Kleikamp dev_t j_dev) 5760ac27a0ecSDave Kleikamp { 5761ac27a0ecSDave Kleikamp struct buffer_head *bh; 5762ac27a0ecSDave Kleikamp journal_t *journal; 5763617ba13bSMingming Cao ext4_fsblk_t start; 5764617ba13bSMingming Cao ext4_fsblk_t len; 5765ac27a0ecSDave Kleikamp int hblock, blocksize; 5766617ba13bSMingming Cao ext4_fsblk_t sb_block; 5767ac27a0ecSDave Kleikamp unsigned long offset; 5768617ba13bSMingming Cao struct ext4_super_block *es; 5769ac27a0ecSDave Kleikamp struct block_device *bdev; 5770ac27a0ecSDave Kleikamp 577111215630SJan Kara if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 577211215630SJan Kara return NULL; 57730390131bSFrank Mayhar 5774b31e1552SEric Sandeen bdev = ext4_blkdev_get(j_dev, sb); 5775ac27a0ecSDave Kleikamp if (bdev == NULL) 5776ac27a0ecSDave Kleikamp return NULL; 5777ac27a0ecSDave Kleikamp 5778ac27a0ecSDave Kleikamp blocksize = sb->s_blocksize; 5779e1defc4fSMartin K. Petersen hblock = bdev_logical_block_size(bdev); 5780ac27a0ecSDave Kleikamp if (blocksize < hblock) { 5781b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, 5782b31e1552SEric Sandeen "blocksize too small for journal device"); 5783ac27a0ecSDave Kleikamp goto out_bdev; 5784ac27a0ecSDave Kleikamp } 5785ac27a0ecSDave Kleikamp 5786617ba13bSMingming Cao sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; 5787617ba13bSMingming Cao offset = EXT4_MIN_BLOCK_SIZE % blocksize; 5788ac27a0ecSDave Kleikamp set_blocksize(bdev, blocksize); 5789ac27a0ecSDave Kleikamp if (!(bh = __bread(bdev, sb_block, blocksize))) { 5790b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "couldn't read superblock of " 5791b31e1552SEric Sandeen "external journal"); 5792ac27a0ecSDave Kleikamp goto out_bdev; 5793ac27a0ecSDave Kleikamp } 5794ac27a0ecSDave Kleikamp 57952716b802STheodore Ts'o es = (struct ext4_super_block *) (bh->b_data + offset); 5796617ba13bSMingming Cao if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || 5797ac27a0ecSDave Kleikamp !(le32_to_cpu(es->s_feature_incompat) & 5798617ba13bSMingming Cao EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { 5799b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "external journal has " 5800b31e1552SEric Sandeen "bad superblock"); 5801ac27a0ecSDave Kleikamp brelse(bh); 5802ac27a0ecSDave Kleikamp goto out_bdev; 5803ac27a0ecSDave Kleikamp } 5804ac27a0ecSDave Kleikamp 5805df4763beSDarrick J. Wong if ((le32_to_cpu(es->s_feature_ro_compat) & 5806df4763beSDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && 5807df4763beSDarrick J. Wong es->s_checksum != ext4_superblock_csum(sb, es)) { 5808df4763beSDarrick J. Wong ext4_msg(sb, KERN_ERR, "external journal has " 5809df4763beSDarrick J. Wong "corrupt superblock"); 5810df4763beSDarrick J. Wong brelse(bh); 5811df4763beSDarrick J. Wong goto out_bdev; 5812df4763beSDarrick J. Wong } 5813df4763beSDarrick J. Wong 5814617ba13bSMingming Cao if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { 5815b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "journal UUID does not match"); 5816ac27a0ecSDave Kleikamp brelse(bh); 5817ac27a0ecSDave Kleikamp goto out_bdev; 5818ac27a0ecSDave Kleikamp } 5819ac27a0ecSDave Kleikamp 5820bd81d8eeSLaurent Vivier len = ext4_blocks_count(es); 5821ac27a0ecSDave Kleikamp start = sb_block + 1; 5822ac27a0ecSDave Kleikamp brelse(bh); /* we're done with the superblock */ 5823ac27a0ecSDave Kleikamp 5824dab291afSMingming Cao journal = jbd2_journal_init_dev(bdev, sb->s_bdev, 5825ac27a0ecSDave Kleikamp start, len, blocksize); 5826ac27a0ecSDave Kleikamp if (!journal) { 5827b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "failed to create device journal"); 5828ac27a0ecSDave Kleikamp goto out_bdev; 5829ac27a0ecSDave Kleikamp } 5830ac27a0ecSDave Kleikamp journal->j_private = sb; 58312d069c08Szhangyi (F) if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) { 5832b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "I/O error on journal device"); 5833ac27a0ecSDave Kleikamp goto out_journal; 5834ac27a0ecSDave Kleikamp } 5835ac27a0ecSDave Kleikamp if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { 5836b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "External journal has more than one " 5837b31e1552SEric Sandeen "user (unsupported) - %d", 5838ac27a0ecSDave Kleikamp be32_to_cpu(journal->j_superblock->s_nr_users)); 5839ac27a0ecSDave Kleikamp goto out_journal; 5840ac27a0ecSDave Kleikamp } 5841ee7ed3aaSChunguang Xu EXT4_SB(sb)->s_journal_bdev = bdev; 5842617ba13bSMingming Cao ext4_init_journal_params(sb, journal); 5843ac27a0ecSDave Kleikamp return journal; 58440b8e58a1SAndreas Dilger 5845ac27a0ecSDave Kleikamp out_journal: 5846dab291afSMingming Cao jbd2_journal_destroy(journal); 5847ac27a0ecSDave Kleikamp out_bdev: 5848617ba13bSMingming Cao ext4_blkdev_put(bdev); 5849ac27a0ecSDave Kleikamp return NULL; 5850ac27a0ecSDave Kleikamp } 5851ac27a0ecSDave Kleikamp 5852617ba13bSMingming Cao static int ext4_load_journal(struct super_block *sb, 5853617ba13bSMingming Cao struct ext4_super_block *es, 5854ac27a0ecSDave Kleikamp unsigned long journal_devnum) 5855ac27a0ecSDave Kleikamp { 5856ac27a0ecSDave Kleikamp journal_t *journal; 5857ac27a0ecSDave Kleikamp unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); 5858ac27a0ecSDave Kleikamp dev_t journal_dev; 5859ac27a0ecSDave Kleikamp int err = 0; 5860ac27a0ecSDave Kleikamp int really_read_only; 5861273108faSLukas Czerner int journal_dev_ro; 5862ac27a0ecSDave Kleikamp 586311215630SJan Kara if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) 586411215630SJan Kara return -EFSCORRUPTED; 58650390131bSFrank Mayhar 5866ac27a0ecSDave Kleikamp if (journal_devnum && 5867ac27a0ecSDave Kleikamp journal_devnum != le32_to_cpu(es->s_journal_dev)) { 5868b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "external journal device major/minor " 5869b31e1552SEric Sandeen "numbers have changed"); 5870ac27a0ecSDave Kleikamp journal_dev = new_decode_dev(journal_devnum); 5871ac27a0ecSDave Kleikamp } else 5872ac27a0ecSDave Kleikamp journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); 5873ac27a0ecSDave Kleikamp 5874273108faSLukas Czerner if (journal_inum && journal_dev) { 5875273108faSLukas Czerner ext4_msg(sb, KERN_ERR, 5876273108faSLukas Czerner "filesystem has both journal inode and journal device!"); 5877273108faSLukas Czerner return -EINVAL; 5878273108faSLukas Czerner } 5879273108faSLukas Czerner 5880273108faSLukas Czerner if (journal_inum) { 5881273108faSLukas Czerner journal = ext4_get_journal(sb, journal_inum); 5882273108faSLukas Czerner if (!journal) 5883273108faSLukas Czerner return -EINVAL; 5884273108faSLukas Czerner } else { 5885273108faSLukas Czerner journal = ext4_get_dev_journal(sb, journal_dev); 5886273108faSLukas Czerner if (!journal) 5887273108faSLukas Czerner return -EINVAL; 5888273108faSLukas Czerner } 5889273108faSLukas Czerner 5890273108faSLukas Czerner journal_dev_ro = bdev_read_only(journal->j_dev); 5891273108faSLukas Czerner really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; 5892273108faSLukas Czerner 5893273108faSLukas Czerner if (journal_dev_ro && !sb_rdonly(sb)) { 5894273108faSLukas Czerner ext4_msg(sb, KERN_ERR, 5895273108faSLukas Czerner "journal device read-only, try mounting with '-o ro'"); 5896273108faSLukas Czerner err = -EROFS; 5897273108faSLukas Czerner goto err_out; 5898273108faSLukas Czerner } 5899ac27a0ecSDave Kleikamp 5900ac27a0ecSDave Kleikamp /* 5901ac27a0ecSDave Kleikamp * Are we loading a blank journal or performing recovery after a 5902ac27a0ecSDave Kleikamp * crash? For recovery, we need to check in advance whether we 5903ac27a0ecSDave Kleikamp * can get read-write access to the device. 5904ac27a0ecSDave Kleikamp */ 5905e2b911c5SDarrick J. Wong if (ext4_has_feature_journal_needs_recovery(sb)) { 5906bc98a42cSDavid Howells if (sb_rdonly(sb)) { 5907b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "INFO: recovery " 5908b31e1552SEric Sandeen "required on readonly filesystem"); 5909ac27a0ecSDave Kleikamp if (really_read_only) { 5910b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "write access " 5911d98bf8cdSSimon Ruderich "unavailable, cannot proceed " 5912d98bf8cdSSimon Ruderich "(try mounting with noload)"); 5913273108faSLukas Czerner err = -EROFS; 5914273108faSLukas Czerner goto err_out; 5915ac27a0ecSDave Kleikamp } 5916b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "write access will " 5917b31e1552SEric Sandeen "be enabled during recovery"); 5918ac27a0ecSDave Kleikamp } 5919ac27a0ecSDave Kleikamp } 5920ac27a0ecSDave Kleikamp 592190576c0bSTheodore Ts'o if (!(journal->j_flags & JBD2_BARRIER)) 5922b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "barriers disabled"); 59234776004fSTheodore Ts'o 5924e2b911c5SDarrick J. Wong if (!ext4_has_feature_journal_needs_recovery(sb)) 5925dab291afSMingming Cao err = jbd2_journal_wipe(journal, !really_read_only); 59261c13d5c0STheodore Ts'o if (!err) { 59271c13d5c0STheodore Ts'o char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); 59281c13d5c0STheodore Ts'o if (save) 59291c13d5c0STheodore Ts'o memcpy(save, ((char *) es) + 59301c13d5c0STheodore Ts'o EXT4_S_ERR_START, EXT4_S_ERR_LEN); 5931dab291afSMingming Cao err = jbd2_journal_load(journal); 59321c13d5c0STheodore Ts'o if (save) 59331c13d5c0STheodore Ts'o memcpy(((char *) es) + EXT4_S_ERR_START, 59341c13d5c0STheodore Ts'o save, EXT4_S_ERR_LEN); 59351c13d5c0STheodore Ts'o kfree(save); 59361c13d5c0STheodore Ts'o } 5937ac27a0ecSDave Kleikamp 5938ac27a0ecSDave Kleikamp if (err) { 5939b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "error loading journal"); 5940273108faSLukas Czerner goto err_out; 5941ac27a0ecSDave Kleikamp } 5942ac27a0ecSDave Kleikamp 5943617ba13bSMingming Cao EXT4_SB(sb)->s_journal = journal; 594411215630SJan Kara err = ext4_clear_journal_err(sb, es); 594511215630SJan Kara if (err) { 594611215630SJan Kara EXT4_SB(sb)->s_journal = NULL; 594711215630SJan Kara jbd2_journal_destroy(journal); 594811215630SJan Kara return err; 594911215630SJan Kara } 5950ac27a0ecSDave Kleikamp 5951c41303ceSMaciej Żenczykowski if (!really_read_only && journal_devnum && 5952ac27a0ecSDave Kleikamp journal_devnum != le32_to_cpu(es->s_journal_dev)) { 5953ac27a0ecSDave Kleikamp es->s_journal_dev = cpu_to_le32(journal_devnum); 5954ac27a0ecSDave Kleikamp 5955ac27a0ecSDave Kleikamp /* Make sure we flush the recovery flag to disk. */ 59564392fbc4SJan Kara ext4_commit_super(sb); 5957ac27a0ecSDave Kleikamp } 5958ac27a0ecSDave Kleikamp 5959ac27a0ecSDave Kleikamp return 0; 5960273108faSLukas Czerner 5961273108faSLukas Czerner err_out: 5962273108faSLukas Czerner jbd2_journal_destroy(journal); 5963273108faSLukas Czerner return err; 5964ac27a0ecSDave Kleikamp } 5965ac27a0ecSDave Kleikamp 59662d01ddc8SJan Kara /* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */ 59672d01ddc8SJan Kara static void ext4_update_super(struct super_block *sb) 5968ac27a0ecSDave Kleikamp { 5969c92dc856SJan Kara struct ext4_sb_info *sbi = EXT4_SB(sb); 5970e92ad03fSJan Kara struct ext4_super_block *es = sbi->s_es; 5971e92ad03fSJan Kara struct buffer_head *sbh = sbi->s_sbh; 5972ac27a0ecSDave Kleikamp 597305c2c00fSJan Kara lock_buffer(sbh); 5974a17712c8SJon Derrick /* 597571290b36STheodore Ts'o * If the file system is mounted read-only, don't update the 597671290b36STheodore Ts'o * superblock write time. This avoids updating the superblock 597771290b36STheodore Ts'o * write time when we are mounting the root file system 597871290b36STheodore Ts'o * read/only but we need to replay the journal; at that point, 597971290b36STheodore Ts'o * for people who are east of GMT and who make their clock 598071290b36STheodore Ts'o * tick in localtime for Windows bug-for-bug compatibility, 598171290b36STheodore Ts'o * the clock is set in the future, and this will cause e2fsck 598271290b36STheodore Ts'o * to complain and force a full file system check. 598371290b36STheodore Ts'o */ 59841751e8a6SLinus Torvalds if (!(sb->s_flags & SB_RDONLY)) 59856a0678a7SArnd Bergmann ext4_update_tstamp(es, s_wtime); 5986afc32f7eSTheodore Ts'o es->s_kbytes_written = 5987e92ad03fSJan Kara cpu_to_le64(sbi->s_kbytes_written + 59888446fe92SChristoph Hellwig ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) - 5989e92ad03fSJan Kara sbi->s_sectors_written_start) >> 1)); 5990e92ad03fSJan Kara if (percpu_counter_initialized(&sbi->s_freeclusters_counter)) 599157042651STheodore Ts'o ext4_free_blocks_count_set(es, 5992e92ad03fSJan Kara EXT4_C2B(sbi, percpu_counter_sum_positive( 5993e92ad03fSJan Kara &sbi->s_freeclusters_counter))); 5994e92ad03fSJan Kara if (percpu_counter_initialized(&sbi->s_freeinodes_counter)) 59957f93cff9STheodore Ts'o es->s_free_inodes_count = 59967f93cff9STheodore Ts'o cpu_to_le32(percpu_counter_sum_positive( 5997e92ad03fSJan Kara &sbi->s_freeinodes_counter)); 5998c92dc856SJan Kara /* Copy error information to the on-disk superblock */ 5999c92dc856SJan Kara spin_lock(&sbi->s_error_lock); 6000c92dc856SJan Kara if (sbi->s_add_error_count > 0) { 6001c92dc856SJan Kara es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 6002c92dc856SJan Kara if (!es->s_first_error_time && !es->s_first_error_time_hi) { 6003c92dc856SJan Kara __ext4_update_tstamp(&es->s_first_error_time, 6004c92dc856SJan Kara &es->s_first_error_time_hi, 6005c92dc856SJan Kara sbi->s_first_error_time); 6006c92dc856SJan Kara strncpy(es->s_first_error_func, sbi->s_first_error_func, 6007c92dc856SJan Kara sizeof(es->s_first_error_func)); 6008c92dc856SJan Kara es->s_first_error_line = 6009c92dc856SJan Kara cpu_to_le32(sbi->s_first_error_line); 6010c92dc856SJan Kara es->s_first_error_ino = 6011c92dc856SJan Kara cpu_to_le32(sbi->s_first_error_ino); 6012c92dc856SJan Kara es->s_first_error_block = 6013c92dc856SJan Kara cpu_to_le64(sbi->s_first_error_block); 6014c92dc856SJan Kara es->s_first_error_errcode = 6015c92dc856SJan Kara ext4_errno_to_code(sbi->s_first_error_code); 6016c92dc856SJan Kara } 6017c92dc856SJan Kara __ext4_update_tstamp(&es->s_last_error_time, 6018c92dc856SJan Kara &es->s_last_error_time_hi, 6019c92dc856SJan Kara sbi->s_last_error_time); 6020c92dc856SJan Kara strncpy(es->s_last_error_func, sbi->s_last_error_func, 6021c92dc856SJan Kara sizeof(es->s_last_error_func)); 6022c92dc856SJan Kara es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line); 6023c92dc856SJan Kara es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino); 6024c92dc856SJan Kara es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block); 6025c92dc856SJan Kara es->s_last_error_errcode = 6026c92dc856SJan Kara ext4_errno_to_code(sbi->s_last_error_code); 6027c92dc856SJan Kara /* 6028c92dc856SJan Kara * Start the daily error reporting function if it hasn't been 6029c92dc856SJan Kara * started already 6030c92dc856SJan Kara */ 6031c92dc856SJan Kara if (!es->s_error_count) 6032c92dc856SJan Kara mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); 6033c92dc856SJan Kara le32_add_cpu(&es->s_error_count, sbi->s_add_error_count); 6034c92dc856SJan Kara sbi->s_add_error_count = 0; 6035c92dc856SJan Kara } 6036c92dc856SJan Kara spin_unlock(&sbi->s_error_lock); 6037c92dc856SJan Kara 603806db49e6STheodore Ts'o ext4_superblock_csum_set(sb); 60392d01ddc8SJan Kara unlock_buffer(sbh); 60402d01ddc8SJan Kara } 60412d01ddc8SJan Kara 60422d01ddc8SJan Kara static int ext4_commit_super(struct super_block *sb) 60432d01ddc8SJan Kara { 60442d01ddc8SJan Kara struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; 60452d01ddc8SJan Kara 6046f88f1466SFengnan Chang if (!sbh) 6047f88f1466SFengnan Chang return -EINVAL; 6048f88f1466SFengnan Chang if (block_device_ejected(sb)) 6049f88f1466SFengnan Chang return -ENODEV; 60502d01ddc8SJan Kara 60512d01ddc8SJan Kara ext4_update_super(sb); 60522d01ddc8SJan Kara 605315baa7dcSZhang Yi lock_buffer(sbh); 605415baa7dcSZhang Yi /* Buffer got discarded which means block device got invalidated */ 605515baa7dcSZhang Yi if (!buffer_mapped(sbh)) { 605615baa7dcSZhang Yi unlock_buffer(sbh); 605715baa7dcSZhang Yi return -EIO; 605815baa7dcSZhang Yi } 605915baa7dcSZhang Yi 6060e8680786STheodore Ts'o if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { 60614743f839SPranay Kr. Srivastava /* 60624743f839SPranay Kr. Srivastava * Oh, dear. A previous attempt to write the 60634743f839SPranay Kr. Srivastava * superblock failed. This could happen because the 60644743f839SPranay Kr. Srivastava * USB device was yanked out. Or it could happen to 60654743f839SPranay Kr. Srivastava * be a transient write error and maybe the block will 60664743f839SPranay Kr. Srivastava * be remapped. Nothing we can do but to retry the 60674743f839SPranay Kr. Srivastava * write and hope for the best. 60684743f839SPranay Kr. Srivastava */ 60694743f839SPranay Kr. Srivastava ext4_msg(sb, KERN_ERR, "previous I/O error to " 60704743f839SPranay Kr. Srivastava "superblock detected"); 60714743f839SPranay Kr. Srivastava clear_buffer_write_io_error(sbh); 60724743f839SPranay Kr. Srivastava set_buffer_uptodate(sbh); 60734743f839SPranay Kr. Srivastava } 607415baa7dcSZhang Yi get_bh(sbh); 607515baa7dcSZhang Yi /* Clear potential dirty bit if it was journalled update */ 607615baa7dcSZhang Yi clear_buffer_dirty(sbh); 607715baa7dcSZhang Yi sbh->b_end_io = end_buffer_write_sync; 60781420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | REQ_SYNC | 60791420c4a5SBart Van Assche (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh); 608015baa7dcSZhang Yi wait_on_buffer(sbh); 6081c89128a0SJaegeuk Kim if (buffer_write_io_error(sbh)) { 6082b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "I/O error while writing " 6083b31e1552SEric Sandeen "superblock"); 6084914258bfSTheodore Ts'o clear_buffer_write_io_error(sbh); 6085914258bfSTheodore Ts'o set_buffer_uptodate(sbh); 608615baa7dcSZhang Yi return -EIO; 6087914258bfSTheodore Ts'o } 608815baa7dcSZhang Yi return 0; 6089ac27a0ecSDave Kleikamp } 6090ac27a0ecSDave Kleikamp 6091ac27a0ecSDave Kleikamp /* 6092ac27a0ecSDave Kleikamp * Have we just finished recovery? If so, and if we are mounting (or 6093ac27a0ecSDave Kleikamp * remounting) the filesystem readonly, then we will end up with a 6094ac27a0ecSDave Kleikamp * consistent fs on disk. Record that fact. 6095ac27a0ecSDave Kleikamp */ 609611215630SJan Kara static int ext4_mark_recovery_complete(struct super_block *sb, 6097617ba13bSMingming Cao struct ext4_super_block *es) 6098ac27a0ecSDave Kleikamp { 609911215630SJan Kara int err; 6100617ba13bSMingming Cao journal_t *journal = EXT4_SB(sb)->s_journal; 6101ac27a0ecSDave Kleikamp 6102e2b911c5SDarrick J. Wong if (!ext4_has_feature_journal(sb)) { 610311215630SJan Kara if (journal != NULL) { 610411215630SJan Kara ext4_error(sb, "Journal got removed while the fs was " 610511215630SJan Kara "mounted!"); 610611215630SJan Kara return -EFSCORRUPTED; 610711215630SJan Kara } 610811215630SJan Kara return 0; 61090390131bSFrank Mayhar } 6110dab291afSMingming Cao jbd2_journal_lock_updates(journal); 611101d5d965SLeah Rumancik err = jbd2_journal_flush(journal, 0); 611211215630SJan Kara if (err < 0) 61137ffe1ea8SHidehiro Kawai goto out; 61147ffe1ea8SHidehiro Kawai 611502f310fcSJan Kara if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) || 611602f310fcSJan Kara ext4_has_feature_orphan_present(sb))) { 611702f310fcSJan Kara if (!ext4_orphan_file_empty(sb)) { 611802f310fcSJan Kara ext4_error(sb, "Orphan file not empty on read-only fs."); 611902f310fcSJan Kara err = -EFSCORRUPTED; 612002f310fcSJan Kara goto out; 612102f310fcSJan Kara } 6122e2b911c5SDarrick J. Wong ext4_clear_feature_journal_needs_recovery(sb); 612302f310fcSJan Kara ext4_clear_feature_orphan_present(sb); 61244392fbc4SJan Kara ext4_commit_super(sb); 6125ac27a0ecSDave Kleikamp } 61267ffe1ea8SHidehiro Kawai out: 6127dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 612811215630SJan Kara return err; 6129ac27a0ecSDave Kleikamp } 6130ac27a0ecSDave Kleikamp 6131ac27a0ecSDave Kleikamp /* 6132ac27a0ecSDave Kleikamp * If we are mounting (or read-write remounting) a filesystem whose journal 6133ac27a0ecSDave Kleikamp * has recorded an error from a previous lifetime, move that error to the 6134ac27a0ecSDave Kleikamp * main filesystem now. 6135ac27a0ecSDave Kleikamp */ 613611215630SJan Kara static int ext4_clear_journal_err(struct super_block *sb, 6137617ba13bSMingming Cao struct ext4_super_block *es) 6138ac27a0ecSDave Kleikamp { 6139ac27a0ecSDave Kleikamp journal_t *journal; 6140ac27a0ecSDave Kleikamp int j_errno; 6141ac27a0ecSDave Kleikamp const char *errstr; 6142ac27a0ecSDave Kleikamp 614311215630SJan Kara if (!ext4_has_feature_journal(sb)) { 614411215630SJan Kara ext4_error(sb, "Journal got removed while the fs was mounted!"); 614511215630SJan Kara return -EFSCORRUPTED; 614611215630SJan Kara } 61470390131bSFrank Mayhar 6148617ba13bSMingming Cao journal = EXT4_SB(sb)->s_journal; 6149ac27a0ecSDave Kleikamp 6150ac27a0ecSDave Kleikamp /* 6151ac27a0ecSDave Kleikamp * Now check for any error status which may have been recorded in the 6152617ba13bSMingming Cao * journal by a prior ext4_error() or ext4_abort() 6153ac27a0ecSDave Kleikamp */ 6154ac27a0ecSDave Kleikamp 6155dab291afSMingming Cao j_errno = jbd2_journal_errno(journal); 6156ac27a0ecSDave Kleikamp if (j_errno) { 6157ac27a0ecSDave Kleikamp char nbuf[16]; 6158ac27a0ecSDave Kleikamp 6159617ba13bSMingming Cao errstr = ext4_decode_error(sb, j_errno, nbuf); 616012062dddSEric Sandeen ext4_warning(sb, "Filesystem error recorded " 6161ac27a0ecSDave Kleikamp "from previous mount: %s", errstr); 616212062dddSEric Sandeen ext4_warning(sb, "Marking fs in need of filesystem check."); 6163ac27a0ecSDave Kleikamp 6164617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 6165617ba13bSMingming Cao es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 61664392fbc4SJan Kara ext4_commit_super(sb); 6167ac27a0ecSDave Kleikamp 6168dab291afSMingming Cao jbd2_journal_clear_err(journal); 6169d796c52eSTheodore Ts'o jbd2_journal_update_sb_errno(journal); 6170ac27a0ecSDave Kleikamp } 617111215630SJan Kara return 0; 6172ac27a0ecSDave Kleikamp } 6173ac27a0ecSDave Kleikamp 6174ac27a0ecSDave Kleikamp /* 6175ac27a0ecSDave Kleikamp * Force the running and committing transactions to commit, 6176ac27a0ecSDave Kleikamp * and wait on the commit. 6177ac27a0ecSDave Kleikamp */ 6178617ba13bSMingming Cao int ext4_force_commit(struct super_block *sb) 6179ac27a0ecSDave Kleikamp { 6180ac27a0ecSDave Kleikamp journal_t *journal; 6181ac27a0ecSDave Kleikamp 6182bc98a42cSDavid Howells if (sb_rdonly(sb)) 6183ac27a0ecSDave Kleikamp return 0; 6184ac27a0ecSDave Kleikamp 6185617ba13bSMingming Cao journal = EXT4_SB(sb)->s_journal; 6186b1deefc9SGuo Chao return ext4_journal_force_commit(journal); 6187ac27a0ecSDave Kleikamp } 6188ac27a0ecSDave Kleikamp 6189617ba13bSMingming Cao static int ext4_sync_fs(struct super_block *sb, int wait) 6190ac27a0ecSDave Kleikamp { 619114ce0cb4STheodore Ts'o int ret = 0; 61929eddacf9SJan Kara tid_t target; 619306a407f1SDmitry Monakhov bool needs_barrier = false; 61948d5d02e6SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6195ac27a0ecSDave Kleikamp 619649598e04SJun Piao if (unlikely(ext4_forced_shutdown(sbi))) 61970db1ff22STheodore Ts'o return 0; 61980db1ff22STheodore Ts'o 61999bffad1eSTheodore Ts'o trace_ext4_sync_fs(sb, wait); 62002e8fa54eSJan Kara flush_workqueue(sbi->rsv_conversion_wq); 6201a1177825SJan Kara /* 6202a1177825SJan Kara * Writeback quota in non-journalled quota case - journalled quota has 6203a1177825SJan Kara * no dirty dquots 6204a1177825SJan Kara */ 6205a1177825SJan Kara dquot_writeback_dquots(sb, -1); 620606a407f1SDmitry Monakhov /* 620706a407f1SDmitry Monakhov * Data writeback is possible w/o journal transaction, so barrier must 620806a407f1SDmitry Monakhov * being sent at the end of the function. But we can skip it if 620906a407f1SDmitry Monakhov * transaction_commit will do it for us. 621006a407f1SDmitry Monakhov */ 6211bda32530STheodore Ts'o if (sbi->s_journal) { 621206a407f1SDmitry Monakhov target = jbd2_get_latest_transaction(sbi->s_journal); 621306a407f1SDmitry Monakhov if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && 621406a407f1SDmitry Monakhov !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) 621506a407f1SDmitry Monakhov needs_barrier = true; 621606a407f1SDmitry Monakhov 62178d5d02e6SMingming Cao if (jbd2_journal_start_commit(sbi->s_journal, &target)) { 6218ac27a0ecSDave Kleikamp if (wait) 6219bda32530STheodore Ts'o ret = jbd2_log_wait_commit(sbi->s_journal, 6220bda32530STheodore Ts'o target); 62210390131bSFrank Mayhar } 6222bda32530STheodore Ts'o } else if (wait && test_opt(sb, BARRIER)) 6223bda32530STheodore Ts'o needs_barrier = true; 622406a407f1SDmitry Monakhov if (needs_barrier) { 622506a407f1SDmitry Monakhov int err; 6226c6bf3f0eSChristoph Hellwig err = blkdev_issue_flush(sb->s_bdev); 622706a407f1SDmitry Monakhov if (!ret) 622806a407f1SDmitry Monakhov ret = err; 622906a407f1SDmitry Monakhov } 623006a407f1SDmitry Monakhov 623106a407f1SDmitry Monakhov return ret; 623206a407f1SDmitry Monakhov } 623306a407f1SDmitry Monakhov 6234ac27a0ecSDave Kleikamp /* 6235ac27a0ecSDave Kleikamp * LVM calls this function before a (read-only) snapshot is created. This 6236ac27a0ecSDave Kleikamp * gives us a chance to flush the journal completely and mark the fs clean. 6237be4f27d3SYongqiang Yang * 6238be4f27d3SYongqiang Yang * Note that only this function cannot bring a filesystem to be in a clean 62398e8ad8a5SJan Kara * state independently. It relies on upper layer to stop all data & metadata 62408e8ad8a5SJan Kara * modifications. 6241ac27a0ecSDave Kleikamp */ 6242c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb) 6243ac27a0ecSDave Kleikamp { 6244c4be0c1dSTakashi Sato int error = 0; 6245c4be0c1dSTakashi Sato journal_t *journal; 6246ac27a0ecSDave Kleikamp 6247bc98a42cSDavid Howells if (sb_rdonly(sb)) 62489ca92389STheodore Ts'o return 0; 62499ca92389STheodore Ts'o 6250c4be0c1dSTakashi Sato journal = EXT4_SB(sb)->s_journal; 6251ac27a0ecSDave Kleikamp 6252bb044576STheodore Ts'o if (journal) { 6253ac27a0ecSDave Kleikamp /* Now we set up the journal barrier. */ 6254dab291afSMingming Cao jbd2_journal_lock_updates(journal); 62557ffe1ea8SHidehiro Kawai 62567ffe1ea8SHidehiro Kawai /* 6257bb044576STheodore Ts'o * Don't clear the needs_recovery flag if we failed to 6258bb044576STheodore Ts'o * flush the journal. 62597ffe1ea8SHidehiro Kawai */ 626001d5d965SLeah Rumancik error = jbd2_journal_flush(journal, 0); 62616b0310fbSEric Sandeen if (error < 0) 62626b0310fbSEric Sandeen goto out; 6263ac27a0ecSDave Kleikamp 6264ac27a0ecSDave Kleikamp /* Journal blocked and flushed, clear needs_recovery flag. */ 6265e2b911c5SDarrick J. Wong ext4_clear_feature_journal_needs_recovery(sb); 626602f310fcSJan Kara if (ext4_orphan_file_empty(sb)) 626702f310fcSJan Kara ext4_clear_feature_orphan_present(sb); 6268c642dc9eSEric Sandeen } 6269c642dc9eSEric Sandeen 62704392fbc4SJan Kara error = ext4_commit_super(sb); 62716b0310fbSEric Sandeen out: 6272bb044576STheodore Ts'o if (journal) 62738e8ad8a5SJan Kara /* we rely on upper layer to stop further updates */ 6274bb044576STheodore Ts'o jbd2_journal_unlock_updates(journal); 62756b0310fbSEric Sandeen return error; 6276ac27a0ecSDave Kleikamp } 6277ac27a0ecSDave Kleikamp 6278ac27a0ecSDave Kleikamp /* 6279ac27a0ecSDave Kleikamp * Called by LVM after the snapshot is done. We need to reset the RECOVER 6280ac27a0ecSDave Kleikamp * flag here, even though the filesystem is not technically dirty yet. 6281ac27a0ecSDave Kleikamp */ 6282c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb) 6283ac27a0ecSDave Kleikamp { 6284bc98a42cSDavid Howells if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb))) 62859ca92389STheodore Ts'o return 0; 62869ca92389STheodore Ts'o 6287c642dc9eSEric Sandeen if (EXT4_SB(sb)->s_journal) { 62889ca92389STheodore Ts'o /* Reset the needs_recovery flag before the fs is unlocked. */ 6289e2b911c5SDarrick J. Wong ext4_set_feature_journal_needs_recovery(sb); 629002f310fcSJan Kara if (ext4_has_feature_orphan_file(sb)) 629102f310fcSJan Kara ext4_set_feature_orphan_present(sb); 6292c642dc9eSEric Sandeen } 6293c642dc9eSEric Sandeen 62944392fbc4SJan Kara ext4_commit_super(sb); 6295c4be0c1dSTakashi Sato return 0; 6296ac27a0ecSDave Kleikamp } 6297ac27a0ecSDave Kleikamp 6298673c6100STheodore Ts'o /* 6299673c6100STheodore Ts'o * Structure to save mount options for ext4_remount's benefit 6300673c6100STheodore Ts'o */ 6301673c6100STheodore Ts'o struct ext4_mount_options { 6302673c6100STheodore Ts'o unsigned long s_mount_opt; 6303a2595b8aSTheodore Ts'o unsigned long s_mount_opt2; 630408cefc7aSEric W. Biederman kuid_t s_resuid; 630508cefc7aSEric W. Biederman kgid_t s_resgid; 6306673c6100STheodore Ts'o unsigned long s_commit_interval; 6307673c6100STheodore Ts'o u32 s_min_batch_time, s_max_batch_time; 6308673c6100STheodore Ts'o #ifdef CONFIG_QUOTA 6309673c6100STheodore Ts'o int s_jquota_fmt; 6310a2d4a646SJan Kara char *s_qf_names[EXT4_MAXQUOTAS]; 6311673c6100STheodore Ts'o #endif 6312673c6100STheodore Ts'o }; 6313673c6100STheodore Ts'o 6314960e0ab6SLukas Czerner static int __ext4_remount(struct fs_context *fc, struct super_block *sb) 6315ac27a0ecSDave Kleikamp { 63167edfd85bSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 6317617ba13bSMingming Cao struct ext4_super_block *es; 6318617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6319960e0ab6SLukas Czerner unsigned long old_sb_flags; 6320617ba13bSMingming Cao struct ext4_mount_options old_opts; 63218a266467STheodore Ts'o ext4_group_t g; 6322c5e06d10SJohann Lombardi int err = 0; 6323ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 63243bbef91bSAustin Kim int enable_quota = 0; 632503dafb5fSChen Gang int i, j; 632633458eabSTheodore Ts'o char *to_free[EXT4_MAXQUOTAS]; 6327ac27a0ecSDave Kleikamp #endif 6328b237e304SHarshad Shirwadkar 632921ac738eSChengguang Xu 6330ac27a0ecSDave Kleikamp /* Store the original options */ 6331ac27a0ecSDave Kleikamp old_sb_flags = sb->s_flags; 6332ac27a0ecSDave Kleikamp old_opts.s_mount_opt = sbi->s_mount_opt; 6333a2595b8aSTheodore Ts'o old_opts.s_mount_opt2 = sbi->s_mount_opt2; 6334ac27a0ecSDave Kleikamp old_opts.s_resuid = sbi->s_resuid; 6335ac27a0ecSDave Kleikamp old_opts.s_resgid = sbi->s_resgid; 6336ac27a0ecSDave Kleikamp old_opts.s_commit_interval = sbi->s_commit_interval; 633730773840STheodore Ts'o old_opts.s_min_batch_time = sbi->s_min_batch_time; 633830773840STheodore Ts'o old_opts.s_max_batch_time = sbi->s_max_batch_time; 6339ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6340ac27a0ecSDave Kleikamp old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 6341a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 634203dafb5fSChen Gang if (sbi->s_qf_names[i]) { 634333458eabSTheodore Ts'o char *qf_name = get_qf_name(sb, sbi, i); 634433458eabSTheodore Ts'o 634533458eabSTheodore Ts'o old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL); 634603dafb5fSChen Gang if (!old_opts.s_qf_names[i]) { 634703dafb5fSChen Gang for (j = 0; j < i; j++) 634803dafb5fSChen Gang kfree(old_opts.s_qf_names[j]); 634903dafb5fSChen Gang return -ENOMEM; 635003dafb5fSChen Gang } 635103dafb5fSChen Gang } else 635203dafb5fSChen Gang old_opts.s_qf_names[i] = NULL; 6353ac27a0ecSDave Kleikamp #endif 6354e4e58e5dSOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) { 6355b3881f74STheodore Ts'o if (sbi->s_journal && sbi->s_journal->j_task->io_context) 63567edfd85bSLukas Czerner ctx->journal_ioprio = 6357b237e304SHarshad Shirwadkar sbi->s_journal->j_task->io_context->ioprio; 6358e4e58e5dSOjaswin Mujoo else 6359e4e58e5dSOjaswin Mujoo ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 6360e4e58e5dSOjaswin Mujoo 6361e4e58e5dSOjaswin Mujoo } 6362ac27a0ecSDave Kleikamp 63637edfd85bSLukas Czerner ext4_apply_options(fc, sb); 6364ac27a0ecSDave Kleikamp 63656b992ff2SDarrick J. Wong if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ 63666b992ff2SDarrick J. Wong test_opt(sb, JOURNAL_CHECKSUM)) { 63676b992ff2SDarrick J. Wong ext4_msg(sb, KERN_ERR, "changing journal_checksum " 63682d5b86e0SEric Sandeen "during remount not supported; ignoring"); 63692d5b86e0SEric Sandeen sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; 6370c6d3d56dSDarrick J. Wong } 6371c6d3d56dSDarrick J. Wong 63726ae6514bSPiotr Sarna if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 63736ae6514bSPiotr Sarna if (test_opt2(sb, EXPLICIT_DELALLOC)) { 63746ae6514bSPiotr Sarna ext4_msg(sb, KERN_ERR, "can't mount with " 63756ae6514bSPiotr Sarna "both data=journal and delalloc"); 63766ae6514bSPiotr Sarna err = -EINVAL; 63776ae6514bSPiotr Sarna goto restore_opts; 63786ae6514bSPiotr Sarna } 63796ae6514bSPiotr Sarna if (test_opt(sb, DIOREAD_NOLOCK)) { 63806ae6514bSPiotr Sarna ext4_msg(sb, KERN_ERR, "can't mount with " 63816ae6514bSPiotr Sarna "both data=journal and dioread_nolock"); 63826ae6514bSPiotr Sarna err = -EINVAL; 63836ae6514bSPiotr Sarna goto restore_opts; 63846ae6514bSPiotr Sarna } 6385ab04df78SJan Kara } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { 6386ab04df78SJan Kara if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 6387ab04df78SJan Kara ext4_msg(sb, KERN_ERR, "can't mount with " 6388ab04df78SJan Kara "journal_async_commit in data=ordered mode"); 6389ab04df78SJan Kara err = -EINVAL; 6390ab04df78SJan Kara goto restore_opts; 6391ab04df78SJan Kara } 6392923ae0ffSRoss Zwisler } 6393923ae0ffSRoss Zwisler 6394cdb7ee4cSTahsin Erdogan if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) { 6395cdb7ee4cSTahsin Erdogan ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount"); 6396cdb7ee4cSTahsin Erdogan err = -EINVAL; 6397cdb7ee4cSTahsin Erdogan goto restore_opts; 6398cdb7ee4cSTahsin Erdogan } 6399cdb7ee4cSTahsin Erdogan 64009b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 6401124e7c61SGabriel Krisman Bertazi ext4_abort(sb, ESHUTDOWN, "Abort forced by user"); 6402ac27a0ecSDave Kleikamp 64031751e8a6SLinus Torvalds sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 64041751e8a6SLinus Torvalds (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0); 6405ac27a0ecSDave Kleikamp 6406ac27a0ecSDave Kleikamp es = sbi->s_es; 6407ac27a0ecSDave Kleikamp 6408b3881f74STheodore Ts'o if (sbi->s_journal) { 6409617ba13bSMingming Cao ext4_init_journal_params(sb, sbi->s_journal); 64107edfd85bSLukas Czerner set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 6411b3881f74STheodore Ts'o } 6412ac27a0ecSDave Kleikamp 6413c92dc856SJan Kara /* Flush outstanding errors before changing fs state */ 6414c92dc856SJan Kara flush_work(&sbi->s_error_work); 6415c92dc856SJan Kara 6416960e0ab6SLukas Czerner if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { 64179b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) { 6418ac27a0ecSDave Kleikamp err = -EROFS; 6419ac27a0ecSDave Kleikamp goto restore_opts; 6420ac27a0ecSDave Kleikamp } 6421ac27a0ecSDave Kleikamp 6422960e0ab6SLukas Czerner if (fc->sb_flags & SB_RDONLY) { 642338c03b34STheodore Ts'o err = sync_filesystem(sb); 642438c03b34STheodore Ts'o if (err < 0) 642538c03b34STheodore Ts'o goto restore_opts; 64260f0dd62fSChristoph Hellwig err = dquot_suspend(sb, -1); 64270f0dd62fSChristoph Hellwig if (err < 0) 6428c79d967dSChristoph Hellwig goto restore_opts; 6429c79d967dSChristoph Hellwig 6430ac27a0ecSDave Kleikamp /* 6431ac27a0ecSDave Kleikamp * First of all, the unconditional stuff we have to do 6432ac27a0ecSDave Kleikamp * to disable replay of the journal when we next remount 6433ac27a0ecSDave Kleikamp */ 64341751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 6435ac27a0ecSDave Kleikamp 6436ac27a0ecSDave Kleikamp /* 6437ac27a0ecSDave Kleikamp * OK, test if we are remounting a valid rw partition 6438ac27a0ecSDave Kleikamp * readonly, and if so set the rdonly flag and then 6439ac27a0ecSDave Kleikamp * mark the partition as valid again. 6440ac27a0ecSDave Kleikamp */ 6441617ba13bSMingming Cao if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && 6442617ba13bSMingming Cao (sbi->s_mount_state & EXT4_VALID_FS)) 6443ac27a0ecSDave Kleikamp es->s_state = cpu_to_le16(sbi->s_mount_state); 6444ac27a0ecSDave Kleikamp 644511215630SJan Kara if (sbi->s_journal) { 644611215630SJan Kara /* 644711215630SJan Kara * We let remount-ro finish even if marking fs 644811215630SJan Kara * as clean failed... 644911215630SJan Kara */ 6450617ba13bSMingming Cao ext4_mark_recovery_complete(sb, es); 645111215630SJan Kara } 6452ac27a0ecSDave Kleikamp } else { 6453a13fb1a4SEric Sandeen /* Make sure we can mount this feature set readwrite */ 6454e2b911c5SDarrick J. Wong if (ext4_has_feature_readonly(sb) || 64552cb5cc8bSDarrick J. Wong !ext4_feature_set_ok(sb, 0)) { 6456ac27a0ecSDave Kleikamp err = -EROFS; 6457ac27a0ecSDave Kleikamp goto restore_opts; 6458ac27a0ecSDave Kleikamp } 6459ead6596bSEric Sandeen /* 64608a266467STheodore Ts'o * Make sure the group descriptor checksums 64610b8e58a1SAndreas Dilger * are sane. If they aren't, refuse to remount r/w. 64628a266467STheodore Ts'o */ 64638a266467STheodore Ts'o for (g = 0; g < sbi->s_groups_count; g++) { 64648a266467STheodore Ts'o struct ext4_group_desc *gdp = 64658a266467STheodore Ts'o ext4_get_group_desc(sb, g, NULL); 64668a266467STheodore Ts'o 6467feb0ab32SDarrick J. Wong if (!ext4_group_desc_csum_verify(sb, g, gdp)) { 6468b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, 6469b31e1552SEric Sandeen "ext4_remount: Checksum for group %u failed (%u!=%u)", 6470e2b911c5SDarrick J. Wong g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), 64718a266467STheodore Ts'o le16_to_cpu(gdp->bg_checksum)); 64726a797d27SDarrick J. Wong err = -EFSBADCRC; 64738a266467STheodore Ts'o goto restore_opts; 64748a266467STheodore Ts'o } 64758a266467STheodore Ts'o } 64768a266467STheodore Ts'o 64778a266467STheodore Ts'o /* 6478ead6596bSEric Sandeen * If we have an unprocessed orphan list hanging 6479ead6596bSEric Sandeen * around from a previously readonly bdev mount, 6480ead6596bSEric Sandeen * require a full umount/remount for now. 6481ead6596bSEric Sandeen */ 648202f310fcSJan Kara if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) { 6483b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, "Couldn't " 6484ead6596bSEric Sandeen "remount RDWR because of unprocessed " 6485ead6596bSEric Sandeen "orphan inode list. Please " 6486b31e1552SEric Sandeen "umount/remount instead"); 6487ead6596bSEric Sandeen err = -EINVAL; 6488ead6596bSEric Sandeen goto restore_opts; 6489ead6596bSEric Sandeen } 6490ead6596bSEric Sandeen 6491ac27a0ecSDave Kleikamp /* 6492ac27a0ecSDave Kleikamp * Mounting a RDONLY partition read-write, so reread 6493ac27a0ecSDave Kleikamp * and store the current valid flag. (It may have 6494ac27a0ecSDave Kleikamp * been changed by e2fsck since we originally mounted 6495ac27a0ecSDave Kleikamp * the partition.) 6496ac27a0ecSDave Kleikamp */ 649711215630SJan Kara if (sbi->s_journal) { 649811215630SJan Kara err = ext4_clear_journal_err(sb, es); 649911215630SJan Kara if (err) 650011215630SJan Kara goto restore_opts; 650111215630SJan Kara } 6502c878bea3STheodore Ts'o sbi->s_mount_state = (le16_to_cpu(es->s_state) & 6503c878bea3STheodore Ts'o ~EXT4_FC_REPLAY); 6504c89128a0SJaegeuk Kim 6505c89128a0SJaegeuk Kim err = ext4_setup_super(sb, es, 0); 6506c89128a0SJaegeuk Kim if (err) 6507c89128a0SJaegeuk Kim goto restore_opts; 6508c89128a0SJaegeuk Kim 65091751e8a6SLinus Torvalds sb->s_flags &= ~SB_RDONLY; 6510e2b911c5SDarrick J. Wong if (ext4_has_feature_mmp(sb)) 6511c5e06d10SJohann Lombardi if (ext4_multi_mount_protect(sb, 6512c5e06d10SJohann Lombardi le64_to_cpu(es->s_mmp_block))) { 6513c5e06d10SJohann Lombardi err = -EROFS; 6514c5e06d10SJohann Lombardi goto restore_opts; 6515c5e06d10SJohann Lombardi } 65163bbef91bSAustin Kim #ifdef CONFIG_QUOTA 6517c79d967dSChristoph Hellwig enable_quota = 1; 65183bbef91bSAustin Kim #endif 6519ac27a0ecSDave Kleikamp } 6520ac27a0ecSDave Kleikamp } 6521bfff6873SLukas Czerner 6522bfff6873SLukas Czerner /* 6523bfff6873SLukas Czerner * Reinitialize lazy itable initialization thread based on 6524bfff6873SLukas Czerner * current settings 6525bfff6873SLukas Czerner */ 6526bc98a42cSDavid Howells if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) 6527bfff6873SLukas Czerner ext4_unregister_li_request(sb); 6528bfff6873SLukas Czerner else { 6529bfff6873SLukas Czerner ext4_group_t first_not_zeroed; 6530bfff6873SLukas Czerner first_not_zeroed = ext4_has_uninit_itable(sb); 6531bfff6873SLukas Czerner ext4_register_li_request(sb, first_not_zeroed); 6532bfff6873SLukas Czerner } 6533bfff6873SLukas Czerner 65340f5bde1dSJan Kara /* 65350f5bde1dSJan Kara * Handle creation of system zone data early because it can fail. 65360f5bde1dSJan Kara * Releasing of existing data is done when we are sure remount will 65370f5bde1dSJan Kara * succeed. 65380f5bde1dSJan Kara */ 6539dd0db94fSChunguang Xu if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) { 6540d176b1f6SJan Kara err = ext4_setup_system_zone(sb); 6541d176b1f6SJan Kara if (err) 6542d176b1f6SJan Kara goto restore_opts; 65430f5bde1dSJan Kara } 6544d176b1f6SJan Kara 6545c89128a0SJaegeuk Kim if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { 65464392fbc4SJan Kara err = ext4_commit_super(sb); 6547c89128a0SJaegeuk Kim if (err) 6548c89128a0SJaegeuk Kim goto restore_opts; 6549c89128a0SJaegeuk Kim } 65500390131bSFrank Mayhar 6551ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6552ac27a0ecSDave Kleikamp /* Release old quota file names */ 6553a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 6554ac27a0ecSDave Kleikamp kfree(old_opts.s_qf_names[i]); 65557c319d32SAditya Kali if (enable_quota) { 65567c319d32SAditya Kali if (sb_any_quota_suspended(sb)) 65570f0dd62fSChristoph Hellwig dquot_resume(sb, -1); 6558e2b911c5SDarrick J. Wong else if (ext4_has_feature_quota(sb)) { 65597c319d32SAditya Kali err = ext4_enable_quotas(sb); 656007724f98STheodore Ts'o if (err) 65617c319d32SAditya Kali goto restore_opts; 65627c319d32SAditya Kali } 65637c319d32SAditya Kali } 65647c319d32SAditya Kali #endif 6565dd0db94fSChunguang Xu if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 65660f5bde1dSJan Kara ext4_release_system_zone(sb); 6567d4c402d9SCurt Wohlgemuth 656861bb4a1cSTheodore Ts'o if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 656961bb4a1cSTheodore Ts'o ext4_stop_mmpd(sbi); 657061bb4a1cSTheodore Ts'o 6571ac27a0ecSDave Kleikamp return 0; 65720b8e58a1SAndreas Dilger 6573ac27a0ecSDave Kleikamp restore_opts: 6574ac27a0ecSDave Kleikamp sb->s_flags = old_sb_flags; 6575ac27a0ecSDave Kleikamp sbi->s_mount_opt = old_opts.s_mount_opt; 6576a2595b8aSTheodore Ts'o sbi->s_mount_opt2 = old_opts.s_mount_opt2; 6577ac27a0ecSDave Kleikamp sbi->s_resuid = old_opts.s_resuid; 6578ac27a0ecSDave Kleikamp sbi->s_resgid = old_opts.s_resgid; 6579ac27a0ecSDave Kleikamp sbi->s_commit_interval = old_opts.s_commit_interval; 658030773840STheodore Ts'o sbi->s_min_batch_time = old_opts.s_min_batch_time; 658130773840STheodore Ts'o sbi->s_max_batch_time = old_opts.s_max_batch_time; 6582dd0db94fSChunguang Xu if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 65830f5bde1dSJan Kara ext4_release_system_zone(sb); 6584ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6585ac27a0ecSDave Kleikamp sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 6586a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) { 658733458eabSTheodore Ts'o to_free[i] = get_qf_name(sb, sbi, i); 658833458eabSTheodore Ts'o rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]); 6589ac27a0ecSDave Kleikamp } 659033458eabSTheodore Ts'o synchronize_rcu(); 659133458eabSTheodore Ts'o for (i = 0; i < EXT4_MAXQUOTAS; i++) 659233458eabSTheodore Ts'o kfree(to_free[i]); 6593ac27a0ecSDave Kleikamp #endif 659461bb4a1cSTheodore Ts'o if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 659561bb4a1cSTheodore Ts'o ext4_stop_mmpd(sbi); 6596ac27a0ecSDave Kleikamp return err; 6597ac27a0ecSDave Kleikamp } 6598ac27a0ecSDave Kleikamp 6599cebe85d5SLukas Czerner static int ext4_reconfigure(struct fs_context *fc) 66007edfd85bSLukas Czerner { 6601cebe85d5SLukas Czerner struct super_block *sb = fc->root->d_sb; 66027edfd85bSLukas Czerner int ret; 66037edfd85bSLukas Czerner 6604cebe85d5SLukas Czerner fc->s_fs_info = EXT4_SB(sb); 66057edfd85bSLukas Czerner 6606cebe85d5SLukas Czerner ret = ext4_check_opt_consistency(fc, sb); 66077edfd85bSLukas Czerner if (ret < 0) 66087edfd85bSLukas Czerner return ret; 6609cebe85d5SLukas Czerner 6610960e0ab6SLukas Czerner ret = __ext4_remount(fc, sb); 6611cebe85d5SLukas Czerner if (ret < 0) 6612cebe85d5SLukas Czerner return ret; 6613cebe85d5SLukas Czerner 6614cebe85d5SLukas Czerner ext4_msg(sb, KERN_INFO, "re-mounted. Quota mode: %s.", 6615cebe85d5SLukas Czerner ext4_quota_mode(sb)); 6616cebe85d5SLukas Czerner 6617cebe85d5SLukas Czerner return 0; 66187edfd85bSLukas Czerner } 66197edfd85bSLukas Czerner 6620689c958cSLi Xi #ifdef CONFIG_QUOTA 6621689c958cSLi Xi static int ext4_statfs_project(struct super_block *sb, 6622689c958cSLi Xi kprojid_t projid, struct kstatfs *buf) 6623689c958cSLi Xi { 6624689c958cSLi Xi struct kqid qid; 6625689c958cSLi Xi struct dquot *dquot; 6626689c958cSLi Xi u64 limit; 6627689c958cSLi Xi u64 curblock; 6628689c958cSLi Xi 6629689c958cSLi Xi qid = make_kqid_projid(projid); 6630689c958cSLi Xi dquot = dqget(sb, qid); 6631689c958cSLi Xi if (IS_ERR(dquot)) 6632689c958cSLi Xi return PTR_ERR(dquot); 66337b9ca4c6SJan Kara spin_lock(&dquot->dq_dqb_lock); 6634689c958cSLi Xi 6635a08fe66eSChengguang Xu limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, 6636a08fe66eSChengguang Xu dquot->dq_dqb.dqb_bhardlimit); 663757c32ea4SChengguang Xu limit >>= sb->s_blocksize_bits; 663857c32ea4SChengguang Xu 6639689c958cSLi Xi if (limit && buf->f_blocks > limit) { 6640f06925c7SKonstantin Khlebnikov curblock = (dquot->dq_dqb.dqb_curspace + 6641f06925c7SKonstantin Khlebnikov dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; 6642689c958cSLi Xi buf->f_blocks = limit; 6643689c958cSLi Xi buf->f_bfree = buf->f_bavail = 6644689c958cSLi Xi (buf->f_blocks > curblock) ? 6645689c958cSLi Xi (buf->f_blocks - curblock) : 0; 6646689c958cSLi Xi } 6647689c958cSLi Xi 6648a08fe66eSChengguang Xu limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, 6649a08fe66eSChengguang Xu dquot->dq_dqb.dqb_ihardlimit); 6650689c958cSLi Xi if (limit && buf->f_files > limit) { 6651689c958cSLi Xi buf->f_files = limit; 6652689c958cSLi Xi buf->f_ffree = 6653689c958cSLi Xi (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? 6654689c958cSLi Xi (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; 6655689c958cSLi Xi } 6656689c958cSLi Xi 66577b9ca4c6SJan Kara spin_unlock(&dquot->dq_dqb_lock); 6658689c958cSLi Xi dqput(dquot); 6659689c958cSLi Xi return 0; 6660689c958cSLi Xi } 6661689c958cSLi Xi #endif 6662689c958cSLi Xi 6663617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) 6664ac27a0ecSDave Kleikamp { 6665ac27a0ecSDave Kleikamp struct super_block *sb = dentry->d_sb; 6666617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6667617ba13bSMingming Cao struct ext4_super_block *es = sbi->s_es; 666827dd4385SLukas Czerner ext4_fsblk_t overhead = 0, resv_blocks; 6669d02a9391SKazuya Mio s64 bfree; 667027dd4385SLukas Czerner resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); 6671ac27a0ecSDave Kleikamp 6672952fc18eSTheodore Ts'o if (!test_opt(sb, MINIX_DF)) 6673952fc18eSTheodore Ts'o overhead = sbi->s_overhead; 6674ac27a0ecSDave Kleikamp 6675617ba13bSMingming Cao buf->f_type = EXT4_SUPER_MAGIC; 6676ac27a0ecSDave Kleikamp buf->f_bsize = sb->s_blocksize; 6677b72f78cbSEric Sandeen buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); 667857042651STheodore Ts'o bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - 667957042651STheodore Ts'o percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); 6680d02a9391SKazuya Mio /* prevent underflow in case that few free space is available */ 668157042651STheodore Ts'o buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); 668227dd4385SLukas Czerner buf->f_bavail = buf->f_bfree - 668327dd4385SLukas Czerner (ext4_r_blocks_count(es) + resv_blocks); 668427dd4385SLukas Czerner if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) 6685ac27a0ecSDave Kleikamp buf->f_bavail = 0; 6686ac27a0ecSDave Kleikamp buf->f_files = le32_to_cpu(es->s_inodes_count); 668752d9f3b4SPeter Zijlstra buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); 6688617ba13bSMingming Cao buf->f_namelen = EXT4_NAME_LEN; 66899591c3a3SAmir Goldstein buf->f_fsid = uuid_to_fsid(es->s_uuid); 66900b8e58a1SAndreas Dilger 6691689c958cSLi Xi #ifdef CONFIG_QUOTA 6692689c958cSLi Xi if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) && 6693689c958cSLi Xi sb_has_quota_limits_enabled(sb, PRJQUOTA)) 6694689c958cSLi Xi ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf); 6695689c958cSLi Xi #endif 6696ac27a0ecSDave Kleikamp return 0; 6697ac27a0ecSDave Kleikamp } 6698ac27a0ecSDave Kleikamp 6699ac27a0ecSDave Kleikamp 6700ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6701ac27a0ecSDave Kleikamp 6702bc8230eeSJan Kara /* 6703bc8230eeSJan Kara * Helper functions so that transaction is started before we acquire dqio_sem 6704bc8230eeSJan Kara * to keep correct lock ordering of transaction > dqio_sem 6705bc8230eeSJan Kara */ 6706ac27a0ecSDave Kleikamp static inline struct inode *dquot_to_inode(struct dquot *dquot) 6707ac27a0ecSDave Kleikamp { 67084c376dcaSEric W. Biederman return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 6709ac27a0ecSDave Kleikamp } 6710ac27a0ecSDave Kleikamp 6711617ba13bSMingming Cao static int ext4_write_dquot(struct dquot *dquot) 6712ac27a0ecSDave Kleikamp { 6713ac27a0ecSDave Kleikamp int ret, err; 6714ac27a0ecSDave Kleikamp handle_t *handle; 6715ac27a0ecSDave Kleikamp struct inode *inode; 6716ac27a0ecSDave Kleikamp 6717ac27a0ecSDave Kleikamp inode = dquot_to_inode(dquot); 67189924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 6719617ba13bSMingming Cao EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 6720ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6721ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6722ac27a0ecSDave Kleikamp ret = dquot_commit(dquot); 6723617ba13bSMingming Cao err = ext4_journal_stop(handle); 6724ac27a0ecSDave Kleikamp if (!ret) 6725ac27a0ecSDave Kleikamp ret = err; 6726ac27a0ecSDave Kleikamp return ret; 6727ac27a0ecSDave Kleikamp } 6728ac27a0ecSDave Kleikamp 6729617ba13bSMingming Cao static int ext4_acquire_dquot(struct dquot *dquot) 6730ac27a0ecSDave Kleikamp { 6731ac27a0ecSDave Kleikamp int ret, err; 6732ac27a0ecSDave Kleikamp handle_t *handle; 6733ac27a0ecSDave Kleikamp 67349924a92aSTheodore Ts'o handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6735617ba13bSMingming Cao EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 6736ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6737ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6738ac27a0ecSDave Kleikamp ret = dquot_acquire(dquot); 6739617ba13bSMingming Cao err = ext4_journal_stop(handle); 6740ac27a0ecSDave Kleikamp if (!ret) 6741ac27a0ecSDave Kleikamp ret = err; 6742ac27a0ecSDave Kleikamp return ret; 6743ac27a0ecSDave Kleikamp } 6744ac27a0ecSDave Kleikamp 6745617ba13bSMingming Cao static int ext4_release_dquot(struct dquot *dquot) 6746ac27a0ecSDave Kleikamp { 6747ac27a0ecSDave Kleikamp int ret, err; 6748ac27a0ecSDave Kleikamp handle_t *handle; 6749ac27a0ecSDave Kleikamp 67509924a92aSTheodore Ts'o handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6751617ba13bSMingming Cao EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); 67529c3013e9SJan Kara if (IS_ERR(handle)) { 67539c3013e9SJan Kara /* Release dquot anyway to avoid endless cycle in dqput() */ 67549c3013e9SJan Kara dquot_release(dquot); 6755ac27a0ecSDave Kleikamp return PTR_ERR(handle); 67569c3013e9SJan Kara } 6757ac27a0ecSDave Kleikamp ret = dquot_release(dquot); 6758617ba13bSMingming Cao err = ext4_journal_stop(handle); 6759ac27a0ecSDave Kleikamp if (!ret) 6760ac27a0ecSDave Kleikamp ret = err; 6761ac27a0ecSDave Kleikamp return ret; 6762ac27a0ecSDave Kleikamp } 6763ac27a0ecSDave Kleikamp 6764617ba13bSMingming Cao static int ext4_mark_dquot_dirty(struct dquot *dquot) 6765ac27a0ecSDave Kleikamp { 6766262b4662SJan Kara struct super_block *sb = dquot->dq_sb; 6767262b4662SJan Kara 6768f177ee08SRoman Anufriev if (ext4_is_quota_journalled(sb)) { 6769ac27a0ecSDave Kleikamp dquot_mark_dquot_dirty(dquot); 6770617ba13bSMingming Cao return ext4_write_dquot(dquot); 6771ac27a0ecSDave Kleikamp } else { 6772ac27a0ecSDave Kleikamp return dquot_mark_dquot_dirty(dquot); 6773ac27a0ecSDave Kleikamp } 6774ac27a0ecSDave Kleikamp } 6775ac27a0ecSDave Kleikamp 6776617ba13bSMingming Cao static int ext4_write_info(struct super_block *sb, int type) 6777ac27a0ecSDave Kleikamp { 6778ac27a0ecSDave Kleikamp int ret, err; 6779ac27a0ecSDave Kleikamp handle_t *handle; 6780ac27a0ecSDave Kleikamp 6781ac27a0ecSDave Kleikamp /* Data block + inode block */ 6782f9c1f248SBaokun Li handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2); 6783ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6784ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6785ac27a0ecSDave Kleikamp ret = dquot_commit_info(sb, type); 6786617ba13bSMingming Cao err = ext4_journal_stop(handle); 6787ac27a0ecSDave Kleikamp if (!ret) 6788ac27a0ecSDave Kleikamp ret = err; 6789ac27a0ecSDave Kleikamp return ret; 6790ac27a0ecSDave Kleikamp } 6791ac27a0ecSDave Kleikamp 6792daf647d2STheodore Ts'o static void lockdep_set_quota_inode(struct inode *inode, int subclass) 6793daf647d2STheodore Ts'o { 6794daf647d2STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 6795daf647d2STheodore Ts'o 6796daf647d2STheodore Ts'o /* The first argument of lockdep_set_subclass has to be 6797daf647d2STheodore Ts'o * *exactly* the same as the argument to init_rwsem() --- in 6798daf647d2STheodore Ts'o * this case, in init_once() --- or lockdep gets unhappy 6799daf647d2STheodore Ts'o * because the name of the lock is set using the 6800daf647d2STheodore Ts'o * stringification of the argument to init_rwsem(). 6801daf647d2STheodore Ts'o */ 6802daf647d2STheodore Ts'o (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ 6803daf647d2STheodore Ts'o lockdep_set_subclass(&ei->i_data_sem, subclass); 6804daf647d2STheodore Ts'o } 6805daf647d2STheodore Ts'o 6806ac27a0ecSDave Kleikamp /* 6807ac27a0ecSDave Kleikamp * Standard function to be called on quota_on 6808ac27a0ecSDave Kleikamp */ 6809617ba13bSMingming Cao static int ext4_quota_on(struct super_block *sb, int type, int format_id, 68108c54ca9cSAl Viro const struct path *path) 6811ac27a0ecSDave Kleikamp { 6812ac27a0ecSDave Kleikamp int err; 6813ac27a0ecSDave Kleikamp 6814ac27a0ecSDave Kleikamp if (!test_opt(sb, QUOTA)) 6815ac27a0ecSDave Kleikamp return -EINVAL; 68160623543bSJan Kara 6817ac27a0ecSDave Kleikamp /* Quotafile not on the same filesystem? */ 6818d8c9584eSAl Viro if (path->dentry->d_sb != sb) 6819ac27a0ecSDave Kleikamp return -EXDEV; 6820e0770e91SJan Kara 6821e0770e91SJan Kara /* Quota already enabled for this file? */ 6822e0770e91SJan Kara if (IS_NOQUOTA(d_inode(path->dentry))) 6823e0770e91SJan Kara return -EBUSY; 6824e0770e91SJan Kara 68250623543bSJan Kara /* Journaling quota? */ 68260623543bSJan Kara if (EXT4_SB(sb)->s_qf_names[type]) { 68272b2d6d01STheodore Ts'o /* Quotafile not in fs root? */ 6828f00c9e44SJan Kara if (path->dentry->d_parent != sb->s_root) 6829b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, 6830b31e1552SEric Sandeen "Quota file not on filesystem root. " 6831b31e1552SEric Sandeen "Journaled quota will not work"); 683291389240SJan Kara sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY; 683391389240SJan Kara } else { 683491389240SJan Kara /* 683591389240SJan Kara * Clear the flag just in case mount options changed since 683691389240SJan Kara * last time. 683791389240SJan Kara */ 683891389240SJan Kara sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY; 68390623543bSJan Kara } 68400623543bSJan Kara 68410623543bSJan Kara /* 68420623543bSJan Kara * When we journal data on quota file, we have to flush journal to see 68430623543bSJan Kara * all updates to the file when we bypass pagecache... 68440623543bSJan Kara */ 68450390131bSFrank Mayhar if (EXT4_SB(sb)->s_journal && 68462b0143b5SDavid Howells ext4_should_journal_data(d_inode(path->dentry))) { 68470623543bSJan Kara /* 68480623543bSJan Kara * We don't need to lock updates but journal_flush() could 68490623543bSJan Kara * otherwise be livelocked... 68500623543bSJan Kara */ 68510623543bSJan Kara jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); 685201d5d965SLeah Rumancik err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0); 68530623543bSJan Kara jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 6854f00c9e44SJan Kara if (err) 68557ffe1ea8SHidehiro Kawai return err; 68567ffe1ea8SHidehiro Kawai } 6857957153fcSJan Kara 6858daf647d2STheodore Ts'o lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); 6859daf647d2STheodore Ts'o err = dquot_quota_on(sb, type, format_id, path); 686015fc69bbSJan Kara if (!err) { 6861957153fcSJan Kara struct inode *inode = d_inode(path->dentry); 6862957153fcSJan Kara handle_t *handle; 6863957153fcSJan Kara 686461a92987SJan Kara /* 686561a92987SJan Kara * Set inode flags to prevent userspace from messing with quota 686661a92987SJan Kara * files. If this fails, we return success anyway since quotas 686761a92987SJan Kara * are already enabled and this is not a hard failure. 686861a92987SJan Kara */ 6869957153fcSJan Kara inode_lock(inode); 6870957153fcSJan Kara handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 6871957153fcSJan Kara if (IS_ERR(handle)) 6872957153fcSJan Kara goto unlock_inode; 6873957153fcSJan Kara EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL; 6874957153fcSJan Kara inode_set_flags(inode, S_NOATIME | S_IMMUTABLE, 6875957153fcSJan Kara S_NOATIME | S_IMMUTABLE); 68764209ae12SHarshad Shirwadkar err = ext4_mark_inode_dirty(handle, inode); 6877957153fcSJan Kara ext4_journal_stop(handle); 6878957153fcSJan Kara unlock_inode: 6879957153fcSJan Kara inode_unlock(inode); 688015fc69bbSJan Kara if (err) 688115fc69bbSJan Kara dquot_quota_off(sb, type); 6882957153fcSJan Kara } 688315fc69bbSJan Kara if (err) 688415fc69bbSJan Kara lockdep_set_quota_inode(path->dentry->d_inode, 688515fc69bbSJan Kara I_DATA_SEM_NORMAL); 6886daf647d2STheodore Ts'o return err; 6887ac27a0ecSDave Kleikamp } 6888ac27a0ecSDave Kleikamp 688907342ec2SBaokun Li static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) 689007342ec2SBaokun Li { 689107342ec2SBaokun Li switch (type) { 689207342ec2SBaokun Li case USRQUOTA: 689307342ec2SBaokun Li return qf_inum == EXT4_USR_QUOTA_INO; 689407342ec2SBaokun Li case GRPQUOTA: 689507342ec2SBaokun Li return qf_inum == EXT4_GRP_QUOTA_INO; 689607342ec2SBaokun Li case PRJQUOTA: 689707342ec2SBaokun Li return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; 689807342ec2SBaokun Li default: 689907342ec2SBaokun Li BUG(); 690007342ec2SBaokun Li } 690107342ec2SBaokun Li } 690207342ec2SBaokun Li 69037c319d32SAditya Kali static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 69047c319d32SAditya Kali unsigned int flags) 69057c319d32SAditya Kali { 69067c319d32SAditya Kali int err; 69077c319d32SAditya Kali struct inode *qf_inode; 6908a2d4a646SJan Kara unsigned long qf_inums[EXT4_MAXQUOTAS] = { 69097c319d32SAditya Kali le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 6910689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 6911689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 69127c319d32SAditya Kali }; 69137c319d32SAditya Kali 6914e2b911c5SDarrick J. Wong BUG_ON(!ext4_has_feature_quota(sb)); 69157c319d32SAditya Kali 69167c319d32SAditya Kali if (!qf_inums[type]) 69177c319d32SAditya Kali return -EPERM; 69187c319d32SAditya Kali 691907342ec2SBaokun Li if (!ext4_check_quota_inum(type, qf_inums[type])) { 692007342ec2SBaokun Li ext4_error(sb, "Bad quota inum: %lu, type: %d", 692107342ec2SBaokun Li qf_inums[type], type); 692207342ec2SBaokun Li return -EUCLEAN; 692307342ec2SBaokun Li } 692407342ec2SBaokun Li 69258a363970STheodore Ts'o qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); 69267c319d32SAditya Kali if (IS_ERR(qf_inode)) { 692707342ec2SBaokun Li ext4_error(sb, "Bad quota inode: %lu, type: %d", 692807342ec2SBaokun Li qf_inums[type], type); 69297c319d32SAditya Kali return PTR_ERR(qf_inode); 69307c319d32SAditya Kali } 69317c319d32SAditya Kali 6932bcb13850SJan Kara /* Don't account quota for quota files to avoid recursion */ 6933bcb13850SJan Kara qf_inode->i_flags |= S_NOQUOTA; 6934daf647d2STheodore Ts'o lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA); 69357212b95eSJan Kara err = dquot_load_quota_inode(qf_inode, type, format_id, flags); 6936daf647d2STheodore Ts'o if (err) 6937daf647d2STheodore Ts'o lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL); 693861157b24SPan Bian iput(qf_inode); 69397c319d32SAditya Kali 69407c319d32SAditya Kali return err; 69417c319d32SAditya Kali } 69427c319d32SAditya Kali 69437c319d32SAditya Kali /* Enable usage tracking for all quota types. */ 694425c6d98fSJan Kara int ext4_enable_quotas(struct super_block *sb) 69457c319d32SAditya Kali { 69467c319d32SAditya Kali int type, err = 0; 6947a2d4a646SJan Kara unsigned long qf_inums[EXT4_MAXQUOTAS] = { 69487c319d32SAditya Kali le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 6949689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 6950689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 69517c319d32SAditya Kali }; 695249da9392SJan Kara bool quota_mopt[EXT4_MAXQUOTAS] = { 695349da9392SJan Kara test_opt(sb, USRQUOTA), 695449da9392SJan Kara test_opt(sb, GRPQUOTA), 695549da9392SJan Kara test_opt(sb, PRJQUOTA), 695649da9392SJan Kara }; 69577c319d32SAditya Kali 695891389240SJan Kara sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 6959a2d4a646SJan Kara for (type = 0; type < EXT4_MAXQUOTAS; type++) { 69607c319d32SAditya Kali if (qf_inums[type]) { 69617c319d32SAditya Kali err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 696249da9392SJan Kara DQUOT_USAGE_ENABLED | 696349da9392SJan Kara (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 69647c319d32SAditya Kali if (err) { 69657c319d32SAditya Kali ext4_warning(sb, 696672ba7450STheodore Ts'o "Failed to enable quota tracking " 696707342ec2SBaokun Li "(type=%d, err=%d, ino=%lu). " 696807342ec2SBaokun Li "Please run e2fsck to fix.", type, 696907342ec2SBaokun Li err, qf_inums[type]); 69704013d47aSJan Kara for (type--; type >= 0; type--) { 69714013d47aSJan Kara struct inode *inode; 69724013d47aSJan Kara 69734013d47aSJan Kara inode = sb_dqopt(sb)->files[type]; 69744013d47aSJan Kara if (inode) 69754013d47aSJan Kara inode = igrab(inode); 69767f144fd0SJunichi Uekawa dquot_quota_off(sb, type); 69774013d47aSJan Kara if (inode) { 69784013d47aSJan Kara lockdep_set_quota_inode(inode, 69794013d47aSJan Kara I_DATA_SEM_NORMAL); 69804013d47aSJan Kara iput(inode); 69814013d47aSJan Kara } 69824013d47aSJan Kara } 69837f144fd0SJunichi Uekawa 69847c319d32SAditya Kali return err; 69857c319d32SAditya Kali } 69867c319d32SAditya Kali } 69877c319d32SAditya Kali } 69887c319d32SAditya Kali return 0; 69897c319d32SAditya Kali } 69907c319d32SAditya Kali 6991ca0e05e4SDmitry Monakhov static int ext4_quota_off(struct super_block *sb, int type) 6992ca0e05e4SDmitry Monakhov { 699321f97697SJan Kara struct inode *inode = sb_dqopt(sb)->files[type]; 699421f97697SJan Kara handle_t *handle; 6995957153fcSJan Kara int err; 699621f97697SJan Kara 699787009d86SDmitry Monakhov /* Force all delayed allocation blocks to be allocated. 699887009d86SDmitry Monakhov * Caller already holds s_umount sem */ 699987009d86SDmitry Monakhov if (test_opt(sb, DELALLOC)) 7000ca0e05e4SDmitry Monakhov sync_filesystem(sb); 7001ca0e05e4SDmitry Monakhov 7002957153fcSJan Kara if (!inode || !igrab(inode)) 70030b268590SAmir Goldstein goto out; 70040b268590SAmir Goldstein 7005957153fcSJan Kara err = dquot_quota_off(sb, type); 7006964edf66SJan Kara if (err || ext4_has_feature_quota(sb)) 7007957153fcSJan Kara goto out_put; 7008957153fcSJan Kara 7009957153fcSJan Kara inode_lock(inode); 701061a92987SJan Kara /* 701161a92987SJan Kara * Update modification times of quota files when userspace can 701261a92987SJan Kara * start looking at them. If we fail, we return success anyway since 701361a92987SJan Kara * this is not a hard failure and quotas are already disabled. 701461a92987SJan Kara */ 70159924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); 70164209ae12SHarshad Shirwadkar if (IS_ERR(handle)) { 70174209ae12SHarshad Shirwadkar err = PTR_ERR(handle); 7018957153fcSJan Kara goto out_unlock; 70194209ae12SHarshad Shirwadkar } 7020957153fcSJan Kara EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL); 7021957153fcSJan Kara inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE); 7022eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 70234209ae12SHarshad Shirwadkar err = ext4_mark_inode_dirty(handle, inode); 702421f97697SJan Kara ext4_journal_stop(handle); 7025957153fcSJan Kara out_unlock: 7026957153fcSJan Kara inode_unlock(inode); 7027957153fcSJan Kara out_put: 7028964edf66SJan Kara lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL); 7029957153fcSJan Kara iput(inode); 7030957153fcSJan Kara return err; 703121f97697SJan Kara out: 7032ca0e05e4SDmitry Monakhov return dquot_quota_off(sb, type); 7033ca0e05e4SDmitry Monakhov } 7034ca0e05e4SDmitry Monakhov 7035ac27a0ecSDave Kleikamp /* Read data from quotafile - avoid pagecache and such because we cannot afford 7036ac27a0ecSDave Kleikamp * acquiring the locks... As quota files are never truncated and quota code 7037ac27a0ecSDave Kleikamp * itself serializes the operations (and no one else should touch the files) 7038ac27a0ecSDave Kleikamp * we don't have to be afraid of races */ 7039617ba13bSMingming Cao static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 7040ac27a0ecSDave Kleikamp size_t len, loff_t off) 7041ac27a0ecSDave Kleikamp { 7042ac27a0ecSDave Kleikamp struct inode *inode = sb_dqopt(sb)->files[type]; 7043725d26d3SAneesh Kumar K.V ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 7044ac27a0ecSDave Kleikamp int offset = off & (sb->s_blocksize - 1); 7045ac27a0ecSDave Kleikamp int tocopy; 7046ac27a0ecSDave Kleikamp size_t toread; 7047ac27a0ecSDave Kleikamp struct buffer_head *bh; 7048ac27a0ecSDave Kleikamp loff_t i_size = i_size_read(inode); 7049ac27a0ecSDave Kleikamp 7050ac27a0ecSDave Kleikamp if (off > i_size) 7051ac27a0ecSDave Kleikamp return 0; 7052ac27a0ecSDave Kleikamp if (off+len > i_size) 7053ac27a0ecSDave Kleikamp len = i_size-off; 7054ac27a0ecSDave Kleikamp toread = len; 7055ac27a0ecSDave Kleikamp while (toread > 0) { 705666267814SJiangshan Yi tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); 70571c215028STheodore Ts'o bh = ext4_bread(NULL, inode, blk, 0); 70581c215028STheodore Ts'o if (IS_ERR(bh)) 70591c215028STheodore Ts'o return PTR_ERR(bh); 7060ac27a0ecSDave Kleikamp if (!bh) /* A hole? */ 7061ac27a0ecSDave Kleikamp memset(data, 0, tocopy); 7062ac27a0ecSDave Kleikamp else 7063ac27a0ecSDave Kleikamp memcpy(data, bh->b_data+offset, tocopy); 7064ac27a0ecSDave Kleikamp brelse(bh); 7065ac27a0ecSDave Kleikamp offset = 0; 7066ac27a0ecSDave Kleikamp toread -= tocopy; 7067ac27a0ecSDave Kleikamp data += tocopy; 7068ac27a0ecSDave Kleikamp blk++; 7069ac27a0ecSDave Kleikamp } 7070ac27a0ecSDave Kleikamp return len; 7071ac27a0ecSDave Kleikamp } 7072ac27a0ecSDave Kleikamp 7073ac27a0ecSDave Kleikamp /* Write to quotafile (we know the transaction is already started and has 7074ac27a0ecSDave Kleikamp * enough credits) */ 7075617ba13bSMingming Cao static ssize_t ext4_quota_write(struct super_block *sb, int type, 7076ac27a0ecSDave Kleikamp const char *data, size_t len, loff_t off) 7077ac27a0ecSDave Kleikamp { 7078ac27a0ecSDave Kleikamp struct inode *inode = sb_dqopt(sb)->files[type]; 7079725d26d3SAneesh Kumar K.V ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 70804209ae12SHarshad Shirwadkar int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1); 7081c5e298aeSTheodore Ts'o int retries = 0; 7082ac27a0ecSDave Kleikamp struct buffer_head *bh; 7083ac27a0ecSDave Kleikamp handle_t *handle = journal_current_handle(); 7084ac27a0ecSDave Kleikamp 7085380a0091SYe Bin if (!handle) { 7086b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 7087b31e1552SEric Sandeen " cancelled because transaction is not started", 70889c3013e9SJan Kara (unsigned long long)off, (unsigned long long)len); 70899c3013e9SJan Kara return -EIO; 70909c3013e9SJan Kara } 709167eeb568SDmitry Monakhov /* 709267eeb568SDmitry Monakhov * Since we account only one data block in transaction credits, 709367eeb568SDmitry Monakhov * then it is impossible to cross a block boundary. 709467eeb568SDmitry Monakhov */ 709567eeb568SDmitry Monakhov if (sb->s_blocksize - offset < len) { 709667eeb568SDmitry Monakhov ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" 709767eeb568SDmitry Monakhov " cancelled because not block aligned", 709867eeb568SDmitry Monakhov (unsigned long long)off, (unsigned long long)len); 709967eeb568SDmitry Monakhov return -EIO; 710067eeb568SDmitry Monakhov } 710167eeb568SDmitry Monakhov 7102c5e298aeSTheodore Ts'o do { 7103c5e298aeSTheodore Ts'o bh = ext4_bread(handle, inode, blk, 7104c5e298aeSTheodore Ts'o EXT4_GET_BLOCKS_CREATE | 7105c5e298aeSTheodore Ts'o EXT4_GET_BLOCKS_METADATA_NOFAIL); 710645586c70SMasahiro Yamada } while (PTR_ERR(bh) == -ENOSPC && 7107c5e298aeSTheodore Ts'o ext4_should_retry_alloc(inode->i_sb, &retries)); 71081c215028STheodore Ts'o if (IS_ERR(bh)) 71091c215028STheodore Ts'o return PTR_ERR(bh); 7110ac27a0ecSDave Kleikamp if (!bh) 7111ac27a0ecSDave Kleikamp goto out; 71125d601255Sliang xie BUFFER_TRACE(bh, "get write access"); 7113188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE); 7114ac27a0ecSDave Kleikamp if (err) { 7115ac27a0ecSDave Kleikamp brelse(bh); 71161c215028STheodore Ts'o return err; 7117ac27a0ecSDave Kleikamp } 7118ac27a0ecSDave Kleikamp lock_buffer(bh); 711967eeb568SDmitry Monakhov memcpy(bh->b_data+offset, data, len); 7120ac27a0ecSDave Kleikamp flush_dcache_page(bh->b_page); 7121ac27a0ecSDave Kleikamp unlock_buffer(bh); 71220390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bh); 7123ac27a0ecSDave Kleikamp brelse(bh); 7124ac27a0ecSDave Kleikamp out: 712567eeb568SDmitry Monakhov if (inode->i_size < off + len) { 712667eeb568SDmitry Monakhov i_size_write(inode, off + len); 7127617ba13bSMingming Cao EXT4_I(inode)->i_disksize = inode->i_size; 71284209ae12SHarshad Shirwadkar err2 = ext4_mark_inode_dirty(handle, inode); 71294209ae12SHarshad Shirwadkar if (unlikely(err2 && !err)) 71304209ae12SHarshad Shirwadkar err = err2; 713121f97697SJan Kara } 71324209ae12SHarshad Shirwadkar return err ? err : len; 7133ac27a0ecSDave Kleikamp } 7134ac27a0ecSDave Kleikamp #endif 7135ac27a0ecSDave Kleikamp 7136c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) 713724b58424STheodore Ts'o static inline void register_as_ext2(void) 713824b58424STheodore Ts'o { 713924b58424STheodore Ts'o int err = register_filesystem(&ext2_fs_type); 714024b58424STheodore Ts'o if (err) 714124b58424STheodore Ts'o printk(KERN_WARNING 714224b58424STheodore Ts'o "EXT4-fs: Unable to register as ext2 (%d)\n", err); 714324b58424STheodore Ts'o } 714424b58424STheodore Ts'o 714524b58424STheodore Ts'o static inline void unregister_as_ext2(void) 714624b58424STheodore Ts'o { 714724b58424STheodore Ts'o unregister_filesystem(&ext2_fs_type); 714824b58424STheodore Ts'o } 71492035e776STheodore Ts'o 71502035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) 71512035e776STheodore Ts'o { 7152e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext2_incompat_features(sb)) 71532035e776STheodore Ts'o return 0; 7154bc98a42cSDavid Howells if (sb_rdonly(sb)) 71552035e776STheodore Ts'o return 1; 7156e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext2_ro_compat_features(sb)) 71572035e776STheodore Ts'o return 0; 71582035e776STheodore Ts'o return 1; 71592035e776STheodore Ts'o } 716024b58424STheodore Ts'o #else 716124b58424STheodore Ts'o static inline void register_as_ext2(void) { } 716224b58424STheodore Ts'o static inline void unregister_as_ext2(void) { } 71632035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } 716424b58424STheodore Ts'o #endif 716524b58424STheodore Ts'o 716624b58424STheodore Ts'o static inline void register_as_ext3(void) 716724b58424STheodore Ts'o { 716824b58424STheodore Ts'o int err = register_filesystem(&ext3_fs_type); 716924b58424STheodore Ts'o if (err) 717024b58424STheodore Ts'o printk(KERN_WARNING 717124b58424STheodore Ts'o "EXT4-fs: Unable to register as ext3 (%d)\n", err); 717224b58424STheodore Ts'o } 717324b58424STheodore Ts'o 717424b58424STheodore Ts'o static inline void unregister_as_ext3(void) 717524b58424STheodore Ts'o { 717624b58424STheodore Ts'o unregister_filesystem(&ext3_fs_type); 717724b58424STheodore Ts'o } 71782035e776STheodore Ts'o 71792035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb) 71802035e776STheodore Ts'o { 7181e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext3_incompat_features(sb)) 71822035e776STheodore Ts'o return 0; 7183e2b911c5SDarrick J. Wong if (!ext4_has_feature_journal(sb)) 71842035e776STheodore Ts'o return 0; 7185bc98a42cSDavid Howells if (sb_rdonly(sb)) 71862035e776STheodore Ts'o return 1; 7187e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext3_ro_compat_features(sb)) 71882035e776STheodore Ts'o return 0; 71892035e776STheodore Ts'o return 1; 71902035e776STheodore Ts'o } 719124b58424STheodore Ts'o 719203010a33STheodore Ts'o static struct file_system_type ext4_fs_type = { 7193ac27a0ecSDave Kleikamp .owner = THIS_MODULE, 719403010a33STheodore Ts'o .name = "ext4", 7195cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 7196cebe85d5SLukas Czerner .parameters = ext4_param_specs, 7197ac27a0ecSDave Kleikamp .kill_sb = kill_block_super, 719814f3db55SChristian Brauner .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 7199ac27a0ecSDave Kleikamp }; 72007f78e035SEric W. Biederman MODULE_ALIAS_FS("ext4"); 7201ac27a0ecSDave Kleikamp 7202e9e3bcecSEric Sandeen /* Shared across all ext4 file systems */ 7203e9e3bcecSEric Sandeen wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; 7204e9e3bcecSEric Sandeen 72055dabfc78STheodore Ts'o static int __init ext4_init_fs(void) 7206ac27a0ecSDave Kleikamp { 7207e9e3bcecSEric Sandeen int i, err; 7208c9de560dSAlex Tomas 7209e294a537STheodore Ts'o ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); 721007c0c5d8SAl Viro ext4_li_info = NULL; 721107c0c5d8SAl Viro 72129a4c8019SCarlos Maiolino /* Build-time check for flags consistency */ 721312e9b892SDmitry Monakhov ext4_check_flag_values(); 7214e9e3bcecSEric Sandeen 7215e142d052SJan Kara for (i = 0; i < EXT4_WQ_HASH_SZ; i++) 7216e9e3bcecSEric Sandeen init_waitqueue_head(&ext4__ioend_wq[i]); 7217e9e3bcecSEric Sandeen 721851865fdaSZheng Liu err = ext4_init_es(); 72196fd058f7STheodore Ts'o if (err) 72206fd058f7STheodore Ts'o return err; 722151865fdaSZheng Liu 72221dc0aa46SEric Whitney err = ext4_init_pending(); 72231dc0aa46SEric Whitney if (err) 722422cfe4b4SEric Biggers goto out7; 722522cfe4b4SEric Biggers 722622cfe4b4SEric Biggers err = ext4_init_post_read_processing(); 722722cfe4b4SEric Biggers if (err) 72281dc0aa46SEric Whitney goto out6; 72291dc0aa46SEric Whitney 723051865fdaSZheng Liu err = ext4_init_pageio(); 723151865fdaSZheng Liu if (err) 7232b5799018STheodore Ts'o goto out5; 723351865fdaSZheng Liu 72345dabfc78STheodore Ts'o err = ext4_init_system_zone(); 7235bd2d0210STheodore Ts'o if (err) 7236b5799018STheodore Ts'o goto out4; 7237857ac889SLukas Czerner 7238b5799018STheodore Ts'o err = ext4_init_sysfs(); 7239dd68314cSTheodore Ts'o if (err) 7240b5799018STheodore Ts'o goto out3; 7241857ac889SLukas Czerner 72425dabfc78STheodore Ts'o err = ext4_init_mballoc(); 7243ac27a0ecSDave Kleikamp if (err) 7244c9de560dSAlex Tomas goto out2; 7245ac27a0ecSDave Kleikamp err = init_inodecache(); 7246ac27a0ecSDave Kleikamp if (err) 7247ac27a0ecSDave Kleikamp goto out1; 7248aa75f4d3SHarshad Shirwadkar 7249aa75f4d3SHarshad Shirwadkar err = ext4_fc_init_dentry_cache(); 7250aa75f4d3SHarshad Shirwadkar if (err) 7251aa75f4d3SHarshad Shirwadkar goto out05; 7252aa75f4d3SHarshad Shirwadkar 725324b58424STheodore Ts'o register_as_ext3(); 72542035e776STheodore Ts'o register_as_ext2(); 725503010a33STheodore Ts'o err = register_filesystem(&ext4_fs_type); 7256ac27a0ecSDave Kleikamp if (err) 7257ac27a0ecSDave Kleikamp goto out; 7258bfff6873SLukas Czerner 7259ac27a0ecSDave Kleikamp return 0; 7260ac27a0ecSDave Kleikamp out: 726124b58424STheodore Ts'o unregister_as_ext2(); 726224b58424STheodore Ts'o unregister_as_ext3(); 7263ab047d51SSebastian Andrzej Siewior ext4_fc_destroy_dentry_cache(); 7264aa75f4d3SHarshad Shirwadkar out05: 7265ac27a0ecSDave Kleikamp destroy_inodecache(); 7266ac27a0ecSDave Kleikamp out1: 72675dabfc78STheodore Ts'o ext4_exit_mballoc(); 72689c191f70ST Makphaibulchoke out2: 7269b5799018STheodore Ts'o ext4_exit_sysfs(); 7270b5799018STheodore Ts'o out3: 7271dd68314cSTheodore Ts'o ext4_exit_system_zone(); 7272b5799018STheodore Ts'o out4: 72735dabfc78STheodore Ts'o ext4_exit_pageio(); 7274b5799018STheodore Ts'o out5: 727522cfe4b4SEric Biggers ext4_exit_post_read_processing(); 72761dc0aa46SEric Whitney out6: 727722cfe4b4SEric Biggers ext4_exit_pending(); 727822cfe4b4SEric Biggers out7: 727951865fdaSZheng Liu ext4_exit_es(); 728051865fdaSZheng Liu 7281ac27a0ecSDave Kleikamp return err; 7282ac27a0ecSDave Kleikamp } 7283ac27a0ecSDave Kleikamp 72845dabfc78STheodore Ts'o static void __exit ext4_exit_fs(void) 7285ac27a0ecSDave Kleikamp { 7286bfff6873SLukas Czerner ext4_destroy_lazyinit_thread(); 728724b58424STheodore Ts'o unregister_as_ext2(); 728824b58424STheodore Ts'o unregister_as_ext3(); 728903010a33STheodore Ts'o unregister_filesystem(&ext4_fs_type); 7290ab047d51SSebastian Andrzej Siewior ext4_fc_destroy_dentry_cache(); 7291ac27a0ecSDave Kleikamp destroy_inodecache(); 72925dabfc78STheodore Ts'o ext4_exit_mballoc(); 7293b5799018STheodore Ts'o ext4_exit_sysfs(); 72945dabfc78STheodore Ts'o ext4_exit_system_zone(); 72955dabfc78STheodore Ts'o ext4_exit_pageio(); 729622cfe4b4SEric Biggers ext4_exit_post_read_processing(); 7297dd12ed14SEric Sandeen ext4_exit_es(); 72981dc0aa46SEric Whitney ext4_exit_pending(); 7299ac27a0ecSDave Kleikamp } 7300ac27a0ecSDave Kleikamp 7301ac27a0ecSDave Kleikamp MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 730283982b6fSTheodore Ts'o MODULE_DESCRIPTION("Fourth Extended Filesystem"); 7303ac27a0ecSDave Kleikamp MODULE_LICENSE("GPL"); 73047ef79ad5STheodore Ts'o MODULE_SOFTDEP("pre: crc32c"); 73055dabfc78STheodore Ts'o module_init(ext4_init_fs) 73065dabfc78STheodore Ts'o module_exit(ext4_exit_fs) 7307