1f5166768STheodore Ts'o // SPDX-License-Identifier: GPL-2.0 2ac27a0ecSDave Kleikamp /* 3617ba13bSMingming Cao * linux/fs/ext4/super.c 4ac27a0ecSDave Kleikamp * 5ac27a0ecSDave Kleikamp * Copyright (C) 1992, 1993, 1994, 1995 6ac27a0ecSDave Kleikamp * Remy Card (card@masi.ibp.fr) 7ac27a0ecSDave Kleikamp * Laboratoire MASI - Institut Blaise Pascal 8ac27a0ecSDave Kleikamp * Universite Pierre et Marie Curie (Paris VI) 9ac27a0ecSDave Kleikamp * 10ac27a0ecSDave Kleikamp * from 11ac27a0ecSDave Kleikamp * 12ac27a0ecSDave Kleikamp * linux/fs/minix/inode.c 13ac27a0ecSDave Kleikamp * 14ac27a0ecSDave Kleikamp * Copyright (C) 1991, 1992 Linus Torvalds 15ac27a0ecSDave Kleikamp * 16ac27a0ecSDave Kleikamp * Big-endian to little-endian byte-swapping/bitmaps by 17ac27a0ecSDave Kleikamp * David S. Miller (davem@caip.rutgers.edu), 1995 18ac27a0ecSDave Kleikamp */ 19ac27a0ecSDave Kleikamp 20ac27a0ecSDave Kleikamp #include <linux/module.h> 21ac27a0ecSDave Kleikamp #include <linux/string.h> 22ac27a0ecSDave Kleikamp #include <linux/fs.h> 23ac27a0ecSDave Kleikamp #include <linux/time.h> 24c5ca7c76STheodore Ts'o #include <linux/vmalloc.h> 25ac27a0ecSDave Kleikamp #include <linux/slab.h> 26ac27a0ecSDave Kleikamp #include <linux/init.h> 27ac27a0ecSDave Kleikamp #include <linux/blkdev.h> 2866114cadSTejun Heo #include <linux/backing-dev.h> 29ac27a0ecSDave Kleikamp #include <linux/parser.h> 30ac27a0ecSDave Kleikamp #include <linux/buffer_head.h> 31a5694255SChristoph Hellwig #include <linux/exportfs.h> 32ac27a0ecSDave Kleikamp #include <linux/vfs.h> 33ac27a0ecSDave Kleikamp #include <linux/random.h> 34ac27a0ecSDave Kleikamp #include <linux/mount.h> 35ac27a0ecSDave Kleikamp #include <linux/namei.h> 36ac27a0ecSDave Kleikamp #include <linux/quotaops.h> 37ac27a0ecSDave Kleikamp #include <linux/seq_file.h> 383197ebdbSTheodore Ts'o #include <linux/ctype.h> 391330593eSVignesh Babu #include <linux/log2.h> 40717d50e4SAndreas Dilger #include <linux/crc16.h> 41ef510424SDan Williams #include <linux/dax.h> 
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fsnotify.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

#include "ext4.h"
#include "ext4_extents.h"	/* Needed for trace points definition */
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
#include "fsmap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>

static struct ext4_lazy_init *ext4_li_info;
static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;

static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
			     unsigned long journal_devnum);
static int ext4_show_options(struct seq_file *seq, struct dentry *root);
static void ext4_update_super(struct super_block *sb);
static int ext4_commit_super(struct super_block *sb);
static int ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es);
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static inline int ext2_feature_set_ok(struct super_block *sb);
static inline int ext3_feature_set_ok(struct super_block *sb);
static void ext4_destroy_lazyinit_thread(void);
static void ext4_unregister_li_request(struct super_block *sb);
static void ext4_clear_request_list(void);
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum);
static int ext4_validate_options(struct fs_context *fc);
static int ext4_check_opt_consistency(struct fs_context *fc,
				      struct super_block *sb);
static void ext4_apply_options(struct fs_context *fc, struct super_block *sb);
static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static const struct fs_parameter_spec ext4_param_specs[];

/*
 * Lock ordering
 *
 * page fault path:
 * mmap_lock -> sb_start_pagefault -> invalidate_lock (r) -> transaction start
 *   -> page lock -> i_data_sem (rw)
 *
 * buffered write path:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> page lock ->
 *   i_data_sem (rw)
 *
 * truncate:
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> i_mmap_rwsem (w) ->
 *   page lock
 * sb_start_write -> i_mutex -> invalidate_lock (w) -> transaction start ->
 *   i_data_sem (rw)
 *
 * direct IO:
 * sb_start_write -> i_mutex -> mmap_lock
 * sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
 *
 * writepages:
 * transaction start -> page lock(s) -> i_data_sem (rw)
 */

static const struct fs_context_operations ext4_context_ops = {
	.parse_param	= ext4_parse_param,
	.get_tree	= ext4_get_tree,
	.reconfigure	= ext4_reconfigure,
	.free		= ext4_fc_free,
};


#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
static struct file_system_type ext2_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext2",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif


static struct file_system_type ext3_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "ext3",
	.init_fs_context	= ext4_init_fs_context,
	.parameters		= ext4_param_specs,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)


/*
 * Submit a read of @bh.  The caller has already locked the buffer and
 * decided that a read is required.
 */
static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
				  bh_end_io_t *end_io)
{
	/*
	 * buffer's verified bit is no longer valid after reading from
	 * disk again due to write out error, clear it to make sure we
	 * recheck the buffer contents.
	 */
	clear_buffer_verified(bh);

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ | op_flags, bh);
}

/*
 * Asynchronously read @bh (which must be locked), unless it is already
 * uptodate, in which case the buffer is simply unlocked.
 */
void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
			 bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	__ext4_read_bh(bh, op_flags, end_io);
}

/*
 * Synchronously read @bh (which must be locked).  Returns 0 on success
 * and -EIO if the buffer is not uptodate after the read completes.
 */
int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags, bh_end_io_t *end_io)
{
	BUG_ON(!buffer_locked(bh));

	if (ext4_buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	__ext4_read_bh(bh, op_flags, end_io);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}

/*
 * Lock @bh and read it.  If @wait is false the read is submitted
 * asynchronously and 0 is returned without waiting for completion.
 */
int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
	lock_buffer(bh);
	if (!wait) {
		ext4_read_bh_nowait(bh, op_flags, NULL);
		return 0;
	}
	return ext4_read_bh(bh, op_flags, NULL);
}

/*
 * This works like __bread_gfp() except it uses ERR_PTR for error
 * returns.  Currently with sb_bread it's impossible to distinguish
 * between ENOMEM and EIO situations (since both result in a NULL
 * return.
 */
static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
					       sector_t block,
					       blk_opf_t op_flags, gfp_t gfp)
{
	struct buffer_head *bh;
	int ret;

	bh = sb_getblk_gfp(sb, block, gfp);
	if (bh == NULL)
		return ERR_PTR(-ENOMEM);
	if (ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | op_flags, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
				  blk_opf_t op_flags)
{
	return __ext4_sb_bread_gfp(sb, block, op_flags, __GFP_MOVABLE);
}

struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
					    sector_t block)
{
	return __ext4_sb_bread_gfp(sb, block, 0, 0);
}

/* Best-effort readahead of @block; failures are silently ignored. */
void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_getblk_gfp(sb, block, 0);

	if (likely(bh)) {
		if (trylock_buffer(bh))
			ext4_read_bh_nowait(bh, REQ_RAHEAD, NULL);
		brelse(bh);
	}
}

/*
 * Returns 1 when the superblock checksum type is acceptable: either the
 * metadata_csum feature is off, or the type is crc32c.
 */
static int ext4_verify_csum_type(struct super_block *sb,
				 struct ext4_super_block *es)
{
	if (!ext4_has_feature_metadata_csum(sb))
		return 1;

	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
}

/* Checksum the superblock up to (but not including) s_checksum itself. */
__le32 ext4_superblock_csum(struct super_block *sb,
			    struct ext4_super_block *es)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int offset = offsetof(struct ext4_super_block, s_checksum);
	__u32 csum;

	csum = ext4_chksum(sbi, ~0, (char *)es, offset);

	return cpu_to_le32(csum);
}

static int ext4_superblock_csum_verify(struct super_block *sb,
				       struct ext4_super_block *es)
{
	if (!ext4_has_metadata_csum(sb))
		return 1;

	return es->s_checksum == ext4_superblock_csum(sb, es);
}

void ext4_superblock_csum_set(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (!ext4_has_metadata_csum(sb))
		return;

	es->s_checksum = ext4_superblock_csum(sb, es);
}

/*
 * Group descriptor field accessors.  Wide fields are stored split into
 * _lo/_hi halves; the _hi half only exists (and is only consulted) when
 * the descriptor size is at least EXT4_MIN_DESC_SIZE_64BIT.
 */
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_block_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
}

ext4_fsblk_t ext4_inode_table(struct super_block *sb,
			      struct ext4_group_desc *bg)
{
	return le32_to_cpu(bg->bg_inode_table_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
}

__u32 ext4_free_group_clusters(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
}

__u32 ext4_free_inodes_count(struct super_block *sb,
			     struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
}

__u32 ext4_used_dirs_count(struct super_block *sb,
			   struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
}

__u32 ext4_itable_unused_count(struct super_block *sb,
			       struct ext4_group_desc *bg)
{
	return le16_to_cpu(bg->bg_itable_unused_lo) |
		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
}

/* Setters mirror the accessors above: the _hi half is only written when
 * the descriptor is large enough to contain it. */
void ext4_block_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_bitmap_set(struct super_block *sb,
			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
}

void ext4_inode_table_set(struct super_block *sb,
			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
{
	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
}

void ext4_free_group_clusters_set(struct super_block *sb,
				  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
}

void ext4_free_inodes_set(struct super_block *sb,
			  struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
}

void ext4_used_dirs_set(struct super_block *sb,
			struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
}

void ext4_itable_unused_set(struct super_block *sb,
			    struct ext4_group_desc *bg, __u32 count)
{
	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}

/*
 * Superblock timestamps are 40 bits: a __le32 low word plus an 8-bit
 * high byte.  Values outside the representable range are clamped.
 */
static void __ext4_update_tstamp(__le32 *lo, __u8 *hi, time64_t now)
{
	now = clamp_val(now, 0, (1ull << 40) - 1);

	*lo = cpu_to_le32(lower_32_bits(now));
	*hi = upper_32_bits(now);
}

static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
{
	return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
}
#define ext4_update_tstamp(es, tstamp) \
	__ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi, \
			     ktime_get_real_seconds())
#define ext4_get_tstamp(es, tstamp) \
	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)

/*
 * The del_gendisk() function uninitializes the disk-specific data
 * structures, including the bdi structure, without telling anyone
 * else.  Once this happens, any attempt to call mark_buffer_dirty()
 * (for example, by ext4_commit_super), will cause a kernel OOPS.
 * This is a kludge to prevent these oops until we can put in a proper
 * hook in del_gendisk() to inform the VFS and file system layers.
 */
static int block_device_ejected(struct super_block *sb)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

	return bdi->dev == NULL;
}

/*
 * Process data freed in @txn and run the callbacks queued on the
 * transaction's t_private_list.  s_md_lock is dropped around each
 * callback invocation, so the list may be appended to concurrently.
 */
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int error = is_journal_aborted(journal);
	struct ext4_journal_cb_entry *jce;

	BUG_ON(txn->t_state == T_FINISHED);

	ext4_process_freed_data(sb, txn->t_tid);

	spin_lock(&sbi->s_md_lock);
	while (!list_empty(&txn->t_private_list)) {
		jce = list_entry(txn->t_private_list.next,
				 struct ext4_journal_cb_entry, jce_list);
		list_del_init(&jce->jce_list);
		spin_unlock(&sbi->s_md_lock);
		jce->jce_func(sb, jce, error);
		spin_lock(&sbi->s_md_lock);
	}
	spin_unlock(&sbi->s_md_lock);
}

/*
 * This writepage callback for write_cache_pages()
 * takes care of a few cases after page cleaning.
 *
 * write_cache_pages() already checks for dirty pages
 * and calls clear_page_dirty_for_io(), which we want,
 * to write protect the pages.
 *
 * However, we may have to redirty a page (see below.)
 */
static int ext4_journalled_writepage_callback(struct page *page,
					      struct writeback_control *wbc,
					      void *data)
{
	transaction_t *transaction = (transaction_t *) data;
	struct buffer_head *bh, *head;
	struct journal_head *jh;

	bh = head = page_buffers(page);
	do {
		/*
		 * We have to redirty a page in these cases:
		 * 1) If buffer is dirty, it means the page was dirty because it
		 * contains a buffer that needs checkpointing. So the dirty bit
		 * needs to be preserved so that checkpointing writes the buffer
		 * properly.
		 * 2) If buffer is not part of the committing transaction
		 * (we may have just accidentally come across this buffer because
		 * inode range tracking is not exact) or if the currently running
		 * transaction already contains this buffer as well, dirty bit
		 * needs to be preserved so that the buffer gets writeprotected
		 * properly on running transaction's commit.
		 */
		jh = bh2jh(bh);
		if (buffer_dirty(bh) ||
		    (jh && (jh->b_transaction != transaction ||
			    jh->b_next_transaction))) {
			redirty_page_for_writepage(wbc, page);
			goto out;
		}
	} while ((bh = bh->b_this_page) != head);

out:
	return AOP_WRITEPAGE_ACTIVATE;
}

static int ext4_journalled_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return write_cache_pages(mapping, &wbc,
				 ext4_journalled_writepage_callback,
				 jinode->i_transaction);
}

static int ext4_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret;

	if (ext4_should_journal_data(jinode->i_vfs_inode))
		ret = ext4_journalled_submit_inode_data_buffers(jinode);
	else
		ret = ext4_normal_submit_inode_data_buffers(jinode);
	return ret;
}

static int ext4_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	int ret = 0;

	if (!ext4_should_journal_data(jinode->i_vfs_inode))
		ret = jbd2_journal_finish_inode_data_buffers(jinode);

	return ret;
}

/* True when the system is halting, powering off, or restarting. */
static bool system_going_down(void)
{
	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
		|| system_state == SYSTEM_RESTART;
}

struct ext4_err_translation {
	int code;
	int errno;
};

#define EXT4_ERR_TRANSLATE(err) { .code = EXT4_ERR_##err, .errno = err }

static struct ext4_err_translation err_translation[] = {
	EXT4_ERR_TRANSLATE(EIO),
	EXT4_ERR_TRANSLATE(ENOMEM),
	EXT4_ERR_TRANSLATE(EFSBADCRC),
	EXT4_ERR_TRANSLATE(EFSCORRUPTED),
	EXT4_ERR_TRANSLATE(ENOSPC),
	EXT4_ERR_TRANSLATE(ENOKEY),
	EXT4_ERR_TRANSLATE(EROFS),
	EXT4_ERR_TRANSLATE(EFBIG),
	EXT4_ERR_TRANSLATE(EEXIST),
	EXT4_ERR_TRANSLATE(ERANGE),
	EXT4_ERR_TRANSLATE(EOVERFLOW),
	EXT4_ERR_TRANSLATE(EBUSY),
	EXT4_ERR_TRANSLATE(ENOTDIR),
	EXT4_ERR_TRANSLATE(ENOTEMPTY),
	EXT4_ERR_TRANSLATE(ESHUTDOWN),
	EXT4_ERR_TRANSLATE(EFAULT),
};

/* Map a kernel errno to the EXT4_ERR_* code stored in the superblock;
 * errnos without an entry in err_translation[] map to EXT4_ERR_UNKNOWN. */
static int ext4_errno_to_code(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(err_translation); i++)
		if (err_translation[i].errno == errno)
			return err_translation[i].code;
	return EXT4_ERR_UNKNOWN;
}

/*
 * Record error details in the in-memory superblock info under
 * s_error_lock.  The s_last_error_* fields are always overwritten; the
 * s_first_error_* fields are filled in only once.
 */
static void save_error_info(struct super_block *sb, int error,
			    __u32 ino, __u64 block,
			    const char *func, unsigned int line)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* We default to EFSCORRUPTED error... */
	if (error == 0)
		error = EFSCORRUPTED;

	spin_lock(&sbi->s_error_lock);
	sbi->s_add_error_count++;
	sbi->s_last_error_code = error;
	sbi->s_last_error_line = line;
	sbi->s_last_error_ino = ino;
	sbi->s_last_error_block = block;
	sbi->s_last_error_func = func;
	sbi->s_last_error_time = ktime_get_real_seconds();
	if (!sbi->s_first_error_time) {
		sbi->s_first_error_code = error;
		sbi->s_first_error_line = line;
		sbi->s_first_error_ino = ino;
		sbi->s_first_error_block = block;
		sbi->s_first_error_func = func;
		sbi->s_first_error_time = sbi->s_last_error_time;
	}
	spin_unlock(&sbi->s_error_lock);
}

/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock.  That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead.
On recovery, the journal will complain about 640ac27a0ecSDave Kleikamp * that error until we've noted it down and cleared it. 641014c9caaSJan Kara * 642014c9caaSJan Kara * If force_ro is set, we unconditionally force the filesystem into an 643014c9caaSJan Kara * ABORT|READONLY state, unless the error response on the fs has been set to 644014c9caaSJan Kara * panic in which case we take the easy way out and panic immediately. This is 645014c9caaSJan Kara * used to deal with unrecoverable failures such as journal IO errors or ENOMEM 646014c9caaSJan Kara * at a critical moment in log management. 647ac27a0ecSDave Kleikamp */ 648e789ca0cSJan Kara static void ext4_handle_error(struct super_block *sb, bool force_ro, int error, 649e789ca0cSJan Kara __u32 ino, __u64 block, 650e789ca0cSJan Kara const char *func, unsigned int line) 651ac27a0ecSDave Kleikamp { 652b08070ecSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 6532d01ddc8SJan Kara bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT); 654b08070ecSJan Kara 655e789ca0cSJan Kara EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 656327eaf73STheodore Ts'o if (test_opt(sb, WARN_ON_ERROR)) 657327eaf73STheodore Ts'o WARN_ON_ONCE(1); 658327eaf73STheodore Ts'o 6592d01ddc8SJan Kara if (!continue_fs && !sb_rdonly(sb)) { 6609b5f6c9bSHarshad Shirwadkar ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED); 661ac27a0ecSDave Kleikamp if (journal) 662dab291afSMingming Cao jbd2_journal_abort(journal, -EIO); 6632d01ddc8SJan Kara } 6642d01ddc8SJan Kara 6652d01ddc8SJan Kara if (!bdev_read_only(sb->s_bdev)) { 6662d01ddc8SJan Kara save_error_info(sb, error, ino, block, func, line); 6672d01ddc8SJan Kara /* 6682d01ddc8SJan Kara * In case the fs should keep running, we need to writeout 6692d01ddc8SJan Kara * superblock through the journal. Due to lock ordering 6702d01ddc8SJan Kara * constraints, it may not be safe to do it right here so we 6712d01ddc8SJan Kara * defer superblock flushing to a workqueue. 
6722d01ddc8SJan Kara */ 673bb9464e0Syangerkun if (continue_fs && journal) 6742d01ddc8SJan Kara schedule_work(&EXT4_SB(sb)->s_error_work); 6752d01ddc8SJan Kara else 6762d01ddc8SJan Kara ext4_commit_super(sb); 6772d01ddc8SJan Kara } 6782d01ddc8SJan Kara 6791dc1097fSJan Kara /* 6801dc1097fSJan Kara * We force ERRORS_RO behavior when system is rebooting. Otherwise we 6811dc1097fSJan Kara * could panic during 'reboot -f' as the underlying device got already 6821dc1097fSJan Kara * disabled. 6831dc1097fSJan Kara */ 684014c9caaSJan Kara if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) { 685617ba13bSMingming Cao panic("EXT4-fs (device %s): panic forced after error\n", 686ac27a0ecSDave Kleikamp sb->s_id); 687ac27a0ecSDave Kleikamp } 688ac2f7ca5SYe Bin 689ac2f7ca5SYe Bin if (sb_rdonly(sb) || continue_fs) 690ac2f7ca5SYe Bin return; 691ac2f7ca5SYe Bin 692014c9caaSJan Kara ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); 693014c9caaSJan Kara /* 694014c9caaSJan Kara * Make sure updated value of ->s_mount_flags will be visible before 695014c9caaSJan Kara * ->s_flags update 696014c9caaSJan Kara */ 697014c9caaSJan Kara smp_wmb(); 698014c9caaSJan Kara sb->s_flags |= SB_RDONLY; 6994327ba52SDaeho Jeong } 700ac27a0ecSDave Kleikamp 701c92dc856SJan Kara static void flush_stashed_error_work(struct work_struct *work) 702c92dc856SJan Kara { 703c92dc856SJan Kara struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info, 704c92dc856SJan Kara s_error_work); 7052d01ddc8SJan Kara journal_t *journal = sbi->s_journal; 7062d01ddc8SJan Kara handle_t *handle; 707c92dc856SJan Kara 7082d01ddc8SJan Kara /* 7092d01ddc8SJan Kara * If the journal is still running, we have to write out superblock 7102d01ddc8SJan Kara * through the journal to avoid collisions of other journalled sb 7112d01ddc8SJan Kara * updates. 
7122d01ddc8SJan Kara * 7132d01ddc8SJan Kara * We use directly jbd2 functions here to avoid recursing back into 7142d01ddc8SJan Kara * ext4 error handling code during handling of previous errors. 7152d01ddc8SJan Kara */ 7162d01ddc8SJan Kara if (!sb_rdonly(sbi->s_sb) && journal) { 717558d6450SYe Bin struct buffer_head *sbh = sbi->s_sbh; 7182d01ddc8SJan Kara handle = jbd2_journal_start(journal, 1); 7192d01ddc8SJan Kara if (IS_ERR(handle)) 7202d01ddc8SJan Kara goto write_directly; 721558d6450SYe Bin if (jbd2_journal_get_write_access(handle, sbh)) { 7222d01ddc8SJan Kara jbd2_journal_stop(handle); 7232d01ddc8SJan Kara goto write_directly; 7242d01ddc8SJan Kara } 7252d01ddc8SJan Kara ext4_update_super(sbi->s_sb); 726558d6450SYe Bin if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { 727558d6450SYe Bin ext4_msg(sbi->s_sb, KERN_ERR, "previous I/O error to " 728558d6450SYe Bin "superblock detected"); 729558d6450SYe Bin clear_buffer_write_io_error(sbh); 730558d6450SYe Bin set_buffer_uptodate(sbh); 731558d6450SYe Bin } 732558d6450SYe Bin 733558d6450SYe Bin if (jbd2_journal_dirty_metadata(handle, sbh)) { 7342d01ddc8SJan Kara jbd2_journal_stop(handle); 7352d01ddc8SJan Kara goto write_directly; 7362d01ddc8SJan Kara } 7372d01ddc8SJan Kara jbd2_journal_stop(handle); 738d578b994SJonathan Davies ext4_notify_error_sysfs(sbi); 7392d01ddc8SJan Kara return; 7402d01ddc8SJan Kara } 7412d01ddc8SJan Kara write_directly: 7422d01ddc8SJan Kara /* 7432d01ddc8SJan Kara * Write through journal failed. Write sb directly to get error info 7442d01ddc8SJan Kara * out and hope for the best. 
7452d01ddc8SJan Kara */ 7464392fbc4SJan Kara ext4_commit_super(sbi->s_sb); 747d578b994SJonathan Davies ext4_notify_error_sysfs(sbi); 748ac27a0ecSDave Kleikamp } 749ac27a0ecSDave Kleikamp 750efbed4dcSTheodore Ts'o #define ext4_error_ratelimit(sb) \ 751efbed4dcSTheodore Ts'o ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \ 752efbed4dcSTheodore Ts'o "EXT4-fs error") 753efbed4dcSTheodore Ts'o 75412062dddSEric Sandeen void __ext4_error(struct super_block *sb, const char *function, 755014c9caaSJan Kara unsigned int line, bool force_ro, int error, __u64 block, 75654d3adbcSTheodore Ts'o const char *fmt, ...) 757ac27a0ecSDave Kleikamp { 7580ff2ea7dSJoe Perches struct va_format vaf; 759ac27a0ecSDave Kleikamp va_list args; 760ac27a0ecSDave Kleikamp 7610db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) 7620db1ff22STheodore Ts'o return; 7630db1ff22STheodore Ts'o 764ccf0f32aSTheodore Ts'o trace_ext4_error(sb, function, line); 765efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(sb)) { 766ac27a0ecSDave Kleikamp va_start(args, fmt); 7670ff2ea7dSJoe Perches vaf.fmt = fmt; 7680ff2ea7dSJoe Perches vaf.va = &args; 769efbed4dcSTheodore Ts'o printk(KERN_CRIT 770efbed4dcSTheodore Ts'o "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", 7710ff2ea7dSJoe Perches sb->s_id, function, line, current->comm, &vaf); 772ac27a0ecSDave Kleikamp va_end(args); 773efbed4dcSTheodore Ts'o } 7749a089b21SGabriel Krisman Bertazi fsnotify_sb_error(sb, NULL, error ? error : EFSCORRUPTED); 7759a089b21SGabriel Krisman Bertazi 776e789ca0cSJan Kara ext4_handle_error(sb, force_ro, error, 0, block, function, line); 777ac27a0ecSDave Kleikamp } 778ac27a0ecSDave Kleikamp 779e7c96e8eSJoe Perches void __ext4_error_inode(struct inode *inode, const char *function, 78054d3adbcSTheodore Ts'o unsigned int line, ext4_fsblk_t block, int error, 781273df556SFrank Mayhar const char *fmt, ...) 
782273df556SFrank Mayhar { 783273df556SFrank Mayhar va_list args; 784f7c21177STheodore Ts'o struct va_format vaf; 785273df556SFrank Mayhar 7860db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 7870db1ff22STheodore Ts'o return; 7880db1ff22STheodore Ts'o 789ccf0f32aSTheodore Ts'o trace_ext4_error(inode->i_sb, function, line); 790efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(inode->i_sb)) { 791273df556SFrank Mayhar va_start(args, fmt); 792f7c21177STheodore Ts'o vaf.fmt = fmt; 793f7c21177STheodore Ts'o vaf.va = &args; 794c398eda0STheodore Ts'o if (block) 795d9ee81daSJoe Perches printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " 796d9ee81daSJoe Perches "inode #%lu: block %llu: comm %s: %pV\n", 797d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 798d9ee81daSJoe Perches block, current->comm, &vaf); 799d9ee81daSJoe Perches else 800d9ee81daSJoe Perches printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " 801d9ee81daSJoe Perches "inode #%lu: comm %s: %pV\n", 802d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 803d9ee81daSJoe Perches current->comm, &vaf); 804273df556SFrank Mayhar va_end(args); 805efbed4dcSTheodore Ts'o } 8069a089b21SGabriel Krisman Bertazi fsnotify_sb_error(inode->i_sb, inode, error ? error : EFSCORRUPTED); 8079a089b21SGabriel Krisman Bertazi 808e789ca0cSJan Kara ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block, 80954d3adbcSTheodore Ts'o function, line); 810273df556SFrank Mayhar } 811273df556SFrank Mayhar 812e7c96e8eSJoe Perches void __ext4_error_file(struct file *file, const char *function, 813f7c21177STheodore Ts'o unsigned int line, ext4_fsblk_t block, 814f7c21177STheodore Ts'o const char *fmt, ...) 
815273df556SFrank Mayhar { 816273df556SFrank Mayhar va_list args; 817f7c21177STheodore Ts'o struct va_format vaf; 818496ad9aaSAl Viro struct inode *inode = file_inode(file); 819273df556SFrank Mayhar char pathname[80], *path; 820273df556SFrank Mayhar 8210db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 8220db1ff22STheodore Ts'o return; 8230db1ff22STheodore Ts'o 824ccf0f32aSTheodore Ts'o trace_ext4_error(inode->i_sb, function, line); 825efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(inode->i_sb)) { 8269bf39ab2SMiklos Szeredi path = file_path(file, pathname, sizeof(pathname)); 827f9a62d09SDan Carpenter if (IS_ERR(path)) 828273df556SFrank Mayhar path = "(unknown)"; 829f7c21177STheodore Ts'o va_start(args, fmt); 830f7c21177STheodore Ts'o vaf.fmt = fmt; 831f7c21177STheodore Ts'o vaf.va = &args; 832d9ee81daSJoe Perches if (block) 833d9ee81daSJoe Perches printk(KERN_CRIT 834d9ee81daSJoe Perches "EXT4-fs error (device %s): %s:%d: inode #%lu: " 835d9ee81daSJoe Perches "block %llu: comm %s: path %s: %pV\n", 836d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 837d9ee81daSJoe Perches block, current->comm, path, &vaf); 838d9ee81daSJoe Perches else 839d9ee81daSJoe Perches printk(KERN_CRIT 840d9ee81daSJoe Perches "EXT4-fs error (device %s): %s:%d: inode #%lu: " 841d9ee81daSJoe Perches "comm %s: path %s: %pV\n", 842d9ee81daSJoe Perches inode->i_sb->s_id, function, line, inode->i_ino, 843d9ee81daSJoe Perches current->comm, path, &vaf); 844273df556SFrank Mayhar va_end(args); 845efbed4dcSTheodore Ts'o } 8469a089b21SGabriel Krisman Bertazi fsnotify_sb_error(inode->i_sb, inode, EFSCORRUPTED); 8479a089b21SGabriel Krisman Bertazi 848e789ca0cSJan Kara ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block, 84954d3adbcSTheodore Ts'o function, line); 850273df556SFrank Mayhar } 851273df556SFrank Mayhar 852722887ddSTheodore Ts'o const char *ext4_decode_error(struct super_block *sb, int errno, 853ac27a0ecSDave Kleikamp char 
nbuf[16]) 854ac27a0ecSDave Kleikamp { 855ac27a0ecSDave Kleikamp char *errstr = NULL; 856ac27a0ecSDave Kleikamp 857ac27a0ecSDave Kleikamp switch (errno) { 8586a797d27SDarrick J. Wong case -EFSCORRUPTED: 8596a797d27SDarrick J. Wong errstr = "Corrupt filesystem"; 8606a797d27SDarrick J. Wong break; 8616a797d27SDarrick J. Wong case -EFSBADCRC: 8626a797d27SDarrick J. Wong errstr = "Filesystem failed CRC"; 8636a797d27SDarrick J. Wong break; 864ac27a0ecSDave Kleikamp case -EIO: 865ac27a0ecSDave Kleikamp errstr = "IO failure"; 866ac27a0ecSDave Kleikamp break; 867ac27a0ecSDave Kleikamp case -ENOMEM: 868ac27a0ecSDave Kleikamp errstr = "Out of memory"; 869ac27a0ecSDave Kleikamp break; 870ac27a0ecSDave Kleikamp case -EROFS: 87178f1ddbbSTheodore Ts'o if (!sb || (EXT4_SB(sb)->s_journal && 87278f1ddbbSTheodore Ts'o EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) 873ac27a0ecSDave Kleikamp errstr = "Journal has aborted"; 874ac27a0ecSDave Kleikamp else 875ac27a0ecSDave Kleikamp errstr = "Readonly filesystem"; 876ac27a0ecSDave Kleikamp break; 877ac27a0ecSDave Kleikamp default: 878ac27a0ecSDave Kleikamp /* If the caller passed in an extra buffer for unknown 879ac27a0ecSDave Kleikamp * errors, textualise them now. Else we just return 880ac27a0ecSDave Kleikamp * NULL. */ 881ac27a0ecSDave Kleikamp if (nbuf) { 882ac27a0ecSDave Kleikamp /* Check for truncated error codes... */ 883ac27a0ecSDave Kleikamp if (snprintf(nbuf, 16, "error %d", -errno) >= 0) 884ac27a0ecSDave Kleikamp errstr = nbuf; 885ac27a0ecSDave Kleikamp } 886ac27a0ecSDave Kleikamp break; 887ac27a0ecSDave Kleikamp } 888ac27a0ecSDave Kleikamp 889ac27a0ecSDave Kleikamp return errstr; 890ac27a0ecSDave Kleikamp } 891ac27a0ecSDave Kleikamp 892617ba13bSMingming Cao /* __ext4_std_error decodes expected errors from journaling functions 893ac27a0ecSDave Kleikamp * automatically and invokes the appropriate error response. 
*/ 894ac27a0ecSDave Kleikamp 895c398eda0STheodore Ts'o void __ext4_std_error(struct super_block *sb, const char *function, 896c398eda0STheodore Ts'o unsigned int line, int errno) 897ac27a0ecSDave Kleikamp { 898ac27a0ecSDave Kleikamp char nbuf[16]; 899ac27a0ecSDave Kleikamp const char *errstr; 900ac27a0ecSDave Kleikamp 9010db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) 9020db1ff22STheodore Ts'o return; 9030db1ff22STheodore Ts'o 904ac27a0ecSDave Kleikamp /* Special case: if the error is EROFS, and we're not already 905ac27a0ecSDave Kleikamp * inside a transaction, then there's really no point in logging 906ac27a0ecSDave Kleikamp * an error. */ 907bc98a42cSDavid Howells if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb)) 908ac27a0ecSDave Kleikamp return; 909ac27a0ecSDave Kleikamp 910efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(sb)) { 911617ba13bSMingming Cao errstr = ext4_decode_error(sb, errno, nbuf); 912c398eda0STheodore Ts'o printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n", 913c398eda0STheodore Ts'o sb->s_id, function, line, errstr); 914efbed4dcSTheodore Ts'o } 9159a089b21SGabriel Krisman Bertazi fsnotify_sb_error(sb, NULL, errno ? errno : EFSCORRUPTED); 916ac27a0ecSDave Kleikamp 917e789ca0cSJan Kara ext4_handle_error(sb, false, -errno, 0, 0, function, line); 918ac27a0ecSDave Kleikamp } 919ac27a0ecSDave Kleikamp 920e7c96e8eSJoe Perches void __ext4_msg(struct super_block *sb, 921e7c96e8eSJoe Perches const char *prefix, const char *fmt, ...) 
922b31e1552SEric Sandeen { 9230ff2ea7dSJoe Perches struct va_format vaf; 924b31e1552SEric Sandeen va_list args; 925b31e1552SEric Sandeen 926da812f61SLukas Czerner if (sb) { 9271cf006edSDmitry Monakhov atomic_inc(&EXT4_SB(sb)->s_msg_count); 928da812f61SLukas Czerner if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), 929da812f61SLukas Czerner "EXT4-fs")) 930efbed4dcSTheodore Ts'o return; 931da812f61SLukas Czerner } 932efbed4dcSTheodore Ts'o 933b31e1552SEric Sandeen va_start(args, fmt); 9340ff2ea7dSJoe Perches vaf.fmt = fmt; 9350ff2ea7dSJoe Perches vaf.va = &args; 936da812f61SLukas Czerner if (sb) 9370ff2ea7dSJoe Perches printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); 938da812f61SLukas Czerner else 939da812f61SLukas Czerner printk("%sEXT4-fs: %pV\n", prefix, &vaf); 940b31e1552SEric Sandeen va_end(args); 941b31e1552SEric Sandeen } 942b31e1552SEric Sandeen 9431cf006edSDmitry Monakhov static int ext4_warning_ratelimit(struct super_block *sb) 9441cf006edSDmitry Monakhov { 9451cf006edSDmitry Monakhov atomic_inc(&EXT4_SB(sb)->s_warning_count); 9461cf006edSDmitry Monakhov return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), 9471cf006edSDmitry Monakhov "EXT4-fs warning"); 9481cf006edSDmitry Monakhov } 949b03a2f7eSAndreas Dilger 95012062dddSEric Sandeen void __ext4_warning(struct super_block *sb, const char *function, 951c398eda0STheodore Ts'o unsigned int line, const char *fmt, ...) 
952ac27a0ecSDave Kleikamp { 9530ff2ea7dSJoe Perches struct va_format vaf; 954ac27a0ecSDave Kleikamp va_list args; 955ac27a0ecSDave Kleikamp 956b03a2f7eSAndreas Dilger if (!ext4_warning_ratelimit(sb)) 957efbed4dcSTheodore Ts'o return; 958efbed4dcSTheodore Ts'o 959ac27a0ecSDave Kleikamp va_start(args, fmt); 9600ff2ea7dSJoe Perches vaf.fmt = fmt; 9610ff2ea7dSJoe Perches vaf.va = &args; 9620ff2ea7dSJoe Perches printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", 9630ff2ea7dSJoe Perches sb->s_id, function, line, &vaf); 964ac27a0ecSDave Kleikamp va_end(args); 965ac27a0ecSDave Kleikamp } 966ac27a0ecSDave Kleikamp 967b03a2f7eSAndreas Dilger void __ext4_warning_inode(const struct inode *inode, const char *function, 968b03a2f7eSAndreas Dilger unsigned int line, const char *fmt, ...) 969b03a2f7eSAndreas Dilger { 970b03a2f7eSAndreas Dilger struct va_format vaf; 971b03a2f7eSAndreas Dilger va_list args; 972b03a2f7eSAndreas Dilger 973b03a2f7eSAndreas Dilger if (!ext4_warning_ratelimit(inode->i_sb)) 974b03a2f7eSAndreas Dilger return; 975b03a2f7eSAndreas Dilger 976b03a2f7eSAndreas Dilger va_start(args, fmt); 977b03a2f7eSAndreas Dilger vaf.fmt = fmt; 978b03a2f7eSAndreas Dilger vaf.va = &args; 979b03a2f7eSAndreas Dilger printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: " 980b03a2f7eSAndreas Dilger "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, 981b03a2f7eSAndreas Dilger function, line, inode->i_ino, current->comm, &vaf); 982b03a2f7eSAndreas Dilger va_end(args); 983b03a2f7eSAndreas Dilger } 984b03a2f7eSAndreas Dilger 985e29136f8STheodore Ts'o void __ext4_grp_locked_error(const char *function, unsigned int line, 986e29136f8STheodore Ts'o struct super_block *sb, ext4_group_t grp, 987e29136f8STheodore Ts'o unsigned long ino, ext4_fsblk_t block, 988e29136f8STheodore Ts'o const char *fmt, ...) 
9895d1b1b3fSAneesh Kumar K.V __releases(bitlock) 9905d1b1b3fSAneesh Kumar K.V __acquires(bitlock) 9915d1b1b3fSAneesh Kumar K.V { 9920ff2ea7dSJoe Perches struct va_format vaf; 9935d1b1b3fSAneesh Kumar K.V va_list args; 9945d1b1b3fSAneesh Kumar K.V 9950db1ff22STheodore Ts'o if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) 9960db1ff22STheodore Ts'o return; 9970db1ff22STheodore Ts'o 998ccf0f32aSTheodore Ts'o trace_ext4_error(sb, function, line); 999efbed4dcSTheodore Ts'o if (ext4_error_ratelimit(sb)) { 10005d1b1b3fSAneesh Kumar K.V va_start(args, fmt); 10010ff2ea7dSJoe Perches vaf.fmt = fmt; 10020ff2ea7dSJoe Perches vaf.va = &args; 100321149d61SRobin Dong printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", 1004e29136f8STheodore Ts'o sb->s_id, function, line, grp); 1005e29136f8STheodore Ts'o if (ino) 10060ff2ea7dSJoe Perches printk(KERN_CONT "inode %lu: ", ino); 1007e29136f8STheodore Ts'o if (block) 1008efbed4dcSTheodore Ts'o printk(KERN_CONT "block %llu:", 1009efbed4dcSTheodore Ts'o (unsigned long long) block); 10100ff2ea7dSJoe Perches printk(KERN_CONT "%pV\n", &vaf); 10115d1b1b3fSAneesh Kumar K.V va_end(args); 1012efbed4dcSTheodore Ts'o } 10135d1b1b3fSAneesh Kumar K.V 1014c92dc856SJan Kara if (test_opt(sb, ERRORS_CONT)) { 1015327eaf73STheodore Ts'o if (test_opt(sb, WARN_ON_ERROR)) 1016327eaf73STheodore Ts'o WARN_ON_ONCE(1); 1017e789ca0cSJan Kara EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; 10182d01ddc8SJan Kara if (!bdev_read_only(sb->s_bdev)) { 10192d01ddc8SJan Kara save_error_info(sb, EFSCORRUPTED, ino, block, function, 10202d01ddc8SJan Kara line); 1021c92dc856SJan Kara schedule_work(&EXT4_SB(sb)->s_error_work); 10222d01ddc8SJan Kara } 10235d1b1b3fSAneesh Kumar K.V return; 10245d1b1b3fSAneesh Kumar K.V } 10255d1b1b3fSAneesh Kumar K.V ext4_unlock_group(sb, grp); 1026e789ca0cSJan Kara ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line); 10275d1b1b3fSAneesh Kumar K.V /* 10285d1b1b3fSAneesh Kumar K.V * We only get here in the ERRORS_RO 
case; relocking the group 10295d1b1b3fSAneesh Kumar K.V * may be dangerous, but nothing bad will happen since the 10305d1b1b3fSAneesh Kumar K.V * filesystem will have already been marked read/only and the 10315d1b1b3fSAneesh Kumar K.V * journal has been aborted. We return 1 as a hint to callers 10325d1b1b3fSAneesh Kumar K.V * who might what to use the return value from 103325985edcSLucas De Marchi * ext4_grp_locked_error() to distinguish between the 10345d1b1b3fSAneesh Kumar K.V * ERRORS_CONT and ERRORS_RO case, and perhaps return more 10355d1b1b3fSAneesh Kumar K.V * aggressively from the ext4 function in question, with a 10365d1b1b3fSAneesh Kumar K.V * more appropriate error code. 10375d1b1b3fSAneesh Kumar K.V */ 10385d1b1b3fSAneesh Kumar K.V ext4_lock_group(sb, grp); 10395d1b1b3fSAneesh Kumar K.V return; 10405d1b1b3fSAneesh Kumar K.V } 10415d1b1b3fSAneesh Kumar K.V 1042db79e6d1SWang Shilong void ext4_mark_group_bitmap_corrupted(struct super_block *sb, 1043db79e6d1SWang Shilong ext4_group_t group, 1044db79e6d1SWang Shilong unsigned int flags) 1045db79e6d1SWang Shilong { 1046db79e6d1SWang Shilong struct ext4_sb_info *sbi = EXT4_SB(sb); 1047db79e6d1SWang Shilong struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1048db79e6d1SWang Shilong struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); 10499af0b3d1SWang Shilong int ret; 1050db79e6d1SWang Shilong 10519af0b3d1SWang Shilong if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) { 10529af0b3d1SWang Shilong ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, 10539af0b3d1SWang Shilong &grp->bb_state); 10549af0b3d1SWang Shilong if (!ret) 1055db79e6d1SWang Shilong percpu_counter_sub(&sbi->s_freeclusters_counter, 1056db79e6d1SWang Shilong grp->bb_free); 1057db79e6d1SWang Shilong } 1058db79e6d1SWang Shilong 10599af0b3d1SWang Shilong if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) { 10609af0b3d1SWang Shilong ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, 10619af0b3d1SWang Shilong 
&grp->bb_state); 10629af0b3d1SWang Shilong if (!ret && gdp) { 1063db79e6d1SWang Shilong int count; 1064db79e6d1SWang Shilong 1065db79e6d1SWang Shilong count = ext4_free_inodes_count(sb, gdp); 1066db79e6d1SWang Shilong percpu_counter_sub(&sbi->s_freeinodes_counter, 1067db79e6d1SWang Shilong count); 1068db79e6d1SWang Shilong } 1069db79e6d1SWang Shilong } 1070db79e6d1SWang Shilong } 1071db79e6d1SWang Shilong 1072617ba13bSMingming Cao void ext4_update_dynamic_rev(struct super_block *sb) 1073ac27a0ecSDave Kleikamp { 1074617ba13bSMingming Cao struct ext4_super_block *es = EXT4_SB(sb)->s_es; 1075ac27a0ecSDave Kleikamp 1076617ba13bSMingming Cao if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) 1077ac27a0ecSDave Kleikamp return; 1078ac27a0ecSDave Kleikamp 107912062dddSEric Sandeen ext4_warning(sb, 1080ac27a0ecSDave Kleikamp "updating to rev %d because of new feature flag, " 1081ac27a0ecSDave Kleikamp "running e2fsck is recommended", 1082617ba13bSMingming Cao EXT4_DYNAMIC_REV); 1083ac27a0ecSDave Kleikamp 1084617ba13bSMingming Cao es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); 1085617ba13bSMingming Cao es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); 1086617ba13bSMingming Cao es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); 1087ac27a0ecSDave Kleikamp /* leave es->s_feature_*compat flags alone */ 1088ac27a0ecSDave Kleikamp /* es->s_uuid will be set by e2fsck if empty */ 1089ac27a0ecSDave Kleikamp 1090ac27a0ecSDave Kleikamp /* 1091ac27a0ecSDave Kleikamp * The rest of the superblock fields should be zero, and if not it 1092ac27a0ecSDave Kleikamp * means they are likely already in use, so leave them alone. We 1093ac27a0ecSDave Kleikamp * can leave it up to e2fsck to clean up any inconsistencies there. 
1094ac27a0ecSDave Kleikamp */ 1095ac27a0ecSDave Kleikamp } 1096ac27a0ecSDave Kleikamp 1097ac27a0ecSDave Kleikamp /* 1098ac27a0ecSDave Kleikamp * Open the external journal device 1099ac27a0ecSDave Kleikamp */ 1100b31e1552SEric Sandeen static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) 1101ac27a0ecSDave Kleikamp { 1102ac27a0ecSDave Kleikamp struct block_device *bdev; 1103ac27a0ecSDave Kleikamp 1104d4d77629STejun Heo bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); 1105ac27a0ecSDave Kleikamp if (IS_ERR(bdev)) 1106ac27a0ecSDave Kleikamp goto fail; 1107ac27a0ecSDave Kleikamp return bdev; 1108ac27a0ecSDave Kleikamp 1109ac27a0ecSDave Kleikamp fail: 1110ea3edd4dSChristoph Hellwig ext4_msg(sb, KERN_ERR, 1111ea3edd4dSChristoph Hellwig "failed to open journal device unknown-block(%u,%u) %ld", 1112ea3edd4dSChristoph Hellwig MAJOR(dev), MINOR(dev), PTR_ERR(bdev)); 1113ac27a0ecSDave Kleikamp return NULL; 1114ac27a0ecSDave Kleikamp } 1115ac27a0ecSDave Kleikamp 1116ac27a0ecSDave Kleikamp /* 1117ac27a0ecSDave Kleikamp * Release the journal device 1118ac27a0ecSDave Kleikamp */ 11194385bab1SAl Viro static void ext4_blkdev_put(struct block_device *bdev) 1120ac27a0ecSDave Kleikamp { 11214385bab1SAl Viro blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 1122ac27a0ecSDave Kleikamp } 1123ac27a0ecSDave Kleikamp 11244385bab1SAl Viro static void ext4_blkdev_remove(struct ext4_sb_info *sbi) 1125ac27a0ecSDave Kleikamp { 1126ac27a0ecSDave Kleikamp struct block_device *bdev; 1127ee7ed3aaSChunguang Xu bdev = sbi->s_journal_bdev; 1128ac27a0ecSDave Kleikamp if (bdev) { 11294385bab1SAl Viro ext4_blkdev_put(bdev); 1130ee7ed3aaSChunguang Xu sbi->s_journal_bdev = NULL; 1131ac27a0ecSDave Kleikamp } 1132ac27a0ecSDave Kleikamp } 1133ac27a0ecSDave Kleikamp 1134ac27a0ecSDave Kleikamp static inline struct inode *orphan_list_entry(struct list_head *l) 1135ac27a0ecSDave Kleikamp { 1136617ba13bSMingming Cao return &list_entry(l, struct ext4_inode_info, 
i_orphan)->vfs_inode; 1137ac27a0ecSDave Kleikamp } 1138ac27a0ecSDave Kleikamp 1139617ba13bSMingming Cao static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi) 1140ac27a0ecSDave Kleikamp { 1141ac27a0ecSDave Kleikamp struct list_head *l; 1142ac27a0ecSDave Kleikamp 1143b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "sb orphan head is %d", 1144ac27a0ecSDave Kleikamp le32_to_cpu(sbi->s_es->s_last_orphan)); 1145ac27a0ecSDave Kleikamp 1146ac27a0ecSDave Kleikamp printk(KERN_ERR "sb_info orphan list:\n"); 1147ac27a0ecSDave Kleikamp list_for_each(l, &sbi->s_orphan) { 1148ac27a0ecSDave Kleikamp struct inode *inode = orphan_list_entry(l); 1149ac27a0ecSDave Kleikamp printk(KERN_ERR " " 1150ac27a0ecSDave Kleikamp "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", 1151ac27a0ecSDave Kleikamp inode->i_sb->s_id, inode->i_ino, inode, 1152ac27a0ecSDave Kleikamp inode->i_mode, inode->i_nlink, 1153ac27a0ecSDave Kleikamp NEXT_ORPHAN(inode)); 1154ac27a0ecSDave Kleikamp } 1155ac27a0ecSDave Kleikamp } 1156ac27a0ecSDave Kleikamp 1157957153fcSJan Kara #ifdef CONFIG_QUOTA 1158957153fcSJan Kara static int ext4_quota_off(struct super_block *sb, int type); 1159957153fcSJan Kara 1160957153fcSJan Kara static inline void ext4_quota_off_umount(struct super_block *sb) 1161957153fcSJan Kara { 1162957153fcSJan Kara int type; 1163957153fcSJan Kara 1164957153fcSJan Kara /* Use our quota_off function to clear inode flags etc. */ 1165957153fcSJan Kara for (type = 0; type < EXT4_MAXQUOTAS; type++) 1166957153fcSJan Kara ext4_quota_off(sb, type); 1167957153fcSJan Kara } 116833458eabSTheodore Ts'o 116933458eabSTheodore Ts'o /* 117033458eabSTheodore Ts'o * This is a helper function which is used in the mount/remount 117133458eabSTheodore Ts'o * codepaths (which holds s_umount) to fetch the quota file name. 
 */
static inline char *get_qf_name(struct super_block *sb,
				struct ext4_sb_info *sbi,
				int type)
{
	/* Safe without rcu_read_lock(): writers hold sb->s_umount. */
	return rcu_dereference_protected(sbi->s_qf_names[type],
					 lockdep_is_held(&sb->s_umount));
}
#else
/* !CONFIG_QUOTA stub: nothing to turn off at umount. */
static inline void ext4_quota_off_umount(struct super_block *sb)
{
}
#endif

/*
 * ext4_put_super - final teardown of an ext4 superblock at unmount.
 *
 * Wired up as ->put_super in ext4_sops below.  The teardown order is
 * deliberate: sysfs first (see comment in the body), then li request and
 * quota, pending error/conversion work, the jbd2 journal, the extent
 * status shrinker and allocator state, then the on-disk superblock update,
 * group descriptors, per-cpu counters, caches, and finally the sbi itself.
 */
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head **group_desc;
	struct flex_groups **flex_groups;
	int aborted = 0;
	int i, err;

	/*
	 * Unregister sysfs before destroying jbd2 journal.
	 * Since we could still access attr_journal_task attribute via sysfs
	 * path which could have sbi->s_journal->j_task as NULL
	 * Unregister sysfs before flush sbi->s_error_work.
	 * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If
	 * read metadata verify failed then will queue error work.
	 * flush_stashed_error_work will call start_this_handle may trigger
	 * BUG_ON.
	 */
	ext4_unregister_sysfs(sb);

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs unmount"))
		ext4_msg(sb, KERN_INFO, "unmounting filesystem %pU.",
			 &sb->s_uuid);

	ext4_unregister_li_request(sb);
	ext4_quota_off_umount(sb);

	flush_work(&sbi->s_error_work);
	destroy_workqueue(sbi->rsv_conversion_wq);
	ext4_release_orphan_info(sb);

	if (sbi->s_journal) {
		/* Remember abort state before the journal is destroyed. */
		aborted = is_journal_aborted(sbi->s_journal);
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if ((err < 0) && !aborted) {
			ext4_abort(sb, -err, "Couldn't clean up the journal");
		}
	}

	ext4_es_unregister_shrinker(sbi);
	timer_shutdown_sync(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);

	if (!sb_rdonly(sb) && !aborted) {
		/* Clean unmount: no journal recovery needed next time. */
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
	}
	if (!sb_rdonly(sb))
		ext4_commit_super(sb);

	rcu_read_lock();
	group_desc = rcu_dereference(sbi->s_group_desc);
	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(group_desc[i]);
	kvfree(group_desc);
	flex_groups = rcu_dereference(sbi->s_flex_groups);
	if (flex_groups) {
		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
			kvfree(flex_groups[i]);
		kvfree(flex_groups);
	}
	rcu_read_unlock();
	percpu_counter_destroy(&sbi->s_freeclusters_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
	percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(get_qf_name(sb, sbi, i));
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->s_journal_bdev && sbi->s_journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->s_journal_bdev);
		invalidate_bdev(sbi->s_journal_bdev);
		ext4_blkdev_remove(sbi);
	}

	ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
	sbi->s_ea_inode_cache = NULL;

	ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
	sbi->s_ea_block_cache = NULL;

	ext4_stop_mmpd(sbi);

	brelse(sbi->s_sbh);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = alloc_inode_sb(sb, ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;	/* VFS treats NULL as -ENOMEM */

	/*
	 * Per-allocation (re)initialization; fields that survive slab
	 * reuse are set up once in init_once() instead.
	 */
	inode_set_iversion(&ei->vfs_inode, 1);
	ei->i_flags = 0;
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	atomic_set(&ei->i_prealloc_active, 0);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
	ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
	ext4_fc_init_inode(&ei->vfs_inode);
	mutex_init(&ei->i_fc_lock);
	return &ei->vfs_inode;
}

/*
 * ->drop_inode: non-zero return means evict rather than keep the inode
 * cached.  Defers to generic_drop_inode(), with fscrypt allowed to force
 * eviction of inodes it can no longer use.
 */
static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	if (!drop)
		drop = fscrypt_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

/*
 * ->free_inode: release fscrypt state and return the in-core inode to
 * the slab cache.  An inode still on the fast-commit list here is a bug,
 * hence the warning.
 */
static void ext4_free_in_core_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	if (!list_empty(&(EXT4_I(inode)->i_fc_list))) {
		pr_warn("%s: inode %ld still in fc list",
			__func__, inode->i_ino);
	}
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

/*
 * ->destroy_inode: purely diagnostic.  Complains loudly (with a hexdump
 * and stack trace) if the inode is still on the orphan list, and if
 * delalloc reservations were not drained.
 */
static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}

	if (EXT4_I(inode)->i_reserved_data_blocks)
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
			 inode->i_ino, EXT4_I(inode),
			 EXT4_I(inode)->i_reserved_data_blocks);
}

/*
 * Slab constructor: one-time initialization of fields that must remain
 * valid across slab reuse of the object.
 */
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);
}

/*
 * Create the ext4 inode slab cache; the usercopy window is restricted to
 * the i_data array.  Returns 0 or -ENOMEM.
 */
static int __init init_inodecache(void)
{
	ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache",
				sizeof(struct ext4_inode_info), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
					SLAB_ACCOUNT),
				offsetof(struct ext4_inode_info, i_data),
				sizeof_field(struct ext4_inode_info, i_data),
				init_once);
	if (ext4_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_inode_cachep);
}

/*
 * Tear down per-inode filesystem state: fast-commit tracking, buffers,
 * preallocations, extent status entries, quota, the jbd2 inode, and
 * fscrypt/fsverity state.  NOTE(review): caller appears to be the inode
 * eviction path — confirm against ext4_evict_inode().
 */
void ext4_clear_inode(struct inode *inode)
{
	ext4_fc_del(inode);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	ext4_discard_preallocations(inode, 0);
	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
	dquot_drop(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
}

/*
 * Resolve an NFS file handle (ino + generation) to an inode.  Returns an
 * ERR_PTR on lookup failure or -ESTALE on generation mismatch.
 */
static struct inode *ext4_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct inode *inode;

	/*
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

/* exportfs ->fh_to_dentry: generic decode using ext4_nfs_get_inode(). */
static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/* exportfs ->fh_to_parent: generic decode using ext4_nfs_get_inode(). */
static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}

/*
 * exportfs ->commit_metadata: synchronously write the inode so an NFS
 * COMMIT can guarantee metadata stability.
 */
static int ext4_nfs_commit_metadata(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL
	};

	trace_ext4_nfs_commit_metadata(inode);
	return ext4_write_inode(inode, &wbc);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])

static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags);

static struct dquot **ext4_get_dquots(struct inode *inode)
{
	return EXT4_I(inode)->i_dquot;
}

/* ext4 hooks into the generic dquot machinery. */
static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space	= ext4_get_reserved_space,
	.write_dquot		= ext4_write_dquot,
	.acquire_dquot		= ext4_acquire_dquot,
	.release_dquot		= ext4_release_dquot,
	.mark_dirty		= ext4_mark_dquot_dirty,
	.write_info		= ext4_write_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= ext4_get_projid,
	.get_inode_usage	= ext4_get_inode_usage,
	.get_next_id		= dquot_get_next_id,
};

/* Q_* quotactl entry points; mostly the generic dquot implementations. */
static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#endif

static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.free_inode	= ext4_free_in_core_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
	.get_dquots	= ext4_get_dquots,
#endif
};

static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
	.commit_metadata = ext4_nfs_commit_metadata,
};

/* Token values shared by the mount-option parser tables below. */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb,
	Opt_nouid32, Opt_debug, Opt_removed,
	Opt_user_xattr, Opt_acl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_usrjquota, Opt_grpjquota, Opt_quota,
	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_usrquota, Opt_grpquota, Opt_prjquota,
	Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
	Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_debug_want_extra_isize,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
	Opt_no_prefetch_block_bitmaps, Opt_mb_optimize_scan,
	Opt_errors, Opt_data, Opt_data_err, Opt_jqfmt, Opt_dax_type,
#ifdef CONFIG_EXT4_DEBUG
	Opt_fc_debug_max_replay, Opt_fc_debug_force
#endif
};

/* Allowed values for errors= and the mount flag each maps to. */
static const struct constant_table ext4_param_errors[] = {
	{"continue",	EXT4_MOUNT_ERRORS_CONT},
	{"panic",	EXT4_MOUNT_ERRORS_PANIC},
	{"remount-ro",	EXT4_MOUNT_ERRORS_RO},
	{}
};

/* Allowed values for data= (journalling mode). */
static const struct constant_table ext4_param_data[] = {
	{"journal",	EXT4_MOUNT_JOURNAL_DATA},
	{"ordered",	EXT4_MOUNT_ORDERED_DATA},
	{"writeback",	EXT4_MOUNT_WRITEBACK_DATA},
	{}
};

/* Allowed values for data_err=; these map to tokens, not flags. */
static const struct constant_table ext4_param_data_err[] = {
	{"abort",	Opt_data_err_abort},
	{"ignore",	Opt_data_err_ignore},
	{}
};

/* Allowed values for jqfmt= (quota file format). */
static const struct constant_table ext4_param_jqfmt[] = {
	{"vfsold",	QFMT_VFS_OLD},
	{"vfsv0",	QFMT_VFS_V0},
	{"vfsv1",	QFMT_VFS_V1},
	{}
};

/* Allowed values for dax= when given with an argument. */
static const struct constant_table ext4_param_dax[] = {
	{"always",	Opt_dax_always},
	{"inode",	Opt_dax_inode},
	{"never",	Opt_dax_never},
	{}
};

/* String parameter that allows empty argument */
#define fsparam_string_empty(NAME, OPT) \
	__fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)

/*
 * Mount option specification
 * We don't use fsparam_flag_no because of the way we set the
 * options and the way we show them in _ext4_show_options(). To
 * keep the changes to a minimum, let's keep the negative options
 * separate for now.
 */
static const struct fs_parameter_spec ext4_param_specs[] = {
	fsparam_flag	("bsddf",		Opt_bsd_df),
	fsparam_flag	("minixdf",		Opt_minix_df),
	fsparam_flag	("grpid",		Opt_grpid),
	fsparam_flag	("bsdgroups",		Opt_grpid),
	fsparam_flag	("nogrpid",		Opt_nogrpid),
	fsparam_flag	("sysvgroups",		Opt_nogrpid),
	fsparam_u32	("resgid",		Opt_resgid),
	fsparam_u32	("resuid",		Opt_resuid),
	fsparam_u32	("sb",			Opt_sb),
	fsparam_enum	("errors",		Opt_errors, ext4_param_errors),
	fsparam_flag	("nouid32",		Opt_nouid32),
	fsparam_flag	("debug",		Opt_debug),
	fsparam_flag	("oldalloc",		Opt_removed),
	fsparam_flag	("orlov",		Opt_removed),
	fsparam_flag	("user_xattr",		Opt_user_xattr),
	fsparam_flag	("acl",			Opt_acl),
	fsparam_flag	("norecovery",		Opt_noload),
	fsparam_flag	("noload",		Opt_noload),
	fsparam_flag	("bh",			Opt_removed),
	fsparam_flag	("nobh",		Opt_removed),
	fsparam_u32	("commit",		Opt_commit),
	fsparam_u32	("min_batch_time",	Opt_min_batch_time),
	fsparam_u32	("max_batch_time",	Opt_max_batch_time),
	fsparam_u32	("journal_dev",		Opt_journal_dev),
	fsparam_bdev	("journal_path",	Opt_journal_path),
	fsparam_flag	("journal_checksum",	Opt_journal_checksum),
	fsparam_flag	("nojournal_checksum",	Opt_nojournal_checksum),
	fsparam_flag	("journal_async_commit",Opt_journal_async_commit),
	fsparam_flag	("abort",		Opt_abort),
	fsparam_enum	("data",		Opt_data, ext4_param_data),
	fsparam_enum	("data_err",		Opt_data_err,
						ext4_param_data_err),
	fsparam_string_empty
			("usrjquota",		Opt_usrjquota),
	fsparam_string_empty
			("grpjquota",		Opt_grpjquota),
	fsparam_enum	("jqfmt",		Opt_jqfmt, ext4_param_jqfmt),
	fsparam_flag	("grpquota",		Opt_grpquota),
	fsparam_flag	("quota",		Opt_quota),
	fsparam_flag	("noquota",		Opt_noquota),
	fsparam_flag	("usrquota",		Opt_usrquota),
	fsparam_flag	("prjquota",		Opt_prjquota),
	/* "barrier" is accepted both as a bare flag and with a u32 arg. */
	fsparam_flag	("barrier",		Opt_barrier),
	fsparam_u32	("barrier",		Opt_barrier),
	fsparam_flag	("nobarrier",		Opt_nobarrier),
	fsparam_flag	("i_version",		Opt_removed),
	/* "dax" likewise: bare flag, or dax=always|inode|never. */
	fsparam_flag	("dax",			Opt_dax),
	fsparam_enum	("dax",			Opt_dax_type, ext4_param_dax),
	fsparam_u32	("stripe",		Opt_stripe),
	fsparam_flag	("delalloc",		Opt_delalloc),
	fsparam_flag	("nodelalloc",		Opt_nodelalloc),
	fsparam_flag	("warn_on_error",	Opt_warn_on_error),
	fsparam_flag	("nowarn_on_error",	Opt_nowarn_on_error),
	fsparam_u32	("debug_want_extra_isize",
						Opt_debug_want_extra_isize),
	fsparam_flag	("mblk_io_submit",	Opt_removed),
	fsparam_flag	("nomblk_io_submit",	Opt_removed),
	fsparam_flag	("block_validity",	Opt_block_validity),
	fsparam_flag	("noblock_validity",	Opt_noblock_validity),
	fsparam_u32	("inode_readahead_blks",
						Opt_inode_readahead_blks),
	fsparam_u32	("journal_ioprio",	Opt_journal_ioprio),
	fsparam_u32	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("auto_da_alloc",	Opt_auto_da_alloc),
	fsparam_flag	("noauto_da_alloc",	Opt_noauto_da_alloc),
	fsparam_flag	("dioread_nolock",	Opt_dioread_nolock),
	fsparam_flag	("nodioread_nolock",	Opt_dioread_lock),
	fsparam_flag	("dioread_lock",	Opt_dioread_lock),
	fsparam_flag	("discard",		Opt_discard),
	fsparam_flag	("nodiscard",		Opt_nodiscard),
	fsparam_u32	("init_itable",		Opt_init_itable),
	fsparam_flag	("init_itable",		Opt_init_itable),
	fsparam_flag	("noinit_itable",	Opt_noinit_itable),
#ifdef CONFIG_EXT4_DEBUG
	fsparam_flag	("fc_debug_force",	Opt_fc_debug_force),
	fsparam_u32	("fc_debug_max_replay",	Opt_fc_debug_max_replay),
#endif
	fsparam_u32	("max_dir_size_kb",	Opt_max_dir_size_kb),
	fsparam_flag	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_string	("test_dummy_encryption",
						Opt_test_dummy_encryption),
	fsparam_flag	("inlinecrypt",		Opt_inlinecrypt),
	fsparam_flag	("nombcache",		Opt_nombcache),
	fsparam_flag	("no_mbcache",		Opt_nombcache),	/* for backward compatibility */
	fsparam_flag	("prefetch_block_bitmaps",
						Opt_removed),
	fsparam_flag	("no_prefetch_block_bitmaps",
						Opt_no_prefetch_block_bitmaps),
	fsparam_s32	("mb_optimize_scan",	Opt_mb_optimize_scan),
	fsparam_string	("check",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("nocheck",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("reservation",		Opt_removed),	/* mount option from ext2/3 */
	fsparam_flag	("noreservation",	Opt_removed),	/* mount option from ext2/3 */
	fsparam_u32	("journal",		Opt_removed),	/* mount option from ext2/3 */
	{}
};

#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
/*
 * Flags describing how each mount-option token is applied:
 * MOPT_SET/MOPT_CLEAR set or clear ->mount_opt; MOPT_NOSUPPORT rejects
 * the option; MOPT_EXPLICIT records that the user asked for it;
 * MOPT_Q/MOPT_QFMT gate quota options on CONFIG_QUOTA;
 * MOPT_NO_EXT2/MOPT_NO_EXT3 restrict the option by compat personality,
 * with MOPT_EXT4_ONLY combining both; MOPT_2 targets the second
 * mount-option word (s_mount_opt2).
 */
#define MOPT_SET	0x0001
#define MOPT_CLEAR	0x0002
#define MOPT_NOSUPPORT	0x0004
#define MOPT_EXPLICIT	0x0008
#ifdef CONFIG_QUOTA
#define MOPT_Q		0
#define MOPT_QFMT	0x0010
#else
#define MOPT_Q		MOPT_NOSUPPORT
#define MOPT_QFMT	MOPT_NOSUPPORT
#endif
#define MOPT_NO_EXT2	0x0020
#define MOPT_NO_EXT3	0x0040
#define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
#define MOPT_SKIP	0x0080
#define	MOPT_2		0x0100

/*
 * Maps each parsed token to the EXT4_MOUNT_* bit(s) it controls and the
 * MOPT_* flags describing how to apply it.  Entries with mount_opt == 0
 * are handled specially by the parser rather than as a simple bit.
 */
static const struct mount_opts {
	int	token;
	int	mount_opt;
	int	flags;
} ext4_mount_opts[] = {
	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_SET},
	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
	{Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
	{Opt_commit, 0, MOPT_NO_EXT2},
	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_CLEAR},
	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	/* async commit implies journal checksumming. */
	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
				    EXT4_MOUNT_JOURNAL_CHECKSUM),
	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
	{Opt_data_err, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2},
	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
	{Opt_dax_type, 0, MOPT_EXT4_ONLY},
	{Opt_journal_dev, 0, MOPT_NO_EXT2},
	{Opt_journal_path, 0, MOPT_NO_EXT2},
	{Opt_journal_ioprio, 0, MOPT_NO_EXT2},
	{Opt_data, 0, MOPT_NO_EXT2},
	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
#else
	{Opt_acl, 0, MOPT_NOSUPPORT},
#endif
	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
	 MOPT_SET | MOPT_Q},
	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
	 MOPT_CLEAR | MOPT_Q},
	{Opt_usrjquota, 0, MOPT_Q},
	{Opt_grpjquota, 0, MOPT_Q},
	{Opt_jqfmt, 0, MOPT_QFMT},
	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
	{Opt_no_prefetch_block_bitmaps, EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS,
	 MOPT_SET},
#ifdef CONFIG_EXT4_DEBUG
	{Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT,
	 MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY},
#endif
	{Opt_err, 0, 0}
};

#if IS_ENABLED(CONFIG_UNICODE)
static const struct ext4_sb_encodings {
	__u16 magic;
	char *name;
184049bd03ccSChristoph Hellwig unsigned int version; 1841c83ad55eSGabriel Krisman Bertazi } ext4_sb_encoding_map[] = { 184249bd03ccSChristoph Hellwig {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)}, 1843c83ad55eSGabriel Krisman Bertazi }; 1844c83ad55eSGabriel Krisman Bertazi 1845aa8bf298SChristoph Hellwig static const struct ext4_sb_encodings * 1846aa8bf298SChristoph Hellwig ext4_sb_read_encoding(const struct ext4_super_block *es) 1847c83ad55eSGabriel Krisman Bertazi { 1848c83ad55eSGabriel Krisman Bertazi __u16 magic = le16_to_cpu(es->s_encoding); 1849c83ad55eSGabriel Krisman Bertazi int i; 1850c83ad55eSGabriel Krisman Bertazi 1851c83ad55eSGabriel Krisman Bertazi for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++) 1852c83ad55eSGabriel Krisman Bertazi if (magic == ext4_sb_encoding_map[i].magic) 1853aa8bf298SChristoph Hellwig return &ext4_sb_encoding_map[i]; 1854c83ad55eSGabriel Krisman Bertazi 1855aa8bf298SChristoph Hellwig return NULL; 1856c83ad55eSGabriel Krisman Bertazi } 1857c83ad55eSGabriel Krisman Bertazi #endif 1858c83ad55eSGabriel Krisman Bertazi 18596e47a3ccSLukas Czerner #define EXT4_SPEC_JQUOTA (1 << 0) 18606e47a3ccSLukas Czerner #define EXT4_SPEC_JQFMT (1 << 1) 18616e47a3ccSLukas Czerner #define EXT4_SPEC_DATAJ (1 << 2) 18626e47a3ccSLukas Czerner #define EXT4_SPEC_SB_BLOCK (1 << 3) 18636e47a3ccSLukas Czerner #define EXT4_SPEC_JOURNAL_DEV (1 << 4) 18646e47a3ccSLukas Czerner #define EXT4_SPEC_JOURNAL_IOPRIO (1 << 5) 18656e47a3ccSLukas Czerner #define EXT4_SPEC_s_want_extra_isize (1 << 7) 18666e47a3ccSLukas Czerner #define EXT4_SPEC_s_max_batch_time (1 << 8) 18676e47a3ccSLukas Czerner #define EXT4_SPEC_s_min_batch_time (1 << 9) 18686e47a3ccSLukas Czerner #define EXT4_SPEC_s_inode_readahead_blks (1 << 10) 18696e47a3ccSLukas Czerner #define EXT4_SPEC_s_li_wait_mult (1 << 11) 18706e47a3ccSLukas Czerner #define EXT4_SPEC_s_max_dir_size_kb (1 << 12) 18716e47a3ccSLukas Czerner #define EXT4_SPEC_s_stripe (1 << 13) 18726e47a3ccSLukas Czerner #define 
EXT4_SPEC_s_resuid (1 << 14) 18736e47a3ccSLukas Czerner #define EXT4_SPEC_s_resgid (1 << 15) 18746e47a3ccSLukas Czerner #define EXT4_SPEC_s_commit_interval (1 << 16) 18756e47a3ccSLukas Czerner #define EXT4_SPEC_s_fc_debug_max_replay (1 << 17) 18767edfd85bSLukas Czerner #define EXT4_SPEC_s_sb_block (1 << 18) 187727b38686SOjaswin Mujoo #define EXT4_SPEC_mb_optimize_scan (1 << 19) 18786e47a3ccSLukas Czerner 1879461c3af0SLukas Czerner struct ext4_fs_context { 1880e6e268cbSLukas Czerner char *s_qf_names[EXT4_MAXQUOTAS]; 188185456054SEric Biggers struct fscrypt_dummy_policy dummy_enc_policy; 1882e6e268cbSLukas Czerner int s_jquota_fmt; /* Format of quota to use */ 18836e47a3ccSLukas Czerner #ifdef CONFIG_EXT4_DEBUG 18846e47a3ccSLukas Czerner int s_fc_debug_max_replay; 18856e47a3ccSLukas Czerner #endif 18866e47a3ccSLukas Czerner unsigned short qname_spec; 18876e47a3ccSLukas Czerner unsigned long vals_s_flags; /* Bits to set in s_flags */ 18886e47a3ccSLukas Czerner unsigned long mask_s_flags; /* Bits changed in s_flags */ 18896e47a3ccSLukas Czerner unsigned long journal_devnum; 18906e47a3ccSLukas Czerner unsigned long s_commit_interval; 18916e47a3ccSLukas Czerner unsigned long s_stripe; 18926e47a3ccSLukas Czerner unsigned int s_inode_readahead_blks; 18936e47a3ccSLukas Czerner unsigned int s_want_extra_isize; 18946e47a3ccSLukas Czerner unsigned int s_li_wait_mult; 18956e47a3ccSLukas Czerner unsigned int s_max_dir_size_kb; 18966e47a3ccSLukas Czerner unsigned int journal_ioprio; 18976e47a3ccSLukas Czerner unsigned int vals_s_mount_opt; 18986e47a3ccSLukas Czerner unsigned int mask_s_mount_opt; 18996e47a3ccSLukas Czerner unsigned int vals_s_mount_opt2; 19006e47a3ccSLukas Czerner unsigned int mask_s_mount_opt2; 1901e3952fccSLukas Czerner unsigned long vals_s_mount_flags; 1902e3952fccSLukas Czerner unsigned long mask_s_mount_flags; 1903b6bd2435SLukas Czerner unsigned int opt_flags; /* MOPT flags */ 19046e47a3ccSLukas Czerner unsigned int spec; 19056e47a3ccSLukas Czerner u32 
s_max_batch_time; 19066e47a3ccSLukas Czerner u32 s_min_batch_time; 19076e47a3ccSLukas Czerner kuid_t s_resuid; 19086e47a3ccSLukas Czerner kgid_t s_resgid; 19097edfd85bSLukas Czerner ext4_fsblk_t s_sb_block; 1910b237e304SHarshad Shirwadkar }; 1911b237e304SHarshad Shirwadkar 1912cebe85d5SLukas Czerner static void ext4_fc_free(struct fs_context *fc) 1913cebe85d5SLukas Czerner { 1914cebe85d5SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 1915cebe85d5SLukas Czerner int i; 1916cebe85d5SLukas Czerner 1917cebe85d5SLukas Czerner if (!ctx) 1918cebe85d5SLukas Czerner return; 1919cebe85d5SLukas Czerner 1920cebe85d5SLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) 1921cebe85d5SLukas Czerner kfree(ctx->s_qf_names[i]); 1922cebe85d5SLukas Czerner 192385456054SEric Biggers fscrypt_free_dummy_policy(&ctx->dummy_enc_policy); 1924cebe85d5SLukas Czerner kfree(ctx); 1925cebe85d5SLukas Czerner } 1926cebe85d5SLukas Czerner 1927cebe85d5SLukas Czerner int ext4_init_fs_context(struct fs_context *fc) 1928cebe85d5SLukas Czerner { 1929da9e4802SDan Carpenter struct ext4_fs_context *ctx; 1930cebe85d5SLukas Czerner 1931cebe85d5SLukas Czerner ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 1932cebe85d5SLukas Czerner if (!ctx) 1933cebe85d5SLukas Czerner return -ENOMEM; 1934cebe85d5SLukas Czerner 1935cebe85d5SLukas Czerner fc->fs_private = ctx; 1936cebe85d5SLukas Czerner fc->ops = &ext4_context_ops; 1937cebe85d5SLukas Czerner 1938cebe85d5SLukas Czerner return 0; 1939cebe85d5SLukas Czerner } 1940cebe85d5SLukas Czerner 1941e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 1942e6e268cbSLukas Czerner /* 1943e6e268cbSLukas Czerner * Note the name of the specified quota file. 
1944e6e268cbSLukas Czerner */ 1945e6e268cbSLukas Czerner static int note_qf_name(struct fs_context *fc, int qtype, 1946e6e268cbSLukas Czerner struct fs_parameter *param) 1947e6e268cbSLukas Czerner { 1948e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 1949e6e268cbSLukas Czerner char *qname; 1950e6e268cbSLukas Czerner 1951e6e268cbSLukas Czerner if (param->size < 1) { 1952e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "Missing quota name"); 1953e6e268cbSLukas Czerner return -EINVAL; 1954e6e268cbSLukas Czerner } 1955e6e268cbSLukas Czerner if (strchr(param->string, '/')) { 1956e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 1957e6e268cbSLukas Czerner "quotafile must be on filesystem root"); 1958e6e268cbSLukas Czerner return -EINVAL; 1959e6e268cbSLukas Czerner } 1960e6e268cbSLukas Czerner if (ctx->s_qf_names[qtype]) { 1961e6e268cbSLukas Czerner if (strcmp(ctx->s_qf_names[qtype], param->string) != 0) { 1962e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 1963e6e268cbSLukas Czerner "%s quota file already specified", 1964e6e268cbSLukas Czerner QTYPE2NAME(qtype)); 1965e6e268cbSLukas Czerner return -EINVAL; 1966e6e268cbSLukas Czerner } 1967e6e268cbSLukas Czerner return 0; 1968e6e268cbSLukas Czerner } 1969e6e268cbSLukas Czerner 1970e6e268cbSLukas Czerner qname = kmemdup_nul(param->string, param->size, GFP_KERNEL); 1971e6e268cbSLukas Czerner if (!qname) { 1972e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 1973e6e268cbSLukas Czerner "Not enough memory for storing quotafile name"); 1974e6e268cbSLukas Czerner return -ENOMEM; 1975e6e268cbSLukas Czerner } 1976e6e268cbSLukas Czerner ctx->s_qf_names[qtype] = qname; 1977e6e268cbSLukas Czerner ctx->qname_spec |= 1 << qtype; 19786e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JQUOTA; 1979e6e268cbSLukas Czerner return 0; 1980e6e268cbSLukas Czerner } 1981e6e268cbSLukas Czerner 1982e6e268cbSLukas Czerner /* 1983e6e268cbSLukas Czerner * Clear the name of the specified quota file. 
1984e6e268cbSLukas Czerner */ 1985e6e268cbSLukas Czerner static int unnote_qf_name(struct fs_context *fc, int qtype) 1986e6e268cbSLukas Czerner { 1987e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 1988e6e268cbSLukas Czerner 1989e6e268cbSLukas Czerner if (ctx->s_qf_names[qtype]) 1990e6e268cbSLukas Czerner kfree(ctx->s_qf_names[qtype]); 1991e6e268cbSLukas Czerner 1992e6e268cbSLukas Czerner ctx->s_qf_names[qtype] = NULL; 1993e6e268cbSLukas Czerner ctx->qname_spec |= 1 << qtype; 19946e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JQUOTA; 1995e6e268cbSLukas Czerner return 0; 1996e6e268cbSLukas Czerner } 1997e6e268cbSLukas Czerner #endif 1998e6e268cbSLukas Czerner 199985456054SEric Biggers static int ext4_parse_test_dummy_encryption(const struct fs_parameter *param, 200085456054SEric Biggers struct ext4_fs_context *ctx) 200185456054SEric Biggers { 200285456054SEric Biggers int err; 200385456054SEric Biggers 200485456054SEric Biggers if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) { 200585456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 200685456054SEric Biggers "test_dummy_encryption option not supported"); 200785456054SEric Biggers return -EINVAL; 200885456054SEric Biggers } 200985456054SEric Biggers err = fscrypt_parse_test_dummy_encryption(param, 201085456054SEric Biggers &ctx->dummy_enc_policy); 201185456054SEric Biggers if (err == -EINVAL) { 201285456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 201385456054SEric Biggers "Value of option \"%s\" is unrecognized", param->key); 201485456054SEric Biggers } else if (err == -EEXIST) { 201585456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 201685456054SEric Biggers "Conflicting test_dummy_encryption options"); 201785456054SEric Biggers return -EINVAL; 201885456054SEric Biggers } 201985456054SEric Biggers return err; 202085456054SEric Biggers } 202185456054SEric Biggers 20226e47a3ccSLukas Czerner #define EXT4_SET_CTX(name) \ 20234c246728SLukas Czerner static inline void ctx_set_##name(struct ext4_fs_context *ctx, 
\ 20244c246728SLukas Czerner unsigned long flag) \ 20256e47a3ccSLukas Czerner { \ 20266e47a3ccSLukas Czerner ctx->mask_s_##name |= flag; \ 20276e47a3ccSLukas Czerner ctx->vals_s_##name |= flag; \ 2028e3952fccSLukas Czerner } 2029e3952fccSLukas Czerner 2030e3952fccSLukas Czerner #define EXT4_CLEAR_CTX(name) \ 20314c246728SLukas Czerner static inline void ctx_clear_##name(struct ext4_fs_context *ctx, \ 20324c246728SLukas Czerner unsigned long flag) \ 20336e47a3ccSLukas Czerner { \ 20346e47a3ccSLukas Czerner ctx->mask_s_##name |= flag; \ 20356e47a3ccSLukas Czerner ctx->vals_s_##name &= ~flag; \ 2036e3952fccSLukas Czerner } 2037e3952fccSLukas Czerner 2038e3952fccSLukas Czerner #define EXT4_TEST_CTX(name) \ 20394c246728SLukas Czerner static inline unsigned long \ 20404c246728SLukas Czerner ctx_test_##name(struct ext4_fs_context *ctx, unsigned long flag) \ 20416e47a3ccSLukas Czerner { \ 20424c246728SLukas Czerner return (ctx->vals_s_##name & flag); \ 2043e3952fccSLukas Czerner } 20446e47a3ccSLukas Czerner 2045e3952fccSLukas Czerner EXT4_SET_CTX(flags); /* set only */ 20466e47a3ccSLukas Czerner EXT4_SET_CTX(mount_opt); 2047e3952fccSLukas Czerner EXT4_CLEAR_CTX(mount_opt); 2048e3952fccSLukas Czerner EXT4_TEST_CTX(mount_opt); 20496e47a3ccSLukas Czerner EXT4_SET_CTX(mount_opt2); 2050e3952fccSLukas Czerner EXT4_CLEAR_CTX(mount_opt2); 2051e3952fccSLukas Czerner EXT4_TEST_CTX(mount_opt2); 2052e3952fccSLukas Czerner 2053e3952fccSLukas Czerner static inline void ctx_set_mount_flag(struct ext4_fs_context *ctx, int bit) 2054e3952fccSLukas Czerner { 2055e3952fccSLukas Czerner set_bit(bit, &ctx->mask_s_mount_flags); 2056e3952fccSLukas Czerner set_bit(bit, &ctx->vals_s_mount_flags); 2057e3952fccSLukas Czerner } 20586e47a3ccSLukas Czerner 205902f960f8SLukas Czerner static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) 206026092bf5STheodore Ts'o { 2061461c3af0SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2062461c3af0SLukas Czerner struct 
fs_parse_result result; 206326092bf5STheodore Ts'o const struct mount_opts *m; 2064461c3af0SLukas Czerner int is_remount; 206508cefc7aSEric W. Biederman kuid_t uid; 206608cefc7aSEric W. Biederman kgid_t gid; 2067461c3af0SLukas Czerner int token; 2068461c3af0SLukas Czerner 2069461c3af0SLukas Czerner token = fs_parse(fc, ext4_param_specs, param, &result); 2070461c3af0SLukas Czerner if (token < 0) 2071461c3af0SLukas Czerner return token; 2072461c3af0SLukas Czerner is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 207326092bf5STheodore Ts'o 2074ba2e524dSLukas Czerner for (m = ext4_mount_opts; m->token != Opt_err; m++) 2075ba2e524dSLukas Czerner if (token == m->token) 2076ba2e524dSLukas Czerner break; 2077ba2e524dSLukas Czerner 2078ba2e524dSLukas Czerner ctx->opt_flags |= m->flags; 2079ba2e524dSLukas Czerner 2080ba2e524dSLukas Czerner if (m->flags & MOPT_EXPLICIT) { 2081ba2e524dSLukas Czerner if (m->mount_opt & EXT4_MOUNT_DELALLOC) { 2082ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_EXPLICIT_DELALLOC); 2083ba2e524dSLukas Czerner } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { 2084ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, 2085ba2e524dSLukas Czerner EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM); 2086ba2e524dSLukas Czerner } else 2087ba2e524dSLukas Czerner return -EINVAL; 2088ba2e524dSLukas Czerner } 2089ba2e524dSLukas Czerner 2090ba2e524dSLukas Czerner if (m->flags & MOPT_NOSUPPORT) { 2091ba2e524dSLukas Czerner ext4_msg(NULL, KERN_ERR, "%s option not supported", 2092ba2e524dSLukas Czerner param->key); 2093ba2e524dSLukas Czerner return 0; 2094ba2e524dSLukas Czerner } 2095ba2e524dSLukas Czerner 2096ba2e524dSLukas Czerner switch (token) { 209757f73c2cSTheodore Ts'o #ifdef CONFIG_QUOTA 2098ba2e524dSLukas Czerner case Opt_usrjquota: 2099461c3af0SLukas Czerner if (!*param->string) 2100e6e268cbSLukas Czerner return unnote_qf_name(fc, USRQUOTA); 2101461c3af0SLukas Czerner else 2102e6e268cbSLukas Czerner return note_qf_name(fc, USRQUOTA, param); 
2103ba2e524dSLukas Czerner case Opt_grpjquota: 2104461c3af0SLukas Czerner if (!*param->string) 2105e6e268cbSLukas Czerner return unnote_qf_name(fc, GRPQUOTA); 2106461c3af0SLukas Czerner else 2107e6e268cbSLukas Czerner return note_qf_name(fc, GRPQUOTA, param); 210857f73c2cSTheodore Ts'o #endif 210926092bf5STheodore Ts'o case Opt_sb: 21107edfd85bSLukas Czerner if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 21117edfd85bSLukas Czerner ext4_msg(NULL, KERN_WARNING, 21127edfd85bSLukas Czerner "Ignoring %s option on remount", param->key); 21137edfd85bSLukas Czerner } else { 21147edfd85bSLukas Czerner ctx->s_sb_block = result.uint_32; 21157edfd85bSLukas Czerner ctx->spec |= EXT4_SPEC_s_sb_block; 21167edfd85bSLukas Czerner } 211702f960f8SLukas Czerner return 0; 211826092bf5STheodore Ts'o case Opt_removed: 2119da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option", 2120461c3af0SLukas Czerner param->key); 212102f960f8SLukas Czerner return 0; 212226092bf5STheodore Ts'o case Opt_abort: 2123e3952fccSLukas Czerner ctx_set_mount_flag(ctx, EXT4_MF_FS_ABORTED); 212402f960f8SLukas Czerner return 0; 21254f74d15fSEric Biggers case Opt_inlinecrypt: 21264f74d15fSEric Biggers #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT 21276e47a3ccSLukas Czerner ctx_set_flags(ctx, SB_INLINECRYPT); 21284f74d15fSEric Biggers #else 2129da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "inline encryption not supported"); 21304f74d15fSEric Biggers #endif 213102f960f8SLukas Czerner return 0; 2132461c3af0SLukas Czerner case Opt_errors: 21336e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_ERRORS_MASK); 2134ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, result.uint_32); 2135ba2e524dSLukas Czerner return 0; 2136ba2e524dSLukas Czerner #ifdef CONFIG_QUOTA 2137ba2e524dSLukas Czerner case Opt_jqfmt: 2138ba2e524dSLukas Czerner ctx->s_jquota_fmt = result.uint_32; 2139ba2e524dSLukas Czerner ctx->spec |= EXT4_SPEC_JQFMT; 2140ba2e524dSLukas Czerner return 0; 2141ba2e524dSLukas Czerner #endif 
2142ba2e524dSLukas Czerner case Opt_data: 2143ba2e524dSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 2144ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, result.uint_32); 2145ba2e524dSLukas Czerner ctx->spec |= EXT4_SPEC_DATAJ; 2146ba2e524dSLukas Czerner return 0; 2147ba2e524dSLukas Czerner case Opt_commit: 2148461c3af0SLukas Czerner if (result.uint_32 == 0) 21496e47a3ccSLukas Czerner ctx->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE; 2150461c3af0SLukas Czerner else if (result.uint_32 > INT_MAX / HZ) { 2151da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 21529ba55543Szhangyi (F) "Invalid commit interval %d, " 21539ba55543Szhangyi (F) "must be smaller than %d", 2154461c3af0SLukas Czerner result.uint_32, INT_MAX / HZ); 2155da812f61SLukas Czerner return -EINVAL; 21569ba55543Szhangyi (F) } 21576e47a3ccSLukas Czerner ctx->s_commit_interval = HZ * result.uint_32; 21586e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_commit_interval; 2159ba2e524dSLukas Czerner return 0; 2160ba2e524dSLukas Czerner case Opt_debug_want_extra_isize: 21616e47a3ccSLukas Czerner if ((result.uint_32 & 1) || (result.uint_32 < 4)) { 2162da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2163461c3af0SLukas Czerner "Invalid want_extra_isize %d", result.uint_32); 2164da812f61SLukas Czerner return -EINVAL; 21659803387cSTheodore Ts'o } 21666e47a3ccSLukas Czerner ctx->s_want_extra_isize = result.uint_32; 21676e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_want_extra_isize; 2168ba2e524dSLukas Czerner return 0; 2169ba2e524dSLukas Czerner case Opt_max_batch_time: 21706e47a3ccSLukas Czerner ctx->s_max_batch_time = result.uint_32; 21716e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_max_batch_time; 2172ba2e524dSLukas Czerner return 0; 2173ba2e524dSLukas Czerner case Opt_min_batch_time: 21746e47a3ccSLukas Czerner ctx->s_min_batch_time = result.uint_32; 21756e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_min_batch_time; 2176ba2e524dSLukas Czerner return 0; 2177ba2e524dSLukas Czerner case 
Opt_inode_readahead_blks: 2178461c3af0SLukas Czerner if (result.uint_32 && 2179461c3af0SLukas Czerner (result.uint_32 > (1 << 30) || 2180461c3af0SLukas Czerner !is_power_of_2(result.uint_32))) { 2181da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2182e33e60eaSJan Kara "EXT4-fs: inode_readahead_blks must be " 2183e33e60eaSJan Kara "0 or a power of 2 smaller than 2^31"); 2184da812f61SLukas Czerner return -EINVAL; 218526092bf5STheodore Ts'o } 21866e47a3ccSLukas Czerner ctx->s_inode_readahead_blks = result.uint_32; 21876e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_inode_readahead_blks; 2188ba2e524dSLukas Czerner return 0; 2189ba2e524dSLukas Czerner case Opt_init_itable: 21906e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, EXT4_MOUNT_INIT_INODE_TABLE); 21916e47a3ccSLukas Czerner ctx->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 2192461c3af0SLukas Czerner if (param->type == fs_value_is_string) 21936e47a3ccSLukas Czerner ctx->s_li_wait_mult = result.uint_32; 21946e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_li_wait_mult; 2195ba2e524dSLukas Czerner return 0; 2196ba2e524dSLukas Czerner case Opt_max_dir_size_kb: 21976e47a3ccSLukas Czerner ctx->s_max_dir_size_kb = result.uint_32; 21986e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_max_dir_size_kb; 2199ba2e524dSLukas Czerner return 0; 22008016e29fSHarshad Shirwadkar #ifdef CONFIG_EXT4_DEBUG 2201ba2e524dSLukas Czerner case Opt_fc_debug_max_replay: 22026e47a3ccSLukas Czerner ctx->s_fc_debug_max_replay = result.uint_32; 22036e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_fc_debug_max_replay; 2204ba2e524dSLukas Czerner return 0; 22058016e29fSHarshad Shirwadkar #endif 2206ba2e524dSLukas Czerner case Opt_stripe: 22076e47a3ccSLukas Czerner ctx->s_stripe = result.uint_32; 22086e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_stripe; 2209ba2e524dSLukas Czerner return 0; 2210ba2e524dSLukas Czerner case Opt_resuid: 2211461c3af0SLukas Czerner uid = make_kuid(current_user_ns(), result.uint_32); 22120efb3b23SJan Kara if (!uid_valid(uid)) { 
2213da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid uid value %d", 2214461c3af0SLukas Czerner result.uint_32); 2215da812f61SLukas Czerner return -EINVAL; 22160efb3b23SJan Kara } 22176e47a3ccSLukas Czerner ctx->s_resuid = uid; 22186e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_resuid; 2219ba2e524dSLukas Czerner return 0; 2220ba2e524dSLukas Czerner case Opt_resgid: 2221461c3af0SLukas Czerner gid = make_kgid(current_user_ns(), result.uint_32); 22220efb3b23SJan Kara if (!gid_valid(gid)) { 2223da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid gid value %d", 2224461c3af0SLukas Czerner result.uint_32); 2225da812f61SLukas Czerner return -EINVAL; 22260efb3b23SJan Kara } 22276e47a3ccSLukas Czerner ctx->s_resgid = gid; 22286e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_s_resgid; 2229ba2e524dSLukas Czerner return 0; 2230ba2e524dSLukas Czerner case Opt_journal_dev: 22310efb3b23SJan Kara if (is_remount) { 2232da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 22330efb3b23SJan Kara "Cannot specify journal on remount"); 2234da812f61SLukas Czerner return -EINVAL; 22350efb3b23SJan Kara } 2236461c3af0SLukas Czerner ctx->journal_devnum = result.uint_32; 22376e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2238ba2e524dSLukas Czerner return 0; 2239ba2e524dSLukas Czerner case Opt_journal_path: 2240ba2e524dSLukas Czerner { 2241ad4eec61SEric Sandeen struct inode *journal_inode; 2242ad4eec61SEric Sandeen struct path path; 2243ad4eec61SEric Sandeen int error; 2244ad4eec61SEric Sandeen 2245ad4eec61SEric Sandeen if (is_remount) { 2246da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, 2247ad4eec61SEric Sandeen "Cannot specify journal on remount"); 2248da812f61SLukas Czerner return -EINVAL; 2249ad4eec61SEric Sandeen } 2250ad4eec61SEric Sandeen 2251e3ea75eeSLukas Czerner error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path); 2252ad4eec61SEric Sandeen if (error) { 2253da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "error: could not find " 2254461c3af0SLukas Czerner 
"journal device path"); 2255da812f61SLukas Czerner return -EINVAL; 2256ad4eec61SEric Sandeen } 2257ad4eec61SEric Sandeen 22582b0143b5SDavid Howells journal_inode = d_inode(path.dentry); 2259461c3af0SLukas Czerner ctx->journal_devnum = new_encode_dev(journal_inode->i_rdev); 22606e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_DEV; 2261ad4eec61SEric Sandeen path_put(&path); 2262ba2e524dSLukas Czerner return 0; 2263ba2e524dSLukas Czerner } 2264ba2e524dSLukas Czerner case Opt_journal_ioprio: 2265461c3af0SLukas Czerner if (result.uint_32 > 7) { 2266da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "Invalid journal IO priority" 22670efb3b23SJan Kara " (must be 0-7)"); 2268da812f61SLukas Czerner return -EINVAL; 22690efb3b23SJan Kara } 2270461c3af0SLukas Czerner ctx->journal_ioprio = 2271461c3af0SLukas Czerner IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, result.uint_32); 22726e47a3ccSLukas Czerner ctx->spec |= EXT4_SPEC_JOURNAL_IOPRIO; 2273ba2e524dSLukas Czerner return 0; 2274ba2e524dSLukas Czerner case Opt_test_dummy_encryption: 227585456054SEric Biggers return ext4_parse_test_dummy_encryption(param, ctx); 2276ba2e524dSLukas Czerner case Opt_dax: 2277ba2e524dSLukas Czerner case Opt_dax_type: 2278ef83b6e8SDan Williams #ifdef CONFIG_FS_DAX 2279ba2e524dSLukas Czerner { 2280ba2e524dSLukas Czerner int type = (token == Opt_dax) ? 
2281ba2e524dSLukas Czerner Opt_dax : result.uint_32; 2282ba2e524dSLukas Czerner 2283ba2e524dSLukas Czerner switch (type) { 22849cb20f94SIra Weiny case Opt_dax: 22859cb20f94SIra Weiny case Opt_dax_always: 2286ba2e524dSLukas Czerner ctx_set_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 22876e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 22889cb20f94SIra Weiny break; 22899cb20f94SIra Weiny case Opt_dax_never: 2290ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 22916e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 22929cb20f94SIra Weiny break; 22939cb20f94SIra Weiny case Opt_dax_inode: 22946e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS); 22956e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER); 22969cb20f94SIra Weiny /* Strictly for printing options */ 2297ba2e524dSLukas Czerner ctx_set_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE); 22989cb20f94SIra Weiny break; 22999cb20f94SIra Weiny } 2300ba2e524dSLukas Czerner return 0; 2301ba2e524dSLukas Czerner } 2302ef83b6e8SDan Williams #else 2303da812f61SLukas Czerner ext4_msg(NULL, KERN_INFO, "dax option not supported"); 2304da812f61SLukas Czerner return -EINVAL; 2305923ae0ffSRoss Zwisler #endif 2306ba2e524dSLukas Czerner case Opt_data_err: 2307ba2e524dSLukas Czerner if (result.uint_32 == Opt_data_err_abort) 23086e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, m->mount_opt); 2309ba2e524dSLukas Czerner else if (result.uint_32 == Opt_data_err_ignore) 23106e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, m->mount_opt); 2311ba2e524dSLukas Czerner return 0; 2312ba2e524dSLukas Czerner case Opt_mb_optimize_scan: 231327b38686SOjaswin Mujoo if (result.int_32 == 1) { 231427b38686SOjaswin Mujoo ctx_set_mount_opt2(ctx, EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 231527b38686SOjaswin Mujoo ctx->spec |= EXT4_SPEC_mb_optimize_scan; 231627b38686SOjaswin Mujoo } else if (result.int_32 == 0) { 231727b38686SOjaswin Mujoo ctx_clear_mount_opt2(ctx, 
EXT4_MOUNT2_MB_OPTIMIZE_SCAN); 231827b38686SOjaswin Mujoo ctx->spec |= EXT4_SPEC_mb_optimize_scan; 231927b38686SOjaswin Mujoo } else { 2320da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, 2321196e402aSHarshad Shirwadkar "mb_optimize_scan should be set to 0 or 1."); 2322da812f61SLukas Czerner return -EINVAL; 2323196e402aSHarshad Shirwadkar } 2324ba2e524dSLukas Czerner return 0; 2325ba2e524dSLukas Czerner } 2326ba2e524dSLukas Czerner 2327ba2e524dSLukas Czerner /* 2328ba2e524dSLukas Czerner * At this point we should only be getting options requiring MOPT_SET, 2329ba2e524dSLukas Czerner * or MOPT_CLEAR. Anything else is a bug 2330ba2e524dSLukas Czerner */ 2331ba2e524dSLukas Czerner if (m->token == Opt_err) { 2332ba2e524dSLukas Czerner ext4_msg(NULL, KERN_WARNING, "buggy handling of option %s", 2333ba2e524dSLukas Czerner param->key); 2334ba2e524dSLukas Czerner WARN_ON(1); 2335ba2e524dSLukas Czerner return -EINVAL; 2336ba2e524dSLukas Czerner } 2337ba2e524dSLukas Czerner 2338ba2e524dSLukas Czerner else { 2339461c3af0SLukas Czerner unsigned int set = 0; 2340461c3af0SLukas Czerner 2341461c3af0SLukas Czerner if ((param->type == fs_value_is_flag) || 2342461c3af0SLukas Czerner result.uint_32 > 0) 2343461c3af0SLukas Czerner set = 1; 2344461c3af0SLukas Czerner 234526092bf5STheodore Ts'o if (m->flags & MOPT_CLEAR) 2346461c3af0SLukas Czerner set = !set; 234726092bf5STheodore Ts'o else if (unlikely(!(m->flags & MOPT_SET))) { 2348da812f61SLukas Czerner ext4_msg(NULL, KERN_WARNING, 2349461c3af0SLukas Czerner "buggy handling of option %s", 2350461c3af0SLukas Czerner param->key); 235126092bf5STheodore Ts'o WARN_ON(1); 2352da812f61SLukas Czerner return -EINVAL; 235326092bf5STheodore Ts'o } 2354995a3ed6SHarshad Shirwadkar if (m->flags & MOPT_2) { 2355461c3af0SLukas Czerner if (set != 0) 23566e47a3ccSLukas Czerner ctx_set_mount_opt2(ctx, m->mount_opt); 2357995a3ed6SHarshad Shirwadkar else 23586e47a3ccSLukas Czerner ctx_clear_mount_opt2(ctx, m->mount_opt); 2359995a3ed6SHarshad 
Shirwadkar } else { 2360461c3af0SLukas Czerner if (set != 0) 23616e47a3ccSLukas Czerner ctx_set_mount_opt(ctx, m->mount_opt); 236226092bf5STheodore Ts'o else 23636e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, m->mount_opt); 236426092bf5STheodore Ts'o } 2365995a3ed6SHarshad Shirwadkar } 2366ba2e524dSLukas Czerner 236702f960f8SLukas Czerner return 0; 236826092bf5STheodore Ts'o } 236926092bf5STheodore Ts'o 23707edfd85bSLukas Czerner static int parse_options(struct fs_context *fc, char *options) 2371ac27a0ecSDave Kleikamp { 2372461c3af0SLukas Czerner struct fs_parameter param; 2373461c3af0SLukas Czerner int ret; 2374461c3af0SLukas Czerner char *key; 2375ac27a0ecSDave Kleikamp 2376ac27a0ecSDave Kleikamp if (!options) 23777edfd85bSLukas Czerner return 0; 2378461c3af0SLukas Czerner 2379461c3af0SLukas Czerner while ((key = strsep(&options, ",")) != NULL) { 2380461c3af0SLukas Czerner if (*key) { 2381461c3af0SLukas Czerner size_t v_len = 0; 2382461c3af0SLukas Czerner char *value = strchr(key, '='); 2383461c3af0SLukas Czerner 2384461c3af0SLukas Czerner param.type = fs_value_is_flag; 2385461c3af0SLukas Czerner param.string = NULL; 2386461c3af0SLukas Czerner 2387461c3af0SLukas Czerner if (value) { 2388461c3af0SLukas Czerner if (value == key) 2389ac27a0ecSDave Kleikamp continue; 2390461c3af0SLukas Czerner 2391461c3af0SLukas Czerner *value++ = 0; 2392461c3af0SLukas Czerner v_len = strlen(value); 2393461c3af0SLukas Czerner param.string = kmemdup_nul(value, v_len, 2394461c3af0SLukas Czerner GFP_KERNEL); 2395461c3af0SLukas Czerner if (!param.string) 23967edfd85bSLukas Czerner return -ENOMEM; 2397461c3af0SLukas Czerner param.type = fs_value_is_string; 2398461c3af0SLukas Czerner } 2399461c3af0SLukas Czerner 2400461c3af0SLukas Czerner param.key = key; 2401461c3af0SLukas Czerner param.size = v_len; 2402461c3af0SLukas Czerner 240302f960f8SLukas Czerner ret = ext4_parse_param(fc, ¶m); 2404461c3af0SLukas Czerner if (param.string) 2405461c3af0SLukas Czerner kfree(param.string); 
2406461c3af0SLukas Czerner if (ret < 0) 24077edfd85bSLukas Czerner return ret; 2408ac27a0ecSDave Kleikamp } 2409461c3af0SLukas Czerner } 2410461c3af0SLukas Czerner 24117edfd85bSLukas Czerner ret = ext4_validate_options(fc); 2412da812f61SLukas Czerner if (ret < 0) 24137edfd85bSLukas Czerner return ret; 24147edfd85bSLukas Czerner 24157edfd85bSLukas Czerner return 0; 24167edfd85bSLukas Czerner } 24177edfd85bSLukas Czerner 24187edfd85bSLukas Czerner static int parse_apply_sb_mount_options(struct super_block *sb, 24197edfd85bSLukas Czerner struct ext4_fs_context *m_ctx) 24207edfd85bSLukas Czerner { 24217edfd85bSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 24227edfd85bSLukas Czerner char *s_mount_opts = NULL; 24237edfd85bSLukas Czerner struct ext4_fs_context *s_ctx = NULL; 24247edfd85bSLukas Czerner struct fs_context *fc = NULL; 24257edfd85bSLukas Czerner int ret = -ENOMEM; 24267edfd85bSLukas Czerner 24277edfd85bSLukas Czerner if (!sbi->s_es->s_mount_opts[0]) 2428da812f61SLukas Czerner return 0; 2429da812f61SLukas Czerner 24307edfd85bSLukas Czerner s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, 24317edfd85bSLukas Czerner sizeof(sbi->s_es->s_mount_opts), 24327edfd85bSLukas Czerner GFP_KERNEL); 24337edfd85bSLukas Czerner if (!s_mount_opts) 24347edfd85bSLukas Czerner return ret; 2435e6e268cbSLukas Czerner 24367edfd85bSLukas Czerner fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); 24377edfd85bSLukas Czerner if (!fc) 24387edfd85bSLukas Czerner goto out_free; 2439e6e268cbSLukas Czerner 24407edfd85bSLukas Czerner s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 24417edfd85bSLukas Czerner if (!s_ctx) 24427edfd85bSLukas Czerner goto out_free; 24437edfd85bSLukas Czerner 24447edfd85bSLukas Czerner fc->fs_private = s_ctx; 24457edfd85bSLukas Czerner fc->s_fs_info = sbi; 24467edfd85bSLukas Czerner 24477edfd85bSLukas Czerner ret = parse_options(fc, s_mount_opts); 24487edfd85bSLukas Czerner if (ret < 0) 24497edfd85bSLukas Czerner goto parse_failed; 
24507edfd85bSLukas Czerner 24517edfd85bSLukas Czerner ret = ext4_check_opt_consistency(fc, sb); 24527edfd85bSLukas Czerner if (ret < 0) { 24537edfd85bSLukas Czerner parse_failed: 24547edfd85bSLukas Czerner ext4_msg(sb, KERN_WARNING, 24557edfd85bSLukas Czerner "failed to parse options in superblock: %s", 24567edfd85bSLukas Czerner s_mount_opts); 24577edfd85bSLukas Czerner ret = 0; 24587edfd85bSLukas Czerner goto out_free; 24597edfd85bSLukas Czerner } 24607edfd85bSLukas Czerner 24617edfd85bSLukas Czerner if (s_ctx->spec & EXT4_SPEC_JOURNAL_DEV) 24627edfd85bSLukas Czerner m_ctx->journal_devnum = s_ctx->journal_devnum; 24637edfd85bSLukas Czerner if (s_ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO) 24647edfd85bSLukas Czerner m_ctx->journal_ioprio = s_ctx->journal_ioprio; 24657edfd85bSLukas Czerner 246685456054SEric Biggers ext4_apply_options(fc, sb); 246785456054SEric Biggers ret = 0; 24687edfd85bSLukas Czerner 24697edfd85bSLukas Czerner out_free: 2470c069db76SEric Biggers if (fc) { 2471c069db76SEric Biggers ext4_fc_free(fc); 24727edfd85bSLukas Czerner kfree(fc); 2473c069db76SEric Biggers } 24747edfd85bSLukas Czerner kfree(s_mount_opts); 24757edfd85bSLukas Czerner return ret; 24764c94bff9SLukas Czerner } 24774c94bff9SLukas Czerner 2478e6e268cbSLukas Czerner static void ext4_apply_quota_options(struct fs_context *fc, 2479e6e268cbSLukas Czerner struct super_block *sb) 2480e6e268cbSLukas Czerner { 2481e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 24826e47a3ccSLukas Czerner bool quota_feature = ext4_has_feature_quota(sb); 2483e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2484e6e268cbSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 2485e6e268cbSLukas Czerner char *qname; 2486e6e268cbSLukas Czerner int i; 2487e6e268cbSLukas Czerner 24886e47a3ccSLukas Czerner if (quota_feature) 24896e47a3ccSLukas Czerner return; 24906e47a3ccSLukas Czerner 24916e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQUOTA) { 2492e6e268cbSLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; 
i++) { 2493e6e268cbSLukas Czerner if (!(ctx->qname_spec & (1 << i))) 2494e6e268cbSLukas Czerner continue; 24956e47a3ccSLukas Czerner 2496e6e268cbSLukas Czerner qname = ctx->s_qf_names[i]; /* May be NULL */ 24974c1bd5a9SLukas Czerner if (qname) 24984c1bd5a9SLukas Czerner set_opt(sb, QUOTA); 2499e6e268cbSLukas Czerner ctx->s_qf_names[i] = NULL; 250013b215a9SLukas Czerner qname = rcu_replace_pointer(sbi->s_qf_names[i], qname, 250113b215a9SLukas Czerner lockdep_is_held(&sb->s_umount)); 250213b215a9SLukas Czerner if (qname) 250313b215a9SLukas Czerner kfree_rcu(qname); 2504e6e268cbSLukas Czerner } 25056e47a3ccSLukas Czerner } 25066e47a3ccSLukas Czerner 25076e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQFMT) 25086e47a3ccSLukas Czerner sbi->s_jquota_fmt = ctx->s_jquota_fmt; 2509e6e268cbSLukas Czerner #endif 2510e6e268cbSLukas Czerner } 2511e6e268cbSLukas Czerner 2512e6e268cbSLukas Czerner /* 2513e6e268cbSLukas Czerner * Check quota settings consistency. 2514e6e268cbSLukas Czerner */ 2515e6e268cbSLukas Czerner static int ext4_check_quota_consistency(struct fs_context *fc, 2516e6e268cbSLukas Czerner struct super_block *sb) 2517e6e268cbSLukas Czerner { 2518e6e268cbSLukas Czerner #ifdef CONFIG_QUOTA 2519e6e268cbSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 2520e6e268cbSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(sb); 2521e6e268cbSLukas Czerner bool quota_feature = ext4_has_feature_quota(sb); 2522e6e268cbSLukas Czerner bool quota_loaded = sb_any_quota_loaded(sb); 25236e47a3ccSLukas Czerner bool usr_qf_name, grp_qf_name, usrquota, grpquota; 25246e47a3ccSLukas Czerner int quota_flags, i; 2525e6e268cbSLukas Czerner 25266e47a3ccSLukas Czerner /* 25276e47a3ccSLukas Czerner * We do the test below only for project quotas. 'usrquota' and 25286e47a3ccSLukas Czerner * 'grpquota' mount options are allowed even without quota feature 25296e47a3ccSLukas Czerner * to support legacy quotas in quota files. 
25306e47a3ccSLukas Czerner */ 25316e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_PRJQUOTA) && 25326e47a3ccSLukas Czerner !ext4_has_feature_project(sb)) { 25336e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "Project quota feature not enabled. " 25346e47a3ccSLukas Czerner "Cannot enable project quota enforcement."); 25356e47a3ccSLukas Czerner return -EINVAL; 25366e47a3ccSLukas Czerner } 25376e47a3ccSLukas Czerner 25386e47a3ccSLukas Czerner quota_flags = EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | 25396e47a3ccSLukas Czerner EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA; 25406e47a3ccSLukas Czerner if (quota_loaded && 25416e47a3ccSLukas Czerner ctx->mask_s_mount_opt & quota_flags && 25426e47a3ccSLukas Czerner !ctx_test_mount_opt(ctx, quota_flags)) 25436e47a3ccSLukas Czerner goto err_quota_change; 25446e47a3ccSLukas Czerner 25456e47a3ccSLukas Czerner if (ctx->spec & EXT4_SPEC_JQUOTA) { 2546e6e268cbSLukas Czerner 2547e6e268cbSLukas Czerner for (i = 0; i < EXT4_MAXQUOTAS; i++) { 2548e6e268cbSLukas Czerner if (!(ctx->qname_spec & (1 << i))) 2549e6e268cbSLukas Czerner continue; 2550e6e268cbSLukas Czerner 25516e47a3ccSLukas Czerner if (quota_loaded && 25526e47a3ccSLukas Czerner !!sbi->s_qf_names[i] != !!ctx->s_qf_names[i]) 2553e6e268cbSLukas Czerner goto err_jquota_change; 2554e6e268cbSLukas Czerner 2555e6e268cbSLukas Czerner if (sbi->s_qf_names[i] && ctx->s_qf_names[i] && 255613b215a9SLukas Czerner strcmp(get_qf_name(sb, sbi, i), 2557e6e268cbSLukas Czerner ctx->s_qf_names[i]) != 0) 2558e6e268cbSLukas Czerner goto err_jquota_specified; 2559e6e268cbSLukas Czerner } 25606e47a3ccSLukas Czerner 25616e47a3ccSLukas Czerner if (quota_feature) { 25626e47a3ccSLukas Czerner ext4_msg(NULL, KERN_INFO, 25636e47a3ccSLukas Czerner "Journaled quota options ignored when " 25646e47a3ccSLukas Czerner "QUOTA feature is enabled"); 25656e47a3ccSLukas Czerner return 0; 25666e47a3ccSLukas Czerner } 2567e6e268cbSLukas Czerner } 2568e6e268cbSLukas Czerner 25696e47a3ccSLukas Czerner if 
(ctx->spec & EXT4_SPEC_JQFMT) { 2570e6e268cbSLukas Czerner if (sbi->s_jquota_fmt != ctx->s_jquota_fmt && quota_loaded) 25716e47a3ccSLukas Czerner goto err_jquota_change; 2572e6e268cbSLukas Czerner if (quota_feature) { 2573e6e268cbSLukas Czerner ext4_msg(NULL, KERN_INFO, "Quota format mount options " 2574e6e268cbSLukas Czerner "ignored when QUOTA feature is enabled"); 2575e6e268cbSLukas Czerner return 0; 2576e6e268cbSLukas Czerner } 2577e6e268cbSLukas Czerner } 25786e47a3ccSLukas Czerner 25796e47a3ccSLukas Czerner /* Make sure we don't mix old and new quota format */ 25806e47a3ccSLukas Czerner usr_qf_name = (get_qf_name(sb, sbi, USRQUOTA) || 25816e47a3ccSLukas Czerner ctx->s_qf_names[USRQUOTA]); 25826e47a3ccSLukas Czerner grp_qf_name = (get_qf_name(sb, sbi, GRPQUOTA) || 25836e47a3ccSLukas Czerner ctx->s_qf_names[GRPQUOTA]); 25846e47a3ccSLukas Czerner 25856e47a3ccSLukas Czerner usrquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 25866e47a3ccSLukas Czerner test_opt(sb, USRQUOTA)); 25876e47a3ccSLukas Czerner 25886e47a3ccSLukas Czerner grpquota = (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) || 25896e47a3ccSLukas Czerner test_opt(sb, GRPQUOTA)); 25906e47a3ccSLukas Czerner 25916e47a3ccSLukas Czerner if (usr_qf_name) { 25926e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 25936e47a3ccSLukas Czerner usrquota = false; 25946e47a3ccSLukas Czerner } 25956e47a3ccSLukas Czerner if (grp_qf_name) { 25966e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 25976e47a3ccSLukas Czerner grpquota = false; 25986e47a3ccSLukas Czerner } 25996e47a3ccSLukas Czerner 26006e47a3ccSLukas Czerner if (usr_qf_name || grp_qf_name) { 26016e47a3ccSLukas Czerner if (usrquota || grpquota) { 26026e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "old and new quota " 26036e47a3ccSLukas Czerner "format mixing"); 26046e47a3ccSLukas Czerner return -EINVAL; 26056e47a3ccSLukas Czerner } 26066e47a3ccSLukas Czerner 26076e47a3ccSLukas Czerner if (!(ctx->spec & 
EXT4_SPEC_JQFMT || sbi->s_jquota_fmt)) { 26086e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "journaled quota format " 26096e47a3ccSLukas Czerner "not specified"); 26106e47a3ccSLukas Czerner return -EINVAL; 26116e47a3ccSLukas Czerner } 26126e47a3ccSLukas Czerner } 26136e47a3ccSLukas Czerner 2614e6e268cbSLukas Czerner return 0; 2615e6e268cbSLukas Czerner 2616e6e268cbSLukas Czerner err_quota_change: 2617e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, 2618e6e268cbSLukas Czerner "Cannot change quota options when quota turned on"); 2619e6e268cbSLukas Czerner return -EINVAL; 2620e6e268cbSLukas Czerner err_jquota_change: 2621e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "Cannot change journaled quota " 2622e6e268cbSLukas Czerner "options when quota turned on"); 2623e6e268cbSLukas Czerner return -EINVAL; 2624e6e268cbSLukas Czerner err_jquota_specified: 2625e6e268cbSLukas Czerner ext4_msg(NULL, KERN_ERR, "%s quota file already specified", 2626e6e268cbSLukas Czerner QTYPE2NAME(i)); 2627e6e268cbSLukas Czerner return -EINVAL; 2628e6e268cbSLukas Czerner #else 2629e6e268cbSLukas Czerner return 0; 2630e6e268cbSLukas Czerner #endif 2631e6e268cbSLukas Czerner } 2632e6e268cbSLukas Czerner 26335f41fdaeSEric Biggers static int ext4_check_test_dummy_encryption(const struct fs_context *fc, 26345f41fdaeSEric Biggers struct super_block *sb) 26355f41fdaeSEric Biggers { 26365f41fdaeSEric Biggers const struct ext4_fs_context *ctx = fc->fs_private; 26375f41fdaeSEric Biggers const struct ext4_sb_info *sbi = EXT4_SB(sb); 26385f41fdaeSEric Biggers 263985456054SEric Biggers if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy)) 26405f41fdaeSEric Biggers return 0; 26415f41fdaeSEric Biggers 26425f41fdaeSEric Biggers if (!ext4_has_feature_encrypt(sb)) { 26435f41fdaeSEric Biggers ext4_msg(NULL, KERN_WARNING, 26445f41fdaeSEric Biggers "test_dummy_encryption requires encrypt feature"); 26455f41fdaeSEric Biggers return -EINVAL; 26465f41fdaeSEric Biggers } 26475f41fdaeSEric Biggers /* 
26485f41fdaeSEric Biggers * This mount option is just for testing, and it's not worthwhile to 26495f41fdaeSEric Biggers * implement the extra complexity (e.g. RCU protection) that would be 26505f41fdaeSEric Biggers * needed to allow it to be set or changed during remount. We do allow 26515f41fdaeSEric Biggers * it to be specified during remount, but only if there is no change. 26525f41fdaeSEric Biggers */ 265385456054SEric Biggers if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 265485456054SEric Biggers if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 265585456054SEric Biggers &ctx->dummy_enc_policy)) 265685456054SEric Biggers return 0; 26575f41fdaeSEric Biggers ext4_msg(NULL, KERN_WARNING, 265885456054SEric Biggers "Can't set or change test_dummy_encryption on remount"); 26595f41fdaeSEric Biggers return -EINVAL; 26605f41fdaeSEric Biggers } 266185456054SEric Biggers /* Also make sure s_mount_opts didn't contain a conflicting value. */ 266285456054SEric Biggers if (fscrypt_is_dummy_policy_set(&sbi->s_dummy_enc_policy)) { 266385456054SEric Biggers if (fscrypt_dummy_policies_equal(&sbi->s_dummy_enc_policy, 266485456054SEric Biggers &ctx->dummy_enc_policy)) 26655f41fdaeSEric Biggers return 0; 266685456054SEric Biggers ext4_msg(NULL, KERN_WARNING, 266785456054SEric Biggers "Conflicting test_dummy_encryption options"); 266885456054SEric Biggers return -EINVAL; 266985456054SEric Biggers } 2670*7959eb19SEric Biggers return 0; 267185456054SEric Biggers } 267285456054SEric Biggers 267385456054SEric Biggers static void ext4_apply_test_dummy_encryption(struct ext4_fs_context *ctx, 267485456054SEric Biggers struct super_block *sb) 267585456054SEric Biggers { 267685456054SEric Biggers if (!fscrypt_is_dummy_policy_set(&ctx->dummy_enc_policy) || 267785456054SEric Biggers /* if already set, it was already verified to be the same */ 267885456054SEric Biggers fscrypt_is_dummy_policy_set(&EXT4_SB(sb)->s_dummy_enc_policy)) 267985456054SEric Biggers return; 268085456054SEric 
Biggers EXT4_SB(sb)->s_dummy_enc_policy = ctx->dummy_enc_policy; 268185456054SEric Biggers memset(&ctx->dummy_enc_policy, 0, sizeof(ctx->dummy_enc_policy)); 268285456054SEric Biggers ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); 26835f41fdaeSEric Biggers } 26845f41fdaeSEric Biggers 2685b6bd2435SLukas Czerner static int ext4_check_opt_consistency(struct fs_context *fc, 2686b6bd2435SLukas Czerner struct super_block *sb) 2687b6bd2435SLukas Czerner { 2688b6bd2435SLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 26896e47a3ccSLukas Czerner struct ext4_sb_info *sbi = fc->s_fs_info; 26906e47a3ccSLukas Czerner int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE; 26915f41fdaeSEric Biggers int err; 2692b6bd2435SLukas Czerner 2693b6bd2435SLukas Czerner if ((ctx->opt_flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { 2694b6bd2435SLukas Czerner ext4_msg(NULL, KERN_ERR, 2695b6bd2435SLukas Czerner "Mount option(s) incompatible with ext2"); 2696b6bd2435SLukas Czerner return -EINVAL; 2697b6bd2435SLukas Czerner } 2698b6bd2435SLukas Czerner if ((ctx->opt_flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { 2699b6bd2435SLukas Czerner ext4_msg(NULL, KERN_ERR, 2700b6bd2435SLukas Czerner "Mount option(s) incompatible with ext3"); 2701b6bd2435SLukas Czerner return -EINVAL; 2702b6bd2435SLukas Czerner } 2703b6bd2435SLukas Czerner 27046e47a3ccSLukas Czerner if (ctx->s_want_extra_isize > 27056e47a3ccSLukas Czerner (sbi->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE)) { 27066e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, 27076e47a3ccSLukas Czerner "Invalid want_extra_isize %d", 27086e47a3ccSLukas Czerner ctx->s_want_extra_isize); 27096e47a3ccSLukas Czerner return -EINVAL; 27106e47a3ccSLukas Czerner } 27116e47a3ccSLukas Czerner 27126e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DIOREAD_NOLOCK)) { 27136e47a3ccSLukas Czerner int blocksize = 27146e47a3ccSLukas Czerner BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); 27156e47a3ccSLukas Czerner if (blocksize < 
PAGE_SIZE) 27166e47a3ccSLukas Czerner ext4_msg(NULL, KERN_WARNING, "Warning: mounting with an " 27176e47a3ccSLukas Czerner "experimental mount option 'dioread_nolock' " 27186e47a3ccSLukas Czerner "for blocksize < PAGE_SIZE"); 27196e47a3ccSLukas Czerner } 27206e47a3ccSLukas Czerner 27215f41fdaeSEric Biggers err = ext4_check_test_dummy_encryption(fc, sb); 27225f41fdaeSEric Biggers if (err) 27235f41fdaeSEric Biggers return err; 27246e47a3ccSLukas Czerner 27256e47a3ccSLukas Czerner if ((ctx->spec & EXT4_SPEC_DATAJ) && is_remount) { 27266e47a3ccSLukas Czerner if (!sbi->s_journal) { 27276e47a3ccSLukas Czerner ext4_msg(NULL, KERN_WARNING, 27286e47a3ccSLukas Czerner "Remounting file system with no journal " 27296e47a3ccSLukas Czerner "so ignoring journalled data option"); 27306e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS); 27314c246728SLukas Czerner } else if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_FLAGS) != 27324c246728SLukas Czerner test_opt(sb, DATA_FLAGS)) { 27336e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "Cannot change data mode " 27346e47a3ccSLukas Czerner "on remount"); 27356e47a3ccSLukas Czerner return -EINVAL; 27366e47a3ccSLukas Czerner } 27376e47a3ccSLukas Czerner } 27386e47a3ccSLukas Czerner 27396e47a3ccSLukas Czerner if (is_remount) { 27406e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 27416e47a3ccSLukas Czerner (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 27426e47a3ccSLukas Czerner ext4_msg(NULL, KERN_ERR, "can't mount with " 27436e47a3ccSLukas Czerner "both data=journal and dax"); 27446e47a3ccSLukas Czerner return -EINVAL; 27456e47a3ccSLukas Czerner } 27466e47a3ccSLukas Czerner 27476e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && 27486e47a3ccSLukas Czerner (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 27496e47a3ccSLukas Czerner (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) { 27506e47a3ccSLukas Czerner fail_dax_change_remount: 27516e47a3ccSLukas Czerner 
ext4_msg(NULL, KERN_ERR, "can't change " 27526e47a3ccSLukas Czerner "dax mount option while remounting"); 27536e47a3ccSLukas Czerner return -EINVAL; 27546e47a3ccSLukas Czerner } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_NEVER) && 27556e47a3ccSLukas Czerner (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 27566e47a3ccSLukas Czerner (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) { 27576e47a3ccSLukas Czerner goto fail_dax_change_remount; 27586e47a3ccSLukas Czerner } else if (ctx_test_mount_opt2(ctx, EXT4_MOUNT2_DAX_INODE) && 27596e47a3ccSLukas Czerner ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || 27606e47a3ccSLukas Czerner (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || 27616e47a3ccSLukas Czerner !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) { 27626e47a3ccSLukas Czerner goto fail_dax_change_remount; 27636e47a3ccSLukas Czerner } 27646e47a3ccSLukas Czerner } 27656e47a3ccSLukas Czerner 2766b6bd2435SLukas Czerner return ext4_check_quota_consistency(fc, sb); 2767b6bd2435SLukas Czerner } 2768b6bd2435SLukas Czerner 276985456054SEric Biggers static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) 27706e47a3ccSLukas Czerner { 27716e47a3ccSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 27726e47a3ccSLukas Czerner struct ext4_sb_info *sbi = fc->s_fs_info; 27736e47a3ccSLukas Czerner 27746e47a3ccSLukas Czerner sbi->s_mount_opt &= ~ctx->mask_s_mount_opt; 27756e47a3ccSLukas Czerner sbi->s_mount_opt |= ctx->vals_s_mount_opt; 27766e47a3ccSLukas Czerner sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2; 27776e47a3ccSLukas Czerner sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2; 27786e47a3ccSLukas Czerner sbi->s_mount_flags &= ~ctx->mask_s_mount_flags; 27796e47a3ccSLukas Czerner sbi->s_mount_flags |= ctx->vals_s_mount_flags; 27806e47a3ccSLukas Czerner sb->s_flags &= ~ctx->mask_s_flags; 27816e47a3ccSLukas Czerner sb->s_flags |= ctx->vals_s_flags; 27826e47a3ccSLukas Czerner 27836e47a3ccSLukas Czerner #define APPLY(X) ({ if (ctx->spec & EXT4_SPEC_##X) sbi->X 
= ctx->X; }) 27846e47a3ccSLukas Czerner APPLY(s_commit_interval); 27856e47a3ccSLukas Czerner APPLY(s_stripe); 27866e47a3ccSLukas Czerner APPLY(s_max_batch_time); 27876e47a3ccSLukas Czerner APPLY(s_min_batch_time); 27886e47a3ccSLukas Czerner APPLY(s_want_extra_isize); 27896e47a3ccSLukas Czerner APPLY(s_inode_readahead_blks); 27906e47a3ccSLukas Czerner APPLY(s_max_dir_size_kb); 27916e47a3ccSLukas Czerner APPLY(s_li_wait_mult); 27926e47a3ccSLukas Czerner APPLY(s_resgid); 27936e47a3ccSLukas Czerner APPLY(s_resuid); 27946e47a3ccSLukas Czerner 27956e47a3ccSLukas Czerner #ifdef CONFIG_EXT4_DEBUG 27966e47a3ccSLukas Czerner APPLY(s_fc_debug_max_replay); 27976e47a3ccSLukas Czerner #endif 27986e47a3ccSLukas Czerner 27996e47a3ccSLukas Czerner ext4_apply_quota_options(fc, sb); 280085456054SEric Biggers ext4_apply_test_dummy_encryption(ctx, sb); 28016e47a3ccSLukas Czerner } 28026e47a3ccSLukas Czerner 28036e47a3ccSLukas Czerner 2804da812f61SLukas Czerner static int ext4_validate_options(struct fs_context *fc) 28054c94bff9SLukas Czerner { 2806ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 28076e47a3ccSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 28084c94bff9SLukas Czerner char *usr_qf_name, *grp_qf_name; 28096e47a3ccSLukas Czerner 28106e47a3ccSLukas Czerner usr_qf_name = ctx->s_qf_names[USRQUOTA]; 28116e47a3ccSLukas Czerner grp_qf_name = ctx->s_qf_names[GRPQUOTA]; 28126e47a3ccSLukas Czerner 281333458eabSTheodore Ts'o if (usr_qf_name || grp_qf_name) { 28146e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) && usr_qf_name) 28156e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_USRQUOTA); 2816ac27a0ecSDave Kleikamp 28176e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA) && grp_qf_name) 28186e47a3ccSLukas Czerner ctx_clear_mount_opt(ctx, EXT4_MOUNT_GRPQUOTA); 2819ac27a0ecSDave Kleikamp 28206e47a3ccSLukas Czerner if (ctx_test_mount_opt(ctx, EXT4_MOUNT_USRQUOTA) || 28216e47a3ccSLukas Czerner ctx_test_mount_opt(ctx, 
EXT4_MOUNT_GRPQUOTA)) { 2822da812f61SLukas Czerner ext4_msg(NULL, KERN_ERR, "old and new quota " 2823b31e1552SEric Sandeen "format mixing"); 2824da812f61SLukas Czerner return -EINVAL; 2825ac27a0ecSDave Kleikamp } 2826ac27a0ecSDave Kleikamp } 2827ac27a0ecSDave Kleikamp #endif 28286e47a3ccSLukas Czerner return 1; 2829ac27a0ecSDave Kleikamp } 2830ac27a0ecSDave Kleikamp 28312adf6da8STheodore Ts'o static inline void ext4_show_quota_options(struct seq_file *seq, 28322adf6da8STheodore Ts'o struct super_block *sb) 28332adf6da8STheodore Ts'o { 28342adf6da8STheodore Ts'o #if defined(CONFIG_QUOTA) 28352adf6da8STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 283633458eabSTheodore Ts'o char *usr_qf_name, *grp_qf_name; 28372adf6da8STheodore Ts'o 28382adf6da8STheodore Ts'o if (sbi->s_jquota_fmt) { 28392adf6da8STheodore Ts'o char *fmtname = ""; 28402adf6da8STheodore Ts'o 28412adf6da8STheodore Ts'o switch (sbi->s_jquota_fmt) { 28422adf6da8STheodore Ts'o case QFMT_VFS_OLD: 28432adf6da8STheodore Ts'o fmtname = "vfsold"; 28442adf6da8STheodore Ts'o break; 28452adf6da8STheodore Ts'o case QFMT_VFS_V0: 28462adf6da8STheodore Ts'o fmtname = "vfsv0"; 28472adf6da8STheodore Ts'o break; 28482adf6da8STheodore Ts'o case QFMT_VFS_V1: 28492adf6da8STheodore Ts'o fmtname = "vfsv1"; 28502adf6da8STheodore Ts'o break; 28512adf6da8STheodore Ts'o } 28522adf6da8STheodore Ts'o seq_printf(seq, ",jqfmt=%s", fmtname); 28532adf6da8STheodore Ts'o } 28542adf6da8STheodore Ts'o 285533458eabSTheodore Ts'o rcu_read_lock(); 285633458eabSTheodore Ts'o usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]); 285733458eabSTheodore Ts'o grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]); 285833458eabSTheodore Ts'o if (usr_qf_name) 285933458eabSTheodore Ts'o seq_show_option(seq, "usrjquota", usr_qf_name); 286033458eabSTheodore Ts'o if (grp_qf_name) 286133458eabSTheodore Ts'o seq_show_option(seq, "grpjquota", grp_qf_name); 286233458eabSTheodore Ts'o rcu_read_unlock(); 28632adf6da8STheodore Ts'o #endif 
28642adf6da8STheodore Ts'o } 28652adf6da8STheodore Ts'o 28665a916be1STheodore Ts'o static const char *token2str(int token) 28675a916be1STheodore Ts'o { 286897d8a670SLukas Czerner const struct fs_parameter_spec *spec; 28695a916be1STheodore Ts'o 287097d8a670SLukas Czerner for (spec = ext4_param_specs; spec->name != NULL; spec++) 287197d8a670SLukas Czerner if (spec->opt == token && !spec->type) 28725a916be1STheodore Ts'o break; 287397d8a670SLukas Czerner return spec->name; 28745a916be1STheodore Ts'o } 28755a916be1STheodore Ts'o 28762adf6da8STheodore Ts'o /* 28772adf6da8STheodore Ts'o * Show an option if 28782adf6da8STheodore Ts'o * - it's set to a non-default value OR 28792adf6da8STheodore Ts'o * - if the per-sb default is different from the global default 28802adf6da8STheodore Ts'o */ 288166acdcf4STheodore Ts'o static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, 288266acdcf4STheodore Ts'o int nodefs) 28832adf6da8STheodore Ts'o { 28842adf6da8STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 28852adf6da8STheodore Ts'o struct ext4_super_block *es = sbi->s_es; 288668afa7e0STyson Nottingham int def_errors, def_mount_opt = sbi->s_def_mount_opt; 28875a916be1STheodore Ts'o const struct mount_opts *m; 288866acdcf4STheodore Ts'o char sep = nodefs ? 
'\n' : ','; 28892adf6da8STheodore Ts'o 289066acdcf4STheodore Ts'o #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) 289166acdcf4STheodore Ts'o #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) 28922adf6da8STheodore Ts'o 28932adf6da8STheodore Ts'o if (sbi->s_sb_block != 1) 28945a916be1STheodore Ts'o SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); 28955a916be1STheodore Ts'o 28965a916be1STheodore Ts'o for (m = ext4_mount_opts; m->token != Opt_err; m++) { 28975a916be1STheodore Ts'o int want_set = m->flags & MOPT_SET; 28985a916be1STheodore Ts'o if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || 2899ba2e524dSLukas Czerner m->flags & MOPT_SKIP) 29005a916be1STheodore Ts'o continue; 290168afa7e0STyson Nottingham if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) 29025a916be1STheodore Ts'o continue; /* skip if same as the default */ 29035a916be1STheodore Ts'o if ((want_set && 29045a916be1STheodore Ts'o (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || 29055a916be1STheodore Ts'o (!want_set && (sbi->s_mount_opt & m->mount_opt))) 29065a916be1STheodore Ts'o continue; /* select Opt_noFoo vs Opt_Foo */ 29075a916be1STheodore Ts'o SEQ_OPTS_PRINT("%s", token2str(m->token)); 29085a916be1STheodore Ts'o } 29095a916be1STheodore Ts'o 291008cefc7aSEric W. Biederman if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || 29115a916be1STheodore Ts'o le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) 291208cefc7aSEric W. Biederman SEQ_OPTS_PRINT("resuid=%u", 291308cefc7aSEric W. Biederman from_kuid_munged(&init_user_ns, sbi->s_resuid)); 291408cefc7aSEric W. Biederman if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || 29155a916be1STheodore Ts'o le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) 291608cefc7aSEric W. Biederman SEQ_OPTS_PRINT("resgid=%u", 291708cefc7aSEric W. Biederman from_kgid_munged(&init_user_ns, sbi->s_resgid)); 291866acdcf4STheodore Ts'o def_errors = nodefs ? 
-1 : le16_to_cpu(es->s_errors); 29195a916be1STheodore Ts'o if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) 29205a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=remount-ro"); 29212adf6da8STheodore Ts'o if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) 29225a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=continue"); 29232adf6da8STheodore Ts'o if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) 29245a916be1STheodore Ts'o SEQ_OPTS_PUTS("errors=panic"); 292566acdcf4STheodore Ts'o if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) 29265a916be1STheodore Ts'o SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); 292766acdcf4STheodore Ts'o if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) 29285a916be1STheodore Ts'o SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 292966acdcf4STheodore Ts'o if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 29305a916be1STheodore Ts'o SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 293166acdcf4STheodore Ts'o if (nodefs || sbi->s_stripe) 29325a916be1STheodore Ts'o SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 293368afa7e0STyson Nottingham if (nodefs || EXT4_MOUNT_DATA_FLAGS & 293468afa7e0STyson Nottingham (sbi->s_mount_opt ^ def_mount_opt)) { 29352adf6da8STheodore Ts'o if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 29365a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=journal"); 29372adf6da8STheodore Ts'o else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 29385a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=ordered"); 29392adf6da8STheodore Ts'o else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 29405a916be1STheodore Ts'o SEQ_OPTS_PUTS("data=writeback"); 29415a916be1STheodore Ts'o } 294266acdcf4STheodore Ts'o if (nodefs || 294366acdcf4STheodore Ts'o sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) 29445a916be1STheodore Ts'o SEQ_OPTS_PRINT("inode_readahead_blks=%u", 29452adf6da8STheodore Ts'o 
sbi->s_inode_readahead_blks); 29462adf6da8STheodore Ts'o 2947ceec0376STyson Nottingham if (test_opt(sb, INIT_INODE_TABLE) && (nodefs || 294866acdcf4STheodore Ts'o (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) 29495a916be1STheodore Ts'o SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); 2950df981d03STheodore Ts'o if (nodefs || sbi->s_max_dir_size_kb) 2951df981d03STheodore Ts'o SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); 29527915a861SAles Novak if (test_opt(sb, DATA_ERR_ABORT)) 29537915a861SAles Novak SEQ_OPTS_PUTS("data_err=abort"); 2954ed318a6cSEric Biggers 2955ed318a6cSEric Biggers fscrypt_show_test_dummy_encryption(seq, sep, sb); 29562adf6da8STheodore Ts'o 29574f74d15fSEric Biggers if (sb->s_flags & SB_INLINECRYPT) 29584f74d15fSEric Biggers SEQ_OPTS_PUTS("inlinecrypt"); 29594f74d15fSEric Biggers 29609cb20f94SIra Weiny if (test_opt(sb, DAX_ALWAYS)) { 29619cb20f94SIra Weiny if (IS_EXT2_SB(sb)) 29629cb20f94SIra Weiny SEQ_OPTS_PUTS("dax"); 29639cb20f94SIra Weiny else 29649cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=always"); 29659cb20f94SIra Weiny } else if (test_opt2(sb, DAX_NEVER)) { 29669cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=never"); 29679cb20f94SIra Weiny } else if (test_opt2(sb, DAX_INODE)) { 29689cb20f94SIra Weiny SEQ_OPTS_PUTS("dax=inode"); 29699cb20f94SIra Weiny } 29703fa5d23eSOjaswin Mujoo 29713fa5d23eSOjaswin Mujoo if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 29723fa5d23eSOjaswin Mujoo !test_opt2(sb, MB_OPTIMIZE_SCAN)) { 29733fa5d23eSOjaswin Mujoo SEQ_OPTS_PUTS("mb_optimize_scan=0"); 29743fa5d23eSOjaswin Mujoo } else if (sbi->s_groups_count < MB_DEFAULT_LINEAR_SCAN_THRESHOLD && 29753fa5d23eSOjaswin Mujoo test_opt2(sb, MB_OPTIMIZE_SCAN)) { 29763fa5d23eSOjaswin Mujoo SEQ_OPTS_PUTS("mb_optimize_scan=1"); 29773fa5d23eSOjaswin Mujoo } 29783fa5d23eSOjaswin Mujoo 29792adf6da8STheodore Ts'o ext4_show_quota_options(seq, sb); 29802adf6da8STheodore Ts'o return 0; 29812adf6da8STheodore Ts'o } 29822adf6da8STheodore Ts'o 
/*
 * seq_file show callback: print mount options for this superblock.
 * The final argument of _ext4_show_options selects verbosity — 0 here,
 * 1 in ext4_seq_options_show below.
 */
static int ext4_show_options(struct seq_file *seq, struct dentry *root)
{
	return _ext4_show_options(seq, root->d_sb, 0);
}

/*
 * seq_file show callback (procfs "options" file): prints "ro"/"rw"
 * followed by the mount options and a trailing newline.
 */
int ext4_seq_options_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	int rc;

	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
	rc = _ext4_show_options(seq, sb, 1);
	seq_puts(seq, "\n");
	return rc;
}

/*
 * Finish superblock setup at mount time: refuse unsupported revisions,
 * warn when the fs is unchecked / has errors / exceeded its mount count
 * or check interval, bump the mount count and mtime, flag the journal as
 * needing recovery, and write the superblock back.
 *
 * Returns 0 on success or -EROFS when the revision level is too high
 * (caller is expected to force a read-only mount). For a read-only
 * mount nothing is modified on disk.
 */
static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
			    int read_only)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		ext4_msg(sb, KERN_ERR, "revision level too high, "
			 "forcing read-only mode");
		err = -EROFS;
		goto done;
	}
	if (read_only)
		goto done;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
			 "running e2fsck is recommended");
	else if (sbi->s_mount_state & EXT4_ERROR_FS)
		ext4_msg(sb, KERN_WARNING,
			 "warning: mounting fs with errors, "
			 "running e2fsck is recommended");
	/* s_max_mnt_count <= 0 means the mount-count check is disabled */
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		ext4_msg(sb, KERN_WARNING,
			 "warning: maximal mount count reached, "
			 "running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (ext4_get_tstamp(es, s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
		ext4_msg(sb, KERN_WARNING,
			 "warning: checktime reached, "
			 "running e2fsck is recommended");
	/* Without a journal, mark the fs as mounted/dirty in the superblock */
	if (!sbi->s_journal)
		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	ext4_update_tstamp(es, s_mtime);
	if (sbi->s_journal) {
		ext4_set_feature_journal_needs_recovery(sb);
		if (ext4_has_feature_orphan_file(sb))
			ext4_set_feature_orphan_present(sb);
	}

	err = ext4_commit_super(sb);
done:
	if (test_opt(sb, DEBUG))
		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT4_BLOCKS_PER_GROUP(sb),
			EXT4_INODES_PER_GROUP(sb),
			sbi->s_mount_opt, sbi->s_mount_opt2);
	return err;
}

/*
 * Grow the RCU-protected sbi->s_flex_groups pointer array so it can
 * describe at least 'ngroup' block groups. Existing pointers are copied
 * into the new array, the new array is RCU-published, and the old one is
 * freed after a grace period. Returns 0 on success or -ENOMEM.
 */
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct flex_groups **old_groups, **new_groups;
	int size, i, j;

	if (!sbi->s_log_groups_per_flex)
		return 0;

	size = ext4_flex_group(sbi, ngroup - 1) + 1;
	if (size <= sbi->s_flex_groups_allocated)
		return 0;

	new_groups = kvzalloc(roundup_pow_of_two(size *
			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
	if (!new_groups) {
		ext4_msg(sb, KERN_ERR,
			 "not enough memory for %d flex group pointers", size);
		return -ENOMEM;
	}
	/* Allocate per-flex-group counters only for the newly covered slots */
	for (i =
sbi->s_flex_groups_allocated; i < size; i++) {
		new_groups[i] = kvzalloc(roundup_pow_of_two(
					 sizeof(struct flex_groups)),
					 GFP_KERNEL);
		if (!new_groups[i]) {
			/* Unwind only the entries allocated in this call */
			for (j = sbi->s_flex_groups_allocated; j < i; j++)
				kvfree(new_groups[j]);
			kvfree(new_groups);
			ext4_msg(sb, KERN_ERR,
				 "not enough memory for %d flex groups", size);
			return -ENOMEM;
		}
	}
	rcu_read_lock();
	old_groups = rcu_dereference(sbi->s_flex_groups);
	if (old_groups)
		memcpy(new_groups, old_groups,
		       (sbi->s_flex_groups_allocated *
			sizeof(struct flex_groups *)));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
	sbi->s_flex_groups_allocated = size;
	if (old_groups)
		ext4_kvfree_array_rcu(old_groups);
	return 0;
}

/*
 * Populate the per-flex-group free inode/cluster/dir counters from the
 * on-disk group descriptors at mount time.
 *
 * NOTE: unlike most ext4 helpers this returns 1 on success and 0 on
 * failure (it also returns 1 after disabling flex_bg when
 * s_log_groups_per_flex is out of the valid 1..31 range).
 */
static int ext4_fill_flex_info(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct flex_groups *fg;
	ext4_group_t flex_group;
	int i, err;

	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
		sbi->s_log_groups_per_flex = 0;
		return 1;
	}

	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
	if (err)
		goto failed;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);

		flex_group = ext4_flex_group(sbi, i);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
		atomic64_add(ext4_free_group_clusters(sb, gdp),
			     &fg->free_clusters);
		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
	}

	return 1;
failed:
	return 0;
}

/*
 * Compute the checksum of a block group descriptor. Two schemes exist:
 * the metadata_csum crc32c-based scheme (truncated to 16 bits) and the
 * legacy gdt_csum crc16 scheme; with neither feature enabled, 0.
 */
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
				   struct ext4_group_desc *gdp)
{
	int offset = offsetof(struct ext4_group_desc, bg_checksum);
	__u16 crc = 0;
	__le32 le_group = cpu_to_le32(block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sbi->s_sb)) {
		/* Use new metadata_csum algorithm */
		__u32 csum32;
		/* Checksum field itself is folded in as zero */
		__u16 dummy_csum = 0;

		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
				     sizeof(le_group));
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
				     sizeof(dummy_csum));
		offset += sizeof(dummy_csum);
		if (offset < sbi->s_desc_size)
			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
					     sbi->s_desc_size - offset);

		crc = csum32 & 0xFFFF;
		goto out;
	}

	/* old crc16 code */
	if (!ext4_has_feature_gdt_csum(sb))
		return 0;

	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
	crc = crc16(crc, (__u8 *)gdp, offset);
	offset += sizeof(gdp->bg_checksum); /* skip checksum */
	/* for checksum of struct ext4_group_desc do the rest...*/
	if (ext4_has_feature_64bit(sb) &&
	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
		crc = crc16(crc, (__u8 *)gdp + offset,
			    le16_to_cpu(sbi->s_es->s_desc_size) -
			    offset);

out:
	return cpu_to_le16(crc);
}

/* Returns 1 when the descriptor checksum matches (or checksums are off). */
int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
				struct ext4_group_desc *gdp)
{
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
		return 0;

	return 1;
}

/* Recompute and store the descriptor checksum (no-op if feature is off). */
void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
			      struct ext4_group_desc *gdp)
{
	if (!ext4_has_group_desc_csum(sb))
		return;
	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
}

/* Called at mount-time, super-block is locked */
static int ext4_check_descriptors(struct super_block *sb,
				  ext4_fsblk_t sb_block,
				  ext4_group_t *first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
	ext4_fsblk_t last_block;
	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
	ext4_fsblk_t block_bitmap;
	ext4_fsblk_t inode_bitmap;
	ext4_fsblk_t inode_table;
	int flexbg_flag = 0;
	ext4_group_t i, grp = sbi->s_groups_count;

	if (ext4_has_feature_flex_bg(sb))
		flexbg_flag = 1;

	ext4_debug("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);

		/* With flex_bg, metadata may live outside its own group */
		if (i == sbi->s_groups_count - 1 || flexbg_flag)
			last_block = ext4_blocks_count(sbi->s_es) - 1;
		else
			last_block = first_block +
				(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		/* Remember the first group whose inode table isn't zeroed */
		if ((grp == sbi->s_groups_count) &&
		    !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			grp = i;

		/*
		 * Overlap with the superblock or the GDT is fatal only for
		 * read-write mounts; read-only mounts merely log the error.
		 */
		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap >= sb_block + 1 &&
		    block_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (block_bitmap < first_block || block_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Block bitmap for group %u not in group "
				 "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap >= sb_block + 1 &&
		    inode_bitmap <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode bitmap for group %u not in group "
				 "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table == sb_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "superblock", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table >= sb_block + 1 &&
		    inode_table <= last_bg_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u overlaps "
				 "block group descriptors", i);
			if (!sb_rdonly(sb))
				return 0;
		}
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Inode table for group %u not in group "
				 "(block %llu)!", i, inode_table);
			return 0;
		}
		ext4_lock_group(sb, i);
		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
				 "Checksum for group %u failed (%u!=%u)",
				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
				     gdp)), le16_to_cpu(gdp->bg_checksum));
			if (!sb_rdonly(sb)) {
				ext4_unlock_group(sb, i);
				return 0;
			}
		}
		ext4_unlock_group(sb, i);
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}
	if (NULL != first_not_zeroed)
		*first_not_zeroed = grp;
	return 1;
}

/*
 * Maximal extent format file size.
 * Resulting logical blkno at s_maxbytes must fit in our on-disk
 * extent format containers, within a sector_t, and within i_blocks
 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
 * so that won't be a limiting factor.
 *
 * However there is other limiting factor. We do store extents in the form
 * of starting block and length, hence the resulting length of the extent
 * covering maximum file size must fit into on-disk format containers as
 * well. Given that length is always by 1 unit bigger than max unit (because
 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
 *
 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
 */
static loff_t ext4_max_size(int blkbits, int has_huge_files)
{
	loff_t res;
	loff_t upper_limit = MAX_LFS_FILESIZE;

	BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));

	if (!has_huge_files) {
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (blkbits - 9);
		upper_limit <<= blkbits;
	}

	/*
	 * 32-bit extent-start container, ee_block. We lower the maxbytes
	 * by one fs block, so ee_len can cover the extent of maximum file
	 * size
	 */
	res = (1LL << 32) - 1;
	res <<= blkbits;

	/* Sanity check against vm- & vfs- imposed limits */
	if (res > upper_limit)
		res = upper_limit;

	return res;
}

/*
 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^48 sector limit.
 */
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
	loff_t upper_limit, res = EXT4_NDIR_BLOCKS;
	int meta_blocks;
	/* pointers (block numbers) that fit in one fs block */
	unsigned int ppb = 1 << (bits - 2);

	/*
	 * This is calculated to be the largest file size for a dense, block
	 * mapped file such that the file's total number of 512-byte sectors,
	 * including data and all indirect blocks, does not exceed (2^48 - 1).
	 *
	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
	 * number of 512-byte sectors of the file.
	 */
	if (!has_huge_files) {
		/*
		 * !has_huge_files or implies that the inode i_block field
		 * represents total file blocks in 2^32 512-byte sectors ==
		 * size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* Compute how many blocks we can address by block tree */
	res += ppb;
	res += ppb * ppb;
	res += ((loff_t)ppb) * ppb * ppb;
	/* Compute how many metadata blocks are needed */
	meta_blocks = 1;
	meta_blocks += 1 + ppb;
	meta_blocks += 1 + ppb + ppb * ppb;
	/* Does block tree limit file size? */
	if (res + meta_blocks <= upper_limit)
		goto check_lfs;

	res = upper_limit;
	/* How many metadata blocks are needed for addressing upper_limit? */
	upper_limit -= EXT4_NDIR_BLOCKS;
	/* indirect blocks */
	meta_blocks = 1;
	upper_limit -= ppb;
	/* double indirect blocks */
	if (upper_limit < ppb * ppb) {
		meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb);
		res -= meta_blocks;
		goto check_lfs;
	}
	meta_blocks += 1 + ppb;
	upper_limit -= ppb * ppb;
	/* tripple indirect blocks for the rest */
	meta_blocks += 1 + DIV_ROUND_UP_ULL(upper_limit, ppb) +
		DIV_ROUND_UP_ULL(upper_limit, ppb*ppb);
	res -= meta_blocks;
check_lfs:
	res <<= bits;
	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

/*
 * Return the block number holding copy 'nr' of the group descriptors for
 * the superblock located at logical_sb_block; meta_bg layouts place later
 * GDT blocks inside their own block groups.
 */
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(sbi->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value. If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
		le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	/* Preference order: mount option, then sb stripe width, then stride */
	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride && stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Couldn't mount because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			  ~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

#if !IS_ENABLED(CONFIG_UNICODE)
	if (ext4_has_feature_casefold(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with casefold feature cannot be "
			 "mounted without CONFIG_UNICODE");
		return 0;
	}
#endif

	if (readonly)
		return 1;

	/* The readonly feature forces the mount read-only but still succeeds */
	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= SB_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
			  ~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature\n");
		return 0;
	}

#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
	if (!readonly && (ext4_has_feature_quota(sb) ||
			  ext4_has_feature_project(sb))) {
		ext4_msg(sb, KERN_ERR,
			 "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(struct timer_list *t)
{
	struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
	struct super_block *sb = sbi->s_sb;
	struct ext4_super_block *es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition.
 */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk(KERN_CONT "\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
		       sb->s_id,
		       ext4_get_tstamp(es, s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(KERN_CONT ": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(KERN_CONT ": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk(KERN_CONT "\n");
	}
	/* Re-arm ourselves so the report repeats while errors persist */
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	struct super_block *sb = elr->lr_super;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	ext4_group_t group = elr->lr_next_group;
	unsigned int prefetch_ios = 0;
	int ret = 0;
	u64 start_time;

	/*
	 * Prefetch mode: read ahead block bitmaps; once the whole device
	 * has been walked, fall through to inode-table zeroing mode if any
	 * group still needs it (and the fs is writable with lazy init on).
	 */
	if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
		elr->lr_next_group = ext4_mb_prefetch(sb, group,
				EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
		if (prefetch_ios)
			ext4_mb_prefetch_fini(sb, elr->lr_next_group,
					      prefetch_ios);
		trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
					    prefetch_ios);
		if (group >= elr->lr_next_group) {
			ret = 1;
			if (elr->lr_first_not_zeroed != ngroups &&
			    !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) {
				elr->lr_next_group = elr->lr_first_not_zeroed;
				elr->lr_mode = EXT4_LI_MODE_ITABLE;
				ret = 0;
			}
		}
		return ret;
	}

	/* Itable mode: find the next group whose inode table isn't zeroed */
	for (; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
3653bfff6873SLukas Czerner ret = 1; 3654bfff6873SLukas Czerner break; 3655bfff6873SLukas Czerner } 3656bfff6873SLukas Czerner 3657bfff6873SLukas Czerner if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) 3658bfff6873SLukas Czerner break; 3659bfff6873SLukas Czerner } 3660bfff6873SLukas Czerner 36617f511862STheodore Ts'o if (group >= ngroups) 3662bfff6873SLukas Czerner ret = 1; 3663bfff6873SLukas Czerner 3664bfff6873SLukas Czerner if (!ret) { 366539fec688SShaoying Xu start_time = ktime_get_real_ns(); 3666bfff6873SLukas Czerner ret = ext4_init_inode_table(sb, group, 3667bfff6873SLukas Czerner elr->lr_timeout ? 0 : 1); 36683d392b26STheodore Ts'o trace_ext4_lazy_itable_init(sb, group); 3669bfff6873SLukas Czerner if (elr->lr_timeout == 0) { 367039fec688SShaoying Xu elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) * 367139fec688SShaoying Xu EXT4_SB(elr->lr_super)->s_li_wait_mult); 3672bfff6873SLukas Czerner } 3673bfff6873SLukas Czerner elr->lr_next_sched = jiffies + elr->lr_timeout; 3674bfff6873SLukas Czerner elr->lr_next_group = group + 1; 3675bfff6873SLukas Czerner } 3676bfff6873SLukas Czerner return ret; 3677bfff6873SLukas Czerner } 3678bfff6873SLukas Czerner 3679bfff6873SLukas Czerner /* 3680bfff6873SLukas Czerner * Remove lr_request from the list_request and free the 36814ed5c033SLukas Czerner * request structure. 
Should be called with li_list_mtx held 3682bfff6873SLukas Czerner */ 3683bfff6873SLukas Czerner static void ext4_remove_li_request(struct ext4_li_request *elr) 3684bfff6873SLukas Czerner { 3685bfff6873SLukas Czerner if (!elr) 3686bfff6873SLukas Czerner return; 3687bfff6873SLukas Czerner 3688bfff6873SLukas Czerner list_del(&elr->lr_request); 36893d392b26STheodore Ts'o EXT4_SB(elr->lr_super)->s_li_request = NULL; 3690bfff6873SLukas Czerner kfree(elr); 3691bfff6873SLukas Czerner } 3692bfff6873SLukas Czerner 3693bfff6873SLukas Czerner static void ext4_unregister_li_request(struct super_block *sb) 3694bfff6873SLukas Czerner { 36951bb933fbSLukas Czerner mutex_lock(&ext4_li_mtx); 36961bb933fbSLukas Czerner if (!ext4_li_info) { 36971bb933fbSLukas Czerner mutex_unlock(&ext4_li_mtx); 3698bfff6873SLukas Czerner return; 36991bb933fbSLukas Czerner } 3700bfff6873SLukas Czerner 3701bfff6873SLukas Czerner mutex_lock(&ext4_li_info->li_list_mtx); 37021bb933fbSLukas Czerner ext4_remove_li_request(EXT4_SB(sb)->s_li_request); 3703bfff6873SLukas Czerner mutex_unlock(&ext4_li_info->li_list_mtx); 37041bb933fbSLukas Czerner mutex_unlock(&ext4_li_mtx); 3705bfff6873SLukas Czerner } 3706bfff6873SLukas Czerner 37078f1f7453SEric Sandeen static struct task_struct *ext4_lazyinit_task; 37088f1f7453SEric Sandeen 3709bfff6873SLukas Czerner /* 3710bfff6873SLukas Czerner * This is the function where ext4lazyinit thread lives. It walks 3711bfff6873SLukas Czerner * through the request list searching for next scheduled filesystem. 3712bfff6873SLukas Czerner * When such a fs is found, run the lazy initialization request 3713bfff6873SLukas Czerner * (ext4_rn_li_request) and keep track of the time spend in this 3714bfff6873SLukas Czerner * function. Based on that time we compute next schedule time of 3715bfff6873SLukas Czerner * the request. When walking through the list is complete, compute 3716bfff6873SLukas Czerner * next waking time and put itself into sleep. 
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);
	set_freezable();

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}
		list_for_each_safe(pos, n, &eli->li_request_list) {
			int err = 0;
			int progress = 0;
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			/* not due yet: just fold into the next wakeup time */
			if (time_before(jiffies, elr->lr_next_sched)) {
				if (time_before(elr->lr_next_sched, next_wakeup))
					next_wakeup = elr->lr_next_sched;
				continue;
			}
			if (down_read_trylock(&elr->lr_super->s_umount)) {
				if (sb_start_write_trylock(elr->lr_super)) {
					progress = 1;
					/*
					 * We hold sb->s_umount, sb can not
					 * be removed from the list, it is
					 * now safe to drop li_list_mtx
					 */
					mutex_unlock(&eli->li_list_mtx);
					err = ext4_run_li_request(elr);
					sb_end_write(elr->lr_super);
					mutex_lock(&eli->li_list_mtx);
					/*
					 * list may have changed while the
					 * mutex was dropped; refresh the
					 * safe-iteration cursor.
					 */
					n = pos->next;
				}
				up_read((&elr->lr_super->s_umount));
			}
			/* error, remove the lazy_init job */
			if (err) {
				ext4_remove_li_request(elr);
				continue;
			}
			if (!progress) {
				/* couldn't take the locks: retry later at a
				 * randomized time to avoid lock-step retries */
				elr->lr_next_sched = jiffies +
					get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
			}
			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

/* Drop and free every pending lazy-init request (under li_list_mtx). */
static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

/*
 * Spawn the ext4lazyinit kernel thread; on failure tear down the
 * request list and the global ext4_li_info state.
 */
static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	/* without group descriptor checksums there is nothing to zero lazily */
	if (!ext4_has_group_desc_csum(sb))
		return ngroups;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

/*
 * Allocate and initialize the global ext4_li_info structure.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

/*
 * Allocate a new lazy-init request for @sb, starting at group @start.
 * Returns NULL on allocation failure.
 */
static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_li_request *elr;
	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_first_not_zeroed = start;
	if (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS)) {
		elr->lr_mode = EXT4_LI_MODE_ITABLE;
		elr->lr_next_group = start;
	} else {
		/* default: prefetch block bitmaps first, itable init later */
		elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP;
	}

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
	return elr;
}

/*
 * Register a lazy-init request for @sb (first uninitialized group
 * @first_not_zeroed) and start the ext4lazyinit thread if it is not
 * already running.  Serialized by ext4_li_mtx.  Returns 0 or -errno.
 */
int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = sbi->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	/* nothing to do: read-only fs, or no work and prefetch disabled */
	if (sb_rdonly(sb) ||
	    (test_opt(sb, NO_PREFETCH_BLOCK_BITMAPS) &&
	     (first_not_zeroed == ngroups || !test_opt(sb, INIT_INODE_TABLE))))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted to
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If thread exited earlier
	 * there's nothing to be done.
	 */
	if (!ext4_li_info || !ext4_lazyinit_task)
		return;

	kthread_stop(ext4_lazyinit_task);
}

/*
 * Select and apply the journal checksum feature set matching the
 * filesystem's metadata_csum state and the journal mount options.
 * Returns the (nonzero on success) result of jbd2_journal_set_features,
 * or 1 when no feature needed to be set.
 */
static int set_journal_csum_feature_set(struct super_block *sb)
{
	int ret = 1;
	int compat, incompat;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (ext4_has_metadata_csum(sb)) {
		/* journal checksum v3 */
		compat = 0;
		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
	} else {
		/* journal checksum v1 */
		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
		incompat = 0;
	}

	/* start from a clean slate before applying the chosen feature set */
	jbd2_journal_clear_features(sbi->s_journal,
			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
			JBD2_FEATURE_INCOMPAT_CSUM_V2);
	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
				incompat);
	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
		ret = jbd2_journal_set_features(sbi->s_journal,
				compat, 0,
				incompat);
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	} else {
		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
	}

	return ret;
}

/*
 * Note: calculating the overhead so we can be compatible with
 * historical BSD practice is quite difficult in the face of
 * clusters/bigalloc.  This is because multiple metadata blocks from
 * different block group can end up in the same allocation cluster.
 * Calculating the exact overhead in the face of clustered allocation
 * requires either O(all block bitmaps) in memory or O(number of block
 * groups**2) in time.  We will still calculate the superblock for
 * older file systems --- and if we come across with a bigalloc file
 * system with zero in s_overhead_clusters the estimate will be close to
 * correct especially for very large cluster sizes --- but for newer
 * file systems, it's better to calculate this figure once at mkfs
 * time, and store it in the superblock.  If the superblock value is
 * present (even for non-bigalloc file systems), we will use it.
 */
static int count_overhead(struct super_block *sb, ext4_group_t grp,
			  char *buf)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp;
	ext4_fsblk_t first_block, last_block, b;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int s, j, count = 0;
	int has_super = ext4_bg_has_super(sb, grp);

	/* non-bigalloc: overhead per group is a simple closed formula */
	if (!ext4_has_feature_bigalloc(sb))
		return (has_super + ext4_bg_num_gdb(sb, grp) +
			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
			sbi->s_itb_per_group + 2);

	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
		(grp * EXT4_BLOCKS_PER_GROUP(sb));
	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
	/*
	 * Mark in @buf every cluster of this group occupied by any
	 * group's bitmaps or inode table (bigalloc lets metadata of
	 * other groups land inside this group's block range).
	 */
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		b = ext4_block_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_bitmap(sb, gdp);
		if (b >= first_block && b <= last_block) {
			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
			count++;
		}
		b = ext4_inode_table(sb, gdp);
		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
				int c = EXT4_B2C(sbi, b - first_block);
				ext4_set_bit(c, buf);
				count++;
			}
		if (i != grp)
			continue;
		/* this group itself: superblock copy + descriptor blocks */
		s = 0;
		if (ext4_bg_has_super(sb, grp)) {
			ext4_set_bit(s++, buf);
			count++;
		}
		j = ext4_bg_num_gdb(sb, grp);
		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
			ext4_error(sb, "Invalid number of block group "
				   "descriptor blocks: %d", j);
			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
		count += j;
		for (; j > 0; j--)
			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
	/* overhead = clusters per group minus those still free in the map */
	return EXT4_CLUSTERS_PER_GROUP(sb) -
		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
}

/*
 * Compute the overhead and stash it in sbi->s_overhead
 */
int ext4_calculate_overhead(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct inode *j_inode;
	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	ext4_fsblk_t overhead = 0;
	char *buf = (char *) get_zeroed_page(GFP_NOFS);

	if (!buf)
		return -ENOMEM;

	/*
	 * Compute the overhead (FS structures).  This is constant
	 * for a given filesystem unless the number of block groups
	 * changes so we cache the previous value until it does.
	 */

	/*
	 * All of the blocks before first_data_block are overhead
	 */
	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));

	/*
	 * Add the overhead found in each block group
	 */
	for (i = 0; i < ngroups; i++) {
		int blks;

		blks = count_overhead(sb, i, buf);
		overhead += blks;
		/* scratch page must be clean for the next group's bitmap */
		if (blks)
			memset(buf, 0, PAGE_SIZE);
		cond_resched();
	}

	/*
	 * Add the internal journal blocks whether the journal has been
	 * loaded or not
	 */
	if (sbi->s_journal && !sbi->s_journal_bdev)
		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
		/* j_inum for internal journal is non-zero */
		j_inode = ext4_get_journal_inode(sb, j_inum);
		if (j_inode) {
			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
			overhead += EXT4_NUM_B2C(sbi, j_blocks);
			iput(j_inode);
		} else {
			ext4_msg(sb, KERN_ERR, "can't get journal size");
		}
	}
	sbi->s_overhead = overhead;
	smp_wmb();
	free_page((unsigned long) buf);
	return 0;
}

/*
 * Reserve a pool of clusters for operations (punch hole, unwritten
 * extent conversion) that may need a few metadata blocks at a point
 * where failing with ENOSPC would be unacceptable.
 */
static void ext4_set_resv_clusters(struct super_block *sb)
{
	ext4_fsblk_t resv_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * There's no need to reserve anything when we aren't using extents.
	 * The space estimates are exact, there are no unwritten extents,
	 * hole punching doesn't need new metadata... This is needed especially
	 * to keep ext2/3 backward compatibility.
	 */
	if (!ext4_has_feature_extents(sb))
		return;
	/*
	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
	 * This should cover the situations where we can not afford to run
	 * out of space like for example punch hole, or converting
	 * unwritten extents in delalloc path. In most cases such
	 * allocation would require 1, or 2 blocks, higher numbers are
	 * very rare.
	 */
	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
			 sbi->s_cluster_bits);

	do_div(resv_clusters, 50);
	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);

	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
}

/* Human-readable quota mode for mount-time logging. */
static const char *ext4_quota_mode(struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	if (!ext4_quota_capable(sb))
		return "none";

	if (EXT4_SB(sb)->s_journal && ext4_is_quota_journalled(sb))
		return "journalled";
	else
		return "writeback";
#else
	return "disabled";
#endif
}

/*
 * Install @trigger as the jbd2 frozen-data trigger for checksum type
 * @type on this superblock.
 */
static void ext4_setup_csum_trigger(struct super_block *sb,
				    enum ext4_journal_trigger_type type,
				    void (*trigger)(
					struct jbd2_buffer_trigger_type *type,
					struct buffer_head *bh,
					void *mapped_data,
					size_t size))
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	sbi->s_journal_triggers[type].sb = sb;
	sbi->s_journal_triggers[type].tr_triggers.t_frozen = trigger;
}

/* Free an ext4_sb_info allocated by ext4_alloc_sbi() (NULL is a no-op). */
static void ext4_free_sbi(struct ext4_sb_info *sbi)
{
	if (!sbi)
		return;

	kfree(sbi->s_blockgroup_lock);
	fs_put_dax(sbi->s_daxdev, NULL);
	kfree(sbi);
}

/*
 * Allocate and minimally initialize the in-core superblock info:
 * DAX device lookup and the blockgroup lock.  Links it into
 * sb->s_fs_info.  Returns NULL on allocation failure.
 */
static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
{
	struct ext4_sb_info *sbi;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return NULL;

	sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off,
					   NULL, NULL);

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);

	if (!sbi->s_blockgroup_lock)
		goto err_out;

	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	return sbi;
err_out:
	fs_put_dax(sbi->s_daxdev, NULL);
	kfree(sbi);
	return NULL;
}

/*
 * Apply the default mount options, combining built-in defaults with
 * the defaults recorded in the on-disk superblock (@es).  Runs before
 * user-supplied mount options are parsed.
 */
static void ext4_set_def_opts(struct super_block *sb,
			      struct ext4_super_block *es)
{
	unsigned long def_mount_opts;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	set_opt(sb, INIT_INODE_TABLE);
	if (def_mount_opts & EXT4_DEFM_DEBUG)
		set_opt(sb, DEBUG);
	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
		set_opt(sb, GRPID);
	if (def_mount_opts & EXT4_DEFM_UID16)
		set_opt(sb, NO_UID32);
	/* xattr user namespace & acls are now defaulted on */
	set_opt(sb, XATTR_USER);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	set_opt(sb, POSIX_ACL);
#endif
	if (ext4_has_feature_fast_commit(sb))
		set_opt2(sb, JOURNAL_FAST_COMMIT);
	/* don't forget to enable journal_csum when metadata_csum is enabled. */
	if (ext4_has_metadata_csum(sb))
		set_opt(sb, JOURNAL_CHECKSUM);

	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
		set_opt(sb, JOURNAL_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
		set_opt(sb, ORDERED_DATA);
	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
		set_opt(sb, WRITEBACK_DATA);

	if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_PANIC)
		set_opt(sb, ERRORS_PANIC);
	else if (le16_to_cpu(es->s_errors) == EXT4_ERRORS_CONTINUE)
		set_opt(sb, ERRORS_CONT);
	else
		set_opt(sb, ERRORS_RO);
	/* block_validity enabled by default; disable with noblock_validity */
	set_opt(sb, BLOCK_VALIDITY);
	if (def_mount_opts & EXT4_DEFM_DISCARD)
		set_opt(sb, DISCARD);

	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
		set_opt(sb, BARRIER);

	/*
	 * enable delayed allocation by default
	 * Use -o nodelalloc to turn it off
	 */
	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
		set_opt(sb, DELALLOC);

	if (sb->s_blocksize == PAGE_SIZE)
		set_opt(sb, DIOREAD_NOLOCK);
}

/*
 * Validate and record cluster-size (bigalloc) geometry from the
 * on-disk superblock.  (Definition continues past this chunk.)
 */
static int ext4_handle_clustersize(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int clustersize;

	/* Handle clustersize */
	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
	if (ext4_has_feature_bigalloc(sb)) {
		if (clustersize < sb->s_blocksize) {
			ext4_msg(sb, KERN_ERR,
				 "cluster size (%d) smaller than "
				 "block size (%lu)", clustersize, sb->s_blocksize);
			return -EINVAL;
		}
		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
			le32_to_cpu(es->s_log_block_size);
		sbi->s_clusters_per_group =
			le32_to_cpu(es->s_clusters_per_group);
		if (sbi->s_clusters_per_group > sb->s_blocksize * 8) {
			ext4_msg(sb, KERN_ERR,
				 "#clusters per group too big: %lu",
				 sbi->s_clusters_per_group);
			return -EINVAL;
		}
		if (sbi->s_blocks_per_group !=
		    (sbi->s_clusters_per_group * (clustersize
/ sb->s_blocksize))) { 43544a8557b0SJason Yan ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " 43554a8557b0SJason Yan "clusters per group (%lu) inconsistent", 43564a8557b0SJason Yan sbi->s_blocks_per_group, 43574a8557b0SJason Yan sbi->s_clusters_per_group); 43584a8557b0SJason Yan return -EINVAL; 43594a8557b0SJason Yan } 43604a8557b0SJason Yan } else { 4361c8267c51SJason Yan if (clustersize != sb->s_blocksize) { 43624a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 43634a8557b0SJason Yan "fragment/cluster size (%d) != " 4364c8267c51SJason Yan "block size (%lu)", clustersize, sb->s_blocksize); 43654a8557b0SJason Yan return -EINVAL; 43664a8557b0SJason Yan } 4367c8267c51SJason Yan if (sbi->s_blocks_per_group > sb->s_blocksize * 8) { 43684a8557b0SJason Yan ext4_msg(sb, KERN_ERR, 43694a8557b0SJason Yan "#blocks per group too big: %lu", 43704a8557b0SJason Yan sbi->s_blocks_per_group); 43714a8557b0SJason Yan return -EINVAL; 43724a8557b0SJason Yan } 43734a8557b0SJason Yan sbi->s_clusters_per_group = sbi->s_blocks_per_group; 43744a8557b0SJason Yan sbi->s_cluster_bits = 0; 43754a8557b0SJason Yan } 4376c8267c51SJason Yan sbi->s_cluster_ratio = clustersize / sb->s_blocksize; 43774a8557b0SJason Yan 43784a8557b0SJason Yan /* Do we have standard group size of clustersize * 8 blocks ? 
*/ 43794a8557b0SJason Yan if (sbi->s_blocks_per_group == clustersize << 3) 43804a8557b0SJason Yan set_opt2(sb, STD_GROUP_SIZE); 43814a8557b0SJason Yan 43824a8557b0SJason Yan return 0; 43834a8557b0SJason Yan } 43844a8557b0SJason Yan 4385f7314a67SJason Yan static void ext4_fast_commit_init(struct super_block *sb) 4386f7314a67SJason Yan { 4387f7314a67SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4388f7314a67SJason Yan 4389f7314a67SJason Yan /* Initialize fast commit stuff */ 4390f7314a67SJason Yan atomic_set(&sbi->s_fc_subtid, 0); 4391f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_MAIN]); 4392f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_q[FC_Q_STAGING]); 4393f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_MAIN]); 4394f7314a67SJason Yan INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]); 4395f7314a67SJason Yan sbi->s_fc_bytes = 0; 4396f7314a67SJason Yan ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE); 4397f7314a67SJason Yan sbi->s_fc_ineligible_tid = 0; 4398f7314a67SJason Yan spin_lock_init(&sbi->s_fc_lock); 4399f7314a67SJason Yan memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats)); 4400f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions = NULL; 4401f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_size = 0; 4402f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_used = 0; 4403f7314a67SJason Yan sbi->s_fc_replay_state.fc_regions_valid = 0; 4404f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes = NULL; 4405f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes_size = 0; 4406f7314a67SJason Yan sbi->s_fc_replay_state.fc_modified_inodes_used = 0; 4407f7314a67SJason Yan } 4408f7314a67SJason Yan 44090e495f7cSJason Yan static int ext4_inode_info_init(struct super_block *sb, 4410c8267c51SJason Yan struct ext4_super_block *es) 44110e495f7cSJason Yan { 44120e495f7cSJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 44130e495f7cSJason Yan 44140e495f7cSJason Yan if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { 44150e495f7cSJason 
Yan sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; 44160e495f7cSJason Yan sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; 44170e495f7cSJason Yan } else { 44180e495f7cSJason Yan sbi->s_inode_size = le16_to_cpu(es->s_inode_size); 44190e495f7cSJason Yan sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 44200e495f7cSJason Yan if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { 44210e495f7cSJason Yan ext4_msg(sb, KERN_ERR, "invalid first ino: %u", 44220e495f7cSJason Yan sbi->s_first_ino); 44230e495f7cSJason Yan return -EINVAL; 44240e495f7cSJason Yan } 44250e495f7cSJason Yan if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || 44260e495f7cSJason Yan (!is_power_of_2(sbi->s_inode_size)) || 4427c8267c51SJason Yan (sbi->s_inode_size > sb->s_blocksize)) { 44280e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 44290e495f7cSJason Yan "unsupported inode size: %d", 44300e495f7cSJason Yan sbi->s_inode_size); 4431c8267c51SJason Yan ext4_msg(sb, KERN_ERR, "blocksize: %lu", sb->s_blocksize); 44320e495f7cSJason Yan return -EINVAL; 44330e495f7cSJason Yan } 44340e495f7cSJason Yan /* 44350e495f7cSJason Yan * i_atime_extra is the last extra field available for 44360e495f7cSJason Yan * [acm]times in struct ext4_inode. Checking for that 44370e495f7cSJason Yan * field should suffice to ensure we have extra space 44380e495f7cSJason Yan * for all three. 
44390e495f7cSJason Yan */ 44400e495f7cSJason Yan if (sbi->s_inode_size >= offsetof(struct ext4_inode, i_atime_extra) + 44410e495f7cSJason Yan sizeof(((struct ext4_inode *)0)->i_atime_extra)) { 44420e495f7cSJason Yan sb->s_time_gran = 1; 44430e495f7cSJason Yan sb->s_time_max = EXT4_EXTRA_TIMESTAMP_MAX; 44440e495f7cSJason Yan } else { 44450e495f7cSJason Yan sb->s_time_gran = NSEC_PER_SEC; 44460e495f7cSJason Yan sb->s_time_max = EXT4_NON_EXTRA_TIMESTAMP_MAX; 44470e495f7cSJason Yan } 44480e495f7cSJason Yan sb->s_time_min = EXT4_TIMESTAMP_MIN; 44490e495f7cSJason Yan } 44500e495f7cSJason Yan 44510e495f7cSJason Yan if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 44520e495f7cSJason Yan sbi->s_want_extra_isize = sizeof(struct ext4_inode) - 44530e495f7cSJason Yan EXT4_GOOD_OLD_INODE_SIZE; 44540e495f7cSJason Yan if (ext4_has_feature_extra_isize(sb)) { 44550e495f7cSJason Yan unsigned v, max = (sbi->s_inode_size - 44560e495f7cSJason Yan EXT4_GOOD_OLD_INODE_SIZE); 44570e495f7cSJason Yan 44580e495f7cSJason Yan v = le16_to_cpu(es->s_want_extra_isize); 44590e495f7cSJason Yan if (v > max) { 44600e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 44610e495f7cSJason Yan "bad s_want_extra_isize: %d", v); 44620e495f7cSJason Yan return -EINVAL; 44630e495f7cSJason Yan } 44640e495f7cSJason Yan if (sbi->s_want_extra_isize < v) 44650e495f7cSJason Yan sbi->s_want_extra_isize = v; 44660e495f7cSJason Yan 44670e495f7cSJason Yan v = le16_to_cpu(es->s_min_extra_isize); 44680e495f7cSJason Yan if (v > max) { 44690e495f7cSJason Yan ext4_msg(sb, KERN_ERR, 44700e495f7cSJason Yan "bad s_min_extra_isize: %d", v); 44710e495f7cSJason Yan return -EINVAL; 44720e495f7cSJason Yan } 44730e495f7cSJason Yan if (sbi->s_want_extra_isize < v) 44740e495f7cSJason Yan sbi->s_want_extra_isize = v; 44750e495f7cSJason Yan } 44760e495f7cSJason Yan } 44770e495f7cSJason Yan 44780e495f7cSJason Yan return 0; 44790e495f7cSJason Yan } 44800e495f7cSJason Yan 448139c135b0SJason Yan #if IS_ENABLED(CONFIG_UNICODE) 448239c135b0SJason Yan 
static int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 448339c135b0SJason Yan { 448439c135b0SJason Yan const struct ext4_sb_encodings *encoding_info; 448539c135b0SJason Yan struct unicode_map *encoding; 448639c135b0SJason Yan __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags); 448739c135b0SJason Yan 448839c135b0SJason Yan if (!ext4_has_feature_casefold(sb) || sb->s_encoding) 448939c135b0SJason Yan return 0; 449039c135b0SJason Yan 449139c135b0SJason Yan encoding_info = ext4_sb_read_encoding(es); 449239c135b0SJason Yan if (!encoding_info) { 449339c135b0SJason Yan ext4_msg(sb, KERN_ERR, 449439c135b0SJason Yan "Encoding requested by superblock is unknown"); 449539c135b0SJason Yan return -EINVAL; 449639c135b0SJason Yan } 449739c135b0SJason Yan 449839c135b0SJason Yan encoding = utf8_load(encoding_info->version); 449939c135b0SJason Yan if (IS_ERR(encoding)) { 450039c135b0SJason Yan ext4_msg(sb, KERN_ERR, 450139c135b0SJason Yan "can't mount with superblock charset: %s-%u.%u.%u " 450239c135b0SJason Yan "not supported by the kernel. 
flags: 0x%x.", 450339c135b0SJason Yan encoding_info->name, 450439c135b0SJason Yan unicode_major(encoding_info->version), 450539c135b0SJason Yan unicode_minor(encoding_info->version), 450639c135b0SJason Yan unicode_rev(encoding_info->version), 450739c135b0SJason Yan encoding_flags); 450839c135b0SJason Yan return -EINVAL; 450939c135b0SJason Yan } 451039c135b0SJason Yan ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: " 451139c135b0SJason Yan "%s-%u.%u.%u with flags 0x%hx", encoding_info->name, 451239c135b0SJason Yan unicode_major(encoding_info->version), 451339c135b0SJason Yan unicode_minor(encoding_info->version), 451439c135b0SJason Yan unicode_rev(encoding_info->version), 451539c135b0SJason Yan encoding_flags); 451639c135b0SJason Yan 451739c135b0SJason Yan sb->s_encoding = encoding; 451839c135b0SJason Yan sb->s_encoding_flags = encoding_flags; 451939c135b0SJason Yan 452039c135b0SJason Yan return 0; 452139c135b0SJason Yan } 452239c135b0SJason Yan #else 452339c135b0SJason Yan static inline int ext4_encoding_init(struct super_block *sb, struct ext4_super_block *es) 452439c135b0SJason Yan { 452539c135b0SJason Yan return 0; 452639c135b0SJason Yan } 452739c135b0SJason Yan #endif 452839c135b0SJason Yan 4529b26458d1SJason Yan static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_block *es) 4530b26458d1SJason Yan { 4531b26458d1SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4532b26458d1SJason Yan 4533b26458d1SJason Yan /* Warn if metadata_csum and gdt_csum are both set. 
*/ 4534b26458d1SJason Yan if (ext4_has_feature_metadata_csum(sb) && 4535b26458d1SJason Yan ext4_has_feature_gdt_csum(sb)) 4536b26458d1SJason Yan ext4_warning(sb, "metadata_csum and uninit_bg are " 4537b26458d1SJason Yan "redundant flags; please run fsck."); 4538b26458d1SJason Yan 4539b26458d1SJason Yan /* Check for a known checksum algorithm */ 4540b26458d1SJason Yan if (!ext4_verify_csum_type(sb, es)) { 4541b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4542b26458d1SJason Yan "unknown checksum algorithm."); 4543b26458d1SJason Yan return -EINVAL; 4544b26458d1SJason Yan } 4545b26458d1SJason Yan ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, 4546b26458d1SJason Yan ext4_orphan_file_block_trigger); 4547b26458d1SJason Yan 4548b26458d1SJason Yan /* Load the checksum driver */ 4549b26458d1SJason Yan sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); 4550b26458d1SJason Yan if (IS_ERR(sbi->s_chksum_driver)) { 4551b26458d1SJason Yan int ret = PTR_ERR(sbi->s_chksum_driver); 4552b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); 4553b26458d1SJason Yan sbi->s_chksum_driver = NULL; 4554b26458d1SJason Yan return ret; 4555b26458d1SJason Yan } 4556b26458d1SJason Yan 4557b26458d1SJason Yan /* Check superblock checksum */ 4558b26458d1SJason Yan if (!ext4_superblock_csum_verify(sb, es)) { 4559b26458d1SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " 4560b26458d1SJason Yan "invalid superblock checksum. 
Run e2fsck?"); 4561b26458d1SJason Yan return -EFSBADCRC; 4562b26458d1SJason Yan } 4563b26458d1SJason Yan 4564b26458d1SJason Yan /* Precompute checksum seed for all metadata */ 4565b26458d1SJason Yan if (ext4_has_feature_csum_seed(sb)) 4566b26458d1SJason Yan sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); 4567b26458d1SJason Yan else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb)) 4568b26458d1SJason Yan sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, 4569b26458d1SJason Yan sizeof(es->s_uuid)); 4570b26458d1SJason Yan return 0; 4571b26458d1SJason Yan } 4572b26458d1SJason Yan 4573d7f3542bSJason Yan static int ext4_check_feature_compatibility(struct super_block *sb, 4574d7f3542bSJason Yan struct ext4_super_block *es, 4575d7f3542bSJason Yan int silent) 4576d7f3542bSJason Yan { 4577d7f3542bSJason Yan if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && 4578d7f3542bSJason Yan (ext4_has_compat_features(sb) || 4579d7f3542bSJason Yan ext4_has_ro_compat_features(sb) || 4580d7f3542bSJason Yan ext4_has_incompat_features(sb))) 4581d7f3542bSJason Yan ext4_msg(sb, KERN_WARNING, 4582d7f3542bSJason Yan "feature flags set on rev 0 fs, " 4583d7f3542bSJason Yan "running e2fsck is recommended"); 4584d7f3542bSJason Yan 4585d7f3542bSJason Yan if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { 4586d7f3542bSJason Yan set_opt2(sb, HURD_COMPAT); 4587d7f3542bSJason Yan if (ext4_has_feature_64bit(sb)) { 4588d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, 4589d7f3542bSJason Yan "The Hurd can't support 64-bit file systems"); 4590d7f3542bSJason Yan return -EINVAL; 4591d7f3542bSJason Yan } 4592d7f3542bSJason Yan 4593d7f3542bSJason Yan /* 4594d7f3542bSJason Yan * ea_inode feature uses l_i_version field which is not 4595d7f3542bSJason Yan * available in HURD_COMPAT mode. 
4596d7f3542bSJason Yan */ 4597d7f3542bSJason Yan if (ext4_has_feature_ea_inode(sb)) { 4598d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, 4599d7f3542bSJason Yan "ea_inode feature is not supported for Hurd"); 4600d7f3542bSJason Yan return -EINVAL; 4601d7f3542bSJason Yan } 4602d7f3542bSJason Yan } 4603d7f3542bSJason Yan 4604d7f3542bSJason Yan if (IS_EXT2_SB(sb)) { 4605d7f3542bSJason Yan if (ext2_feature_set_ok(sb)) 4606d7f3542bSJason Yan ext4_msg(sb, KERN_INFO, "mounting ext2 file system " 4607d7f3542bSJason Yan "using the ext4 subsystem"); 4608d7f3542bSJason Yan else { 4609d7f3542bSJason Yan /* 4610d7f3542bSJason Yan * If we're probing be silent, if this looks like 4611d7f3542bSJason Yan * it's actually an ext[34] filesystem. 4612d7f3542bSJason Yan */ 4613d7f3542bSJason Yan if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4614d7f3542bSJason Yan return -EINVAL; 4615d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " 4616d7f3542bSJason Yan "to feature incompatibilities"); 4617d7f3542bSJason Yan return -EINVAL; 4618d7f3542bSJason Yan } 4619d7f3542bSJason Yan } 4620d7f3542bSJason Yan 4621d7f3542bSJason Yan if (IS_EXT3_SB(sb)) { 4622d7f3542bSJason Yan if (ext3_feature_set_ok(sb)) 4623d7f3542bSJason Yan ext4_msg(sb, KERN_INFO, "mounting ext3 file system " 4624d7f3542bSJason Yan "using the ext4 subsystem"); 4625d7f3542bSJason Yan else { 4626d7f3542bSJason Yan /* 4627d7f3542bSJason Yan * If we're probing be silent, if this looks like 4628d7f3542bSJason Yan * it's actually an ext4 filesystem. 
4629d7f3542bSJason Yan */ 4630d7f3542bSJason Yan if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) 4631d7f3542bSJason Yan return -EINVAL; 4632d7f3542bSJason Yan ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " 4633d7f3542bSJason Yan "to feature incompatibilities"); 4634d7f3542bSJason Yan return -EINVAL; 4635d7f3542bSJason Yan } 4636d7f3542bSJason Yan } 4637d7f3542bSJason Yan 4638d7f3542bSJason Yan /* 4639d7f3542bSJason Yan * Check feature flags regardless of the revision level, since we 4640d7f3542bSJason Yan * previously didn't change the revision level when setting the flags, 4641d7f3542bSJason Yan * so there is a chance incompat flags are set on a rev 0 filesystem. 4642d7f3542bSJason Yan */ 4643d7f3542bSJason Yan if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) 4644d7f3542bSJason Yan return -EINVAL; 4645d7f3542bSJason Yan 4646d7f3542bSJason Yan return 0; 4647d7f3542bSJason Yan } 4648d7f3542bSJason Yan 4649bc62dbf9SJason Yan static int ext4_geometry_check(struct super_block *sb, 4650bc62dbf9SJason Yan struct ext4_super_block *es) 4651bc62dbf9SJason Yan { 4652bc62dbf9SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4653bc62dbf9SJason Yan __u64 blocks_count; 4654bc62dbf9SJason Yan 4655bc62dbf9SJason Yan /* check blocks count against device size */ 4656bc62dbf9SJason Yan blocks_count = sb_bdev_nr_blocks(sb); 4657bc62dbf9SJason Yan if (blocks_count && ext4_blocks_count(es) > blocks_count) { 4658bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " 4659bc62dbf9SJason Yan "exceeds size of device (%llu blocks)", 4660bc62dbf9SJason Yan ext4_blocks_count(es), blocks_count); 4661bc62dbf9SJason Yan return -EINVAL; 4662bc62dbf9SJason Yan } 4663bc62dbf9SJason Yan 4664bc62dbf9SJason Yan /* 4665bc62dbf9SJason Yan * It makes no sense for the first data block to be beyond the end 4666bc62dbf9SJason Yan * of the filesystem. 
4667bc62dbf9SJason Yan */ 4668bc62dbf9SJason Yan if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { 4669bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4670bc62dbf9SJason Yan "block %u is beyond end of filesystem (%llu)", 4671bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block), 4672bc62dbf9SJason Yan ext4_blocks_count(es)); 4673bc62dbf9SJason Yan return -EINVAL; 4674bc62dbf9SJason Yan } 4675bc62dbf9SJason Yan if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && 4676bc62dbf9SJason Yan (sbi->s_cluster_ratio == 1)) { 4677bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "bad geometry: first data " 4678bc62dbf9SJason Yan "block is 0 with a 1k block and cluster size"); 4679bc62dbf9SJason Yan return -EINVAL; 4680bc62dbf9SJason Yan } 4681bc62dbf9SJason Yan 4682bc62dbf9SJason Yan blocks_count = (ext4_blocks_count(es) - 4683bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block) + 4684bc62dbf9SJason Yan EXT4_BLOCKS_PER_GROUP(sb) - 1); 4685bc62dbf9SJason Yan do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); 4686bc62dbf9SJason Yan if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { 4687bc62dbf9SJason Yan ext4_msg(sb, KERN_WARNING, "groups count too large: %llu " 4688bc62dbf9SJason Yan "(block count %llu, first data block %u, " 4689bc62dbf9SJason Yan "blocks per group %lu)", blocks_count, 4690bc62dbf9SJason Yan ext4_blocks_count(es), 4691bc62dbf9SJason Yan le32_to_cpu(es->s_first_data_block), 4692bc62dbf9SJason Yan EXT4_BLOCKS_PER_GROUP(sb)); 4693bc62dbf9SJason Yan return -EINVAL; 4694bc62dbf9SJason Yan } 4695bc62dbf9SJason Yan sbi->s_groups_count = blocks_count; 4696bc62dbf9SJason Yan sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, 4697bc62dbf9SJason Yan (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); 4698bc62dbf9SJason Yan if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) != 4699bc62dbf9SJason Yan le32_to_cpu(es->s_inodes_count)) { 4700bc62dbf9SJason Yan ext4_msg(sb, 
KERN_ERR, "inodes count not valid: %u vs %llu", 4701bc62dbf9SJason Yan le32_to_cpu(es->s_inodes_count), 4702bc62dbf9SJason Yan ((u64)sbi->s_groups_count * sbi->s_inodes_per_group)); 4703bc62dbf9SJason Yan return -EINVAL; 4704bc62dbf9SJason Yan } 4705bc62dbf9SJason Yan 4706bc62dbf9SJason Yan return 0; 4707bc62dbf9SJason Yan } 4708bc62dbf9SJason Yan 4709a4e6a511SJason Yan static void ext4_group_desc_free(struct ext4_sb_info *sbi) 4710a4e6a511SJason Yan { 4711a4e6a511SJason Yan struct buffer_head **group_desc; 4712a4e6a511SJason Yan int i; 4713a4e6a511SJason Yan 4714a4e6a511SJason Yan rcu_read_lock(); 4715a4e6a511SJason Yan group_desc = rcu_dereference(sbi->s_group_desc); 4716a4e6a511SJason Yan for (i = 0; i < sbi->s_gdb_count; i++) 4717a4e6a511SJason Yan brelse(group_desc[i]); 4718a4e6a511SJason Yan kvfree(group_desc); 4719a4e6a511SJason Yan rcu_read_unlock(); 4720a4e6a511SJason Yan } 4721a4e6a511SJason Yan 4722a4e6a511SJason Yan static int ext4_group_desc_init(struct super_block *sb, 4723a4e6a511SJason Yan struct ext4_super_block *es, 4724a4e6a511SJason Yan ext4_fsblk_t logical_sb_block, 4725a4e6a511SJason Yan ext4_group_t *first_not_zeroed) 4726a4e6a511SJason Yan { 4727a4e6a511SJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 4728a4e6a511SJason Yan unsigned int db_count; 4729a4e6a511SJason Yan ext4_fsblk_t block; 4730a4e6a511SJason Yan int ret; 4731a4e6a511SJason Yan int i; 4732a4e6a511SJason Yan 4733a4e6a511SJason Yan db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / 4734a4e6a511SJason Yan EXT4_DESC_PER_BLOCK(sb); 4735a4e6a511SJason Yan if (ext4_has_feature_meta_bg(sb)) { 4736a4e6a511SJason Yan if (le32_to_cpu(es->s_first_meta_bg) > db_count) { 4737a4e6a511SJason Yan ext4_msg(sb, KERN_WARNING, 4738a4e6a511SJason Yan "first meta block group too large: %u " 4739a4e6a511SJason Yan "(group descriptor block count %u)", 4740a4e6a511SJason Yan le32_to_cpu(es->s_first_meta_bg), db_count); 4741a4e6a511SJason Yan return -EINVAL; 4742a4e6a511SJason Yan } 
4743a4e6a511SJason Yan } 4744a4e6a511SJason Yan rcu_assign_pointer(sbi->s_group_desc, 4745a4e6a511SJason Yan kvmalloc_array(db_count, 4746a4e6a511SJason Yan sizeof(struct buffer_head *), 4747a4e6a511SJason Yan GFP_KERNEL)); 4748a4e6a511SJason Yan if (sbi->s_group_desc == NULL) { 4749a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, "not enough memory"); 4750a4e6a511SJason Yan return -ENOMEM; 4751a4e6a511SJason Yan } 4752a4e6a511SJason Yan 4753a4e6a511SJason Yan bgl_lock_init(sbi->s_blockgroup_lock); 4754a4e6a511SJason Yan 4755a4e6a511SJason Yan /* Pre-read the descriptors into the buffer cache */ 4756a4e6a511SJason Yan for (i = 0; i < db_count; i++) { 4757a4e6a511SJason Yan block = descriptor_loc(sb, logical_sb_block, i); 4758a4e6a511SJason Yan ext4_sb_breadahead_unmovable(sb, block); 4759a4e6a511SJason Yan } 4760a4e6a511SJason Yan 4761a4e6a511SJason Yan for (i = 0; i < db_count; i++) { 4762a4e6a511SJason Yan struct buffer_head *bh; 4763a4e6a511SJason Yan 4764a4e6a511SJason Yan block = descriptor_loc(sb, logical_sb_block, i); 4765a4e6a511SJason Yan bh = ext4_sb_bread_unmovable(sb, block); 4766a4e6a511SJason Yan if (IS_ERR(bh)) { 4767a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, 4768a4e6a511SJason Yan "can't read group descriptor %d", i); 4769a4e6a511SJason Yan sbi->s_gdb_count = i; 4770a4e6a511SJason Yan ret = PTR_ERR(bh); 4771a4e6a511SJason Yan goto out; 4772a4e6a511SJason Yan } 4773a4e6a511SJason Yan rcu_read_lock(); 4774a4e6a511SJason Yan rcu_dereference(sbi->s_group_desc)[i] = bh; 4775a4e6a511SJason Yan rcu_read_unlock(); 4776a4e6a511SJason Yan } 4777a4e6a511SJason Yan sbi->s_gdb_count = db_count; 4778a4e6a511SJason Yan if (!ext4_check_descriptors(sb, logical_sb_block, first_not_zeroed)) { 4779a4e6a511SJason Yan ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); 4780a4e6a511SJason Yan ret = -EFSCORRUPTED; 4781a4e6a511SJason Yan goto out; 4782a4e6a511SJason Yan } 4783a4e6a511SJason Yan return 0; 4784a4e6a511SJason Yan out: 4785a4e6a511SJason Yan 
ext4_group_desc_free(sbi); 4786a4e6a511SJason Yan return ret; 4787a4e6a511SJason Yan } 4788a4e6a511SJason Yan 47899c1dd22dSJason Yan static int ext4_load_and_init_journal(struct super_block *sb, 47909c1dd22dSJason Yan struct ext4_super_block *es, 47919c1dd22dSJason Yan struct ext4_fs_context *ctx) 47929c1dd22dSJason Yan { 47939c1dd22dSJason Yan struct ext4_sb_info *sbi = EXT4_SB(sb); 47949c1dd22dSJason Yan int err; 47959c1dd22dSJason Yan 47969c1dd22dSJason Yan err = ext4_load_journal(sb, es, ctx->journal_devnum); 47979c1dd22dSJason Yan if (err) 47989c1dd22dSJason Yan return err; 47999c1dd22dSJason Yan 48009c1dd22dSJason Yan if (ext4_has_feature_64bit(sb) && 48019c1dd22dSJason Yan !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 48029c1dd22dSJason Yan JBD2_FEATURE_INCOMPAT_64BIT)) { 48039c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 48049c1dd22dSJason Yan goto out; 48059c1dd22dSJason Yan } 48069c1dd22dSJason Yan 48079c1dd22dSJason Yan if (!set_journal_csum_feature_set(sb)) { 48089c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " 48099c1dd22dSJason Yan "feature set"); 48109c1dd22dSJason Yan goto out; 48119c1dd22dSJason Yan } 48129c1dd22dSJason Yan 48139c1dd22dSJason Yan if (test_opt2(sb, JOURNAL_FAST_COMMIT) && 48149c1dd22dSJason Yan !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 48159c1dd22dSJason Yan JBD2_FEATURE_INCOMPAT_FAST_COMMIT)) { 48169c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, 48179c1dd22dSJason Yan "Failed to set fast commit journal feature"); 48189c1dd22dSJason Yan goto out; 48199c1dd22dSJason Yan } 48209c1dd22dSJason Yan 48219c1dd22dSJason Yan /* We have now updated the journal if required, so we can 48229c1dd22dSJason Yan * validate the data journaling mode. 
*/ 48239c1dd22dSJason Yan switch (test_opt(sb, DATA_FLAGS)) { 48249c1dd22dSJason Yan case 0: 48259c1dd22dSJason Yan /* No mode set, assume a default based on the journal 48269c1dd22dSJason Yan * capabilities: ORDERED_DATA if the journal can 48279c1dd22dSJason Yan * cope, else JOURNAL_DATA 48289c1dd22dSJason Yan */ 48299c1dd22dSJason Yan if (jbd2_journal_check_available_features 48309c1dd22dSJason Yan (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 48319c1dd22dSJason Yan set_opt(sb, ORDERED_DATA); 48329c1dd22dSJason Yan sbi->s_def_mount_opt |= EXT4_MOUNT_ORDERED_DATA; 48339c1dd22dSJason Yan } else { 48349c1dd22dSJason Yan set_opt(sb, JOURNAL_DATA); 48359c1dd22dSJason Yan sbi->s_def_mount_opt |= EXT4_MOUNT_JOURNAL_DATA; 48369c1dd22dSJason Yan } 48379c1dd22dSJason Yan break; 48389c1dd22dSJason Yan 48399c1dd22dSJason Yan case EXT4_MOUNT_ORDERED_DATA: 48409c1dd22dSJason Yan case EXT4_MOUNT_WRITEBACK_DATA: 48419c1dd22dSJason Yan if (!jbd2_journal_check_available_features 48429c1dd22dSJason Yan (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { 48439c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "Journal does not support " 48449c1dd22dSJason Yan "requested data journaling mode"); 48459c1dd22dSJason Yan goto out; 48469c1dd22dSJason Yan } 48479c1dd22dSJason Yan break; 48489c1dd22dSJason Yan default: 48499c1dd22dSJason Yan break; 48509c1dd22dSJason Yan } 48519c1dd22dSJason Yan 48529c1dd22dSJason Yan if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && 48539c1dd22dSJason Yan test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 48549c1dd22dSJason Yan ext4_msg(sb, KERN_ERR, "can't mount with " 48559c1dd22dSJason Yan "journal_async_commit in data=ordered mode"); 48569c1dd22dSJason Yan goto out; 48579c1dd22dSJason Yan } 48589c1dd22dSJason Yan 48599c1dd22dSJason Yan set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 48609c1dd22dSJason Yan 48619c1dd22dSJason Yan sbi->s_journal->j_submit_inode_data_buffers = 48629c1dd22dSJason Yan ext4_journal_submit_inode_data_buffers; 
48639c1dd22dSJason Yan sbi->s_journal->j_finish_inode_data_buffers = 48649c1dd22dSJason Yan ext4_journal_finish_inode_data_buffers; 48659c1dd22dSJason Yan 48669c1dd22dSJason Yan return 0; 48679c1dd22dSJason Yan 48689c1dd22dSJason Yan out: 48699c1dd22dSJason Yan /* flush s_error_work before journal destroy. */ 48709c1dd22dSJason Yan flush_work(&sbi->s_error_work); 48719c1dd22dSJason Yan jbd2_journal_destroy(sbi->s_journal); 48729c1dd22dSJason Yan sbi->s_journal = NULL; 48739f2a1d9fSJason Yan return -EINVAL; 48749c1dd22dSJason Yan } 48759c1dd22dSJason Yan 4876a5991e53SJason Yan static int ext4_journal_data_mode_check(struct super_block *sb) 4877a5991e53SJason Yan { 4878a5991e53SJason Yan if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4879a5991e53SJason Yan printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with " 4880a5991e53SJason Yan "data=journal disables delayed allocation, " 4881a5991e53SJason Yan "dioread_nolock, O_DIRECT and fast_commit support!\n"); 4882a5991e53SJason Yan /* can't mount with both data=journal and dioread_nolock. 
/*
 * ext4_load_super() - read and minimally validate the on-disk superblock.
 * @sb:		VFS superblock being set up
 * @lsb:	out: logical block number the superblock was read from
 * @silent:	when non-zero, suppress the "can't find filesystem" message
 *		(set during mount probing)
 *
 * Reads the superblock at sbi->s_sb_block, checks the magic and the
 * log_block_size/log_cluster_size fields, and re-reads the superblock
 * after switching the device to the filesystem's real block size when
 * that differs from the probe block size.
 *
 * On success returns 0 with sbi->s_es / sbi->s_sbh pointing at the
 * superblock (the buffer_head reference is retained) and *lsb set.
 * On failure the buffer is released and a negative errno is returned.
 */
static int ext4_load_super(struct super_block *sb, ext4_fsblk_t *lsb,
			   int silent)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es;
	ext4_fsblk_t logical_sb_block;
	unsigned long offset = 0;
	struct buffer_head *bh;
	int ret = -EINVAL;
	int blocksize;

	/* Probe with the smallest block size the device allows (>= 1kB). */
	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
		return -EINVAL;
	}

	/*
	 * The ext4 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
		logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
		offset = do_div(logical_sb_block, blocksize);
	} else {
		logical_sb_block = sbi->s_sb_block;
	}

	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "unable to read superblock");
		return PTR_ERR(bh);
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 * some ext4 macro-instructions depend on its value
	 */
	es = (struct ext4_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT4_SUPER_MAGIC) {
		if (!silent)
			ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
		goto out;
	}

	/* Sanity-check the size fields before deriving the real blocksize. */
	if (le32_to_cpu(es->s_log_block_size) >
	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log block size: %u",
			 le32_to_cpu(es->s_log_block_size));
		goto out;
	}
	if (le32_to_cpu(es->s_log_cluster_size) >
	    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Invalid log cluster size: %u",
			 le32_to_cpu(es->s_log_cluster_size));
		goto out;
	}

	blocksize = EXT4_MIN_BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	/*
	 * If the default block size is not the same as the real block size,
	 * we need to reload it.
	 */
	if (sb->s_blocksize == blocksize) {
		*lsb = logical_sb_block;
		sbi->s_sbh = bh;
		return 0;
	}

	/*
	 * bh must be released before kill_bdev(), otherwise
	 * it won't be freed and its page also. kill_bdev()
	 * is called by sb_set_blocksize().
	 */
	brelse(bh);
	/* Validate the filesystem blocksize */
	if (!sb_set_blocksize(sb, blocksize)) {
		ext4_msg(sb, KERN_ERR, "bad block size %d",
			 blocksize);
		/* bh already released above; avoid double brelse() at out. */
		bh = NULL;
		goto out;
	}

	/* Re-read the superblock at the real block size. */
	logical_sb_block = sbi->s_sb_block * EXT4_MIN_BLOCK_SIZE;
	offset = do_div(logical_sb_block, blocksize);
	bh = ext4_sb_bread_unmovable(sb, logical_sb_block);
	if (IS_ERR(bh)) {
		ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try");
		ret = PTR_ERR(bh);
		bh = NULL;
		goto out;
	}
	es = (struct ext4_super_block *)(bh->b_data + offset);
	sbi->s_es = es;
	if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
		ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!");
		goto out;
	}
	*lsb = logical_sb_block;
	sbi->s_sbh = bh;
	return 0;
out:
	brelse(bh);
	return ret;
}
5045a7a79c29SJason Yan err = ext4_load_super(sb, &logical_sb_block, silent); 5046a7a79c29SJason Yan if (err) 5047ac27a0ecSDave Kleikamp goto out_fail; 5048ac27a0ecSDave Kleikamp 5049a7a79c29SJason Yan es = sbi->s_es; 5050afc32f7eSTheodore Ts'o sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 5051ac27a0ecSDave Kleikamp 5052b26458d1SJason Yan err = ext4_init_metadata_csum(sb, es); 5053b26458d1SJason Yan if (err) 5054a5fc5119SJason Yan goto failed_mount; 5055a9c47317SDarrick J. Wong 50565f6d662dSJason Yan ext4_set_def_opts(sb, es); 5057ac27a0ecSDave Kleikamp 505808cefc7aSEric W. Biederman sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); 505908cefc7aSEric W. Biederman sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); 506030773840STheodore Ts'o sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 506130773840STheodore Ts'o sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 506230773840STheodore Ts'o sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 5063ac27a0ecSDave Kleikamp 506451ce6511SLukas Czerner /* 506551ce6511SLukas Czerner * set default s_li_wait_mult for lazyinit, for the case there is 506651ce6511SLukas Czerner * no mount option specified. 
506751ce6511SLukas Czerner */ 506851ce6511SLukas Czerner sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; 506951ce6511SLukas Czerner 5070c8267c51SJason Yan if (ext4_inode_info_init(sb, es)) 50719803387cSTheodore Ts'o goto failed_mount; 50729803387cSTheodore Ts'o 50737edfd85bSLukas Czerner err = parse_apply_sb_mount_options(sb, ctx); 50747edfd85bSLukas Czerner if (err < 0) 50755aee0f8aSTheodore Ts'o goto failed_mount; 50767edfd85bSLukas Czerner 50775a916be1STheodore Ts'o sbi->s_def_mount_opt = sbi->s_mount_opt; 50787edfd85bSLukas Czerner 50797edfd85bSLukas Czerner err = ext4_check_opt_consistency(fc, sb); 50807edfd85bSLukas Czerner if (err < 0) 50817edfd85bSLukas Czerner goto failed_mount; 50827edfd85bSLukas Czerner 508385456054SEric Biggers ext4_apply_options(fc, sb); 5084ac27a0ecSDave Kleikamp 508539c135b0SJason Yan if (ext4_encoding_init(sb, es)) 5086c83ad55eSGabriel Krisman Bertazi goto failed_mount; 5087c83ad55eSGabriel Krisman Bertazi 5088a5991e53SJason Yan if (ext4_journal_data_mode_check(sb)) 508956889787STheodore Ts'o goto failed_mount; 509056889787STheodore Ts'o 50911751e8a6SLinus Torvalds sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 50921751e8a6SLinus Torvalds (test_opt(sb, POSIX_ACL) ? 
SB_POSIXACL : 0); 5093ac27a0ecSDave Kleikamp 50941ff20307SJeff Layton /* i_version is always enabled now */ 50951ff20307SJeff Layton sb->s_flags |= SB_I_VERSION; 50961ff20307SJeff Layton 5097d7f3542bSJason Yan if (ext4_check_feature_compatibility(sb, es, silent)) 5098ac27a0ecSDave Kleikamp goto failed_mount; 5099a13fb1a4SEric Sandeen 5100c8267c51SJason Yan if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (sb->s_blocksize / 4)) { 51015b9554dcSTheodore Ts'o ext4_msg(sb, KERN_ERR, 51025b9554dcSTheodore Ts'o "Number of reserved GDT blocks insanely large: %d", 51035b9554dcSTheodore Ts'o le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); 51045b9554dcSTheodore Ts'o goto failed_mount; 51055b9554dcSTheodore Ts'o } 51065b9554dcSTheodore Ts'o 510789b93a7bSChristoph Hellwig if (sbi->s_daxdev) { 5108c8267c51SJason Yan if (sb->s_blocksize == PAGE_SIZE) 5109a8ab6d38SIra Weiny set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); 51107b0800d0SChristoph Hellwig else 51117b0800d0SChristoph Hellwig ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n"); 51127b0800d0SChristoph Hellwig } 5113a8ab6d38SIra Weiny 5114fc626fe3SIra Weiny if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { 5115559db4c6SRoss Zwisler if (ext4_has_feature_inline_data(sb)) { 5116559db4c6SRoss Zwisler ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" 5117559db4c6SRoss Zwisler " that may contain inline data"); 5118361d24d4SEric Sandeen goto failed_mount; 5119559db4c6SRoss Zwisler } 5120a8ab6d38SIra Weiny if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { 512124f3478dSDan Williams ext4_msg(sb, KERN_ERR, 5122361d24d4SEric Sandeen "DAX unsupported by block device."); 5123361d24d4SEric Sandeen goto failed_mount; 512424f3478dSDan Williams } 5125923ae0ffSRoss Zwisler } 5126923ae0ffSRoss Zwisler 5127e2b911c5SDarrick J. 
Wong if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { 51286ddb2447STheodore Ts'o ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", 51296ddb2447STheodore Ts'o es->s_encryption_level); 51306ddb2447STheodore Ts'o goto failed_mount; 51316ddb2447STheodore Ts'o } 51326ddb2447STheodore Ts'o 5133e2b911c5SDarrick J. Wong has_huge_files = ext4_has_feature_huge_file(sb); 5134f287a1a5STheodore Ts'o sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, 5135f287a1a5STheodore Ts'o has_huge_files); 5136f287a1a5STheodore Ts'o sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); 5137ac27a0ecSDave Kleikamp 51380d1ee42fSAlexandre Ratchov sbi->s_desc_size = le16_to_cpu(es->s_desc_size); 5139e2b911c5SDarrick J. Wong if (ext4_has_feature_64bit(sb)) { 51408fadc143SAlexandre Ratchov if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || 51410d1ee42fSAlexandre Ratchov sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 5142d8ea6cf8Svignesh babu !is_power_of_2(sbi->s_desc_size)) { 5143b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, 5144b31e1552SEric Sandeen "unsupported descriptor size %lu", 51450d1ee42fSAlexandre Ratchov sbi->s_desc_size); 51460d1ee42fSAlexandre Ratchov goto failed_mount; 51470d1ee42fSAlexandre Ratchov } 51480d1ee42fSAlexandre Ratchov } else 51490d1ee42fSAlexandre Ratchov sbi->s_desc_size = EXT4_MIN_DESC_SIZE; 51500b8e58a1SAndreas Dilger 5151ac27a0ecSDave Kleikamp sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); 5152ac27a0ecSDave Kleikamp sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); 51530b8e58a1SAndreas Dilger 5154c8267c51SJason Yan sbi->s_inodes_per_block = sb->s_blocksize / EXT4_INODE_SIZE(sb); 5155a5fc5119SJason Yan if (sbi->s_inodes_per_block == 0 || sbi->s_blocks_per_group == 0) { 5156a5fc5119SJason Yan if (!silent) 5157a5fc5119SJason Yan ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); 5158a5fc5119SJason Yan goto failed_mount; 5159a5fc5119SJason Yan } 5160cd6bb35bSTheodore Ts'o if 
(sbi->s_inodes_per_group < sbi->s_inodes_per_block || 5161c8267c51SJason Yan sbi->s_inodes_per_group > sb->s_blocksize * 8) { 5162cd6bb35bSTheodore Ts'o ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", 5163b9c538daSJosh Triplett sbi->s_inodes_per_group); 5164cd6bb35bSTheodore Ts'o goto failed_mount; 5165cd6bb35bSTheodore Ts'o } 5166ac27a0ecSDave Kleikamp sbi->s_itb_per_group = sbi->s_inodes_per_group / 5167ac27a0ecSDave Kleikamp sbi->s_inodes_per_block; 5168c8267c51SJason Yan sbi->s_desc_per_block = sb->s_blocksize / EXT4_DESC_SIZE(sb); 5169c878bea3STheodore Ts'o sbi->s_mount_state = le16_to_cpu(es->s_state) & ~EXT4_FC_REPLAY; 5170e57aa839SFengguang Wu sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); 5171e57aa839SFengguang Wu sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); 51720b8e58a1SAndreas Dilger 5173ac27a0ecSDave Kleikamp for (i = 0; i < 4; i++) 5174ac27a0ecSDave Kleikamp sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 5175ac27a0ecSDave Kleikamp sbi->s_def_hash_version = es->s_def_hash_version; 5176e2b911c5SDarrick J. 
Wong if (ext4_has_feature_dir_index(sb)) { 5177f99b2589STheodore Ts'o i = le32_to_cpu(es->s_flags); 5178f99b2589STheodore Ts'o if (i & EXT2_FLAGS_UNSIGNED_HASH) 5179f99b2589STheodore Ts'o sbi->s_hash_unsigned = 3; 5180f99b2589STheodore Ts'o else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 5181f99b2589STheodore Ts'o #ifdef __CHAR_UNSIGNED__ 5182bc98a42cSDavid Howells if (!sb_rdonly(sb)) 518323301410STheodore Ts'o es->s_flags |= 518423301410STheodore Ts'o cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 5185f99b2589STheodore Ts'o sbi->s_hash_unsigned = 3; 5186f99b2589STheodore Ts'o #else 5187bc98a42cSDavid Howells if (!sb_rdonly(sb)) 518823301410STheodore Ts'o es->s_flags |= 518923301410STheodore Ts'o cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 5190f99b2589STheodore Ts'o #endif 5191f99b2589STheodore Ts'o } 519223301410STheodore Ts'o } 5193ac27a0ecSDave Kleikamp 5194c8267c51SJason Yan if (ext4_handle_clustersize(sb)) 5195281b5995STheodore Ts'o goto failed_mount; 5196960fd856STheodore Ts'o 5197bf43d84bSEric Sandeen /* 5198bf43d84bSEric Sandeen * Test whether we have more sectors than will fit in sector_t, 5199bf43d84bSEric Sandeen * and whether the max offset is addressable by the page cache. 5200bf43d84bSEric Sandeen */ 52015a9ae68aSDarrick J. Wong err = generic_check_addressable(sb->s_blocksize_bits, 520230ca22c7SPatrick J. LoPresti ext4_blocks_count(es)); 52035a9ae68aSDarrick J. Wong if (err) { 5204b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "filesystem" 5205bf43d84bSEric Sandeen " too large to mount safely on this system"); 5206ac27a0ecSDave Kleikamp goto failed_mount; 5207ac27a0ecSDave Kleikamp } 5208ac27a0ecSDave Kleikamp 5209bc62dbf9SJason Yan if (ext4_geometry_check(sb, es)) 52100f2ddca6SFrom: Thiemo Nagel goto failed_mount; 52110f2ddca6SFrom: Thiemo Nagel 5212a4e6a511SJason Yan err = ext4_group_desc_init(sb, es, logical_sb_block, &first_not_zeroed); 5213a4e6a511SJason Yan if (err) 52143a4b77cdSEryu Guan goto failed_mount; 5215772cb7c8SJose R. 
Santos 5216235699a8SKees Cook timer_setup(&sbi->s_err_report, print_daily_error_info, 0); 5217c92dc856SJan Kara spin_lock_init(&sbi->s_error_lock); 5218c92dc856SJan Kara INIT_WORK(&sbi->s_error_work, flush_stashed_error_work); 521904496411STao Ma 5220a75ae78fSDmitry Monakhov /* Register extent status tree shrinker */ 5221eb68d0e2SZheng Liu if (ext4_es_register_shrinker(sbi)) 5222ce7e010aSTheodore Ts'o goto failed_mount3; 5223ce7e010aSTheodore Ts'o 5224c9de560dSAlex Tomas sbi->s_stripe = ext4_get_stripe_size(sbi); 522567a5da56SZheng Liu sbi->s_extent_max_zeroout_kb = 32; 5226c9de560dSAlex Tomas 5227f9ae9cf5STheodore Ts'o /* 5228f9ae9cf5STheodore Ts'o * set up enough so that it can read an inode 5229f9ae9cf5STheodore Ts'o */ 5230f9ae9cf5STheodore Ts'o sb->s_op = &ext4_sops; 5231617ba13bSMingming Cao sb->s_export_op = &ext4_export_ops; 5232617ba13bSMingming Cao sb->s_xattr = ext4_xattr_handlers; 5233643fa961SChandan Rajendra #ifdef CONFIG_FS_ENCRYPTION 5234a7550b30SJaegeuk Kim sb->s_cop = &ext4_cryptops; 5235ffcc4182SEric Biggers #endif 5236c93d8f88SEric Biggers #ifdef CONFIG_FS_VERITY 5237c93d8f88SEric Biggers sb->s_vop = &ext4_verityops; 5238c93d8f88SEric Biggers #endif 5239ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 5240617ba13bSMingming Cao sb->dq_op = &ext4_quota_operations; 5241e2b911c5SDarrick J. 
Wong if (ext4_has_feature_quota(sb)) 52421fa5efe3SJan Kara sb->s_qcop = &dquot_quotactl_sysfile_ops; 5243262b4662SJan Kara else 5244262b4662SJan Kara sb->s_qcop = &ext4_qctl_operations; 5245689c958cSLi Xi sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 5246ac27a0ecSDave Kleikamp #endif 524785787090SChristoph Hellwig memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); 5248f2fa2ffcSAneesh Kumar K.V 5249ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ 52503b9d4ed2STheodore Ts'o mutex_init(&sbi->s_orphan_lock); 5251ac27a0ecSDave Kleikamp 5252f7314a67SJason Yan ext4_fast_commit_init(sb); 5253aa75f4d3SHarshad Shirwadkar 5254ac27a0ecSDave Kleikamp sb->s_root = NULL; 5255ac27a0ecSDave Kleikamp 5256ac27a0ecSDave Kleikamp needs_recovery = (es->s_last_orphan != 0 || 525702f310fcSJan Kara ext4_has_feature_orphan_present(sb) || 5258e2b911c5SDarrick J. Wong ext4_has_feature_journal_needs_recovery(sb)); 5259ac27a0ecSDave Kleikamp 5260bc98a42cSDavid Howells if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) 5261c5e06d10SJohann Lombardi if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) 526250460fe8SDarrick J. Wong goto failed_mount3a; 5263c5e06d10SJohann Lombardi 5264ac27a0ecSDave Kleikamp /* 5265ac27a0ecSDave Kleikamp * The first inode we look at is the journal inode. Don't try 5266ac27a0ecSDave Kleikamp * root first: it may be modified in the journal! 5267ac27a0ecSDave Kleikamp */ 5268e2b911c5SDarrick J. Wong if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { 52699c1dd22dSJason Yan err = ext4_load_and_init_journal(sb, es, ctx); 52704753d8a2STheodore Ts'o if (err) 527150460fe8SDarrick J. Wong goto failed_mount3a; 5272bc98a42cSDavid Howells } else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) && 5273e2b911c5SDarrick J. 
Wong ext4_has_feature_journal_needs_recovery(sb)) { 5274b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "required journal recovery " 5275b31e1552SEric Sandeen "suppressed and not mounted read-only"); 527643bd6f1bSJason Yan goto failed_mount3a; 5277ac27a0ecSDave Kleikamp } else { 52781e381f60SDmitry Monakhov /* Nojournal mode, all journal mount options are illegal */ 52791e381f60SDmitry Monakhov if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 52801e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 52811e381f60SDmitry Monakhov "journal_async_commit, fs mounted w/o journal"); 528243bd6f1bSJason Yan goto failed_mount3a; 52831e381f60SDmitry Monakhov } 528489481b5fSBaokun Li 528589481b5fSBaokun Li if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { 528689481b5fSBaokun Li ext4_msg(sb, KERN_ERR, "can't mount with " 528789481b5fSBaokun Li "journal_checksum, fs mounted w/o journal"); 528889481b5fSBaokun Li goto failed_mount3a; 528989481b5fSBaokun Li } 52901e381f60SDmitry Monakhov if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { 52911e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 52921e381f60SDmitry Monakhov "commit=%lu, fs mounted w/o journal", 52931e381f60SDmitry Monakhov sbi->s_commit_interval / HZ); 529443bd6f1bSJason Yan goto failed_mount3a; 52951e381f60SDmitry Monakhov } 52961e381f60SDmitry Monakhov if (EXT4_MOUNT_DATA_FLAGS & 52971e381f60SDmitry Monakhov (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { 52981e381f60SDmitry Monakhov ext4_msg(sb, KERN_ERR, "can't mount with " 52991e381f60SDmitry Monakhov "data=, fs mounted w/o journal"); 530043bd6f1bSJason Yan goto failed_mount3a; 53011e381f60SDmitry Monakhov } 530250b29d8fSDebabrata Banerjee sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; 53031e381f60SDmitry Monakhov clear_opt(sb, JOURNAL_CHECKSUM); 5304fd8c37ecSTheodore Ts'o clear_opt(sb, DATA_FLAGS); 5305995a3ed6SHarshad Shirwadkar clear_opt2(sb, JOURNAL_FAST_COMMIT); 53060390131bSFrank Mayhar sbi->s_journal = NULL; 
53070390131bSFrank Mayhar needs_recovery = 0; 5308ac27a0ecSDave Kleikamp } 5309ac27a0ecSDave Kleikamp 5310cdb7ee4cSTahsin Erdogan if (!test_opt(sb, NO_MBCACHE)) { 531147387409STahsin Erdogan sbi->s_ea_block_cache = ext4_xattr_create_cache(); 531247387409STahsin Erdogan if (!sbi->s_ea_block_cache) { 5313cdb7ee4cSTahsin Erdogan ext4_msg(sb, KERN_ERR, 5314cdb7ee4cSTahsin Erdogan "Failed to create ea_block_cache"); 53159c191f70ST Makphaibulchoke goto failed_mount_wq; 53169c191f70ST Makphaibulchoke } 53179c191f70ST Makphaibulchoke 5318dec214d0STahsin Erdogan if (ext4_has_feature_ea_inode(sb)) { 5319dec214d0STahsin Erdogan sbi->s_ea_inode_cache = ext4_xattr_create_cache(); 5320dec214d0STahsin Erdogan if (!sbi->s_ea_inode_cache) { 5321dec214d0STahsin Erdogan ext4_msg(sb, KERN_ERR, 5322dec214d0STahsin Erdogan "Failed to create ea_inode_cache"); 5323dec214d0STahsin Erdogan goto failed_mount_wq; 5324dec214d0STahsin Erdogan } 5325dec214d0STahsin Erdogan } 5326cdb7ee4cSTahsin Erdogan } 5327dec214d0STahsin Erdogan 5328c8267c51SJason Yan if (ext4_has_feature_verity(sb) && sb->s_blocksize != PAGE_SIZE) { 5329c93d8f88SEric Biggers ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity"); 5330c93d8f88SEric Biggers goto failed_mount_wq; 5331c93d8f88SEric Biggers } 5332c93d8f88SEric Biggers 5333fd89d5f2STejun Heo /* 5334952fc18eSTheodore Ts'o * Get the # of file system overhead blocks from the 5335952fc18eSTheodore Ts'o * superblock if present. 
5336952fc18eSTheodore Ts'o */ 5337952fc18eSTheodore Ts'o sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); 533885d825dbSTheodore Ts'o /* ignore the precalculated value if it is ridiculous */ 533985d825dbSTheodore Ts'o if (sbi->s_overhead > ext4_blocks_count(es)) 534085d825dbSTheodore Ts'o sbi->s_overhead = 0; 534185d825dbSTheodore Ts'o /* 534285d825dbSTheodore Ts'o * If the bigalloc feature is not enabled recalculating the 534385d825dbSTheodore Ts'o * overhead doesn't take long, so we might as well just redo 534485d825dbSTheodore Ts'o * it to make sure we are using the correct value. 534585d825dbSTheodore Ts'o */ 534685d825dbSTheodore Ts'o if (!ext4_has_feature_bigalloc(sb)) 534785d825dbSTheodore Ts'o sbi->s_overhead = 0; 534885d825dbSTheodore Ts'o if (sbi->s_overhead == 0) { 534907aa2ea1SLukas Czerner err = ext4_calculate_overhead(sb); 535007aa2ea1SLukas Czerner if (err) 5351952fc18eSTheodore Ts'o goto failed_mount_wq; 5352952fc18eSTheodore Ts'o } 5353952fc18eSTheodore Ts'o 5354952fc18eSTheodore Ts'o /* 5355fd89d5f2STejun Heo * The maximum number of concurrent works can be high and 5356fd89d5f2STejun Heo * concurrency isn't really necessary. Limit it to 1. 5357fd89d5f2STejun Heo */ 53582e8fa54eSJan Kara EXT4_SB(sb)->rsv_conversion_wq = 53592e8fa54eSJan Kara alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); 53602e8fa54eSJan Kara if (!EXT4_SB(sb)->rsv_conversion_wq) { 53612e8fa54eSJan Kara printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); 536207aa2ea1SLukas Czerner ret = -ENOMEM; 53632e8fa54eSJan Kara goto failed_mount4; 53642e8fa54eSJan Kara } 53652e8fa54eSJan Kara 5366ac27a0ecSDave Kleikamp /* 5367dab291afSMingming Cao * The jbd2_journal_load will have done any necessary log recovery, 5368ac27a0ecSDave Kleikamp * so we can safely mount the rest of the filesystem now. 
5369ac27a0ecSDave Kleikamp */ 5370ac27a0ecSDave Kleikamp 53718a363970STheodore Ts'o root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL); 53721d1fe1eeSDavid Howells if (IS_ERR(root)) { 5373b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "get root inode failed"); 53741d1fe1eeSDavid Howells ret = PTR_ERR(root); 537532a9bb57SManish Katiyar root = NULL; 5376ac27a0ecSDave Kleikamp goto failed_mount4; 5377ac27a0ecSDave Kleikamp } 5378ac27a0ecSDave Kleikamp if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 5379b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 538094bf608aSAl Viro iput(root); 5381ac27a0ecSDave Kleikamp goto failed_mount4; 5382ac27a0ecSDave Kleikamp } 5383b886ee3eSGabriel Krisman Bertazi 538448fde701SAl Viro sb->s_root = d_make_root(root); 53851d1fe1eeSDavid Howells if (!sb->s_root) { 5386b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "get root dentry failed"); 53871d1fe1eeSDavid Howells ret = -ENOMEM; 53881d1fe1eeSDavid Howells goto failed_mount4; 53891d1fe1eeSDavid Howells } 5390ac27a0ecSDave Kleikamp 5391c89128a0SJaegeuk Kim ret = ext4_setup_super(sb, es, sb_rdonly(sb)); 5392c89128a0SJaegeuk Kim if (ret == -EROFS) { 53931751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 5394c89128a0SJaegeuk Kim ret = 0; 5395c89128a0SJaegeuk Kim } else if (ret) 5396c89128a0SJaegeuk Kim goto failed_mount4a; 5397ef7f3835SKalpak Shah 5398b5799018STheodore Ts'o ext4_set_resv_clusters(sb); 539927dd4385SLukas Czerner 54000f5bde1dSJan Kara if (test_opt(sb, BLOCK_VALIDITY)) { 54016fd058f7STheodore Ts'o err = ext4_setup_system_zone(sb); 54026fd058f7STheodore Ts'o if (err) { 5403b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "failed to initialize system " 5404fbe845ddSCurt Wohlgemuth "zone (%d)", err); 5405f9ae9cf5STheodore Ts'o goto failed_mount4a; 5406f9ae9cf5STheodore Ts'o } 54070f5bde1dSJan Kara } 54088016e29fSHarshad Shirwadkar ext4_fc_replay_cleanup(sb); 5409f9ae9cf5STheodore Ts'o 5410f9ae9cf5STheodore Ts'o ext4_ext_init(sb); 
5411196e402aSHarshad Shirwadkar 5412196e402aSHarshad Shirwadkar /* 5413196e402aSHarshad Shirwadkar * Enable optimize_scan if number of groups is > threshold. This can be 5414196e402aSHarshad Shirwadkar * turned off by passing "mb_optimize_scan=0". This can also be 5415196e402aSHarshad Shirwadkar * turned on forcefully by passing "mb_optimize_scan=1". 5416196e402aSHarshad Shirwadkar */ 541727b38686SOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_mb_optimize_scan)) { 541827b38686SOjaswin Mujoo if (sbi->s_groups_count >= MB_DEFAULT_LINEAR_SCAN_THRESHOLD) 5419196e402aSHarshad Shirwadkar set_opt2(sb, MB_OPTIMIZE_SCAN); 542027b38686SOjaswin Mujoo else 5421196e402aSHarshad Shirwadkar clear_opt2(sb, MB_OPTIMIZE_SCAN); 542227b38686SOjaswin Mujoo } 5423196e402aSHarshad Shirwadkar 5424f9ae9cf5STheodore Ts'o err = ext4_mb_init(sb); 5425f9ae9cf5STheodore Ts'o if (err) { 5426f9ae9cf5STheodore Ts'o ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 5427f9ae9cf5STheodore Ts'o err); 5428dcf2d804STao Ma goto failed_mount5; 5429c2774d84SAneesh Kumar K.V } 5430c2774d84SAneesh Kumar K.V 5431027f14f5STheodore Ts'o /* 5432027f14f5STheodore Ts'o * We can only set up the journal commit callback once 5433027f14f5STheodore Ts'o * mballoc is initialized 5434027f14f5STheodore Ts'o */ 5435027f14f5STheodore Ts'o if (sbi->s_journal) 5436027f14f5STheodore Ts'o sbi->s_journal->j_commit_callback = 5437027f14f5STheodore Ts'o ext4_journal_commit_callback; 5438027f14f5STheodore Ts'o 5439d5e03cbbSTheodore Ts'o block = ext4_count_free_clusters(sb); 5440d5e03cbbSTheodore Ts'o ext4_free_blocks_count_set(sbi->s_es, 5441d5e03cbbSTheodore Ts'o EXT4_C2B(sbi, block)); 5442908c7f19STejun Heo err = percpu_counter_init(&sbi->s_freeclusters_counter, block, 5443908c7f19STejun Heo GFP_KERNEL); 5444d5e03cbbSTheodore Ts'o if (!err) { 5445d5e03cbbSTheodore Ts'o unsigned long freei = ext4_count_free_inodes(sb); 5446d5e03cbbSTheodore Ts'o sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); 5447908c7f19STejun Heo err = 
percpu_counter_init(&sbi->s_freeinodes_counter, freei, 5448908c7f19STejun Heo GFP_KERNEL); 5449d5e03cbbSTheodore Ts'o } 5450d5e03cbbSTheodore Ts'o if (!err) 5451d5e03cbbSTheodore Ts'o err = percpu_counter_init(&sbi->s_dirs_counter, 5452908c7f19STejun Heo ext4_count_dirs(sb), GFP_KERNEL); 5453d5e03cbbSTheodore Ts'o if (!err) 5454908c7f19STejun Heo err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, 5455908c7f19STejun Heo GFP_KERNEL); 5456c8585c6fSDaeho Jeong if (!err) 5457efc61345SEric Whitney err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0, 5458efc61345SEric Whitney GFP_KERNEL); 5459efc61345SEric Whitney if (!err) 5460bbd55937SEric Biggers err = percpu_init_rwsem(&sbi->s_writepages_rwsem); 5461c8585c6fSDaeho Jeong 5462d5e03cbbSTheodore Ts'o if (err) { 5463d5e03cbbSTheodore Ts'o ext4_msg(sb, KERN_ERR, "insufficient memory"); 5464d5e03cbbSTheodore Ts'o goto failed_mount6; 5465d5e03cbbSTheodore Ts'o } 5466d5e03cbbSTheodore Ts'o 5467e2b911c5SDarrick J. Wong if (ext4_has_feature_flex_bg(sb)) 5468d5e03cbbSTheodore Ts'o if (!ext4_fill_flex_info(sb)) { 5469d5e03cbbSTheodore Ts'o ext4_msg(sb, KERN_ERR, 5470d5e03cbbSTheodore Ts'o "unable to initialize " 5471d5e03cbbSTheodore Ts'o "flex_bg meta info!"); 54728f6840c4SYang Yingliang ret = -ENOMEM; 5473d5e03cbbSTheodore Ts'o goto failed_mount6; 5474d5e03cbbSTheodore Ts'o } 5475d5e03cbbSTheodore Ts'o 5476bfff6873SLukas Czerner err = ext4_register_li_request(sb, first_not_zeroed); 5477bfff6873SLukas Czerner if (err) 5478dcf2d804STao Ma goto failed_mount6; 5479bfff6873SLukas Czerner 5480b5799018STheodore Ts'o err = ext4_register_sysfs(sb); 5481dcf2d804STao Ma if (err) 5482dcf2d804STao Ma goto failed_mount7; 54833197ebdbSTheodore Ts'o 548402f310fcSJan Kara err = ext4_init_orphan_info(sb); 548502f310fcSJan Kara if (err) 548602f310fcSJan Kara goto failed_mount8; 54879b2ff357SJan Kara #ifdef CONFIG_QUOTA 54889b2ff357SJan Kara /* Enable quota usage during mount. 
*/ 5489bc98a42cSDavid Howells if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) { 54909b2ff357SJan Kara err = ext4_enable_quotas(sb); 54919b2ff357SJan Kara if (err) 549202f310fcSJan Kara goto failed_mount9; 54939b2ff357SJan Kara } 54949b2ff357SJan Kara #endif /* CONFIG_QUOTA */ 54959b2ff357SJan Kara 5496bc71726cSzhangyi (F) /* 5497bc71726cSzhangyi (F) * Save the original bdev mapping's wb_err value which could be 5498bc71726cSzhangyi (F) * used to detect the metadata async write error. 5499bc71726cSzhangyi (F) */ 5500bc71726cSzhangyi (F) spin_lock_init(&sbi->s_bdev_wb_lock); 5501bc71726cSzhangyi (F) errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, 5502bc71726cSzhangyi (F) &sbi->s_bdev_wb_err); 5503bc71726cSzhangyi (F) sb->s_bdev->bd_super = sb; 5504617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; 5505617ba13bSMingming Cao ext4_orphan_cleanup(sb, es); 5506617ba13bSMingming Cao EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; 55079b6641ddSYe Bin /* 55089b6641ddSYe Bin * Update the checksum after updating free space/inode counters and 55099b6641ddSYe Bin * ext4_orphan_cleanup. Otherwise the superblock can have an incorrect 55109b6641ddSYe Bin * checksum in the buffer cache until it is written out and 55119b6641ddSYe Bin * e2fsprogs programs trying to open a file system immediately 55129b6641ddSYe Bin * after it is mounted can fail. 
55139b6641ddSYe Bin */ 55149b6641ddSYe Bin ext4_superblock_csum_set(sb); 55150390131bSFrank Mayhar if (needs_recovery) { 5516b31e1552SEric Sandeen ext4_msg(sb, KERN_INFO, "recovery complete"); 551711215630SJan Kara err = ext4_mark_recovery_complete(sb, es); 551811215630SJan Kara if (err) 551902f310fcSJan Kara goto failed_mount9; 55200390131bSFrank Mayhar } 55210390131bSFrank Mayhar 552270200574SChristoph Hellwig if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev)) 552379add3a3SLukas Czerner ext4_msg(sb, KERN_WARNING, 552470200574SChristoph Hellwig "mounting with \"discard\" option, but the device does not support discard"); 552579add3a3SLukas Czerner 552666e61a9eSTheodore Ts'o if (es->s_error_count) 552766e61a9eSTheodore Ts'o mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ 5528ac27a0ecSDave Kleikamp 5529efbed4dcSTheodore Ts'o /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ 5530efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); 5531efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); 5532efbed4dcSTheodore Ts'o ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); 55331cf006edSDmitry Monakhov atomic_set(&sbi->s_warning_count, 0); 55341cf006edSDmitry Monakhov atomic_set(&sbi->s_msg_count, 0); 5535efbed4dcSTheodore Ts'o 5536ac27a0ecSDave Kleikamp return 0; 5537ac27a0ecSDave Kleikamp 553802f310fcSJan Kara failed_mount9: 553902f310fcSJan Kara ext4_release_orphan_info(sb); 554072ba7450STheodore Ts'o failed_mount8: 5541ebd173beSTheodore Ts'o ext4_unregister_sysfs(sb); 5542cb8d53d2SEric Biggers kobject_put(&sbi->s_kobj); 5543dcf2d804STao Ma failed_mount7: 5544dcf2d804STao Ma ext4_unregister_li_request(sb); 5545dcf2d804STao Ma failed_mount6: 5546f9ae9cf5STheodore Ts'o ext4_mb_release(sb); 55477c990728SSuraj Jitindar Singh rcu_read_lock(); 55487c990728SSuraj Jitindar Singh flex_groups = rcu_dereference(sbi->s_flex_groups); 55497c990728SSuraj 
Jitindar Singh if (flex_groups) { 55507c990728SSuraj Jitindar Singh for (i = 0; i < sbi->s_flex_groups_allocated; i++) 55517c990728SSuraj Jitindar Singh kvfree(flex_groups[i]); 55527c990728SSuraj Jitindar Singh kvfree(flex_groups); 55537c990728SSuraj Jitindar Singh } 55547c990728SSuraj Jitindar Singh rcu_read_unlock(); 5555d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_freeclusters_counter); 5556d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_freeinodes_counter); 5557d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_dirs_counter); 5558d5e03cbbSTheodore Ts'o percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 5559efc61345SEric Whitney percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit); 5560bbd55937SEric Biggers percpu_free_rwsem(&sbi->s_writepages_rwsem); 556100764937SAzat Khuzhin failed_mount5: 5562f9ae9cf5STheodore Ts'o ext4_ext_release(sb); 5563f9ae9cf5STheodore Ts'o ext4_release_system_zone(sb); 5564f9ae9cf5STheodore Ts'o failed_mount4a: 556594bf608aSAl Viro dput(sb->s_root); 556632a9bb57SManish Katiyar sb->s_root = NULL; 556794bf608aSAl Viro failed_mount4: 5568b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, "mount failed"); 55692e8fa54eSJan Kara if (EXT4_SB(sb)->rsv_conversion_wq) 55702e8fa54eSJan Kara destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); 55714c0425ffSMingming Cao failed_mount_wq: 5572dec214d0STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_inode_cache); 5573dec214d0STahsin Erdogan sbi->s_ea_inode_cache = NULL; 557450c15df6SChengguang Xu 557547387409STahsin Erdogan ext4_xattr_destroy_cache(sbi->s_ea_block_cache); 557647387409STahsin Erdogan sbi->s_ea_block_cache = NULL; 557750c15df6SChengguang Xu 55780390131bSFrank Mayhar if (sbi->s_journal) { 5579bb9464e0Syangerkun /* flush s_error_work before journal destroy. 
*/ 5580bb9464e0Syangerkun flush_work(&sbi->s_error_work); 5581dab291afSMingming Cao jbd2_journal_destroy(sbi->s_journal); 558247b4a50bSJan Kara sbi->s_journal = NULL; 55830390131bSFrank Mayhar } 558450460fe8SDarrick J. Wong failed_mount3a: 5585d3922a77SZheng Liu ext4_es_unregister_shrinker(sbi); 5586eb68d0e2SZheng Liu failed_mount3: 5587bb9464e0Syangerkun /* flush s_error_work before sbi destroy */ 5588c92dc856SJan Kara flush_work(&sbi->s_error_work); 55892a4ae3bcSJan Kara del_timer_sync(&sbi->s_err_report); 5590618f0031SPavel Skripkin ext4_stop_mmpd(sbi); 5591a4e6a511SJason Yan ext4_group_desc_free(sbi); 5592ac27a0ecSDave Kleikamp failed_mount: 55930441984aSDarrick J. Wong if (sbi->s_chksum_driver) 55940441984aSDarrick J. Wong crypto_free_shash(sbi->s_chksum_driver); 5595c83ad55eSGabriel Krisman Bertazi 55965298d4bfSChristoph Hellwig #if IS_ENABLED(CONFIG_UNICODE) 5597f8f4acb6SDaniel Rosenberg utf8_unload(sb->s_encoding); 5598c83ad55eSGabriel Krisman Bertazi #endif 5599c83ad55eSGabriel Krisman Bertazi 5600ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 5601a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 56020ba33facSTheodore Ts'o kfree(get_qf_name(sb, sbi, i)); 5603ac27a0ecSDave Kleikamp #endif 5604ac4acb1fSEric Biggers fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); 5605afd09b61SAlexey Makhalov /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */ 5606a7a79c29SJason Yan brelse(sbi->s_sbh); 5607afd09b61SAlexey Makhalov ext4_blkdev_remove(sbi); 5608ac27a0ecSDave Kleikamp out_fail: 5609ac27a0ecSDave Kleikamp sb->s_fs_info = NULL; 561007aa2ea1SLukas Czerner return err ? 
err : ret;
}

/*
 * Mount entry point used by the new mount API (via ext4_get_tree below).
 * Allocates the in-memory sb info, applies the sb-block override from the
 * parsed fs_context, and delegates the heavy lifting to __ext4_fill_super().
 */
static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct ext4_fs_context *ctx = fc->fs_private;
	struct ext4_sb_info *sbi;
	const char *descr;
	int ret;

	sbi = ext4_alloc_sbi(sb);
	if (!sbi)
		return -ENOMEM;

	fc->s_fs_info = sbi;

	/* Cleanup superblock name */
	strreplace(sb->s_id, '/', '!');

	sbi->s_sb_block = 1;	/* Default super block location */
	if (ctx->spec & EXT4_SPEC_s_sb_block)
		sbi->s_sb_block = ctx->s_sb_block;

	ret = __ext4_fill_super(fc, sb);
	if (ret < 0)
		goto free_sbi;

	/* Pick the mount-message suffix describing the journalling mode. */
	if (sbi->s_journal) {
		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
			descr = " journalled data mode";
		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
			descr = " ordered data mode";
		else
			descr = " writeback data mode";
	} else
		descr = "out journal";

	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem %pU with%s. "
			 "Quota mode: %s.", &sb->s_uuid, descr,
			 ext4_quota_mode(sb));

	/* Update the s_overhead_clusters if necessary */
	ext4_update_overhead(sb, false);
	return 0;

free_sbi:
	ext4_free_sbi(sbi);
	fc->s_fs_info = NULL;
	return ret;
}

/* fs_context_operations hook: mount via the generic block-device helper. */
static int ext4_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, ext4_fill_super);
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Propagate mount-time batching/commit tunables into the journal. */
	journal->j_commit_interval = sbi->s_commit_interval;
	journal->j_min_batch_time = sbi->s_min_batch_time;
	journal->j_max_batch_time = sbi->s_max_batch_time;
	ext4_fc_init(sb, journal);

	/* Flag updates must be done under j_state_lock. */
	write_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JBD2_BARRIER;
	else
		journal->j_flags &= ~JBD2_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
	write_unlock(&journal->j_state_lock);
}

/*
 * Look up and sanity-check the journal inode.  Returns the inode with a
 * reference held, or NULL on any validation failure (caller logs nothing
 * further; errors are reported here via ext4_msg()).
 */
static struct inode *ext4_get_journal_inode(struct super_block *sb,
					    unsigned int journal_inum)
{
	struct inode *journal_inode;

	/*
	 * Test for the existence of a valid inode on disk.  Bad things
	 * happen if we iget() an unused inode, as the subsequent iput()
	 * will try to delete it.
	 */
	journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
	if (IS_ERR(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "no journal found");
		return NULL;
	}
	if (!journal_inode->i_nlink) {
		make_bad_inode(journal_inode);
		iput(journal_inode);
		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
		return NULL;
	}

	ext4_debug("Journal inode found at %p: %lld bytes\n",
		  journal_inode, journal_inode->i_size);
	if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
		ext4_msg(sb, KERN_ERR, "invalid journal inode");
		iput(journal_inode);
		return NULL;
	}
	return journal_inode;
}

/*
 * Create the jbd2 journal backed by an inode inside this filesystem.
 * Returns NULL on failure; on success the journal is initialised with
 * the current mount parameters.
 */
static journal_t *ext4_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	journal_inode = ext4_get_journal_inode(sb, journal_inum);
	if (!journal_inode)
		return NULL;

	journal = jbd2_journal_init_inode(journal_inode);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
		iput(journal_inode);
		return NULL;
	}
	journal->j_private = sb;
	ext4_init_journal_params(sb, journal);
	return journal;
}

/*
 * Create the jbd2 journal backed by an external block device: open the
 * device, validate its ext4 journal-dev superblock (magic, feature flag,
 * checksum, UUID match against our superblock), then hand it to jbd2.
 * Returns NULL on failure with the device released.
 */
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block *es;
	struct block_device *bdev;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return NULL;

	bdev = ext4_blkdev_get(j_dev, sb);
	if (bdev == NULL)
		return NULL;

	blocksize = sb->s_blocksize;
	hblock = bdev_logical_block_size(bdev);
	if (blocksize < hblock) {
		ext4_msg(sb, KERN_ERR,
			"blocksize too small for journal device");
		goto out_bdev;
	}

	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
	set_blocksize(bdev, blocksize);
	if (!(bh = __bread(bdev, sb_block, blocksize))) {
		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
			"external journal");
		goto out_bdev;
	}

	es = (struct ext4_super_block *) (bh->b_data + offset);
	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
	    !(le32_to_cpu(es->s_feature_incompat) &
	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
					"bad superblock");
		brelse(bh);
		goto out_bdev;
	}

	if ((le32_to_cpu(es->s_feature_ro_compat) &
	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    es->s_checksum != ext4_superblock_csum(sb, es)) {
		ext4_msg(sb, KERN_ERR, "external journal has "
				       "corrupt superblock");
		brelse(bh);
		goto out_bdev;
	}

	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
		brelse(bh);
		goto out_bdev;
	}

	len = ext4_blocks_count(es);
	start = sb_block + 1;
	brelse(bh);	/* we're done with the superblock */

	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
					start, len, blocksize);
	if (!journal) {
		ext4_msg(sb, KERN_ERR, "failed to create device journal");
		goto out_bdev;
	}
	journal->j_private = sb;
	if (ext4_read_bh_lock(journal->j_sb_buffer, REQ_META | REQ_PRIO, true)) {
		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
		goto out_journal;
	}
	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
		ext4_msg(sb, KERN_ERR, "External journal has more than one "
					"user (unsupported) - %d",
			be32_to_cpu(journal->j_superblock->s_nr_users));
		goto out_journal;
	}
	EXT4_SB(sb)->s_journal_bdev = bdev;
	ext4_init_journal_params(sb, journal);
	return journal;

out_journal:
	jbd2_journal_destroy(journal);
out_bdev:
	ext4_blkdev_put(bdev);
	return NULL;
}

/*
 * Locate, validate and load the journal (inode- or device-backed), replay
 * it if recovery is needed, and attach it to the superblock.  Returns 0 or
 * a negative errno.
 */
static int ext4_load_journal(struct super_block *sb,
			     struct ext4_super_block *es,
			     unsigned long journal_devnum)
{
	journal_t *journal;
	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
	dev_t journal_dev;
	int err = 0;
	int really_read_only;
	int journal_dev_ro;

	if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
		return -EFSCORRUPTED;

	/* A mount-time journal_dev= option overrides the on-disk device. */
	if (journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
			"numbers have changed");
		journal_dev = new_decode_dev(journal_devnum);
	} else
		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));

	if (journal_inum && journal_dev) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem has both journal inode and journal device!");
		return -EINVAL;
	}

	if (journal_inum) {
		journal = ext4_get_journal(sb, journal_inum);
		if (!journal)
			return -EINVAL;
	} else {
		journal = ext4_get_dev_journal(sb, journal_dev);
		if (!journal)
			return -EINVAL;
	}

	journal_dev_ro
= bdev_read_only(journal->j_dev);
	really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;

	if (journal_dev_ro && !sb_rdonly(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "journal device read-only, try mounting with '-o ro'");
		err = -EROFS;
		goto err_out;
	}

	/*
	 * Are we loading a blank journal or performing recovery after a
	 * crash?  For recovery, we need to check in advance whether we
	 * can get read-write access to the device.
	 */
	if (ext4_has_feature_journal_needs_recovery(sb)) {
		if (sb_rdonly(sb)) {
			ext4_msg(sb, KERN_INFO, "INFO: recovery "
					"required on readonly filesystem");
			if (really_read_only) {
				ext4_msg(sb, KERN_ERR, "write access "
					"unavailable, cannot proceed "
					"(try mounting with noload)");
				err = -EROFS;
				goto err_out;
			}
			ext4_msg(sb, KERN_INFO, "write access will "
			       "be enabled during recovery");
		}
	}

	if (!(journal->j_flags & JBD2_BARRIER))
		ext4_msg(sb, KERN_INFO, "barriers disabled");

	if (!ext4_has_feature_journal_needs_recovery(sb))
		err = jbd2_journal_wipe(journal, !really_read_only);
	if (!err) {
		/*
		 * Preserve the superblock's error-log area across
		 * jbd2_journal_load(): journal replay may overwrite *es with
		 * an older on-disk copy, and we don't want to lose the
		 * in-memory error history.
		 */
		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
		if (save)
			memcpy(save, ((char *) es) +
			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
		err = jbd2_journal_load(journal);
		if (save)
			memcpy(((char *) es) + EXT4_S_ERR_START,
			       save, EXT4_S_ERR_LEN);
		kfree(save);
	}

	if (err) {
		ext4_msg(sb, KERN_ERR, "error loading journal");
		goto err_out;
	}

	EXT4_SB(sb)->s_journal = journal;
	err = ext4_clear_journal_err(sb, es);
	if (err) {
		EXT4_SB(sb)->s_journal = NULL;
		jbd2_journal_destroy(journal);
		return err;
	}

	if (!really_read_only && journal_devnum &&
	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
		es->s_journal_dev = cpu_to_le32(journal_devnum);

		/* Make sure we flush the recovery flag to disk. */
		ext4_commit_super(sb);
	}

	return 0;

err_out:
	jbd2_journal_destroy(journal);
	return err;
}

/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
static void ext4_update_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *sbh = sbi->s_sbh;

	lock_buffer(sbh);
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.  This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & SB_RDONLY))
		ext4_update_tstamp(es, s_wtime);
	es->s_kbytes_written =
		cpu_to_le64(sbi->s_kbytes_written +
		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
		      sbi->s_sectors_written_start) >> 1));
	if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
		ext4_free_blocks_count_set(es,
			EXT4_C2B(sbi, percpu_counter_sum_positive(
				&sbi->s_freeclusters_counter)));
	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		es->s_free_inodes_count =
			cpu_to_le32(percpu_counter_sum_positive(
				&sbi->s_freeinodes_counter));
	/* Copy error information to the on-disk superblock */
	spin_lock(&sbi->s_error_lock);
	if (sbi->s_add_error_count > 0) {
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		if (!es->s_first_error_time && !es->s_first_error_time_hi) {
			__ext4_update_tstamp(&es->s_first_error_time,
					     &es->s_first_error_time_hi,
					     sbi->s_first_error_time);
			/*
			 * NOTE(review): strncpy here fills a fixed-size
			 * on-disk field; presumably it need not be
			 * NUL-terminated on disk — confirm before replacing
			 * with a terminating copy helper.
			 */
			strncpy(es->s_first_error_func, sbi->s_first_error_func,
				sizeof(es->s_first_error_func));
			es->s_first_error_line =
				cpu_to_le32(sbi->s_first_error_line);
			es->s_first_error_ino =
				cpu_to_le32(sbi->s_first_error_ino);
			es->s_first_error_block =
				cpu_to_le64(sbi->s_first_error_block);
			es->s_first_error_errcode =
				ext4_errno_to_code(sbi->s_first_error_code);
		}
		__ext4_update_tstamp(&es->s_last_error_time,
				     &es->s_last_error_time_hi,
				     sbi->s_last_error_time);
		strncpy(es->s_last_error_func, sbi->s_last_error_func,
			sizeof(es->s_last_error_func));
		es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
		es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
		es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
		es->s_last_error_errcode =
				ext4_errno_to_code(sbi->s_last_error_code);
		/*
		 * Start the daily error reporting function if it hasn't been
		 * started already
		 */
		if (!es->s_error_count)
			mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
		le32_add_cpu(&es->s_error_count, sbi->s_add_error_count);
		sbi->s_add_error_count = 0;
	}
	spin_unlock(&sbi->s_error_lock);

	/* Checksum must be recomputed while the buffer is still locked. */
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbh);
}

/*
 * Synchronously write the primary superblock buffer to disk.  Refreshes the
 * on-disk copy via ext4_update_super() first.  Returns 0 on success or a
 * negative errno (-EINVAL/-ENODEV/-EIO).
 */
static int ext4_commit_super(struct super_block *sb)
{
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;

	if (!sbh)
		return -EINVAL;
	if (block_device_ejected(sb))
		return -ENODEV;

	ext4_update_super(sb);

	lock_buffer(sbh);
	/* Buffer got discarded which means block device got invalidated */
	if (!buffer_mapped(sbh)) {
		unlock_buffer(sbh);
		return -EIO;
	}

	if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext4_msg(sb, KERN_ERR, "previous I/O error to "
		       "superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	get_bh(sbh);
	/* Clear potential dirty bit if it was journalled update */
	clear_buffer_dirty(sbh);
	sbh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE | REQ_SYNC |
		  (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
	wait_on_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		ext4_msg(sb, KERN_ERR, "I/O error while writing "
		       "superblock");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
		return -EIO;
	}
	return 0;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact.
 */
static int ext4_mark_recovery_complete(struct super_block *sb,
				       struct ext4_super_block *es)
{
	int err;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	if (!ext4_has_feature_journal(sb)) {
		if (journal != NULL) {
			ext4_error(sb, "Journal got removed while the fs was "
				   "mounted!");
			return -EFSCORRUPTED;
		}
		return 0;
	}
	jbd2_journal_lock_updates(journal);
	err = jbd2_journal_flush(journal, 0);
	if (err < 0)
		goto out;

	if (sb_rdonly(sb) && (ext4_has_feature_journal_needs_recovery(sb) ||
	    ext4_has_feature_orphan_present(sb))) {
		if (!ext4_orphan_file_empty(sb)) {
			ext4_error(sb, "Orphan file not empty on read-only fs.");
			err = -EFSCORRUPTED;
			goto out;
		}
		ext4_clear_feature_journal_needs_recovery(sb);
		ext4_clear_feature_orphan_present(sb);
		ext4_commit_super(sb);
	}
out:
	jbd2_journal_unlock_updates(journal);
	return err;
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static int ext4_clear_journal_err(struct super_block *sb,
				  struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	if (!ext4_has_feature_journal(sb)) {
		ext4_error(sb, "Journal got removed while the fs was mounted!");
		return -EFSCORRUPTED;
	}

	journal = EXT4_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext4_error() or ext4_abort()
	 */

	j_errno = jbd2_journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
		ext4_warning(sb, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext4_warning(sb, "Marking fs in need of filesystem check.");

		/* Persist the error state so e2fsck will run a full check. */
		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super(sb);

		jbd2_journal_clear_err(journal);
		jbd2_journal_update_sb_errno(journal);
	}
	return 0;
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext4_force_commit(struct super_block *sb)
{
	journal_t *journal;

	if (sb_rdonly(sb))
		return 0;

	journal = EXT4_SB(sb)->s_journal;
	return ext4_journal_force_commit(journal);
}

/*
 * ->sync_fs superblock operation: flush pending reservations and dquots,
 * kick (and optionally wait for) a journal commit, and issue a cache-flush
 * barrier when the commit won't do it for us.
 */
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;
	tid_t target;
	bool needs_barrier = false;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return 0;

	trace_ext4_sync_fs(sb, wait);
	flush_workqueue(sbi->rsv_conversion_wq);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	/*
	 * Data writeback is possible w/o journal transaction, so barrier must
	 * being sent at the end of the function. But we can skip it if
	 * transaction_commit will do it for us.
	 */
	if (sbi->s_journal) {
		target = jbd2_get_latest_transaction(sbi->s_journal);
		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
			needs_barrier = true;

		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
			if (wait)
				ret = jbd2_log_wait_commit(sbi->s_journal,
							   target);
		}
	} else if (wait && test_opt(sb, BARRIER))
		needs_barrier = true;
	if (needs_barrier) {
		int err;
		err = blkdev_issue_flush(sb->s_bdev);
		if (!ret)
			ret = err;
	}

	return ret;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 *
 * Note that only this function cannot bring a filesystem to be in a clean
 * state independently. It relies on upper layer to stop all data & metadata
 * modifications.
6232ac27a0ecSDave Kleikamp */ 6233c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb) 6234ac27a0ecSDave Kleikamp { 6235c4be0c1dSTakashi Sato int error = 0; 6236c4be0c1dSTakashi Sato journal_t *journal; 6237ac27a0ecSDave Kleikamp 6238bc98a42cSDavid Howells if (sb_rdonly(sb)) 62399ca92389STheodore Ts'o return 0; 62409ca92389STheodore Ts'o 6241c4be0c1dSTakashi Sato journal = EXT4_SB(sb)->s_journal; 6242ac27a0ecSDave Kleikamp 6243bb044576STheodore Ts'o if (journal) { 6244ac27a0ecSDave Kleikamp /* Now we set up the journal barrier. */ 6245dab291afSMingming Cao jbd2_journal_lock_updates(journal); 62467ffe1ea8SHidehiro Kawai 62477ffe1ea8SHidehiro Kawai /* 6248bb044576STheodore Ts'o * Don't clear the needs_recovery flag if we failed to 6249bb044576STheodore Ts'o * flush the journal. 62507ffe1ea8SHidehiro Kawai */ 625101d5d965SLeah Rumancik error = jbd2_journal_flush(journal, 0); 62526b0310fbSEric Sandeen if (error < 0) 62536b0310fbSEric Sandeen goto out; 6254ac27a0ecSDave Kleikamp 6255ac27a0ecSDave Kleikamp /* Journal blocked and flushed, clear needs_recovery flag. */ 6256e2b911c5SDarrick J. Wong ext4_clear_feature_journal_needs_recovery(sb); 625702f310fcSJan Kara if (ext4_orphan_file_empty(sb)) 625802f310fcSJan Kara ext4_clear_feature_orphan_present(sb); 6259c642dc9eSEric Sandeen } 6260c642dc9eSEric Sandeen 62614392fbc4SJan Kara error = ext4_commit_super(sb); 62626b0310fbSEric Sandeen out: 6263bb044576STheodore Ts'o if (journal) 62648e8ad8a5SJan Kara /* we rely on upper layer to stop further updates */ 6265bb044576STheodore Ts'o jbd2_journal_unlock_updates(journal); 62666b0310fbSEric Sandeen return error; 6267ac27a0ecSDave Kleikamp } 6268ac27a0ecSDave Kleikamp 6269ac27a0ecSDave Kleikamp /* 6270ac27a0ecSDave Kleikamp * Called by LVM after the snapshot is done. We need to reset the RECOVER 6271ac27a0ecSDave Kleikamp * flag here, even though the filesystem is not technically dirty yet. 
6272ac27a0ecSDave Kleikamp */ 6273c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb) 6274ac27a0ecSDave Kleikamp { 6275bc98a42cSDavid Howells if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb))) 62769ca92389STheodore Ts'o return 0; 62779ca92389STheodore Ts'o 6278c642dc9eSEric Sandeen if (EXT4_SB(sb)->s_journal) { 62799ca92389STheodore Ts'o /* Reset the needs_recovery flag before the fs is unlocked. */ 6280e2b911c5SDarrick J. Wong ext4_set_feature_journal_needs_recovery(sb); 628102f310fcSJan Kara if (ext4_has_feature_orphan_file(sb)) 628202f310fcSJan Kara ext4_set_feature_orphan_present(sb); 6283c642dc9eSEric Sandeen } 6284c642dc9eSEric Sandeen 62854392fbc4SJan Kara ext4_commit_super(sb); 6286c4be0c1dSTakashi Sato return 0; 6287ac27a0ecSDave Kleikamp } 6288ac27a0ecSDave Kleikamp 6289673c6100STheodore Ts'o /* 6290673c6100STheodore Ts'o * Structure to save mount options for ext4_remount's benefit 6291673c6100STheodore Ts'o */ 6292673c6100STheodore Ts'o struct ext4_mount_options { 6293673c6100STheodore Ts'o unsigned long s_mount_opt; 6294a2595b8aSTheodore Ts'o unsigned long s_mount_opt2; 629508cefc7aSEric W. Biederman kuid_t s_resuid; 629608cefc7aSEric W. 
Biederman kgid_t s_resgid; 6297673c6100STheodore Ts'o unsigned long s_commit_interval; 6298673c6100STheodore Ts'o u32 s_min_batch_time, s_max_batch_time; 6299673c6100STheodore Ts'o #ifdef CONFIG_QUOTA 6300673c6100STheodore Ts'o int s_jquota_fmt; 6301a2d4a646SJan Kara char *s_qf_names[EXT4_MAXQUOTAS]; 6302673c6100STheodore Ts'o #endif 6303673c6100STheodore Ts'o }; 6304673c6100STheodore Ts'o 6305960e0ab6SLukas Czerner static int __ext4_remount(struct fs_context *fc, struct super_block *sb) 6306ac27a0ecSDave Kleikamp { 63077edfd85bSLukas Czerner struct ext4_fs_context *ctx = fc->fs_private; 6308617ba13bSMingming Cao struct ext4_super_block *es; 6309617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6310960e0ab6SLukas Czerner unsigned long old_sb_flags; 6311617ba13bSMingming Cao struct ext4_mount_options old_opts; 63128a266467STheodore Ts'o ext4_group_t g; 6313c5e06d10SJohann Lombardi int err = 0; 6314ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 63153bbef91bSAustin Kim int enable_quota = 0; 631603dafb5fSChen Gang int i, j; 631733458eabSTheodore Ts'o char *to_free[EXT4_MAXQUOTAS]; 6318ac27a0ecSDave Kleikamp #endif 6319b237e304SHarshad Shirwadkar 632021ac738eSChengguang Xu 6321ac27a0ecSDave Kleikamp /* Store the original options */ 6322ac27a0ecSDave Kleikamp old_sb_flags = sb->s_flags; 6323ac27a0ecSDave Kleikamp old_opts.s_mount_opt = sbi->s_mount_opt; 6324a2595b8aSTheodore Ts'o old_opts.s_mount_opt2 = sbi->s_mount_opt2; 6325ac27a0ecSDave Kleikamp old_opts.s_resuid = sbi->s_resuid; 6326ac27a0ecSDave Kleikamp old_opts.s_resgid = sbi->s_resgid; 6327ac27a0ecSDave Kleikamp old_opts.s_commit_interval = sbi->s_commit_interval; 632830773840STheodore Ts'o old_opts.s_min_batch_time = sbi->s_min_batch_time; 632930773840STheodore Ts'o old_opts.s_max_batch_time = sbi->s_max_batch_time; 6330ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6331ac27a0ecSDave Kleikamp old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 6332a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 
633303dafb5fSChen Gang if (sbi->s_qf_names[i]) { 633433458eabSTheodore Ts'o char *qf_name = get_qf_name(sb, sbi, i); 633533458eabSTheodore Ts'o 633633458eabSTheodore Ts'o old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL); 633703dafb5fSChen Gang if (!old_opts.s_qf_names[i]) { 633803dafb5fSChen Gang for (j = 0; j < i; j++) 633903dafb5fSChen Gang kfree(old_opts.s_qf_names[j]); 634003dafb5fSChen Gang return -ENOMEM; 634103dafb5fSChen Gang } 634203dafb5fSChen Gang } else 634303dafb5fSChen Gang old_opts.s_qf_names[i] = NULL; 6344ac27a0ecSDave Kleikamp #endif 6345e4e58e5dSOjaswin Mujoo if (!(ctx->spec & EXT4_SPEC_JOURNAL_IOPRIO)) { 6346b3881f74STheodore Ts'o if (sbi->s_journal && sbi->s_journal->j_task->io_context) 63477edfd85bSLukas Czerner ctx->journal_ioprio = 6348b237e304SHarshad Shirwadkar sbi->s_journal->j_task->io_context->ioprio; 6349e4e58e5dSOjaswin Mujoo else 6350e4e58e5dSOjaswin Mujoo ctx->journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 6351e4e58e5dSOjaswin Mujoo 6352e4e58e5dSOjaswin Mujoo } 6353ac27a0ecSDave Kleikamp 63547edfd85bSLukas Czerner ext4_apply_options(fc, sb); 6355ac27a0ecSDave Kleikamp 63566b992ff2SDarrick J. Wong if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ 63576b992ff2SDarrick J. Wong test_opt(sb, JOURNAL_CHECKSUM)) { 63586b992ff2SDarrick J. Wong ext4_msg(sb, KERN_ERR, "changing journal_checksum " 63592d5b86e0SEric Sandeen "during remount not supported; ignoring"); 63602d5b86e0SEric Sandeen sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; 6361c6d3d56dSDarrick J. Wong } 6362c6d3d56dSDarrick J. 
Wong 63636ae6514bSPiotr Sarna if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 63646ae6514bSPiotr Sarna if (test_opt2(sb, EXPLICIT_DELALLOC)) { 63656ae6514bSPiotr Sarna ext4_msg(sb, KERN_ERR, "can't mount with " 63666ae6514bSPiotr Sarna "both data=journal and delalloc"); 63676ae6514bSPiotr Sarna err = -EINVAL; 63686ae6514bSPiotr Sarna goto restore_opts; 63696ae6514bSPiotr Sarna } 63706ae6514bSPiotr Sarna if (test_opt(sb, DIOREAD_NOLOCK)) { 63716ae6514bSPiotr Sarna ext4_msg(sb, KERN_ERR, "can't mount with " 63726ae6514bSPiotr Sarna "both data=journal and dioread_nolock"); 63736ae6514bSPiotr Sarna err = -EINVAL; 63746ae6514bSPiotr Sarna goto restore_opts; 63756ae6514bSPiotr Sarna } 6376ab04df78SJan Kara } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { 6377ab04df78SJan Kara if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 6378ab04df78SJan Kara ext4_msg(sb, KERN_ERR, "can't mount with " 6379ab04df78SJan Kara "journal_async_commit in data=ordered mode"); 6380ab04df78SJan Kara err = -EINVAL; 6381ab04df78SJan Kara goto restore_opts; 6382ab04df78SJan Kara } 6383923ae0ffSRoss Zwisler } 6384923ae0ffSRoss Zwisler 6385cdb7ee4cSTahsin Erdogan if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) { 6386cdb7ee4cSTahsin Erdogan ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount"); 6387cdb7ee4cSTahsin Erdogan err = -EINVAL; 6388cdb7ee4cSTahsin Erdogan goto restore_opts; 6389cdb7ee4cSTahsin Erdogan } 6390cdb7ee4cSTahsin Erdogan 63919b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 6392124e7c61SGabriel Krisman Bertazi ext4_abort(sb, ESHUTDOWN, "Abort forced by user"); 6393ac27a0ecSDave Kleikamp 63941751e8a6SLinus Torvalds sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | 63951751e8a6SLinus Torvalds (test_opt(sb, POSIX_ACL) ? 
SB_POSIXACL : 0); 6396ac27a0ecSDave Kleikamp 6397ac27a0ecSDave Kleikamp es = sbi->s_es; 6398ac27a0ecSDave Kleikamp 6399b3881f74STheodore Ts'o if (sbi->s_journal) { 6400617ba13bSMingming Cao ext4_init_journal_params(sb, sbi->s_journal); 64017edfd85bSLukas Czerner set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); 6402b3881f74STheodore Ts'o } 6403ac27a0ecSDave Kleikamp 6404c92dc856SJan Kara /* Flush outstanding errors before changing fs state */ 6405c92dc856SJan Kara flush_work(&sbi->s_error_work); 6406c92dc856SJan Kara 6407960e0ab6SLukas Czerner if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { 64089b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) { 6409ac27a0ecSDave Kleikamp err = -EROFS; 6410ac27a0ecSDave Kleikamp goto restore_opts; 6411ac27a0ecSDave Kleikamp } 6412ac27a0ecSDave Kleikamp 6413960e0ab6SLukas Czerner if (fc->sb_flags & SB_RDONLY) { 641438c03b34STheodore Ts'o err = sync_filesystem(sb); 641538c03b34STheodore Ts'o if (err < 0) 641638c03b34STheodore Ts'o goto restore_opts; 64170f0dd62fSChristoph Hellwig err = dquot_suspend(sb, -1); 64180f0dd62fSChristoph Hellwig if (err < 0) 6419c79d967dSChristoph Hellwig goto restore_opts; 6420c79d967dSChristoph Hellwig 6421ac27a0ecSDave Kleikamp /* 6422ac27a0ecSDave Kleikamp * First of all, the unconditional stuff we have to do 6423ac27a0ecSDave Kleikamp * to disable replay of the journal when we next remount 6424ac27a0ecSDave Kleikamp */ 64251751e8a6SLinus Torvalds sb->s_flags |= SB_RDONLY; 6426ac27a0ecSDave Kleikamp 6427ac27a0ecSDave Kleikamp /* 6428ac27a0ecSDave Kleikamp * OK, test if we are remounting a valid rw partition 6429ac27a0ecSDave Kleikamp * readonly, and if so set the rdonly flag and then 6430ac27a0ecSDave Kleikamp * mark the partition as valid again. 
6431ac27a0ecSDave Kleikamp */ 6432617ba13bSMingming Cao if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && 6433617ba13bSMingming Cao (sbi->s_mount_state & EXT4_VALID_FS)) 6434ac27a0ecSDave Kleikamp es->s_state = cpu_to_le16(sbi->s_mount_state); 6435ac27a0ecSDave Kleikamp 643611215630SJan Kara if (sbi->s_journal) { 643711215630SJan Kara /* 643811215630SJan Kara * We let remount-ro finish even if marking fs 643911215630SJan Kara * as clean failed... 644011215630SJan Kara */ 6441617ba13bSMingming Cao ext4_mark_recovery_complete(sb, es); 644211215630SJan Kara } 6443ac27a0ecSDave Kleikamp } else { 6444a13fb1a4SEric Sandeen /* Make sure we can mount this feature set readwrite */ 6445e2b911c5SDarrick J. Wong if (ext4_has_feature_readonly(sb) || 64462cb5cc8bSDarrick J. Wong !ext4_feature_set_ok(sb, 0)) { 6447ac27a0ecSDave Kleikamp err = -EROFS; 6448ac27a0ecSDave Kleikamp goto restore_opts; 6449ac27a0ecSDave Kleikamp } 6450ead6596bSEric Sandeen /* 64518a266467STheodore Ts'o * Make sure the group descriptor checksums 64520b8e58a1SAndreas Dilger * are sane. If they aren't, refuse to remount r/w. 64538a266467STheodore Ts'o */ 64548a266467STheodore Ts'o for (g = 0; g < sbi->s_groups_count; g++) { 64558a266467STheodore Ts'o struct ext4_group_desc *gdp = 64568a266467STheodore Ts'o ext4_get_group_desc(sb, g, NULL); 64578a266467STheodore Ts'o 6458feb0ab32SDarrick J. Wong if (!ext4_group_desc_csum_verify(sb, g, gdp)) { 6459b31e1552SEric Sandeen ext4_msg(sb, KERN_ERR, 6460b31e1552SEric Sandeen "ext4_remount: Checksum for group %u failed (%u!=%u)", 6461e2b911c5SDarrick J. Wong g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), 64628a266467STheodore Ts'o le16_to_cpu(gdp->bg_checksum)); 64636a797d27SDarrick J. 
Wong err = -EFSBADCRC; 64648a266467STheodore Ts'o goto restore_opts; 64658a266467STheodore Ts'o } 64668a266467STheodore Ts'o } 64678a266467STheodore Ts'o 64688a266467STheodore Ts'o /* 6469ead6596bSEric Sandeen * If we have an unprocessed orphan list hanging 6470ead6596bSEric Sandeen * around from a previously readonly bdev mount, 6471ead6596bSEric Sandeen * require a full umount/remount for now. 6472ead6596bSEric Sandeen */ 647302f310fcSJan Kara if (es->s_last_orphan || !ext4_orphan_file_empty(sb)) { 6474b31e1552SEric Sandeen ext4_msg(sb, KERN_WARNING, "Couldn't " 6475ead6596bSEric Sandeen "remount RDWR because of unprocessed " 6476ead6596bSEric Sandeen "orphan inode list. Please " 6477b31e1552SEric Sandeen "umount/remount instead"); 6478ead6596bSEric Sandeen err = -EINVAL; 6479ead6596bSEric Sandeen goto restore_opts; 6480ead6596bSEric Sandeen } 6481ead6596bSEric Sandeen 6482ac27a0ecSDave Kleikamp /* 6483ac27a0ecSDave Kleikamp * Mounting a RDONLY partition read-write, so reread 6484ac27a0ecSDave Kleikamp * and store the current valid flag. (It may have 6485ac27a0ecSDave Kleikamp * been changed by e2fsck since we originally mounted 6486ac27a0ecSDave Kleikamp * the partition.) 6487ac27a0ecSDave Kleikamp */ 648811215630SJan Kara if (sbi->s_journal) { 648911215630SJan Kara err = ext4_clear_journal_err(sb, es); 649011215630SJan Kara if (err) 649111215630SJan Kara goto restore_opts; 649211215630SJan Kara } 6493c878bea3STheodore Ts'o sbi->s_mount_state = (le16_to_cpu(es->s_state) & 6494c878bea3STheodore Ts'o ~EXT4_FC_REPLAY); 6495c89128a0SJaegeuk Kim 6496c89128a0SJaegeuk Kim err = ext4_setup_super(sb, es, 0); 6497c89128a0SJaegeuk Kim if (err) 6498c89128a0SJaegeuk Kim goto restore_opts; 6499c89128a0SJaegeuk Kim 65001751e8a6SLinus Torvalds sb->s_flags &= ~SB_RDONLY; 6501e2b911c5SDarrick J. 
Wong if (ext4_has_feature_mmp(sb)) 6502c5e06d10SJohann Lombardi if (ext4_multi_mount_protect(sb, 6503c5e06d10SJohann Lombardi le64_to_cpu(es->s_mmp_block))) { 6504c5e06d10SJohann Lombardi err = -EROFS; 6505c5e06d10SJohann Lombardi goto restore_opts; 6506c5e06d10SJohann Lombardi } 65073bbef91bSAustin Kim #ifdef CONFIG_QUOTA 6508c79d967dSChristoph Hellwig enable_quota = 1; 65093bbef91bSAustin Kim #endif 6510ac27a0ecSDave Kleikamp } 6511ac27a0ecSDave Kleikamp } 6512bfff6873SLukas Czerner 6513bfff6873SLukas Czerner /* 6514bfff6873SLukas Czerner * Reinitialize lazy itable initialization thread based on 6515bfff6873SLukas Czerner * current settings 6516bfff6873SLukas Czerner */ 6517bc98a42cSDavid Howells if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) 6518bfff6873SLukas Czerner ext4_unregister_li_request(sb); 6519bfff6873SLukas Czerner else { 6520bfff6873SLukas Czerner ext4_group_t first_not_zeroed; 6521bfff6873SLukas Czerner first_not_zeroed = ext4_has_uninit_itable(sb); 6522bfff6873SLukas Czerner ext4_register_li_request(sb, first_not_zeroed); 6523bfff6873SLukas Czerner } 6524bfff6873SLukas Czerner 65250f5bde1dSJan Kara /* 65260f5bde1dSJan Kara * Handle creation of system zone data early because it can fail. 65270f5bde1dSJan Kara * Releasing of existing data is done when we are sure remount will 65280f5bde1dSJan Kara * succeed. 
65290f5bde1dSJan Kara */ 6530dd0db94fSChunguang Xu if (test_opt(sb, BLOCK_VALIDITY) && !sbi->s_system_blks) { 6531d176b1f6SJan Kara err = ext4_setup_system_zone(sb); 6532d176b1f6SJan Kara if (err) 6533d176b1f6SJan Kara goto restore_opts; 65340f5bde1dSJan Kara } 6535d176b1f6SJan Kara 6536c89128a0SJaegeuk Kim if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { 65374392fbc4SJan Kara err = ext4_commit_super(sb); 6538c89128a0SJaegeuk Kim if (err) 6539c89128a0SJaegeuk Kim goto restore_opts; 6540c89128a0SJaegeuk Kim } 65410390131bSFrank Mayhar 6542ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6543ac27a0ecSDave Kleikamp /* Release old quota file names */ 6544a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) 6545ac27a0ecSDave Kleikamp kfree(old_opts.s_qf_names[i]); 65467c319d32SAditya Kali if (enable_quota) { 65477c319d32SAditya Kali if (sb_any_quota_suspended(sb)) 65480f0dd62fSChristoph Hellwig dquot_resume(sb, -1); 6549e2b911c5SDarrick J. Wong else if (ext4_has_feature_quota(sb)) { 65507c319d32SAditya Kali err = ext4_enable_quotas(sb); 655107724f98STheodore Ts'o if (err) 65527c319d32SAditya Kali goto restore_opts; 65537c319d32SAditya Kali } 65547c319d32SAditya Kali } 65557c319d32SAditya Kali #endif 6556dd0db94fSChunguang Xu if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 65570f5bde1dSJan Kara ext4_release_system_zone(sb); 6558d4c402d9SCurt Wohlgemuth 655961bb4a1cSTheodore Ts'o if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 656061bb4a1cSTheodore Ts'o ext4_stop_mmpd(sbi); 656161bb4a1cSTheodore Ts'o 6562ac27a0ecSDave Kleikamp return 0; 65630b8e58a1SAndreas Dilger 6564ac27a0ecSDave Kleikamp restore_opts: 6565ac27a0ecSDave Kleikamp sb->s_flags = old_sb_flags; 6566ac27a0ecSDave Kleikamp sbi->s_mount_opt = old_opts.s_mount_opt; 6567a2595b8aSTheodore Ts'o sbi->s_mount_opt2 = old_opts.s_mount_opt2; 6568ac27a0ecSDave Kleikamp sbi->s_resuid = old_opts.s_resuid; 6569ac27a0ecSDave Kleikamp sbi->s_resgid = old_opts.s_resgid; 6570ac27a0ecSDave Kleikamp 
sbi->s_commit_interval = old_opts.s_commit_interval; 657130773840STheodore Ts'o sbi->s_min_batch_time = old_opts.s_min_batch_time; 657230773840STheodore Ts'o sbi->s_max_batch_time = old_opts.s_max_batch_time; 6573dd0db94fSChunguang Xu if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) 65740f5bde1dSJan Kara ext4_release_system_zone(sb); 6575ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6576ac27a0ecSDave Kleikamp sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 6577a2d4a646SJan Kara for (i = 0; i < EXT4_MAXQUOTAS; i++) { 657833458eabSTheodore Ts'o to_free[i] = get_qf_name(sb, sbi, i); 657933458eabSTheodore Ts'o rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]); 6580ac27a0ecSDave Kleikamp } 658133458eabSTheodore Ts'o synchronize_rcu(); 658233458eabSTheodore Ts'o for (i = 0; i < EXT4_MAXQUOTAS; i++) 658333458eabSTheodore Ts'o kfree(to_free[i]); 6584ac27a0ecSDave Kleikamp #endif 658561bb4a1cSTheodore Ts'o if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) 658661bb4a1cSTheodore Ts'o ext4_stop_mmpd(sbi); 6587ac27a0ecSDave Kleikamp return err; 6588ac27a0ecSDave Kleikamp } 6589ac27a0ecSDave Kleikamp 6590cebe85d5SLukas Czerner static int ext4_reconfigure(struct fs_context *fc) 65917edfd85bSLukas Czerner { 6592cebe85d5SLukas Czerner struct super_block *sb = fc->root->d_sb; 65937edfd85bSLukas Czerner int ret; 65947edfd85bSLukas Czerner 6595cebe85d5SLukas Czerner fc->s_fs_info = EXT4_SB(sb); 65967edfd85bSLukas Czerner 6597cebe85d5SLukas Czerner ret = ext4_check_opt_consistency(fc, sb); 65987edfd85bSLukas Czerner if (ret < 0) 65997edfd85bSLukas Czerner return ret; 6600cebe85d5SLukas Czerner 6601960e0ab6SLukas Czerner ret = __ext4_remount(fc, sb); 6602cebe85d5SLukas Czerner if (ret < 0) 6603cebe85d5SLukas Czerner return ret; 6604cebe85d5SLukas Czerner 6605bb0fbc78SLukas Czerner ext4_msg(sb, KERN_INFO, "re-mounted %pU. 
Quota mode: %s.", 6606bb0fbc78SLukas Czerner &sb->s_uuid, ext4_quota_mode(sb)); 6607cebe85d5SLukas Czerner 6608cebe85d5SLukas Czerner return 0; 66097edfd85bSLukas Czerner } 66107edfd85bSLukas Czerner 6611689c958cSLi Xi #ifdef CONFIG_QUOTA 6612689c958cSLi Xi static int ext4_statfs_project(struct super_block *sb, 6613689c958cSLi Xi kprojid_t projid, struct kstatfs *buf) 6614689c958cSLi Xi { 6615689c958cSLi Xi struct kqid qid; 6616689c958cSLi Xi struct dquot *dquot; 6617689c958cSLi Xi u64 limit; 6618689c958cSLi Xi u64 curblock; 6619689c958cSLi Xi 6620689c958cSLi Xi qid = make_kqid_projid(projid); 6621689c958cSLi Xi dquot = dqget(sb, qid); 6622689c958cSLi Xi if (IS_ERR(dquot)) 6623689c958cSLi Xi return PTR_ERR(dquot); 66247b9ca4c6SJan Kara spin_lock(&dquot->dq_dqb_lock); 6625689c958cSLi Xi 6626a08fe66eSChengguang Xu limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit, 6627a08fe66eSChengguang Xu dquot->dq_dqb.dqb_bhardlimit); 662857c32ea4SChengguang Xu limit >>= sb->s_blocksize_bits; 662957c32ea4SChengguang Xu 6630689c958cSLi Xi if (limit && buf->f_blocks > limit) { 6631f06925c7SKonstantin Khlebnikov curblock = (dquot->dq_dqb.dqb_curspace + 6632f06925c7SKonstantin Khlebnikov dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; 6633689c958cSLi Xi buf->f_blocks = limit; 6634689c958cSLi Xi buf->f_bfree = buf->f_bavail = 6635689c958cSLi Xi (buf->f_blocks > curblock) ? 6636689c958cSLi Xi (buf->f_blocks - curblock) : 0; 6637689c958cSLi Xi } 6638689c958cSLi Xi 6639a08fe66eSChengguang Xu limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, 6640a08fe66eSChengguang Xu dquot->dq_dqb.dqb_ihardlimit); 6641689c958cSLi Xi if (limit && buf->f_files > limit) { 6642689c958cSLi Xi buf->f_files = limit; 6643689c958cSLi Xi buf->f_ffree = 6644689c958cSLi Xi (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? 
6645689c958cSLi Xi (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; 6646689c958cSLi Xi } 6647689c958cSLi Xi 66487b9ca4c6SJan Kara spin_unlock(&dquot->dq_dqb_lock); 6649689c958cSLi Xi dqput(dquot); 6650689c958cSLi Xi return 0; 6651689c958cSLi Xi } 6652689c958cSLi Xi #endif 6653689c958cSLi Xi 6654617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) 6655ac27a0ecSDave Kleikamp { 6656ac27a0ecSDave Kleikamp struct super_block *sb = dentry->d_sb; 6657617ba13bSMingming Cao struct ext4_sb_info *sbi = EXT4_SB(sb); 6658617ba13bSMingming Cao struct ext4_super_block *es = sbi->s_es; 665927dd4385SLukas Czerner ext4_fsblk_t overhead = 0, resv_blocks; 6660d02a9391SKazuya Mio s64 bfree; 666127dd4385SLukas Czerner resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); 6662ac27a0ecSDave Kleikamp 6663952fc18eSTheodore Ts'o if (!test_opt(sb, MINIX_DF)) 6664952fc18eSTheodore Ts'o overhead = sbi->s_overhead; 6665ac27a0ecSDave Kleikamp 6666617ba13bSMingming Cao buf->f_type = EXT4_SUPER_MAGIC; 6667ac27a0ecSDave Kleikamp buf->f_bsize = sb->s_blocksize; 6668b72f78cbSEric Sandeen buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); 666957042651STheodore Ts'o bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - 667057042651STheodore Ts'o percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); 6671d02a9391SKazuya Mio /* prevent underflow in case that few free space is available */ 667257042651STheodore Ts'o buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); 667327dd4385SLukas Czerner buf->f_bavail = buf->f_bfree - 667427dd4385SLukas Czerner (ext4_r_blocks_count(es) + resv_blocks); 667527dd4385SLukas Czerner if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) 6676ac27a0ecSDave Kleikamp buf->f_bavail = 0; 6677ac27a0ecSDave Kleikamp buf->f_files = le32_to_cpu(es->s_inodes_count); 667852d9f3b4SPeter Zijlstra buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); 6679617ba13bSMingming Cao 
buf->f_namelen = EXT4_NAME_LEN; 66809591c3a3SAmir Goldstein buf->f_fsid = uuid_to_fsid(es->s_uuid); 66810b8e58a1SAndreas Dilger 6682689c958cSLi Xi #ifdef CONFIG_QUOTA 6683689c958cSLi Xi if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) && 6684689c958cSLi Xi sb_has_quota_limits_enabled(sb, PRJQUOTA)) 6685689c958cSLi Xi ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf); 6686689c958cSLi Xi #endif 6687ac27a0ecSDave Kleikamp return 0; 6688ac27a0ecSDave Kleikamp } 6689ac27a0ecSDave Kleikamp 6690ac27a0ecSDave Kleikamp 6691ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 6692ac27a0ecSDave Kleikamp 6693bc8230eeSJan Kara /* 6694bc8230eeSJan Kara * Helper functions so that transaction is started before we acquire dqio_sem 6695bc8230eeSJan Kara * to keep correct lock ordering of transaction > dqio_sem 6696bc8230eeSJan Kara */ 6697ac27a0ecSDave Kleikamp static inline struct inode *dquot_to_inode(struct dquot *dquot) 6698ac27a0ecSDave Kleikamp { 66994c376dcaSEric W. Biederman return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; 6700ac27a0ecSDave Kleikamp } 6701ac27a0ecSDave Kleikamp 6702617ba13bSMingming Cao static int ext4_write_dquot(struct dquot *dquot) 6703ac27a0ecSDave Kleikamp { 6704ac27a0ecSDave Kleikamp int ret, err; 6705ac27a0ecSDave Kleikamp handle_t *handle; 6706ac27a0ecSDave Kleikamp struct inode *inode; 6707ac27a0ecSDave Kleikamp 6708ac27a0ecSDave Kleikamp inode = dquot_to_inode(dquot); 67099924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 6710617ba13bSMingming Cao EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); 6711ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6712ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6713ac27a0ecSDave Kleikamp ret = dquot_commit(dquot); 6714617ba13bSMingming Cao err = ext4_journal_stop(handle); 6715ac27a0ecSDave Kleikamp if (!ret) 6716ac27a0ecSDave Kleikamp ret = err; 6717ac27a0ecSDave Kleikamp return ret; 6718ac27a0ecSDave Kleikamp } 6719ac27a0ecSDave Kleikamp 6720617ba13bSMingming Cao static int 
ext4_acquire_dquot(struct dquot *dquot) 6721ac27a0ecSDave Kleikamp { 6722ac27a0ecSDave Kleikamp int ret, err; 6723ac27a0ecSDave Kleikamp handle_t *handle; 6724ac27a0ecSDave Kleikamp 67259924a92aSTheodore Ts'o handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6726617ba13bSMingming Cao EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb)); 6727ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 6728ac27a0ecSDave Kleikamp return PTR_ERR(handle); 6729ac27a0ecSDave Kleikamp ret = dquot_acquire(dquot); 6730617ba13bSMingming Cao err = ext4_journal_stop(handle); 6731ac27a0ecSDave Kleikamp if (!ret) 6732ac27a0ecSDave Kleikamp ret = err; 6733ac27a0ecSDave Kleikamp return ret; 6734ac27a0ecSDave Kleikamp } 6735ac27a0ecSDave Kleikamp 6736617ba13bSMingming Cao static int ext4_release_dquot(struct dquot *dquot) 6737ac27a0ecSDave Kleikamp { 6738ac27a0ecSDave Kleikamp int ret, err; 6739ac27a0ecSDave Kleikamp handle_t *handle; 6740ac27a0ecSDave Kleikamp 67419924a92aSTheodore Ts'o handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, 6742617ba13bSMingming Cao EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); 67439c3013e9SJan Kara if (IS_ERR(handle)) { 67449c3013e9SJan Kara /* Release dquot anyway to avoid endless cycle in dqput() */ 67459c3013e9SJan Kara dquot_release(dquot); 6746ac27a0ecSDave Kleikamp return PTR_ERR(handle); 67479c3013e9SJan Kara } 6748ac27a0ecSDave Kleikamp ret = dquot_release(dquot); 6749617ba13bSMingming Cao err = ext4_journal_stop(handle); 6750ac27a0ecSDave Kleikamp if (!ret) 6751ac27a0ecSDave Kleikamp ret = err; 6752ac27a0ecSDave Kleikamp return ret; 6753ac27a0ecSDave Kleikamp } 6754ac27a0ecSDave Kleikamp 6755617ba13bSMingming Cao static int ext4_mark_dquot_dirty(struct dquot *dquot) 6756ac27a0ecSDave Kleikamp { 6757262b4662SJan Kara struct super_block *sb = dquot->dq_sb; 6758262b4662SJan Kara 6759f177ee08SRoman Anufriev if (ext4_is_quota_journalled(sb)) { 6760ac27a0ecSDave Kleikamp dquot_mark_dquot_dirty(dquot); 6761617ba13bSMingming Cao return ext4_write_dquot(dquot); 
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

/*
 * Write quota format information for @type. Wraps dquot_commit_info()
 * in a journal handle (one data block + one inode block of credits) so
 * the quota-info update goes through the journal.
 */
static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;	/* report journal-stop failure only if commit succeeded */
	return ret;
}

/*
 * Move i_data_sem of a quota inode into its own lockdep subclass so
 * quota-file I/O nesting inside regular-file I/O does not trigger a
 * false-positive lock-inversion report.
 */
static void lockdep_set_quota_inode(struct inode *inode, int subclass)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	/* The first argument of lockdep_set_subclass has to be
	 * *exactly* the same as the argument to init_rwsem() --- in
	 * this case, in init_once() --- or lockdep gets unhappy
	 * because the name of the lock is set using the
	 * stringification of the argument to init_rwsem().
	 */
	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
	lockdep_set_subclass(&ei->i_data_sem, subclass);
}

/*
 * Standard function to be called on quota_on.
 *
 * Validates that the quota file lives on this filesystem and is not
 * already in use for quota, flushes the journal when the quota file
 * uses data journaling (reads bypass the pagecache, so they must see
 * all committed updates), and finally marks the file
 * NOATIME|IMMUTABLE so userspace cannot modify it while quota is on.
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 const struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;

	/* Quota already enabled for this file? */
	if (IS_NOQUOTA(d_inode(path->dentry)))
		return -EBUSY;

	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
		sb_dqopt(sb)->flags |= DQUOT_NOLIST_DIRTY;
	} else {
		/*
		 * Clear the flag just in case mount options changed since
		 * last time.
		 */
		sb_dqopt(sb)->flags &= ~DQUOT_NOLIST_DIRTY;
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
		 */
		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		if (err)
			return err;
	}

	/* Switch lock class before dquot code starts using the inode */
	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
	err = dquot_quota_on(sb, type, format_id, path);
	if (!err) {
		struct inode *inode = d_inode(path->dentry);
		handle_t *handle;

		/*
		 * Set inode flags to prevent userspace from messing with quota
		 * files. If this fails, we return success anyway since quotas
		 * are already enabled and this is not a hard failure.
		 */
		inode_lock(inode);
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
		if (IS_ERR(handle))
			goto unlock_inode;
		EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
		inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
				S_NOATIME | S_IMMUTABLE);
		err = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	unlock_inode:
		inode_unlock(inode);
		if (err)
			dquot_quota_off(sb, type);
	}
	if (err)
		/* Enabling failed: restore the normal i_data_sem class */
		lockdep_set_quota_inode(path->dentry->d_inode,
					I_DATA_SEM_NORMAL);
	return err;
}

/*
 * Sanity-check an on-disk quota inode number for @type: user and group
 * quota inodes are fixed reserved inodes; a project quota inode merely
 * has to be outside the reserved-inode range.
 */
static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
{
	switch (type) {
	case USRQUOTA:
		return qf_inum == EXT4_USR_QUOTA_INO;
	case GRPQUOTA:
		return qf_inum == EXT4_GRP_QUOTA_INO;
	case PRJQUOTA:
		return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
	default:
		BUG();
	}
}

/*
 * Enable quota accounting of the given @type/@format_id using the quota
 * inode number recorded in the superblock (requires the "quota"
 * feature; these quota files have no user-visible path).
 */
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	int err;
	struct inode *qf_inode;
	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
	};

	BUG_ON(!ext4_has_feature_quota(sb));

	if (!qf_inums[type])
		return -EPERM;

	if (!ext4_check_quota_inum(type, qf_inums[type])) {
		ext4_error(sb, "Bad quota inum: %lu, type: %d",
			   qf_inums[type], type);
		return -EUCLEAN;
	}

	qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
	if (IS_ERR(qf_inode)) {
		ext4_error(sb, "Bad quota inode: %lu, type: %d",
			   qf_inums[type], type);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	if (err)
		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
	iput(qf_inode);

	return err;
}

/* Enable usage tracking for all quota types.
*/ 693525c6d98fSJan Kara int ext4_enable_quotas(struct super_block *sb) 69367c319d32SAditya Kali { 69377c319d32SAditya Kali int type, err = 0; 6938a2d4a646SJan Kara unsigned long qf_inums[EXT4_MAXQUOTAS] = { 69397c319d32SAditya Kali le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 6940689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum), 6941689c958cSLi Xi le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum) 69427c319d32SAditya Kali }; 694349da9392SJan Kara bool quota_mopt[EXT4_MAXQUOTAS] = { 694449da9392SJan Kara test_opt(sb, USRQUOTA), 694549da9392SJan Kara test_opt(sb, GRPQUOTA), 694649da9392SJan Kara test_opt(sb, PRJQUOTA), 694749da9392SJan Kara }; 69487c319d32SAditya Kali 694991389240SJan Kara sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 6950a2d4a646SJan Kara for (type = 0; type < EXT4_MAXQUOTAS; type++) { 69517c319d32SAditya Kali if (qf_inums[type]) { 69527c319d32SAditya Kali err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 695349da9392SJan Kara DQUOT_USAGE_ENABLED | 695449da9392SJan Kara (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 69557c319d32SAditya Kali if (err) { 69567c319d32SAditya Kali ext4_warning(sb, 695772ba7450STheodore Ts'o "Failed to enable quota tracking " 695807342ec2SBaokun Li "(type=%d, err=%d, ino=%lu). 
" 695907342ec2SBaokun Li "Please run e2fsck to fix.", type, 696007342ec2SBaokun Li err, qf_inums[type]); 69614013d47aSJan Kara for (type--; type >= 0; type--) { 69624013d47aSJan Kara struct inode *inode; 69634013d47aSJan Kara 69644013d47aSJan Kara inode = sb_dqopt(sb)->files[type]; 69654013d47aSJan Kara if (inode) 69664013d47aSJan Kara inode = igrab(inode); 69677f144fd0SJunichi Uekawa dquot_quota_off(sb, type); 69684013d47aSJan Kara if (inode) { 69694013d47aSJan Kara lockdep_set_quota_inode(inode, 69704013d47aSJan Kara I_DATA_SEM_NORMAL); 69714013d47aSJan Kara iput(inode); 69724013d47aSJan Kara } 69734013d47aSJan Kara } 69747f144fd0SJunichi Uekawa 69757c319d32SAditya Kali return err; 69767c319d32SAditya Kali } 69777c319d32SAditya Kali } 69787c319d32SAditya Kali } 69797c319d32SAditya Kali return 0; 69807c319d32SAditya Kali } 69817c319d32SAditya Kali 6982ca0e05e4SDmitry Monakhov static int ext4_quota_off(struct super_block *sb, int type) 6983ca0e05e4SDmitry Monakhov { 698421f97697SJan Kara struct inode *inode = sb_dqopt(sb)->files[type]; 698521f97697SJan Kara handle_t *handle; 6986957153fcSJan Kara int err; 698721f97697SJan Kara 698887009d86SDmitry Monakhov /* Force all delayed allocation blocks to be allocated. 698987009d86SDmitry Monakhov * Caller already holds s_umount sem */ 699087009d86SDmitry Monakhov if (test_opt(sb, DELALLOC)) 6991ca0e05e4SDmitry Monakhov sync_filesystem(sb); 6992ca0e05e4SDmitry Monakhov 6993957153fcSJan Kara if (!inode || !igrab(inode)) 69940b268590SAmir Goldstein goto out; 69950b268590SAmir Goldstein 6996957153fcSJan Kara err = dquot_quota_off(sb, type); 6997964edf66SJan Kara if (err || ext4_has_feature_quota(sb)) 6998957153fcSJan Kara goto out_put; 6999957153fcSJan Kara 7000957153fcSJan Kara inode_lock(inode); 700161a92987SJan Kara /* 700261a92987SJan Kara * Update modification times of quota files when userspace can 700361a92987SJan Kara * start looking at them. 
	 * If we fail, we return success anyway since
	 * this is not a hard failure and quotas are already disabled.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	/* Undo the NOATIME/IMMUTABLE protection applied at quota_on time */
	EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
out_put:
	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks...
 * As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	/* Reads beyond EOF are clamped, mirroring ordinary file reads */
	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	/* Copy block by block, straight from the buffer cache */
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
		bh = ext4_bread(NULL, inode, blk, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		if (!bh)	/* A hole?
 */
			memset(data, 0, tocopy);
		else
			memcpy(data, bh->b_data+offset, tocopy);
		brelse(bh);
		offset = 0;	/* only the first block can start mid-block */
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
	int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
	int retries = 0;
	struct buffer_head *bh;
	handle_t *handle = journal_current_handle();

	/* Caller is expected to run inside a transaction; refuse otherwise */
	if (!handle) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because transaction is not started",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/*
	 * Since we account only one data block in transaction credits,
	 * then it is impossible to cross a block boundary.
	 */
	if (sb->s_blocksize - offset < len) {
		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
			" cancelled because not block aligned",
			(unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}

	/* Allocate the block if needed, retrying transient ENOSPC */
	do {
		bh = ext4_bread(handle, inode, blk,
				EXT4_GET_BLOCKS_CREATE |
				EXT4_GET_BLOCKS_METADATA_NOFAIL);
	} while (PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		goto out;
	BUFFER_TRACE(bh, "get write access");
	err = ext4_journal_get_write_access(handle, sb, bh, EXT4_JTR_NONE);
	if (err) {
		brelse(bh);
		return err;
	}
	lock_buffer(bh);
	memcpy(bh->b_data+offset, data, len);
	flush_dcache_page(bh->b_page);
	unlock_buffer(bh);
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	brelse(bh);
out:
	/* Extend the quota file size if the write went past EOF */
	if (inode->i_size < off + len) {
		i_size_write(inode, off + len);
		EXT4_I(inode)->i_disksize = inode->i_size;
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(err2 && !err))
			err = err2;
712221f97697SJan Kara } 71234209ae12SHarshad Shirwadkar return err ? err : len; 7124ac27a0ecSDave Kleikamp } 7125ac27a0ecSDave Kleikamp #endif 7126ac27a0ecSDave Kleikamp 7127c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) 712824b58424STheodore Ts'o static inline void register_as_ext2(void) 712924b58424STheodore Ts'o { 713024b58424STheodore Ts'o int err = register_filesystem(&ext2_fs_type); 713124b58424STheodore Ts'o if (err) 713224b58424STheodore Ts'o printk(KERN_WARNING 713324b58424STheodore Ts'o "EXT4-fs: Unable to register as ext2 (%d)\n", err); 713424b58424STheodore Ts'o } 713524b58424STheodore Ts'o 713624b58424STheodore Ts'o static inline void unregister_as_ext2(void) 713724b58424STheodore Ts'o { 713824b58424STheodore Ts'o unregister_filesystem(&ext2_fs_type); 713924b58424STheodore Ts'o } 71402035e776STheodore Ts'o 71412035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) 71422035e776STheodore Ts'o { 7143e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext2_incompat_features(sb)) 71442035e776STheodore Ts'o return 0; 7145bc98a42cSDavid Howells if (sb_rdonly(sb)) 71462035e776STheodore Ts'o return 1; 7147e2b911c5SDarrick J. 
Wong if (ext4_has_unknown_ext2_ro_compat_features(sb)) 71482035e776STheodore Ts'o return 0; 71492035e776STheodore Ts'o return 1; 71502035e776STheodore Ts'o } 715124b58424STheodore Ts'o #else 715224b58424STheodore Ts'o static inline void register_as_ext2(void) { } 715324b58424STheodore Ts'o static inline void unregister_as_ext2(void) { } 71542035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } 715524b58424STheodore Ts'o #endif 715624b58424STheodore Ts'o 715724b58424STheodore Ts'o static inline void register_as_ext3(void) 715824b58424STheodore Ts'o { 715924b58424STheodore Ts'o int err = register_filesystem(&ext3_fs_type); 716024b58424STheodore Ts'o if (err) 716124b58424STheodore Ts'o printk(KERN_WARNING 716224b58424STheodore Ts'o "EXT4-fs: Unable to register as ext3 (%d)\n", err); 716324b58424STheodore Ts'o } 716424b58424STheodore Ts'o 716524b58424STheodore Ts'o static inline void unregister_as_ext3(void) 716624b58424STheodore Ts'o { 716724b58424STheodore Ts'o unregister_filesystem(&ext3_fs_type); 716824b58424STheodore Ts'o } 71692035e776STheodore Ts'o 71702035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb) 71712035e776STheodore Ts'o { 7172e2b911c5SDarrick J. Wong if (ext4_has_unknown_ext3_incompat_features(sb)) 71732035e776STheodore Ts'o return 0; 7174e2b911c5SDarrick J. Wong if (!ext4_has_feature_journal(sb)) 71752035e776STheodore Ts'o return 0; 7176bc98a42cSDavid Howells if (sb_rdonly(sb)) 71772035e776STheodore Ts'o return 1; 7178e2b911c5SDarrick J. 
Wong if (ext4_has_unknown_ext3_ro_compat_features(sb)) 71792035e776STheodore Ts'o return 0; 71802035e776STheodore Ts'o return 1; 71812035e776STheodore Ts'o } 718224b58424STheodore Ts'o 718303010a33STheodore Ts'o static struct file_system_type ext4_fs_type = { 7184ac27a0ecSDave Kleikamp .owner = THIS_MODULE, 718503010a33STheodore Ts'o .name = "ext4", 7186cebe85d5SLukas Czerner .init_fs_context = ext4_init_fs_context, 7187cebe85d5SLukas Czerner .parameters = ext4_param_specs, 7188ac27a0ecSDave Kleikamp .kill_sb = kill_block_super, 718914f3db55SChristian Brauner .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 7190ac27a0ecSDave Kleikamp }; 71917f78e035SEric W. Biederman MODULE_ALIAS_FS("ext4"); 7192ac27a0ecSDave Kleikamp 7193e9e3bcecSEric Sandeen /* Shared across all ext4 file systems */ 7194e9e3bcecSEric Sandeen wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; 7195e9e3bcecSEric Sandeen 71965dabfc78STheodore Ts'o static int __init ext4_init_fs(void) 7197ac27a0ecSDave Kleikamp { 7198e9e3bcecSEric Sandeen int i, err; 7199c9de560dSAlex Tomas 7200e294a537STheodore Ts'o ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); 720107c0c5d8SAl Viro ext4_li_info = NULL; 720207c0c5d8SAl Viro 72039a4c8019SCarlos Maiolino /* Build-time check for flags consistency */ 720412e9b892SDmitry Monakhov ext4_check_flag_values(); 7205e9e3bcecSEric Sandeen 7206e142d052SJan Kara for (i = 0; i < EXT4_WQ_HASH_SZ; i++) 7207e9e3bcecSEric Sandeen init_waitqueue_head(&ext4__ioend_wq[i]); 7208e9e3bcecSEric Sandeen 720951865fdaSZheng Liu err = ext4_init_es(); 72106fd058f7STheodore Ts'o if (err) 72116fd058f7STheodore Ts'o return err; 721251865fdaSZheng Liu 72131dc0aa46SEric Whitney err = ext4_init_pending(); 72141dc0aa46SEric Whitney if (err) 721522cfe4b4SEric Biggers goto out7; 721622cfe4b4SEric Biggers 721722cfe4b4SEric Biggers err = ext4_init_post_read_processing(); 721822cfe4b4SEric Biggers if (err) 72191dc0aa46SEric Whitney goto out6; 72201dc0aa46SEric Whitney 722151865fdaSZheng Liu err = 
ext4_init_pageio();
	if (err)
		goto out5;

	err = ext4_init_system_zone();
	if (err)
		goto out4;

	err = ext4_init_sysfs();
	if (err)
		goto out3;

	err = ext4_init_mballoc();
	if (err)
		goto out2;
	err = init_inodecache();
	if (err)
		goto out1;

	err = ext4_fc_init_dentry_cache();
	if (err)
		goto out05;

	register_as_ext3();
	register_as_ext2();
	err = register_filesystem(&ext4_fs_type);
	if (err)
		goto out;

	return 0;
	/* Unwind labels: tear down in reverse order of initialization */
out:
	unregister_as_ext2();
	unregister_as_ext3();
	ext4_fc_destroy_dentry_cache();
out05:
	destroy_inodecache();
out1:
	ext4_exit_mballoc();
out2:
	ext4_exit_sysfs();
out3:
	ext4_exit_system_zone();
out4:
	ext4_exit_pageio();
out5:
	ext4_exit_post_read_processing();
out6:
	ext4_exit_pending();
out7:
	ext4_exit_es();

	return err;
}

/* Module exit: unregister the filesystems, then tear down subsystems. */
static void __exit ext4_exit_fs(void)
{
	ext4_destroy_lazyinit_thread();
	unregister_as_ext2();
	unregister_as_ext3();
	unregister_filesystem(&ext4_fs_type);
	ext4_fc_destroy_dentry_cache();
	destroy_inodecache();
	ext4_exit_mballoc();
	ext4_exit_sysfs();
	ext4_exit_system_zone();
	ext4_exit_pageio();
	ext4_exit_post_read_processing();
	ext4_exit_es();
	ext4_exit_pending();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)