xref: /linux/fs/ext4/super.c (revision 0db1ff222d40f1601c961f0edb86d10426992595)
1ac27a0ecSDave Kleikamp /*
2617ba13bSMingming Cao  *  linux/fs/ext4/super.c
3ac27a0ecSDave Kleikamp  *
4ac27a0ecSDave Kleikamp  * Copyright (C) 1992, 1993, 1994, 1995
5ac27a0ecSDave Kleikamp  * Remy Card (card@masi.ibp.fr)
6ac27a0ecSDave Kleikamp  * Laboratoire MASI - Institut Blaise Pascal
7ac27a0ecSDave Kleikamp  * Universite Pierre et Marie Curie (Paris VI)
8ac27a0ecSDave Kleikamp  *
9ac27a0ecSDave Kleikamp  *  from
10ac27a0ecSDave Kleikamp  *
11ac27a0ecSDave Kleikamp  *  linux/fs/minix/inode.c
12ac27a0ecSDave Kleikamp  *
13ac27a0ecSDave Kleikamp  *  Copyright (C) 1991, 1992  Linus Torvalds
14ac27a0ecSDave Kleikamp  *
15ac27a0ecSDave Kleikamp  *  Big-endian to little-endian byte-swapping/bitmaps by
16ac27a0ecSDave Kleikamp  *        David S. Miller (davem@caip.rutgers.edu), 1995
17ac27a0ecSDave Kleikamp  */
18ac27a0ecSDave Kleikamp 
19ac27a0ecSDave Kleikamp #include <linux/module.h>
20ac27a0ecSDave Kleikamp #include <linux/string.h>
21ac27a0ecSDave Kleikamp #include <linux/fs.h>
22ac27a0ecSDave Kleikamp #include <linux/time.h>
23c5ca7c76STheodore Ts'o #include <linux/vmalloc.h>
24ac27a0ecSDave Kleikamp #include <linux/slab.h>
25ac27a0ecSDave Kleikamp #include <linux/init.h>
26ac27a0ecSDave Kleikamp #include <linux/blkdev.h>
2766114cadSTejun Heo #include <linux/backing-dev.h>
28ac27a0ecSDave Kleikamp #include <linux/parser.h>
29ac27a0ecSDave Kleikamp #include <linux/buffer_head.h>
30a5694255SChristoph Hellwig #include <linux/exportfs.h>
31ac27a0ecSDave Kleikamp #include <linux/vfs.h>
32ac27a0ecSDave Kleikamp #include <linux/random.h>
33ac27a0ecSDave Kleikamp #include <linux/mount.h>
34ac27a0ecSDave Kleikamp #include <linux/namei.h>
35ac27a0ecSDave Kleikamp #include <linux/quotaops.h>
36ac27a0ecSDave Kleikamp #include <linux/seq_file.h>
373197ebdbSTheodore Ts'o #include <linux/ctype.h>
381330593eSVignesh Babu #include <linux/log2.h>
39717d50e4SAndreas Dilger #include <linux/crc16.h>
407abc52c2SDan Magenheimer #include <linux/cleancache.h>
417c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
42ac27a0ecSDave Kleikamp 
43bfff6873SLukas Czerner #include <linux/kthread.h>
44bfff6873SLukas Czerner #include <linux/freezer.h>
45bfff6873SLukas Czerner 
463dcf5451SChristoph Hellwig #include "ext4.h"
474a092d73STheodore Ts'o #include "ext4_extents.h"	/* Needed for trace points definition */
483dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
49ac27a0ecSDave Kleikamp #include "xattr.h"
50ac27a0ecSDave Kleikamp #include "acl.h"
513661d286STheodore Ts'o #include "mballoc.h"
52ac27a0ecSDave Kleikamp 
539bffad1eSTheodore Ts'o #define CREATE_TRACE_POINTS
549bffad1eSTheodore Ts'o #include <trace/events/ext4.h>
559bffad1eSTheodore Ts'o 
560b75a840SLukas Czerner static struct ext4_lazy_init *ext4_li_info;
570b75a840SLukas Czerner static struct mutex ext4_li_mtx;
58e294a537STheodore Ts'o static struct ratelimit_state ext4_mount_msg_ratelimit;
599f6200bbSTheodore Ts'o 
60617ba13bSMingming Cao static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
61ac27a0ecSDave Kleikamp 			     unsigned long journal_devnum);
622adf6da8STheodore Ts'o static int ext4_show_options(struct seq_file *seq, struct dentry *root);
63e2d67052STheodore Ts'o static int ext4_commit_super(struct super_block *sb, int sync);
64617ba13bSMingming Cao static void ext4_mark_recovery_complete(struct super_block *sb,
65617ba13bSMingming Cao 					struct ext4_super_block *es);
66617ba13bSMingming Cao static void ext4_clear_journal_err(struct super_block *sb,
67617ba13bSMingming Cao 				   struct ext4_super_block *es);
68617ba13bSMingming Cao static int ext4_sync_fs(struct super_block *sb, int wait);
69617ba13bSMingming Cao static int ext4_remount(struct super_block *sb, int *flags, char *data);
70617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
71c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb);
72c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb);
73152a0836SAl Viro static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
74152a0836SAl Viro 		       const char *dev_name, void *data);
752035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb);
762035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb);
77d39195c3SAmir Goldstein static int ext4_feature_set_ok(struct super_block *sb, int readonly);
78bfff6873SLukas Czerner static void ext4_destroy_lazyinit_thread(void);
79bfff6873SLukas Czerner static void ext4_unregister_li_request(struct super_block *sb);
808f1f7453SEric Sandeen static void ext4_clear_request_list(void);
81c6cb7e77SEric Whitney static struct inode *ext4_get_journal_inode(struct super_block *sb,
82c6cb7e77SEric Whitney 					    unsigned int journal_inum);
83ac27a0ecSDave Kleikamp 
84e74031fdSJan Kara /*
85e74031fdSJan Kara  * Lock ordering
86e74031fdSJan Kara  *
87e74031fdSJan Kara  * Note the difference between i_mmap_sem (EXT4_I(inode)->i_mmap_sem) and
88e74031fdSJan Kara  * i_mmap_rwsem (inode->i_mmap_rwsem)!
89e74031fdSJan Kara  *
90e74031fdSJan Kara  * page fault path:
91e74031fdSJan Kara  * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
92e74031fdSJan Kara  *   page lock -> i_data_sem (rw)
93e74031fdSJan Kara  *
94e74031fdSJan Kara  * buffered write path:
95e74031fdSJan Kara  * sb_start_write -> i_mutex -> mmap_sem
96e74031fdSJan Kara  * sb_start_write -> i_mutex -> transaction start -> page lock ->
97e74031fdSJan Kara  *   i_data_sem (rw)
98e74031fdSJan Kara  *
99e74031fdSJan Kara  * truncate:
100e74031fdSJan Kara  * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
101e74031fdSJan Kara  *   i_mmap_rwsem (w) -> page lock
102e74031fdSJan Kara  * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (w) -> i_mmap_sem (w) ->
103e74031fdSJan Kara  *   transaction start -> i_data_sem (rw)
104e74031fdSJan Kara  *
105e74031fdSJan Kara  * direct IO:
106e74031fdSJan Kara  * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) -> mmap_sem
107e74031fdSJan Kara  * sb_start_write -> i_mutex -> EXT4_STATE_DIOREAD_LOCK (r) ->
108e74031fdSJan Kara  *   transaction start -> i_data_sem (rw)
109e74031fdSJan Kara  *
110e74031fdSJan Kara  * writepages:
111e74031fdSJan Kara  * transaction start -> page lock(s) -> i_data_sem (rw)
112e74031fdSJan Kara  */
113e74031fdSJan Kara 
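/*
 * Illustrative sketch added for this annotated listing (not part of the
 * original file): a hypothetical helper that takes locks in the
 * buffered-write order documented above -- freeze protection, then
 * i_mutex, then transaction start, then i_data_sem.  Only the kernel and
 * ext4 APIs named here are real; the helper itself and its single journal
 * credit are made up for illustration.
 */
#if 0	/* example only, never compiled */
static int ext4_lock_order_example(struct inode *inode)
{
	handle_t *handle;
	int err = 0;

	sb_start_write(inode->i_sb);			/* freeze protection */
	inode_lock(inode);				/* i_mutex */
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_unlock;
	}
	down_write(&EXT4_I(inode)->i_data_sem);		/* i_data_sem (rw) last */
	/* ... modify the inode's block mappings here ... */
	up_write(&EXT4_I(inode)->i_data_sem);
	err = ext4_journal_stop(handle);
out_unlock:
	inode_unlock(inode);
	sb_end_write(inode->i_sb);
	return err;
}
#endif
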
114c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
1152035e776STheodore Ts'o static struct file_system_type ext2_fs_type = {
1162035e776STheodore Ts'o 	.owner		= THIS_MODULE,
1172035e776STheodore Ts'o 	.name		= "ext2",
1182035e776STheodore Ts'o 	.mount		= ext4_mount,
1192035e776STheodore Ts'o 	.kill_sb	= kill_block_super,
1202035e776STheodore Ts'o 	.fs_flags	= FS_REQUIRES_DEV,
1212035e776STheodore Ts'o };
1227f78e035SEric W. Biederman MODULE_ALIAS_FS("ext2");
123fa7614ddSEric W. Biederman MODULE_ALIAS("ext2");
1242035e776STheodore Ts'o #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
1252035e776STheodore Ts'o #else
1262035e776STheodore Ts'o #define IS_EXT2_SB(sb) (0)
1272035e776STheodore Ts'o #endif
1282035e776STheodore Ts'o 
1292035e776STheodore Ts'o 
130ba69f9abSJan Kara static struct file_system_type ext3_fs_type = {
131ba69f9abSJan Kara 	.owner		= THIS_MODULE,
132ba69f9abSJan Kara 	.name		= "ext3",
133152a0836SAl Viro 	.mount		= ext4_mount,
134ba69f9abSJan Kara 	.kill_sb	= kill_block_super,
135ba69f9abSJan Kara 	.fs_flags	= FS_REQUIRES_DEV,
136ba69f9abSJan Kara };
1377f78e035SEric W. Biederman MODULE_ALIAS_FS("ext3");
138fa7614ddSEric W. Biederman MODULE_ALIAS("ext3");
139ba69f9abSJan Kara #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
140bd81d8eeSLaurent Vivier 
141d25425f8SDarrick J. Wong static int ext4_verify_csum_type(struct super_block *sb,
142d25425f8SDarrick J. Wong 				 struct ext4_super_block *es)
143d25425f8SDarrick J. Wong {
144e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_metadata_csum(sb))
145d25425f8SDarrick J. Wong 		return 1;
146d25425f8SDarrick J. Wong 
147d25425f8SDarrick J. Wong 	return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
148d25425f8SDarrick J. Wong }
149d25425f8SDarrick J. Wong 
150a9c47317SDarrick J. Wong static __le32 ext4_superblock_csum(struct super_block *sb,
151a9c47317SDarrick J. Wong 				   struct ext4_super_block *es)
152a9c47317SDarrick J. Wong {
153a9c47317SDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(sb);
154a9c47317SDarrick J. Wong 	int offset = offsetof(struct ext4_super_block, s_checksum);
155a9c47317SDarrick J. Wong 	__u32 csum;
156a9c47317SDarrick J. Wong 
157a9c47317SDarrick J. Wong 	csum = ext4_chksum(sbi, ~0, (char *)es, offset);
158a9c47317SDarrick J. Wong 
159a9c47317SDarrick J. Wong 	return cpu_to_le32(csum);
160a9c47317SDarrick J. Wong }
161a9c47317SDarrick J. Wong 
162c197855eSStephen Hemminger static int ext4_superblock_csum_verify(struct super_block *sb,
163a9c47317SDarrick J. Wong 				       struct ext4_super_block *es)
164a9c47317SDarrick J. Wong {
1659aa5d32bSDmitry Monakhov 	if (!ext4_has_metadata_csum(sb))
166a9c47317SDarrick J. Wong 		return 1;
167a9c47317SDarrick J. Wong 
168a9c47317SDarrick J. Wong 	return es->s_checksum == ext4_superblock_csum(sb, es);
169a9c47317SDarrick J. Wong }
170a9c47317SDarrick J. Wong 
17106db49e6STheodore Ts'o void ext4_superblock_csum_set(struct super_block *sb)
172a9c47317SDarrick J. Wong {
17306db49e6STheodore Ts'o 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
17406db49e6STheodore Ts'o 
1759aa5d32bSDmitry Monakhov 	if (!ext4_has_metadata_csum(sb))
176a9c47317SDarrick J. Wong 		return;
177a9c47317SDarrick J. Wong 
178a9c47317SDarrick J. Wong 	es->s_checksum = ext4_superblock_csum(sb, es);
179a9c47317SDarrick J. Wong }
180a9c47317SDarrick J. Wong 
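/*
 * Hedged usage sketch (not in the original source): the checksum is
 * refreshed just before the superblock buffer is handed to the block
 * layer, as the superblock write-out path (ext4_commit_super(), defined
 * later in this file) does.  The helper below is hypothetical.
 */
#if 0	/* example only */
static void example_write_super(struct super_block *sb)
{
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;

	ext4_superblock_csum_set(sb);
	mark_buffer_dirty(sbh);
	sync_dirty_buffer(sbh);
}
#endif
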
1819933fc0aSTheodore Ts'o void *ext4_kvmalloc(size_t size, gfp_t flags)
1829933fc0aSTheodore Ts'o {
1839933fc0aSTheodore Ts'o 	void *ret;
1849933fc0aSTheodore Ts'o 
1858be04b93SJoe Perches 	ret = kmalloc(size, flags | __GFP_NOWARN);
1869933fc0aSTheodore Ts'o 	if (!ret)
1879933fc0aSTheodore Ts'o 		ret = __vmalloc(size, flags, PAGE_KERNEL);
1889933fc0aSTheodore Ts'o 	return ret;
1899933fc0aSTheodore Ts'o }
1909933fc0aSTheodore Ts'o 
1919933fc0aSTheodore Ts'o void *ext4_kvzalloc(size_t size, gfp_t flags)
1929933fc0aSTheodore Ts'o {
1939933fc0aSTheodore Ts'o 	void *ret;
1949933fc0aSTheodore Ts'o 
1958be04b93SJoe Perches 	ret = kzalloc(size, flags | __GFP_NOWARN);
1969933fc0aSTheodore Ts'o 	if (!ret)
1979933fc0aSTheodore Ts'o 		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
1989933fc0aSTheodore Ts'o 	return ret;
1999933fc0aSTheodore Ts'o }
2009933fc0aSTheodore Ts'o 
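/*
 * Hedged usage sketch (not in the original source): these helpers are for
 * allocations that may be too large for kmalloc() to satisfy contiguously,
 * e.g. per-group arrays sized at mount time; the result is freed with
 * kvfree() regardless of which allocator satisfied it.  The helper and its
 * "ngroups" parameter are hypothetical.
 */
#if 0	/* example only */
static struct flex_groups *example_alloc_flex_array(unsigned int ngroups)
{
	/* falls back to vmalloc if the slab allocation fails */
	return ext4_kvzalloc((size_t)ngroups * sizeof(struct flex_groups),
			     GFP_KERNEL);
}
#endif
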
2018fadc143SAlexandre Ratchov ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
2028fadc143SAlexandre Ratchov 			       struct ext4_group_desc *bg)
203bd81d8eeSLaurent Vivier {
2043a14589cSAneesh Kumar K.V 	return le32_to_cpu(bg->bg_block_bitmap_lo) |
2058fadc143SAlexandre Ratchov 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
2068fadc143SAlexandre Ratchov 		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
207bd81d8eeSLaurent Vivier }
208bd81d8eeSLaurent Vivier 
2098fadc143SAlexandre Ratchov ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
2108fadc143SAlexandre Ratchov 			       struct ext4_group_desc *bg)
211bd81d8eeSLaurent Vivier {
2125272f837SAneesh Kumar K.V 	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
2138fadc143SAlexandre Ratchov 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
2148fadc143SAlexandre Ratchov 		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
215bd81d8eeSLaurent Vivier }
216bd81d8eeSLaurent Vivier 
2178fadc143SAlexandre Ratchov ext4_fsblk_t ext4_inode_table(struct super_block *sb,
2188fadc143SAlexandre Ratchov 			      struct ext4_group_desc *bg)
219bd81d8eeSLaurent Vivier {
2205272f837SAneesh Kumar K.V 	return le32_to_cpu(bg->bg_inode_table_lo) |
2218fadc143SAlexandre Ratchov 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
2228fadc143SAlexandre Ratchov 		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
223bd81d8eeSLaurent Vivier }
224bd81d8eeSLaurent Vivier 
225021b65bbSTheodore Ts'o __u32 ext4_free_group_clusters(struct super_block *sb,
226560671a0SAneesh Kumar K.V 			       struct ext4_group_desc *bg)
227560671a0SAneesh Kumar K.V {
228560671a0SAneesh Kumar K.V 	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
229560671a0SAneesh Kumar K.V 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
230560671a0SAneesh Kumar K.V 		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
231560671a0SAneesh Kumar K.V }
232560671a0SAneesh Kumar K.V 
233560671a0SAneesh Kumar K.V __u32 ext4_free_inodes_count(struct super_block *sb,
234560671a0SAneesh Kumar K.V 			      struct ext4_group_desc *bg)
235560671a0SAneesh Kumar K.V {
236560671a0SAneesh Kumar K.V 	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
237560671a0SAneesh Kumar K.V 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
238560671a0SAneesh Kumar K.V 		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
239560671a0SAneesh Kumar K.V }
240560671a0SAneesh Kumar K.V 
241560671a0SAneesh Kumar K.V __u32 ext4_used_dirs_count(struct super_block *sb,
242560671a0SAneesh Kumar K.V 			      struct ext4_group_desc *bg)
243560671a0SAneesh Kumar K.V {
244560671a0SAneesh Kumar K.V 	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
245560671a0SAneesh Kumar K.V 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
246560671a0SAneesh Kumar K.V 		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
247560671a0SAneesh Kumar K.V }
248560671a0SAneesh Kumar K.V 
249560671a0SAneesh Kumar K.V __u32 ext4_itable_unused_count(struct super_block *sb,
250560671a0SAneesh Kumar K.V 			      struct ext4_group_desc *bg)
251560671a0SAneesh Kumar K.V {
252560671a0SAneesh Kumar K.V 	return le16_to_cpu(bg->bg_itable_unused_lo) |
253560671a0SAneesh Kumar K.V 		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
254560671a0SAneesh Kumar K.V 		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
255560671a0SAneesh Kumar K.V }
256560671a0SAneesh Kumar K.V 
2578fadc143SAlexandre Ratchov void ext4_block_bitmap_set(struct super_block *sb,
2588fadc143SAlexandre Ratchov 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
259bd81d8eeSLaurent Vivier {
2603a14589cSAneesh Kumar K.V 	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
2618fadc143SAlexandre Ratchov 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
2628fadc143SAlexandre Ratchov 		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
263bd81d8eeSLaurent Vivier }
264bd81d8eeSLaurent Vivier 
2658fadc143SAlexandre Ratchov void ext4_inode_bitmap_set(struct super_block *sb,
2668fadc143SAlexandre Ratchov 			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
267bd81d8eeSLaurent Vivier {
2685272f837SAneesh Kumar K.V 	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
2698fadc143SAlexandre Ratchov 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
2708fadc143SAlexandre Ratchov 		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
271bd81d8eeSLaurent Vivier }
272bd81d8eeSLaurent Vivier 
2738fadc143SAlexandre Ratchov void ext4_inode_table_set(struct super_block *sb,
2748fadc143SAlexandre Ratchov 			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
275bd81d8eeSLaurent Vivier {
2765272f837SAneesh Kumar K.V 	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
2778fadc143SAlexandre Ratchov 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
2788fadc143SAlexandre Ratchov 		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
279bd81d8eeSLaurent Vivier }
280bd81d8eeSLaurent Vivier 
281021b65bbSTheodore Ts'o void ext4_free_group_clusters_set(struct super_block *sb,
282560671a0SAneesh Kumar K.V 				  struct ext4_group_desc *bg, __u32 count)
283560671a0SAneesh Kumar K.V {
284560671a0SAneesh Kumar K.V 	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
285560671a0SAneesh Kumar K.V 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
286560671a0SAneesh Kumar K.V 		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
287560671a0SAneesh Kumar K.V }
288560671a0SAneesh Kumar K.V 
289560671a0SAneesh Kumar K.V void ext4_free_inodes_set(struct super_block *sb,
290560671a0SAneesh Kumar K.V 			  struct ext4_group_desc *bg, __u32 count)
291560671a0SAneesh Kumar K.V {
292560671a0SAneesh Kumar K.V 	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
293560671a0SAneesh Kumar K.V 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
294560671a0SAneesh Kumar K.V 		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
295560671a0SAneesh Kumar K.V }
296560671a0SAneesh Kumar K.V 
297560671a0SAneesh Kumar K.V void ext4_used_dirs_set(struct super_block *sb,
298560671a0SAneesh Kumar K.V 			  struct ext4_group_desc *bg, __u32 count)
299560671a0SAneesh Kumar K.V {
300560671a0SAneesh Kumar K.V 	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
301560671a0SAneesh Kumar K.V 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
302560671a0SAneesh Kumar K.V 		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
303560671a0SAneesh Kumar K.V }
304560671a0SAneesh Kumar K.V 
305560671a0SAneesh Kumar K.V void ext4_itable_unused_set(struct super_block *sb,
306560671a0SAneesh Kumar K.V 			  struct ext4_group_desc *bg, __u32 count)
307560671a0SAneesh Kumar K.V {
308560671a0SAneesh Kumar K.V 	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
309560671a0SAneesh Kumar K.V 	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
310560671a0SAneesh Kumar K.V 		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
311560671a0SAneesh Kumar K.V }
312560671a0SAneesh Kumar K.V 
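/*
 * Worked example added for illustration (not in the original source):
 * with a descriptor size >= EXT4_MIN_DESC_SIZE_64BIT, a block number such
 * as blk = 0x123456789 is stored split across two fields --
 * bg_block_bitmap_lo = cpu_to_le32(0x23456789) and
 * bg_block_bitmap_hi = cpu_to_le32(0x00000001) -- and the getters above
 * reassemble it as lo | ((ext4_fsblk_t)hi << 32).  Descriptors smaller
 * than EXT4_MIN_DESC_SIZE_64BIT have no *_hi fields, so the setters skip
 * them and the getters treat them as zero.
 */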
313d3d1faf6SCurt Wohlgemuth 
3141c13d5c0STheodore Ts'o static void __save_error_info(struct super_block *sb, const char *func,
3151c13d5c0STheodore Ts'o 			    unsigned int line)
3161c13d5c0STheodore Ts'o {
3171c13d5c0STheodore Ts'o 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
3181c13d5c0STheodore Ts'o 
3191c13d5c0STheodore Ts'o 	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
3201b46617bSTheodore Ts'o 	if (bdev_read_only(sb->s_bdev))
3211b46617bSTheodore Ts'o 		return;
3221c13d5c0STheodore Ts'o 	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
3231c13d5c0STheodore Ts'o 	es->s_last_error_time = cpu_to_le32(get_seconds());
3241c13d5c0STheodore Ts'o 	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
3251c13d5c0STheodore Ts'o 	es->s_last_error_line = cpu_to_le32(line);
3261c13d5c0STheodore Ts'o 	if (!es->s_first_error_time) {
3271c13d5c0STheodore Ts'o 		es->s_first_error_time = es->s_last_error_time;
3281c13d5c0STheodore Ts'o 		strncpy(es->s_first_error_func, func,
3291c13d5c0STheodore Ts'o 			sizeof(es->s_first_error_func));
3301c13d5c0STheodore Ts'o 		es->s_first_error_line = cpu_to_le32(line);
3311c13d5c0STheodore Ts'o 		es->s_first_error_ino = es->s_last_error_ino;
3321c13d5c0STheodore Ts'o 		es->s_first_error_block = es->s_last_error_block;
3331c13d5c0STheodore Ts'o 	}
33466e61a9eSTheodore Ts'o 	/*
33566e61a9eSTheodore Ts'o 	 * Start the daily error reporting function if it hasn't been
33666e61a9eSTheodore Ts'o 	 * started already
33766e61a9eSTheodore Ts'o 	 */
33866e61a9eSTheodore Ts'o 	if (!es->s_error_count)
33966e61a9eSTheodore Ts'o 		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
340ba39ebb6SWei Yongjun 	le32_add_cpu(&es->s_error_count, 1);
3411c13d5c0STheodore Ts'o }
3421c13d5c0STheodore Ts'o 
3431c13d5c0STheodore Ts'o static void save_error_info(struct super_block *sb, const char *func,
3441c13d5c0STheodore Ts'o 			    unsigned int line)
3451c13d5c0STheodore Ts'o {
3461c13d5c0STheodore Ts'o 	__save_error_info(sb, func, line);
3471c13d5c0STheodore Ts'o 	ext4_commit_super(sb, 1);
3481c13d5c0STheodore Ts'o }
3491c13d5c0STheodore Ts'o 
350bdfe0cbdSTheodore Ts'o /*
351bdfe0cbdSTheodore Ts'o  * The del_gendisk() function uninitializes the disk-specific data
352bdfe0cbdSTheodore Ts'o  * structures, including the bdi structure, without telling anyone
353bdfe0cbdSTheodore Ts'o  * else.  Once this happens, any attempt to call mark_buffer_dirty()
354bdfe0cbdSTheodore Ts'o  * (for example, by ext4_commit_super), will cause a kernel OOPS.
355bdfe0cbdSTheodore Ts'o  * This is a kludge to prevent these oops until we can put in a proper
356bdfe0cbdSTheodore Ts'o  * hook in del_gendisk() to inform the VFS and file system layers.
357bdfe0cbdSTheodore Ts'o  */
358bdfe0cbdSTheodore Ts'o static int block_device_ejected(struct super_block *sb)
359bdfe0cbdSTheodore Ts'o {
360bdfe0cbdSTheodore Ts'o 	struct inode *bd_inode = sb->s_bdev->bd_inode;
361bdfe0cbdSTheodore Ts'o 	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
362bdfe0cbdSTheodore Ts'o 
363bdfe0cbdSTheodore Ts'o 	return bdi->dev == NULL;
364bdfe0cbdSTheodore Ts'o }
365bdfe0cbdSTheodore Ts'o 
36618aadd47SBobi Jam static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
36718aadd47SBobi Jam {
36818aadd47SBobi Jam 	struct super_block		*sb = journal->j_private;
36918aadd47SBobi Jam 	struct ext4_sb_info		*sbi = EXT4_SB(sb);
37018aadd47SBobi Jam 	int				error = is_journal_aborted(journal);
3715d3ee208SDmitry Monakhov 	struct ext4_journal_cb_entry	*jce;
37218aadd47SBobi Jam 
3735d3ee208SDmitry Monakhov 	BUG_ON(txn->t_state == T_FINISHED);
37418aadd47SBobi Jam 	spin_lock(&sbi->s_md_lock);
3755d3ee208SDmitry Monakhov 	while (!list_empty(&txn->t_private_list)) {
3765d3ee208SDmitry Monakhov 		jce = list_entry(txn->t_private_list.next,
3775d3ee208SDmitry Monakhov 				 struct ext4_journal_cb_entry, jce_list);
37818aadd47SBobi Jam 		list_del_init(&jce->jce_list);
37918aadd47SBobi Jam 		spin_unlock(&sbi->s_md_lock);
38018aadd47SBobi Jam 		jce->jce_func(sb, jce, error);
38118aadd47SBobi Jam 		spin_lock(&sbi->s_md_lock);
38218aadd47SBobi Jam 	}
38318aadd47SBobi Jam 	spin_unlock(&sbi->s_md_lock);
38418aadd47SBobi Jam }
3851c13d5c0STheodore Ts'o 
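/*
 * Hedged sketch (not in the original source): how a transaction typically
 * queues work for the commit callback above.  ext4_journal_callback_add()
 * is the real helper from ext4_jbd2.h; the callback and the helper that
 * registers it are hypothetical.
 */
#if 0	/* example only */
static void example_commit_cb(struct super_block *sb,
			      struct ext4_journal_cb_entry *jce, int rc)
{
	/* runs from ext4_journal_commit_callback() once the transaction
	 * has committed; rc is non-zero if the journal was aborted */
}

static void example_queue_commit_work(handle_t *handle,
				      struct ext4_journal_cb_entry *jce)
{
	/* must be called while the handle is still running */
	ext4_journal_callback_add(handle, example_commit_cb, jce);
}
#endif
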
386ac27a0ecSDave Kleikamp /* Deal with the reporting of failure conditions on a filesystem, such as
387ac27a0ecSDave Kleikamp  * detected inconsistencies or read IO failures.
388ac27a0ecSDave Kleikamp  *
389ac27a0ecSDave Kleikamp  * On ext2, we can store the error state of the filesystem in the
390617ba13bSMingming Cao  * superblock.  That is not possible on ext4, because we may have other
391ac27a0ecSDave Kleikamp  * write ordering constraints on the superblock which prevent us from
392ac27a0ecSDave Kleikamp  * writing it out straight away; and given that the journal is about to
393ac27a0ecSDave Kleikamp  * be aborted, we can't rely on the current, or future, transactions to
394ac27a0ecSDave Kleikamp  * write out the superblock safely.
395ac27a0ecSDave Kleikamp  *
396dab291afSMingming Cao  * We'll just use the jbd2_journal_abort() error code to record an error in
397d6b198bcSThadeu Lima de Souza Cascardo  * the journal instead.  On recovery, the journal will complain about
398ac27a0ecSDave Kleikamp  * that error until we've noted it down and cleared it.
399ac27a0ecSDave Kleikamp  */
400ac27a0ecSDave Kleikamp 
401617ba13bSMingming Cao static void ext4_handle_error(struct super_block *sb)
402ac27a0ecSDave Kleikamp {
403ac27a0ecSDave Kleikamp 	if (sb->s_flags & MS_RDONLY)
404ac27a0ecSDave Kleikamp 		return;
405ac27a0ecSDave Kleikamp 
406ac27a0ecSDave Kleikamp 	if (!test_opt(sb, ERRORS_CONT)) {
407617ba13bSMingming Cao 		journal_t *journal = EXT4_SB(sb)->s_journal;
408ac27a0ecSDave Kleikamp 
4094ab2f15bSTheodore Ts'o 		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
410ac27a0ecSDave Kleikamp 		if (journal)
411dab291afSMingming Cao 			jbd2_journal_abort(journal, -EIO);
412ac27a0ecSDave Kleikamp 	}
413ac27a0ecSDave Kleikamp 	if (test_opt(sb, ERRORS_RO)) {
414b31e1552SEric Sandeen 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
4154418e141SDmitry Monakhov 		/*
4164418e141SDmitry Monakhov 		 * Make sure updated value of ->s_mount_flags will be visible
4174418e141SDmitry Monakhov 		 * before ->s_flags update
4184418e141SDmitry Monakhov 		 */
4194418e141SDmitry Monakhov 		smp_wmb();
420ac27a0ecSDave Kleikamp 		sb->s_flags |= MS_RDONLY;
421ac27a0ecSDave Kleikamp 	}
4224327ba52SDaeho Jeong 	if (test_opt(sb, ERRORS_PANIC)) {
4234327ba52SDaeho Jeong 		if (EXT4_SB(sb)->s_journal &&
4244327ba52SDaeho Jeong 		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
4254327ba52SDaeho Jeong 			return;
426617ba13bSMingming Cao 		panic("EXT4-fs (device %s): panic forced after error\n",
427ac27a0ecSDave Kleikamp 			sb->s_id);
428ac27a0ecSDave Kleikamp 	}
4294327ba52SDaeho Jeong }
430ac27a0ecSDave Kleikamp 
431efbed4dcSTheodore Ts'o #define ext4_error_ratelimit(sb)					\
432efbed4dcSTheodore Ts'o 		___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),	\
433efbed4dcSTheodore Ts'o 			     "EXT4-fs error")
434efbed4dcSTheodore Ts'o 
43512062dddSEric Sandeen void __ext4_error(struct super_block *sb, const char *function,
436c398eda0STheodore Ts'o 		  unsigned int line, const char *fmt, ...)
437ac27a0ecSDave Kleikamp {
4380ff2ea7dSJoe Perches 	struct va_format vaf;
439ac27a0ecSDave Kleikamp 	va_list args;
440ac27a0ecSDave Kleikamp 
441*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
442*0db1ff22STheodore Ts'o 		return;
443*0db1ff22STheodore Ts'o 
444efbed4dcSTheodore Ts'o 	if (ext4_error_ratelimit(sb)) {
445ac27a0ecSDave Kleikamp 		va_start(args, fmt);
4460ff2ea7dSJoe Perches 		vaf.fmt = fmt;
4470ff2ea7dSJoe Perches 		vaf.va = &args;
448efbed4dcSTheodore Ts'o 		printk(KERN_CRIT
449efbed4dcSTheodore Ts'o 		       "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
4500ff2ea7dSJoe Perches 		       sb->s_id, function, line, current->comm, &vaf);
451ac27a0ecSDave Kleikamp 		va_end(args);
452efbed4dcSTheodore Ts'o 	}
453f3fc0210STheodore Ts'o 	save_error_info(sb, function, line);
454617ba13bSMingming Cao 	ext4_handle_error(sb);
455ac27a0ecSDave Kleikamp }
456ac27a0ecSDave Kleikamp 
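/*
 * Hedged usage sketch (not in the original source): callers normally go
 * through the ext4_error()/ext4_error_inode() wrappers from ext4.h, which
 * supply __func__ and __LINE__ before reaching the functions above and
 * below.  The range check and helper shown here are hypothetical.
 */
#if 0	/* example only */
static void example_range_check(struct super_block *sb,
				struct ext4_super_block *es, ext4_fsblk_t blk)
{
	if (unlikely(blk >= ext4_blocks_count(es)))
		ext4_error(sb, "block %llu out of range",
			   (unsigned long long) blk);
}
#endif
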
457e7c96e8eSJoe Perches void __ext4_error_inode(struct inode *inode, const char *function,
458c398eda0STheodore Ts'o 			unsigned int line, ext4_fsblk_t block,
459273df556SFrank Mayhar 			const char *fmt, ...)
460273df556SFrank Mayhar {
461273df556SFrank Mayhar 	va_list args;
462f7c21177STheodore Ts'o 	struct va_format vaf;
4631c13d5c0STheodore Ts'o 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
464273df556SFrank Mayhar 
465*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
466*0db1ff22STheodore Ts'o 		return;
467*0db1ff22STheodore Ts'o 
4681c13d5c0STheodore Ts'o 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
4691c13d5c0STheodore Ts'o 	es->s_last_error_block = cpu_to_le64(block);
470efbed4dcSTheodore Ts'o 	if (ext4_error_ratelimit(inode->i_sb)) {
471273df556SFrank Mayhar 		va_start(args, fmt);
472f7c21177STheodore Ts'o 		vaf.fmt = fmt;
473f7c21177STheodore Ts'o 		vaf.va = &args;
474c398eda0STheodore Ts'o 		if (block)
475d9ee81daSJoe Perches 			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
476d9ee81daSJoe Perches 			       "inode #%lu: block %llu: comm %s: %pV\n",
477d9ee81daSJoe Perches 			       inode->i_sb->s_id, function, line, inode->i_ino,
478d9ee81daSJoe Perches 			       block, current->comm, &vaf);
479d9ee81daSJoe Perches 		else
480d9ee81daSJoe Perches 			printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
481d9ee81daSJoe Perches 			       "inode #%lu: comm %s: %pV\n",
482d9ee81daSJoe Perches 			       inode->i_sb->s_id, function, line, inode->i_ino,
483d9ee81daSJoe Perches 			       current->comm, &vaf);
484273df556SFrank Mayhar 		va_end(args);
485efbed4dcSTheodore Ts'o 	}
486efbed4dcSTheodore Ts'o 	save_error_info(inode->i_sb, function, line);
487273df556SFrank Mayhar 	ext4_handle_error(inode->i_sb);
488273df556SFrank Mayhar }
489273df556SFrank Mayhar 
490e7c96e8eSJoe Perches void __ext4_error_file(struct file *file, const char *function,
491f7c21177STheodore Ts'o 		       unsigned int line, ext4_fsblk_t block,
492f7c21177STheodore Ts'o 		       const char *fmt, ...)
493273df556SFrank Mayhar {
494273df556SFrank Mayhar 	va_list args;
495f7c21177STheodore Ts'o 	struct va_format vaf;
4961c13d5c0STheodore Ts'o 	struct ext4_super_block *es;
497496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
498273df556SFrank Mayhar 	char pathname[80], *path;
499273df556SFrank Mayhar 
500*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
501*0db1ff22STheodore Ts'o 		return;
502*0db1ff22STheodore Ts'o 
5031c13d5c0STheodore Ts'o 	es = EXT4_SB(inode->i_sb)->s_es;
5041c13d5c0STheodore Ts'o 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
505efbed4dcSTheodore Ts'o 	if (ext4_error_ratelimit(inode->i_sb)) {
5069bf39ab2SMiklos Szeredi 		path = file_path(file, pathname, sizeof(pathname));
507f9a62d09SDan Carpenter 		if (IS_ERR(path))
508273df556SFrank Mayhar 			path = "(unknown)";
509f7c21177STheodore Ts'o 		va_start(args, fmt);
510f7c21177STheodore Ts'o 		vaf.fmt = fmt;
511f7c21177STheodore Ts'o 		vaf.va = &args;
512d9ee81daSJoe Perches 		if (block)
513d9ee81daSJoe Perches 			printk(KERN_CRIT
514d9ee81daSJoe Perches 			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
515d9ee81daSJoe Perches 			       "block %llu: comm %s: path %s: %pV\n",
516d9ee81daSJoe Perches 			       inode->i_sb->s_id, function, line, inode->i_ino,
517d9ee81daSJoe Perches 			       block, current->comm, path, &vaf);
518d9ee81daSJoe Perches 		else
519d9ee81daSJoe Perches 			printk(KERN_CRIT
520d9ee81daSJoe Perches 			       "EXT4-fs error (device %s): %s:%d: inode #%lu: "
521d9ee81daSJoe Perches 			       "comm %s: path %s: %pV\n",
522d9ee81daSJoe Perches 			       inode->i_sb->s_id, function, line, inode->i_ino,
523d9ee81daSJoe Perches 			       current->comm, path, &vaf);
524273df556SFrank Mayhar 		va_end(args);
525efbed4dcSTheodore Ts'o 	}
526efbed4dcSTheodore Ts'o 	save_error_info(inode->i_sb, function, line);
527273df556SFrank Mayhar 	ext4_handle_error(inode->i_sb);
528273df556SFrank Mayhar }
529273df556SFrank Mayhar 
530722887ddSTheodore Ts'o const char *ext4_decode_error(struct super_block *sb, int errno,
531ac27a0ecSDave Kleikamp 			      char nbuf[16])
532ac27a0ecSDave Kleikamp {
533ac27a0ecSDave Kleikamp 	char *errstr = NULL;
534ac27a0ecSDave Kleikamp 
535ac27a0ecSDave Kleikamp 	switch (errno) {
5366a797d27SDarrick J. Wong 	case -EFSCORRUPTED:
5376a797d27SDarrick J. Wong 		errstr = "Corrupt filesystem";
5386a797d27SDarrick J. Wong 		break;
5396a797d27SDarrick J. Wong 	case -EFSBADCRC:
5406a797d27SDarrick J. Wong 		errstr = "Filesystem failed CRC";
5416a797d27SDarrick J. Wong 		break;
542ac27a0ecSDave Kleikamp 	case -EIO:
543ac27a0ecSDave Kleikamp 		errstr = "IO failure";
544ac27a0ecSDave Kleikamp 		break;
545ac27a0ecSDave Kleikamp 	case -ENOMEM:
546ac27a0ecSDave Kleikamp 		errstr = "Out of memory";
547ac27a0ecSDave Kleikamp 		break;
548ac27a0ecSDave Kleikamp 	case -EROFS:
54978f1ddbbSTheodore Ts'o 		if (!sb || (EXT4_SB(sb)->s_journal &&
55078f1ddbbSTheodore Ts'o 			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
551ac27a0ecSDave Kleikamp 			errstr = "Journal has aborted";
552ac27a0ecSDave Kleikamp 		else
553ac27a0ecSDave Kleikamp 			errstr = "Readonly filesystem";
554ac27a0ecSDave Kleikamp 		break;
555ac27a0ecSDave Kleikamp 	default:
556ac27a0ecSDave Kleikamp 		/* If the caller passed in an extra buffer for unknown
557ac27a0ecSDave Kleikamp 		 * errors, textualise them now.  Else we just return
558ac27a0ecSDave Kleikamp 		 * NULL. */
559ac27a0ecSDave Kleikamp 		if (nbuf) {
560ac27a0ecSDave Kleikamp 			/* Check for truncated error codes... */
561ac27a0ecSDave Kleikamp 			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
562ac27a0ecSDave Kleikamp 				errstr = nbuf;
563ac27a0ecSDave Kleikamp 		}
564ac27a0ecSDave Kleikamp 		break;
565ac27a0ecSDave Kleikamp 	}
566ac27a0ecSDave Kleikamp 
567ac27a0ecSDave Kleikamp 	return errstr;
568ac27a0ecSDave Kleikamp }
569ac27a0ecSDave Kleikamp 
570617ba13bSMingming Cao /* __ext4_std_error decodes expected errors from journaling functions
571ac27a0ecSDave Kleikamp  * automatically and invokes the appropriate error response.  */
572ac27a0ecSDave Kleikamp 
573c398eda0STheodore Ts'o void __ext4_std_error(struct super_block *sb, const char *function,
574c398eda0STheodore Ts'o 		      unsigned int line, int errno)
575ac27a0ecSDave Kleikamp {
576ac27a0ecSDave Kleikamp 	char nbuf[16];
577ac27a0ecSDave Kleikamp 	const char *errstr;
578ac27a0ecSDave Kleikamp 
579*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
580*0db1ff22STheodore Ts'o 		return;
581*0db1ff22STheodore Ts'o 
582ac27a0ecSDave Kleikamp 	/* Special case: if the error is EROFS, and we're not already
583ac27a0ecSDave Kleikamp 	 * inside a transaction, then there's really no point in logging
584ac27a0ecSDave Kleikamp 	 * an error. */
585ac27a0ecSDave Kleikamp 	if (errno == -EROFS && journal_current_handle() == NULL &&
586ac27a0ecSDave Kleikamp 	    (sb->s_flags & MS_RDONLY))
587ac27a0ecSDave Kleikamp 		return;
588ac27a0ecSDave Kleikamp 
589efbed4dcSTheodore Ts'o 	if (ext4_error_ratelimit(sb)) {
590617ba13bSMingming Cao 		errstr = ext4_decode_error(sb, errno, nbuf);
591c398eda0STheodore Ts'o 		printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
592c398eda0STheodore Ts'o 		       sb->s_id, function, line, errstr);
593efbed4dcSTheodore Ts'o 	}
594ac27a0ecSDave Kleikamp 
595efbed4dcSTheodore Ts'o 	save_error_info(sb, function, line);
596617ba13bSMingming Cao 	ext4_handle_error(sb);
597ac27a0ecSDave Kleikamp }
598ac27a0ecSDave Kleikamp 
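/*
 * Hedged usage sketch (not in the original source): the ext4_std_error()
 * wrapper from ext4.h is the usual way journaling errors reach the
 * function above; the write-access call is just a representative caller
 * and the helper itself is hypothetical.
 */
#if 0	/* example only */
static int example_get_write_access(struct super_block *sb, handle_t *handle,
				    struct buffer_head *bh)
{
	int err = ext4_journal_get_write_access(handle, bh);

	if (err)
		ext4_std_error(sb, err);
	return err;
}
#endif
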
599ac27a0ecSDave Kleikamp /*
600617ba13bSMingming Cao  * ext4_abort is a much stronger failure handler than ext4_error.  The
601ac27a0ecSDave Kleikamp  * abort function may be used to deal with unrecoverable failures such
602ac27a0ecSDave Kleikamp  * as journal IO errors or ENOMEM at a critical moment in log management.
603ac27a0ecSDave Kleikamp  *
604ac27a0ecSDave Kleikamp  * We unconditionally force the filesystem into an ABORT|READONLY state,
605ac27a0ecSDave Kleikamp  * unless the error response on the fs has been set to panic in which
606ac27a0ecSDave Kleikamp  * case we take the easy way out and panic immediately.
607ac27a0ecSDave Kleikamp  */
608ac27a0ecSDave Kleikamp 
609c67d859eSTheodore Ts'o void __ext4_abort(struct super_block *sb, const char *function,
610c398eda0STheodore Ts'o 		unsigned int line, const char *fmt, ...)
611ac27a0ecSDave Kleikamp {
612651e1c3bSJoe Perches 	struct va_format vaf;
613ac27a0ecSDave Kleikamp 	va_list args;
614ac27a0ecSDave Kleikamp 
615*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
616*0db1ff22STheodore Ts'o 		return;
617*0db1ff22STheodore Ts'o 
6181c13d5c0STheodore Ts'o 	save_error_info(sb, function, line);
619ac27a0ecSDave Kleikamp 	va_start(args, fmt);
620651e1c3bSJoe Perches 	vaf.fmt = fmt;
621651e1c3bSJoe Perches 	vaf.va = &args;
622651e1c3bSJoe Perches 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
623651e1c3bSJoe Perches 	       sb->s_id, function, line, &vaf);
624ac27a0ecSDave Kleikamp 	va_end(args);
625ac27a0ecSDave Kleikamp 
6261c13d5c0STheodore Ts'o 	if ((sb->s_flags & MS_RDONLY) == 0) {
627b31e1552SEric Sandeen 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
6284ab2f15bSTheodore Ts'o 		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
6294418e141SDmitry Monakhov 		/*
6304418e141SDmitry Monakhov 		 * Make sure updated value of ->s_mount_flags will be visible
6314418e141SDmitry Monakhov 		 * before ->s_flags update
6324418e141SDmitry Monakhov 		 */
6334418e141SDmitry Monakhov 		smp_wmb();
6344418e141SDmitry Monakhov 		sb->s_flags |= MS_RDONLY;
635ef2cabf7SHidehiro Kawai 		if (EXT4_SB(sb)->s_journal)
636dab291afSMingming Cao 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
6371c13d5c0STheodore Ts'o 		save_error_info(sb, function, line);
6381c13d5c0STheodore Ts'o 	}
6394327ba52SDaeho Jeong 	if (test_opt(sb, ERRORS_PANIC)) {
6404327ba52SDaeho Jeong 		if (EXT4_SB(sb)->s_journal &&
6414327ba52SDaeho Jeong 		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
6424327ba52SDaeho Jeong 			return;
6431c13d5c0STheodore Ts'o 		panic("EXT4-fs panic from previous error\n");
644ac27a0ecSDave Kleikamp 	}
6454327ba52SDaeho Jeong }
646ac27a0ecSDave Kleikamp 
647e7c96e8eSJoe Perches void __ext4_msg(struct super_block *sb,
648e7c96e8eSJoe Perches 		const char *prefix, const char *fmt, ...)
649b31e1552SEric Sandeen {
6500ff2ea7dSJoe Perches 	struct va_format vaf;
651b31e1552SEric Sandeen 	va_list args;
652b31e1552SEric Sandeen 
653efbed4dcSTheodore Ts'o 	if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
654efbed4dcSTheodore Ts'o 		return;
655efbed4dcSTheodore Ts'o 
656b31e1552SEric Sandeen 	va_start(args, fmt);
6570ff2ea7dSJoe Perches 	vaf.fmt = fmt;
6580ff2ea7dSJoe Perches 	vaf.va = &args;
6590ff2ea7dSJoe Perches 	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
660b31e1552SEric Sandeen 	va_end(args);
661b31e1552SEric Sandeen }
662b31e1552SEric Sandeen 
663b03a2f7eSAndreas Dilger #define ext4_warning_ratelimit(sb)					\
664b03a2f7eSAndreas Dilger 		___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),	\
665b03a2f7eSAndreas Dilger 			     "EXT4-fs warning")
666b03a2f7eSAndreas Dilger 
66712062dddSEric Sandeen void __ext4_warning(struct super_block *sb, const char *function,
668c398eda0STheodore Ts'o 		    unsigned int line, const char *fmt, ...)
669ac27a0ecSDave Kleikamp {
6700ff2ea7dSJoe Perches 	struct va_format vaf;
671ac27a0ecSDave Kleikamp 	va_list args;
672ac27a0ecSDave Kleikamp 
673b03a2f7eSAndreas Dilger 	if (!ext4_warning_ratelimit(sb))
674efbed4dcSTheodore Ts'o 		return;
675efbed4dcSTheodore Ts'o 
676ac27a0ecSDave Kleikamp 	va_start(args, fmt);
6770ff2ea7dSJoe Perches 	vaf.fmt = fmt;
6780ff2ea7dSJoe Perches 	vaf.va = &args;
6790ff2ea7dSJoe Perches 	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
6800ff2ea7dSJoe Perches 	       sb->s_id, function, line, &vaf);
681ac27a0ecSDave Kleikamp 	va_end(args);
682ac27a0ecSDave Kleikamp }
683ac27a0ecSDave Kleikamp 
684b03a2f7eSAndreas Dilger void __ext4_warning_inode(const struct inode *inode, const char *function,
685b03a2f7eSAndreas Dilger 			  unsigned int line, const char *fmt, ...)
686b03a2f7eSAndreas Dilger {
687b03a2f7eSAndreas Dilger 	struct va_format vaf;
688b03a2f7eSAndreas Dilger 	va_list args;
689b03a2f7eSAndreas Dilger 
690b03a2f7eSAndreas Dilger 	if (!ext4_warning_ratelimit(inode->i_sb))
691b03a2f7eSAndreas Dilger 		return;
692b03a2f7eSAndreas Dilger 
693b03a2f7eSAndreas Dilger 	va_start(args, fmt);
694b03a2f7eSAndreas Dilger 	vaf.fmt = fmt;
695b03a2f7eSAndreas Dilger 	vaf.va = &args;
696b03a2f7eSAndreas Dilger 	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: "
697b03a2f7eSAndreas Dilger 	       "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id,
698b03a2f7eSAndreas Dilger 	       function, line, inode->i_ino, current->comm, &vaf);
699b03a2f7eSAndreas Dilger 	va_end(args);
700b03a2f7eSAndreas Dilger }
701b03a2f7eSAndreas Dilger 
702e29136f8STheodore Ts'o void __ext4_grp_locked_error(const char *function, unsigned int line,
703e29136f8STheodore Ts'o 			     struct super_block *sb, ext4_group_t grp,
704e29136f8STheodore Ts'o 			     unsigned long ino, ext4_fsblk_t block,
705e29136f8STheodore Ts'o 			     const char *fmt, ...)
7065d1b1b3fSAneesh Kumar K.V __releases(bitlock)
7075d1b1b3fSAneesh Kumar K.V __acquires(bitlock)
7085d1b1b3fSAneesh Kumar K.V {
7090ff2ea7dSJoe Perches 	struct va_format vaf;
7105d1b1b3fSAneesh Kumar K.V 	va_list args;
7115d1b1b3fSAneesh Kumar K.V 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
7125d1b1b3fSAneesh Kumar K.V 
713*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
714*0db1ff22STheodore Ts'o 		return;
715*0db1ff22STheodore Ts'o 
7161c13d5c0STheodore Ts'o 	es->s_last_error_ino = cpu_to_le32(ino);
7171c13d5c0STheodore Ts'o 	es->s_last_error_block = cpu_to_le64(block);
7181c13d5c0STheodore Ts'o 	__save_error_info(sb, function, line);
7190ff2ea7dSJoe Perches 
720efbed4dcSTheodore Ts'o 	if (ext4_error_ratelimit(sb)) {
7215d1b1b3fSAneesh Kumar K.V 		va_start(args, fmt);
7220ff2ea7dSJoe Perches 		vaf.fmt = fmt;
7230ff2ea7dSJoe Perches 		vaf.va = &args;
72421149d61SRobin Dong 		printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
725e29136f8STheodore Ts'o 		       sb->s_id, function, line, grp);
726e29136f8STheodore Ts'o 		if (ino)
7270ff2ea7dSJoe Perches 			printk(KERN_CONT "inode %lu: ", ino);
728e29136f8STheodore Ts'o 		if (block)
729efbed4dcSTheodore Ts'o 			printk(KERN_CONT "block %llu:",
730efbed4dcSTheodore Ts'o 			       (unsigned long long) block);
7310ff2ea7dSJoe Perches 		printk(KERN_CONT "%pV\n", &vaf);
7325d1b1b3fSAneesh Kumar K.V 		va_end(args);
733efbed4dcSTheodore Ts'o 	}
7345d1b1b3fSAneesh Kumar K.V 
7355d1b1b3fSAneesh Kumar K.V 	if (test_opt(sb, ERRORS_CONT)) {
736e2d67052STheodore Ts'o 		ext4_commit_super(sb, 0);
7375d1b1b3fSAneesh Kumar K.V 		return;
7385d1b1b3fSAneesh Kumar K.V 	}
7391c13d5c0STheodore Ts'o 
7405d1b1b3fSAneesh Kumar K.V 	ext4_unlock_group(sb, grp);
7415d1b1b3fSAneesh Kumar K.V 	ext4_handle_error(sb);
7425d1b1b3fSAneesh Kumar K.V 	/*
7435d1b1b3fSAneesh Kumar K.V 	 * We only get here in the ERRORS_RO case; relocking the group
7445d1b1b3fSAneesh Kumar K.V 	 * may be dangerous, but nothing bad will happen since the
7455d1b1b3fSAneesh Kumar K.V 	 * filesystem will have already been marked read/only and the
7465d1b1b3fSAneesh Kumar K.V 	 * journal has been aborted.  We return 1 as a hint to callers
7475d1b1b3fSAneesh Kumar K.V 	 * who might want to use the return value from
74825985edcSLucas De Marchi 	 * ext4_grp_locked_error() to distinguish between the
7495d1b1b3fSAneesh Kumar K.V 	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
7505d1b1b3fSAneesh Kumar K.V 	 * aggressively from the ext4 function in question, with a
7515d1b1b3fSAneesh Kumar K.V 	 * more appropriate error code.
7525d1b1b3fSAneesh Kumar K.V 	 */
7535d1b1b3fSAneesh Kumar K.V 	ext4_lock_group(sb, grp);
7545d1b1b3fSAneesh Kumar K.V 	return;
7555d1b1b3fSAneesh Kumar K.V }
7565d1b1b3fSAneesh Kumar K.V 
757617ba13bSMingming Cao void ext4_update_dynamic_rev(struct super_block *sb)
758ac27a0ecSDave Kleikamp {
759617ba13bSMingming Cao 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
760ac27a0ecSDave Kleikamp 
761617ba13bSMingming Cao 	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
762ac27a0ecSDave Kleikamp 		return;
763ac27a0ecSDave Kleikamp 
76412062dddSEric Sandeen 	ext4_warning(sb,
765ac27a0ecSDave Kleikamp 		     "updating to rev %d because of new feature flag, "
766ac27a0ecSDave Kleikamp 		     "running e2fsck is recommended",
767617ba13bSMingming Cao 		     EXT4_DYNAMIC_REV);
768ac27a0ecSDave Kleikamp 
769617ba13bSMingming Cao 	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
770617ba13bSMingming Cao 	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
771617ba13bSMingming Cao 	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
772ac27a0ecSDave Kleikamp 	/* leave es->s_feature_*compat flags alone */
773ac27a0ecSDave Kleikamp 	/* es->s_uuid will be set by e2fsck if empty */
774ac27a0ecSDave Kleikamp 
775ac27a0ecSDave Kleikamp 	/*
776ac27a0ecSDave Kleikamp 	 * The rest of the superblock fields should be zero, and if not it
777ac27a0ecSDave Kleikamp 	 * means they are likely already in use, so leave them alone.  We
778ac27a0ecSDave Kleikamp 	 * can leave it up to e2fsck to clean up any inconsistencies there.
779ac27a0ecSDave Kleikamp 	 */
780ac27a0ecSDave Kleikamp }
781ac27a0ecSDave Kleikamp 
782ac27a0ecSDave Kleikamp /*
783ac27a0ecSDave Kleikamp  * Open the external journal device
784ac27a0ecSDave Kleikamp  */
785b31e1552SEric Sandeen static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
786ac27a0ecSDave Kleikamp {
787ac27a0ecSDave Kleikamp 	struct block_device *bdev;
788ac27a0ecSDave Kleikamp 	char b[BDEVNAME_SIZE];
789ac27a0ecSDave Kleikamp 
790d4d77629STejun Heo 	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
791ac27a0ecSDave Kleikamp 	if (IS_ERR(bdev))
792ac27a0ecSDave Kleikamp 		goto fail;
793ac27a0ecSDave Kleikamp 	return bdev;
794ac27a0ecSDave Kleikamp 
795ac27a0ecSDave Kleikamp fail:
796b31e1552SEric Sandeen 	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
797ac27a0ecSDave Kleikamp 			__bdevname(dev, b), PTR_ERR(bdev));
798ac27a0ecSDave Kleikamp 	return NULL;
799ac27a0ecSDave Kleikamp }
800ac27a0ecSDave Kleikamp 
801ac27a0ecSDave Kleikamp /*
802ac27a0ecSDave Kleikamp  * Release the journal device
803ac27a0ecSDave Kleikamp  */
8044385bab1SAl Viro static void ext4_blkdev_put(struct block_device *bdev)
805ac27a0ecSDave Kleikamp {
8064385bab1SAl Viro 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
807ac27a0ecSDave Kleikamp }
808ac27a0ecSDave Kleikamp 
8094385bab1SAl Viro static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
810ac27a0ecSDave Kleikamp {
811ac27a0ecSDave Kleikamp 	struct block_device *bdev;
812ac27a0ecSDave Kleikamp 	bdev = sbi->journal_bdev;
813ac27a0ecSDave Kleikamp 	if (bdev) {
8144385bab1SAl Viro 		ext4_blkdev_put(bdev);
815ac27a0ecSDave Kleikamp 		sbi->journal_bdev = NULL;
816ac27a0ecSDave Kleikamp 	}
817ac27a0ecSDave Kleikamp }
818ac27a0ecSDave Kleikamp 
819ac27a0ecSDave Kleikamp static inline struct inode *orphan_list_entry(struct list_head *l)
820ac27a0ecSDave Kleikamp {
821617ba13bSMingming Cao 	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
822ac27a0ecSDave Kleikamp }
823ac27a0ecSDave Kleikamp 
824617ba13bSMingming Cao static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
825ac27a0ecSDave Kleikamp {
826ac27a0ecSDave Kleikamp 	struct list_head *l;
827ac27a0ecSDave Kleikamp 
828b31e1552SEric Sandeen 	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
829ac27a0ecSDave Kleikamp 		 le32_to_cpu(sbi->s_es->s_last_orphan));
830ac27a0ecSDave Kleikamp 
831ac27a0ecSDave Kleikamp 	printk(KERN_ERR "sb_info orphan list:\n");
832ac27a0ecSDave Kleikamp 	list_for_each(l, &sbi->s_orphan) {
833ac27a0ecSDave Kleikamp 		struct inode *inode = orphan_list_entry(l);
834ac27a0ecSDave Kleikamp 		printk(KERN_ERR "  "
835ac27a0ecSDave Kleikamp 		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
836ac27a0ecSDave Kleikamp 		       inode->i_sb->s_id, inode->i_ino, inode,
837ac27a0ecSDave Kleikamp 		       inode->i_mode, inode->i_nlink,
838ac27a0ecSDave Kleikamp 		       NEXT_ORPHAN(inode));
839ac27a0ecSDave Kleikamp 	}
840ac27a0ecSDave Kleikamp }
841ac27a0ecSDave Kleikamp 
842617ba13bSMingming Cao static void ext4_put_super(struct super_block *sb)
843ac27a0ecSDave Kleikamp {
844617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
845617ba13bSMingming Cao 	struct ext4_super_block *es = sbi->s_es;
84697abd7d4STheodore Ts'o 	int aborted = 0;
847ef2cabf7SHidehiro Kawai 	int i, err;
848ac27a0ecSDave Kleikamp 
849857ac889SLukas Czerner 	ext4_unregister_li_request(sb);
850e0ccfd95SChristoph Hellwig 	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
851e0ccfd95SChristoph Hellwig 
8522e8fa54eSJan Kara 	flush_workqueue(sbi->rsv_conversion_wq);
8532e8fa54eSJan Kara 	destroy_workqueue(sbi->rsv_conversion_wq);
8544c0425ffSMingming Cao 
8550390131bSFrank Mayhar 	if (sbi->s_journal) {
85697abd7d4STheodore Ts'o 		aborted = is_journal_aborted(sbi->s_journal);
857ef2cabf7SHidehiro Kawai 		err = jbd2_journal_destroy(sbi->s_journal);
85847b4a50bSJan Kara 		sbi->s_journal = NULL;
85997abd7d4STheodore Ts'o 		if ((err < 0) && !aborted)
860c67d859eSTheodore Ts'o 			ext4_abort(sb, "Couldn't clean up the journal");
8610390131bSFrank Mayhar 	}
862d4edac31SJosef Bacik 
863ebd173beSTheodore Ts'o 	ext4_unregister_sysfs(sb);
864d3922a77SZheng Liu 	ext4_es_unregister_shrinker(sbi);
8659105bb14SAl Viro 	del_timer_sync(&sbi->s_err_report);
866d4edac31SJosef Bacik 	ext4_release_system_zone(sb);
867d4edac31SJosef Bacik 	ext4_mb_release(sb);
868d4edac31SJosef Bacik 	ext4_ext_release(sb);
869d4edac31SJosef Bacik 
87097abd7d4STheodore Ts'o 	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
871e2b911c5SDarrick J. Wong 		ext4_clear_feature_journal_needs_recovery(sb);
872ac27a0ecSDave Kleikamp 		es->s_state = cpu_to_le16(sbi->s_mount_state);
873ac27a0ecSDave Kleikamp 	}
87458c5873aSArtem Bityutskiy 	if (!(sb->s_flags & MS_RDONLY))
875a8e25a83SArtem Bityutskiy 		ext4_commit_super(sb, 1);
876a8e25a83SArtem Bityutskiy 
877ac27a0ecSDave Kleikamp 	for (i = 0; i < sbi->s_gdb_count; i++)
878ac27a0ecSDave Kleikamp 		brelse(sbi->s_group_desc[i]);
879b93b41d4SAl Viro 	kvfree(sbi->s_group_desc);
880b93b41d4SAl Viro 	kvfree(sbi->s_flex_groups);
88157042651STheodore Ts'o 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
882ac27a0ecSDave Kleikamp 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
883ac27a0ecSDave Kleikamp 	percpu_counter_destroy(&sbi->s_dirs_counter);
88457042651STheodore Ts'o 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
885c8585c6fSDaeho Jeong 	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
886ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
887a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
888ac27a0ecSDave Kleikamp 		kfree(sbi->s_qf_names[i]);
889ac27a0ecSDave Kleikamp #endif
890ac27a0ecSDave Kleikamp 
891ac27a0ecSDave Kleikamp 	/* Debugging code just in case the in-memory inode orphan list
892ac27a0ecSDave Kleikamp 	 * isn't empty.  The on-disk one can be non-empty if we've
893ac27a0ecSDave Kleikamp 	 * detected an error and taken the fs readonly, but the
894ac27a0ecSDave Kleikamp 	 * in-memory list had better be clean by this point. */
895ac27a0ecSDave Kleikamp 	if (!list_empty(&sbi->s_orphan))
896ac27a0ecSDave Kleikamp 		dump_orphan_list(sb, sbi);
897ac27a0ecSDave Kleikamp 	J_ASSERT(list_empty(&sbi->s_orphan));
898ac27a0ecSDave Kleikamp 
89989d96a6fSTheodore Ts'o 	sync_blockdev(sb->s_bdev);
900f98393a6SPeter Zijlstra 	invalidate_bdev(sb->s_bdev);
901ac27a0ecSDave Kleikamp 	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
902ac27a0ecSDave Kleikamp 		/*
903ac27a0ecSDave Kleikamp 		 * Invalidate the journal device's buffers.  We don't want them
904ac27a0ecSDave Kleikamp 		 * floating about in memory - the physical journal device may
905ac27a0ecSDave Kleikamp 		 * be hotswapped, and it breaks the `ro-after' testing code.
906ac27a0ecSDave Kleikamp 		 */
907ac27a0ecSDave Kleikamp 		sync_blockdev(sbi->journal_bdev);
908f98393a6SPeter Zijlstra 		invalidate_bdev(sbi->journal_bdev);
909617ba13bSMingming Cao 		ext4_blkdev_remove(sbi);
910ac27a0ecSDave Kleikamp 	}
9119c191f70ST Makphaibulchoke 	if (sbi->s_mb_cache) {
9129c191f70ST Makphaibulchoke 		ext4_xattr_destroy_cache(sbi->s_mb_cache);
9139c191f70ST Makphaibulchoke 		sbi->s_mb_cache = NULL;
9149c191f70ST Makphaibulchoke 	}
915c5e06d10SJohann Lombardi 	if (sbi->s_mmp_tsk)
916c5e06d10SJohann Lombardi 		kthread_stop(sbi->s_mmp_tsk);
9179060dd2cSEric Sandeen 	brelse(sbi->s_sbh);
918ac27a0ecSDave Kleikamp 	sb->s_fs_info = NULL;
9193197ebdbSTheodore Ts'o 	/*
9203197ebdbSTheodore Ts'o 	 * Now that we are completely done shutting down the
9213197ebdbSTheodore Ts'o 	 * superblock, we need to actually destroy the kobject.
9223197ebdbSTheodore Ts'o 	 */
9233197ebdbSTheodore Ts'o 	kobject_put(&sbi->s_kobj);
9243197ebdbSTheodore Ts'o 	wait_for_completion(&sbi->s_kobj_unregister);
9250441984aSDarrick J. Wong 	if (sbi->s_chksum_driver)
9260441984aSDarrick J. Wong 		crypto_free_shash(sbi->s_chksum_driver);
927705895b6SPekka Enberg 	kfree(sbi->s_blockgroup_lock);
928ac27a0ecSDave Kleikamp 	kfree(sbi);
929ac27a0ecSDave Kleikamp }
930ac27a0ecSDave Kleikamp 
931e18b890bSChristoph Lameter static struct kmem_cache *ext4_inode_cachep;
932ac27a0ecSDave Kleikamp 
933ac27a0ecSDave Kleikamp /*
934ac27a0ecSDave Kleikamp  * Called inside transaction, so use GFP_NOFS
935ac27a0ecSDave Kleikamp  */
936617ba13bSMingming Cao static struct inode *ext4_alloc_inode(struct super_block *sb)
937ac27a0ecSDave Kleikamp {
938617ba13bSMingming Cao 	struct ext4_inode_info *ei;
939ac27a0ecSDave Kleikamp 
940e6b4f8daSChristoph Lameter 	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
941ac27a0ecSDave Kleikamp 	if (!ei)
942ac27a0ecSDave Kleikamp 		return NULL;
9430b8e58a1SAndreas Dilger 
944ac27a0ecSDave Kleikamp 	ei->vfs_inode.i_version = 1;
945202ee5dfSTheodore Ts'o 	spin_lock_init(&ei->i_raw_lock);
946c9de560dSAlex Tomas 	INIT_LIST_HEAD(&ei->i_prealloc_list);
947c9de560dSAlex Tomas 	spin_lock_init(&ei->i_prealloc_lock);
9489a26b661SZheng Liu 	ext4_es_init_tree(&ei->i_es_tree);
9499a26b661SZheng Liu 	rwlock_init(&ei->i_es_lock);
950edaa53caSZheng Liu 	INIT_LIST_HEAD(&ei->i_es_list);
951eb68d0e2SZheng Liu 	ei->i_es_all_nr = 0;
952edaa53caSZheng Liu 	ei->i_es_shk_nr = 0;
953dd475925SJan Kara 	ei->i_es_shrink_lblk = 0;
954d2a17637SMingming Cao 	ei->i_reserved_data_blocks = 0;
955d2a17637SMingming Cao 	ei->i_reserved_meta_blocks = 0;
956d2a17637SMingming Cao 	ei->i_allocated_meta_blocks = 0;
9579d0be502STheodore Ts'o 	ei->i_da_metadata_calc_len = 0;
9587e731bc9STheodore Ts'o 	ei->i_da_metadata_calc_last_lblock = 0;
959d2a17637SMingming Cao 	spin_lock_init(&(ei->i_block_reservation_lock));
960a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
961a9e7f447SDmitry Monakhov 	ei->i_reserved_quota = 0;
96296c7e0d9SJan Kara 	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
963a9e7f447SDmitry Monakhov #endif
9648aefcd55STheodore Ts'o 	ei->jinode = NULL;
9652e8fa54eSJan Kara 	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
966744692dcSJiaying Zhang 	spin_lock_init(&ei->i_completed_io_lock);
967b436b9beSJan Kara 	ei->i_sync_tid = 0;
968b436b9beSJan Kara 	ei->i_datasync_tid = 0;
969e27f41e1SDmitry Monakhov 	atomic_set(&ei->i_unwritten, 0);
9702e8fa54eSJan Kara 	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
971ac27a0ecSDave Kleikamp 	return &ei->vfs_inode;
972ac27a0ecSDave Kleikamp }
973ac27a0ecSDave Kleikamp 
9747ff9c073STheodore Ts'o static int ext4_drop_inode(struct inode *inode)
9757ff9c073STheodore Ts'o {
9767ff9c073STheodore Ts'o 	int drop = generic_drop_inode(inode);
9777ff9c073STheodore Ts'o 
9787ff9c073STheodore Ts'o 	trace_ext4_drop_inode(inode, drop);
9797ff9c073STheodore Ts'o 	return drop;
9807ff9c073STheodore Ts'o }
9817ff9c073STheodore Ts'o 
982fa0d7e3dSNick Piggin static void ext4_i_callback(struct rcu_head *head)
983fa0d7e3dSNick Piggin {
984fa0d7e3dSNick Piggin 	struct inode *inode = container_of(head, struct inode, i_rcu);
985fa0d7e3dSNick Piggin 	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
986fa0d7e3dSNick Piggin }
987fa0d7e3dSNick Piggin 
988617ba13bSMingming Cao static void ext4_destroy_inode(struct inode *inode)
989ac27a0ecSDave Kleikamp {
9909f7dd93dSVasily Averin 	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
991b31e1552SEric Sandeen 		ext4_msg(inode->i_sb, KERN_ERR,
992b31e1552SEric Sandeen 			 "Inode %lu (%p): orphan list check failed!",
993b31e1552SEric Sandeen 			 inode->i_ino, EXT4_I(inode));
9949f7dd93dSVasily Averin 		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
9959f7dd93dSVasily Averin 				EXT4_I(inode), sizeof(struct ext4_inode_info),
9969f7dd93dSVasily Averin 				true);
9979f7dd93dSVasily Averin 		dump_stack();
9989f7dd93dSVasily Averin 	}
999fa0d7e3dSNick Piggin 	call_rcu(&inode->i_rcu, ext4_i_callback);
1000ac27a0ecSDave Kleikamp }
1001ac27a0ecSDave Kleikamp 
100251cc5068SAlexey Dobriyan static void init_once(void *foo)
1003ac27a0ecSDave Kleikamp {
1004617ba13bSMingming Cao 	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
1005ac27a0ecSDave Kleikamp 
1006ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&ei->i_orphan);
1007ac27a0ecSDave Kleikamp 	init_rwsem(&ei->xattr_sem);
10080e855ac8SAneesh Kumar K.V 	init_rwsem(&ei->i_data_sem);
1009ea3d7209SJan Kara 	init_rwsem(&ei->i_mmap_sem);
1010ac27a0ecSDave Kleikamp 	inode_init_once(&ei->vfs_inode);
1011ac27a0ecSDave Kleikamp }
1012ac27a0ecSDave Kleikamp 
1013e67bc2b3SFabian Frederick static int __init init_inodecache(void)
1014ac27a0ecSDave Kleikamp {
1015617ba13bSMingming Cao 	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
1016617ba13bSMingming Cao 					     sizeof(struct ext4_inode_info),
1017ac27a0ecSDave Kleikamp 					     0, (SLAB_RECLAIM_ACCOUNT|
10185d097056SVladimir Davydov 						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
101920c2df83SPaul Mundt 					     init_once);
1020617ba13bSMingming Cao 	if (ext4_inode_cachep == NULL)
1021ac27a0ecSDave Kleikamp 		return -ENOMEM;
1022ac27a0ecSDave Kleikamp 	return 0;
1023ac27a0ecSDave Kleikamp }
1024ac27a0ecSDave Kleikamp 
1025ac27a0ecSDave Kleikamp static void destroy_inodecache(void)
1026ac27a0ecSDave Kleikamp {
10278c0a8537SKirill A. Shutemov 	/*
10288c0a8537SKirill A. Shutemov 	 * Make sure all delayed rcu free inodes are flushed before we
10298c0a8537SKirill A. Shutemov 	 * destroy cache.
10308c0a8537SKirill A. Shutemov 	 */
10318c0a8537SKirill A. Shutemov 	rcu_barrier();
1032617ba13bSMingming Cao 	kmem_cache_destroy(ext4_inode_cachep);
1033ac27a0ecSDave Kleikamp }
1034ac27a0ecSDave Kleikamp 
10350930fcc1SAl Viro void ext4_clear_inode(struct inode *inode)
1036ac27a0ecSDave Kleikamp {
10370930fcc1SAl Viro 	invalidate_inode_buffers(inode);
1038dbd5768fSJan Kara 	clear_inode(inode);
10399f754758SChristoph Hellwig 	dquot_drop(inode);
1040c2ea3fdeSTheodore Ts'o 	ext4_discard_preallocations(inode);
104151865fdaSZheng Liu 	ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
10428aefcd55STheodore Ts'o 	if (EXT4_I(inode)->jinode) {
10438aefcd55STheodore Ts'o 		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
10448aefcd55STheodore Ts'o 					       EXT4_I(inode)->jinode);
10458aefcd55STheodore Ts'o 		jbd2_free_inode(EXT4_I(inode)->jinode);
10468aefcd55STheodore Ts'o 		EXT4_I(inode)->jinode = NULL;
10478aefcd55STheodore Ts'o 	}
1048b7236e21STheodore Ts'o #ifdef CONFIG_EXT4_FS_ENCRYPTION
1049a7550b30SJaegeuk Kim 	fscrypt_put_encryption_info(inode, NULL);
1050b7236e21STheodore Ts'o #endif
1051ac27a0ecSDave Kleikamp }
1052ac27a0ecSDave Kleikamp 
10531b961ac0SChristoph Hellwig static struct inode *ext4_nfs_get_inode(struct super_block *sb,
10541b961ac0SChristoph Hellwig 					u64 ino, u32 generation)
1055ac27a0ecSDave Kleikamp {
1056ac27a0ecSDave Kleikamp 	struct inode *inode;
1057ac27a0ecSDave Kleikamp 
1058617ba13bSMingming Cao 	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
1059ac27a0ecSDave Kleikamp 		return ERR_PTR(-ESTALE);
1060617ba13bSMingming Cao 	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
1061ac27a0ecSDave Kleikamp 		return ERR_PTR(-ESTALE);
1062ac27a0ecSDave Kleikamp 
1063ac27a0ecSDave Kleikamp 	/* iget isn't really right if the inode is currently unallocated!!
1064ac27a0ecSDave Kleikamp 	 *
1065617ba13bSMingming Cao 	 * ext4_iget_normal() will return a bad_inode if the inode had been
1066ac27a0ecSDave Kleikamp 	 * deleted, so we should be safe.
1067ac27a0ecSDave Kleikamp 	 *
1068ac27a0ecSDave Kleikamp 	 * Currently we don't know the generation of the parent directory, so
1069ac27a0ecSDave Kleikamp 	 * a generation of 0 means "accept any".
1070ac27a0ecSDave Kleikamp 	 */
1071f4bb2981STheodore Ts'o 	inode = ext4_iget_normal(sb, ino);
10721d1fe1eeSDavid Howells 	if (IS_ERR(inode))
10731d1fe1eeSDavid Howells 		return ERR_CAST(inode);
10741d1fe1eeSDavid Howells 	if (generation && inode->i_generation != generation) {
1075ac27a0ecSDave Kleikamp 		iput(inode);
1076ac27a0ecSDave Kleikamp 		return ERR_PTR(-ESTALE);
1077ac27a0ecSDave Kleikamp 	}
10781b961ac0SChristoph Hellwig 
10791b961ac0SChristoph Hellwig 	return inode;
1080ac27a0ecSDave Kleikamp }
10811b961ac0SChristoph Hellwig 
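/*
 * NFS export support: the generic helpers below decode a file handle into
 * an (inode number, generation) pair and call back into
 * ext4_nfs_get_inode(), which rejects handles whose generation no longer
 * matches the inode's current generation.
 */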
10821b961ac0SChristoph Hellwig static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
10831b961ac0SChristoph Hellwig 					int fh_len, int fh_type)
10841b961ac0SChristoph Hellwig {
10851b961ac0SChristoph Hellwig 	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
10861b961ac0SChristoph Hellwig 				    ext4_nfs_get_inode);
10871b961ac0SChristoph Hellwig }
10881b961ac0SChristoph Hellwig 
10891b961ac0SChristoph Hellwig static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
10901b961ac0SChristoph Hellwig 					int fh_len, int fh_type)
10911b961ac0SChristoph Hellwig {
10921b961ac0SChristoph Hellwig 	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
10931b961ac0SChristoph Hellwig 				    ext4_nfs_get_inode);
1094ac27a0ecSDave Kleikamp }
1095ac27a0ecSDave Kleikamp 
1096c39a7f84SToshiyuki Okajima /*
1097c39a7f84SToshiyuki Okajima  * Try to release metadata pages (indirect blocks, directories) which are
1098c39a7f84SToshiyuki Okajima  * mapped via the block device.  Since these pages could have journal heads
1099c39a7f84SToshiyuki Okajima  * which would prevent try_to_free_buffers() from freeing them, we must use
1100c39a7f84SToshiyuki Okajima  * jbd2 layer's try_to_free_buffers() function to release them.
1101c39a7f84SToshiyuki Okajima  */
11020b8e58a1SAndreas Dilger static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
11030b8e58a1SAndreas Dilger 				 gfp_t wait)
1104c39a7f84SToshiyuki Okajima {
1105c39a7f84SToshiyuki Okajima 	journal_t *journal = EXT4_SB(sb)->s_journal;
1106c39a7f84SToshiyuki Okajima 
1107c39a7f84SToshiyuki Okajima 	WARN_ON(PageChecked(page));
1108c39a7f84SToshiyuki Okajima 	if (!page_has_buffers(page))
1109c39a7f84SToshiyuki Okajima 		return 0;
1110c39a7f84SToshiyuki Okajima 	if (journal)
1111c39a7f84SToshiyuki Okajima 		return jbd2_journal_try_to_free_buffers(journal, page,
1112d0164adcSMel Gorman 						wait & ~__GFP_DIRECT_RECLAIM);
1113c39a7f84SToshiyuki Okajima 	return try_to_free_buffers(page);
1114c39a7f84SToshiyuki Okajima }
1115c39a7f84SToshiyuki Okajima 
1116a7550b30SJaegeuk Kim #ifdef CONFIG_EXT4_FS_ENCRYPTION
1117a7550b30SJaegeuk Kim static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
1118a7550b30SJaegeuk Kim {
1119a7550b30SJaegeuk Kim 	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
1120a7550b30SJaegeuk Kim 				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
1121a7550b30SJaegeuk Kim }
1122a7550b30SJaegeuk Kim 
1123a7550b30SJaegeuk Kim static int ext4_prepare_context(struct inode *inode)
1124a7550b30SJaegeuk Kim {
1125a7550b30SJaegeuk Kim 	return ext4_convert_inline_data(inode);
1126a7550b30SJaegeuk Kim }
1127a7550b30SJaegeuk Kim 
1128a7550b30SJaegeuk Kim static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
1129a7550b30SJaegeuk Kim 							void *fs_data)
1130a7550b30SJaegeuk Kim {
11312f8f5e76SEric Biggers 	handle_t *handle = fs_data;
11322f8f5e76SEric Biggers 	int res, res2, retries = 0;
1133a7550b30SJaegeuk Kim 
11342f8f5e76SEric Biggers 	/*
11352f8f5e76SEric Biggers 	 * If a journal handle was specified, then the encryption context is
11362f8f5e76SEric Biggers 	 * being set on a new inode via inheritance and is part of a larger
11372f8f5e76SEric Biggers 	 * transaction to create the inode.  Otherwise the encryption context is
11382f8f5e76SEric Biggers 	 * being set on an existing inode in its own transaction.  Only in the
11392f8f5e76SEric Biggers 	 * latter case should the "retry on ENOSPC" logic be used.
11402f8f5e76SEric Biggers 	 */
11412f8f5e76SEric Biggers 
11422f8f5e76SEric Biggers 	if (handle) {
11432f8f5e76SEric Biggers 		res = ext4_xattr_set_handle(handle, inode,
11442f8f5e76SEric Biggers 					    EXT4_XATTR_INDEX_ENCRYPTION,
11452f8f5e76SEric Biggers 					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
11462f8f5e76SEric Biggers 					    ctx, len, 0);
1147a7550b30SJaegeuk Kim 		if (!res) {
1148a7550b30SJaegeuk Kim 			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1149a7550b30SJaegeuk Kim 			ext4_clear_inode_state(inode,
1150a7550b30SJaegeuk Kim 					EXT4_STATE_MAY_INLINE_DATA);
1151a3caa24bSJan Kara 			/*
1152a3caa24bSJan Kara 			 * Update inode->i_flags - e.g. S_DAX may get disabled
1153a3caa24bSJan Kara 			 */
1154a3caa24bSJan Kara 			ext4_set_inode_flags(inode);
1155a7550b30SJaegeuk Kim 		}
1156a7550b30SJaegeuk Kim 		return res;
1157a7550b30SJaegeuk Kim 	}
1158a7550b30SJaegeuk Kim 
11592f8f5e76SEric Biggers retry:
1160a7550b30SJaegeuk Kim 	handle = ext4_journal_start(inode, EXT4_HT_MISC,
1161a7550b30SJaegeuk Kim 			ext4_jbd2_credits_xattr(inode));
1162a7550b30SJaegeuk Kim 	if (IS_ERR(handle))
1163a7550b30SJaegeuk Kim 		return PTR_ERR(handle);
1164a7550b30SJaegeuk Kim 
11652f8f5e76SEric Biggers 	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
11662f8f5e76SEric Biggers 				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
11672f8f5e76SEric Biggers 				    ctx, len, 0);
1168a7550b30SJaegeuk Kim 	if (!res) {
1169a7550b30SJaegeuk Kim 		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1170a3caa24bSJan Kara 		/* Update inode->i_flags - e.g. S_DAX may get disabled */
1171a3caa24bSJan Kara 		ext4_set_inode_flags(inode);
1172a7550b30SJaegeuk Kim 		res = ext4_mark_inode_dirty(handle, inode);
1173a7550b30SJaegeuk Kim 		if (res)
1174a7550b30SJaegeuk Kim 			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
1175a7550b30SJaegeuk Kim 	}
1176a7550b30SJaegeuk Kim 	res2 = ext4_journal_stop(handle);
11772f8f5e76SEric Biggers 
11782f8f5e76SEric Biggers 	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
11792f8f5e76SEric Biggers 		goto retry;
1180a7550b30SJaegeuk Kim 	if (!res)
1181a7550b30SJaegeuk Kim 		res = res2;
1182a7550b30SJaegeuk Kim 	return res;
1183a7550b30SJaegeuk Kim }
1184a7550b30SJaegeuk Kim 
1185a7550b30SJaegeuk Kim static int ext4_dummy_context(struct inode *inode)
1186a7550b30SJaegeuk Kim {
1187a7550b30SJaegeuk Kim 	return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
1188a7550b30SJaegeuk Kim }
1189a7550b30SJaegeuk Kim 
1190a7550b30SJaegeuk Kim static unsigned ext4_max_namelen(struct inode *inode)
1191a7550b30SJaegeuk Kim {
1192a7550b30SJaegeuk Kim 	return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
1193a7550b30SJaegeuk Kim 		EXT4_NAME_LEN;
1194a7550b30SJaegeuk Kim }
1195a7550b30SJaegeuk Kim 
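/*
 * fscrypt hooks: the per-inode encryption context is stored in the
 * EXT4_XATTR_NAME_ENCRYPTION_CONTEXT xattr and read/written by the
 * callbacks above.  When CONFIG_EXT4_FS_ENCRYPTION is not set, only
 * ->is_encrypted is provided so the rest of the code can still test
 * whether an inode is encrypted.
 */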
1196a7550b30SJaegeuk Kim static struct fscrypt_operations ext4_cryptops = {
1197a5d431efSEric Biggers 	.key_prefix		= "ext4:",
1198a7550b30SJaegeuk Kim 	.get_context		= ext4_get_context,
1199a7550b30SJaegeuk Kim 	.prepare_context	= ext4_prepare_context,
1200a7550b30SJaegeuk Kim 	.set_context		= ext4_set_context,
1201a7550b30SJaegeuk Kim 	.dummy_context		= ext4_dummy_context,
1202a7550b30SJaegeuk Kim 	.is_encrypted		= ext4_encrypted_inode,
1203a7550b30SJaegeuk Kim 	.empty_dir		= ext4_empty_dir,
1204a7550b30SJaegeuk Kim 	.max_namelen		= ext4_max_namelen,
1205a7550b30SJaegeuk Kim };
1206a7550b30SJaegeuk Kim #else
1207a7550b30SJaegeuk Kim static struct fscrypt_operations ext4_cryptops = {
1208a7550b30SJaegeuk Kim 	.is_encrypted		= ext4_encrypted_inode,
1209a7550b30SJaegeuk Kim };
1210a7550b30SJaegeuk Kim #endif
1211a7550b30SJaegeuk Kim 
1212ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
1213689c958cSLi Xi static char *quotatypes[] = INITQFNAMES;
1214689c958cSLi Xi #define QTYPE2NAME(t) (quotatypes[t])
1215ac27a0ecSDave Kleikamp 
1216617ba13bSMingming Cao static int ext4_write_dquot(struct dquot *dquot);
1217617ba13bSMingming Cao static int ext4_acquire_dquot(struct dquot *dquot);
1218617ba13bSMingming Cao static int ext4_release_dquot(struct dquot *dquot);
1219617ba13bSMingming Cao static int ext4_mark_dquot_dirty(struct dquot *dquot);
1220617ba13bSMingming Cao static int ext4_write_info(struct super_block *sb, int type);
12216f28e087SJan Kara static int ext4_quota_on(struct super_block *sb, int type, int format_id,
12228c54ca9cSAl Viro 			 const struct path *path);
1223ca0e05e4SDmitry Monakhov static int ext4_quota_off(struct super_block *sb, int type);
1224617ba13bSMingming Cao static int ext4_quota_on_mount(struct super_block *sb, int type);
1225617ba13bSMingming Cao static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1226ac27a0ecSDave Kleikamp 			       size_t len, loff_t off);
1227617ba13bSMingming Cao static ssize_t ext4_quota_write(struct super_block *sb, int type,
1228ac27a0ecSDave Kleikamp 				const char *data, size_t len, loff_t off);
12297c319d32SAditya Kali static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
12307c319d32SAditya Kali 			     unsigned int flags);
12317c319d32SAditya Kali static int ext4_enable_quotas(struct super_block *sb);
12328f0e8746STheodore Ts'o static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
1233ac27a0ecSDave Kleikamp 
123496c7e0d9SJan Kara static struct dquot **ext4_get_dquots(struct inode *inode)
123596c7e0d9SJan Kara {
123696c7e0d9SJan Kara 	return EXT4_I(inode)->i_dquot;
123796c7e0d9SJan Kara }
123896c7e0d9SJan Kara 
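/*
 * ext4 supplies its own journalled variants of the dquot callbacks
 * (ext4_write_dquot() and friends, defined later in this file) so that
 * quota file updates are performed inside jbd2 transactions.
 */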
123961e225dcSAlexey Dobriyan static const struct dquot_operations ext4_quota_operations = {
124060e58e0fSMingming Cao 	.get_reserved_space = ext4_get_reserved_space,
1241617ba13bSMingming Cao 	.write_dquot	= ext4_write_dquot,
1242617ba13bSMingming Cao 	.acquire_dquot	= ext4_acquire_dquot,
1243617ba13bSMingming Cao 	.release_dquot	= ext4_release_dquot,
1244617ba13bSMingming Cao 	.mark_dirty	= ext4_mark_dquot_dirty,
1245a5b5ee32SJan Kara 	.write_info	= ext4_write_info,
1246a5b5ee32SJan Kara 	.alloc_dquot	= dquot_alloc,
1247a5b5ee32SJan Kara 	.destroy_dquot	= dquot_destroy,
1248040cb378SLi Xi 	.get_projid	= ext4_get_projid,
12498f0e8746STheodore Ts'o 	.get_next_id	= ext4_get_next_id,
1250ac27a0ecSDave Kleikamp };
1251ac27a0ecSDave Kleikamp 
12520d54b217SAlexey Dobriyan static const struct quotactl_ops ext4_qctl_operations = {
1253617ba13bSMingming Cao 	.quota_on	= ext4_quota_on,
1254ca0e05e4SDmitry Monakhov 	.quota_off	= ext4_quota_off,
1255287a8095SChristoph Hellwig 	.quota_sync	= dquot_quota_sync,
12560a240339SJan Kara 	.get_state	= dquot_get_state,
1257287a8095SChristoph Hellwig 	.set_info	= dquot_set_dqinfo,
1258287a8095SChristoph Hellwig 	.get_dqblk	= dquot_get_dqblk,
12596332b9b5SEric Sandeen 	.set_dqblk	= dquot_set_dqblk,
12606332b9b5SEric Sandeen 	.get_nextdqblk	= dquot_get_next_dqblk,
1261ac27a0ecSDave Kleikamp };
1262ac27a0ecSDave Kleikamp #endif
1263ac27a0ecSDave Kleikamp 
1264ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations ext4_sops = {
1265617ba13bSMingming Cao 	.alloc_inode	= ext4_alloc_inode,
1266617ba13bSMingming Cao 	.destroy_inode	= ext4_destroy_inode,
1267617ba13bSMingming Cao 	.write_inode	= ext4_write_inode,
1268617ba13bSMingming Cao 	.dirty_inode	= ext4_dirty_inode,
12697ff9c073STheodore Ts'o 	.drop_inode	= ext4_drop_inode,
12700930fcc1SAl Viro 	.evict_inode	= ext4_evict_inode,
1271617ba13bSMingming Cao 	.put_super	= ext4_put_super,
1272617ba13bSMingming Cao 	.sync_fs	= ext4_sync_fs,
1273c4be0c1dSTakashi Sato 	.freeze_fs	= ext4_freeze,
1274c4be0c1dSTakashi Sato 	.unfreeze_fs	= ext4_unfreeze,
1275617ba13bSMingming Cao 	.statfs		= ext4_statfs,
1276617ba13bSMingming Cao 	.remount_fs	= ext4_remount,
1277617ba13bSMingming Cao 	.show_options	= ext4_show_options,
1278ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
1279617ba13bSMingming Cao 	.quota_read	= ext4_quota_read,
1280617ba13bSMingming Cao 	.quota_write	= ext4_quota_write,
128196c7e0d9SJan Kara 	.get_dquots	= ext4_get_dquots,
1282ac27a0ecSDave Kleikamp #endif
1283c39a7f84SToshiyuki Okajima 	.bdev_try_to_free_page = bdev_try_to_free_page,
1284ac27a0ecSDave Kleikamp };
1285ac27a0ecSDave Kleikamp 
128639655164SChristoph Hellwig static const struct export_operations ext4_export_ops = {
12871b961ac0SChristoph Hellwig 	.fh_to_dentry = ext4_fh_to_dentry,
12881b961ac0SChristoph Hellwig 	.fh_to_parent = ext4_fh_to_parent,
1289617ba13bSMingming Cao 	.get_parent = ext4_get_parent,
1290ac27a0ecSDave Kleikamp };
1291ac27a0ecSDave Kleikamp 
1292ac27a0ecSDave Kleikamp enum {
1293ac27a0ecSDave Kleikamp 	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1294ac27a0ecSDave Kleikamp 	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
129572578c33STheodore Ts'o 	Opt_nouid32, Opt_debug, Opt_removed,
1296ac27a0ecSDave Kleikamp 	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
129772578c33STheodore Ts'o 	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
1298ad4eec61SEric Sandeen 	Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
1299ad4eec61SEric Sandeen 	Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1300ac27a0ecSDave Kleikamp 	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
13016ddb2447STheodore Ts'o 	Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1302ac27a0ecSDave Kleikamp 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
13035a20bdfcSJan Kara 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1304ee4a3fcdSTheodore Ts'o 	Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
130549da9392SJan Kara 	Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
13061449032bSTheodore Ts'o 	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
1307670e9875STheodore Ts'o 	Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
13081449032bSTheodore Ts'o 	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
13095328e635SEric Sandeen 	Opt_inode_readahead_blks, Opt_journal_ioprio,
1310744692dcSJiaying Zhang 	Opt_dioread_nolock, Opt_dioread_lock,
1311fc6cb1cdSTheodore Ts'o 	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1312c6d3d56dSDarrick J. Wong 	Opt_max_dir_size_kb, Opt_nojournal_checksum,
1313ac27a0ecSDave Kleikamp };
1314ac27a0ecSDave Kleikamp 
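/*
 * Mount option patterns for match_token().  Each comma-separated option is
 * matched against this table; %u and %s capture numeric and string
 * arguments into the substring_t array.  For example, the option string
 * "errors=remount-ro,data=ordered,commit=30" yields Opt_err_ro,
 * Opt_data_ordered and Opt_commit (with a numeric argument of 30).
 * Patterns mapped to Opt_removed are accepted for backwards compatibility
 * but otherwise ignored.
 */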
1315a447c093SSteven Whitehouse static const match_table_t tokens = {
1316ac27a0ecSDave Kleikamp 	{Opt_bsd_df, "bsddf"},
1317ac27a0ecSDave Kleikamp 	{Opt_minix_df, "minixdf"},
1318ac27a0ecSDave Kleikamp 	{Opt_grpid, "grpid"},
1319ac27a0ecSDave Kleikamp 	{Opt_grpid, "bsdgroups"},
1320ac27a0ecSDave Kleikamp 	{Opt_nogrpid, "nogrpid"},
1321ac27a0ecSDave Kleikamp 	{Opt_nogrpid, "sysvgroups"},
1322ac27a0ecSDave Kleikamp 	{Opt_resgid, "resgid=%u"},
1323ac27a0ecSDave Kleikamp 	{Opt_resuid, "resuid=%u"},
1324ac27a0ecSDave Kleikamp 	{Opt_sb, "sb=%u"},
1325ac27a0ecSDave Kleikamp 	{Opt_err_cont, "errors=continue"},
1326ac27a0ecSDave Kleikamp 	{Opt_err_panic, "errors=panic"},
1327ac27a0ecSDave Kleikamp 	{Opt_err_ro, "errors=remount-ro"},
1328ac27a0ecSDave Kleikamp 	{Opt_nouid32, "nouid32"},
1329ac27a0ecSDave Kleikamp 	{Opt_debug, "debug"},
133072578c33STheodore Ts'o 	{Opt_removed, "oldalloc"},
133172578c33STheodore Ts'o 	{Opt_removed, "orlov"},
1332ac27a0ecSDave Kleikamp 	{Opt_user_xattr, "user_xattr"},
1333ac27a0ecSDave Kleikamp 	{Opt_nouser_xattr, "nouser_xattr"},
1334ac27a0ecSDave Kleikamp 	{Opt_acl, "acl"},
1335ac27a0ecSDave Kleikamp 	{Opt_noacl, "noacl"},
1336e3bb52aeSEric Sandeen 	{Opt_noload, "norecovery"},
13375a916be1STheodore Ts'o 	{Opt_noload, "noload"},
133872578c33STheodore Ts'o 	{Opt_removed, "nobh"},
133972578c33STheodore Ts'o 	{Opt_removed, "bh"},
1340ac27a0ecSDave Kleikamp 	{Opt_commit, "commit=%u"},
134130773840STheodore Ts'o 	{Opt_min_batch_time, "min_batch_time=%u"},
134230773840STheodore Ts'o 	{Opt_max_batch_time, "max_batch_time=%u"},
1343ac27a0ecSDave Kleikamp 	{Opt_journal_dev, "journal_dev=%u"},
1344ad4eec61SEric Sandeen 	{Opt_journal_path, "journal_path=%s"},
1345818d276cSGirish Shilamkar 	{Opt_journal_checksum, "journal_checksum"},
1346c6d3d56dSDarrick J. Wong 	{Opt_nojournal_checksum, "nojournal_checksum"},
1347818d276cSGirish Shilamkar 	{Opt_journal_async_commit, "journal_async_commit"},
1348ac27a0ecSDave Kleikamp 	{Opt_abort, "abort"},
1349ac27a0ecSDave Kleikamp 	{Opt_data_journal, "data=journal"},
1350ac27a0ecSDave Kleikamp 	{Opt_data_ordered, "data=ordered"},
1351ac27a0ecSDave Kleikamp 	{Opt_data_writeback, "data=writeback"},
13525bf5683aSHidehiro Kawai 	{Opt_data_err_abort, "data_err=abort"},
13535bf5683aSHidehiro Kawai 	{Opt_data_err_ignore, "data_err=ignore"},
1354ac27a0ecSDave Kleikamp 	{Opt_offusrjquota, "usrjquota="},
1355ac27a0ecSDave Kleikamp 	{Opt_usrjquota, "usrjquota=%s"},
1356ac27a0ecSDave Kleikamp 	{Opt_offgrpjquota, "grpjquota="},
1357ac27a0ecSDave Kleikamp 	{Opt_grpjquota, "grpjquota=%s"},
1358ac27a0ecSDave Kleikamp 	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
1359ac27a0ecSDave Kleikamp 	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
13605a20bdfcSJan Kara 	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
1361ac27a0ecSDave Kleikamp 	{Opt_grpquota, "grpquota"},
1362ac27a0ecSDave Kleikamp 	{Opt_noquota, "noquota"},
1363ac27a0ecSDave Kleikamp 	{Opt_quota, "quota"},
1364ac27a0ecSDave Kleikamp 	{Opt_usrquota, "usrquota"},
136549da9392SJan Kara 	{Opt_prjquota, "prjquota"},
1366ac27a0ecSDave Kleikamp 	{Opt_barrier, "barrier=%u"},
136706705bffSTheodore Ts'o 	{Opt_barrier, "barrier"},
136806705bffSTheodore Ts'o 	{Opt_nobarrier, "nobarrier"},
136925ec56b5SJean Noel Cordenner 	{Opt_i_version, "i_version"},
1370923ae0ffSRoss Zwisler 	{Opt_dax, "dax"},
1371c9de560dSAlex Tomas 	{Opt_stripe, "stripe=%u"},
137264769240SAlex Tomas 	{Opt_delalloc, "delalloc"},
1373a26f4992STheodore Ts'o 	{Opt_lazytime, "lazytime"},
1374a26f4992STheodore Ts'o 	{Opt_nolazytime, "nolazytime"},
1375670e9875STheodore Ts'o 	{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
1376dd919b98SAneesh Kumar K.V 	{Opt_nodelalloc, "nodelalloc"},
137736ade451SJan Kara 	{Opt_removed, "mblk_io_submit"},
137836ade451SJan Kara 	{Opt_removed, "nomblk_io_submit"},
13796fd058f7STheodore Ts'o 	{Opt_block_validity, "block_validity"},
13806fd058f7STheodore Ts'o 	{Opt_noblock_validity, "noblock_validity"},
1381240799cdSTheodore Ts'o 	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1382b3881f74STheodore Ts'o 	{Opt_journal_ioprio, "journal_ioprio=%u"},
1383afd4672dSTheodore Ts'o 	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
138406705bffSTheodore Ts'o 	{Opt_auto_da_alloc, "auto_da_alloc"},
138506705bffSTheodore Ts'o 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
1386744692dcSJiaying Zhang 	{Opt_dioread_nolock, "dioread_nolock"},
1387744692dcSJiaying Zhang 	{Opt_dioread_lock, "dioread_lock"},
13885328e635SEric Sandeen 	{Opt_discard, "discard"},
13895328e635SEric Sandeen 	{Opt_nodiscard, "nodiscard"},
1390fc6cb1cdSTheodore Ts'o 	{Opt_init_itable, "init_itable=%u"},
1391fc6cb1cdSTheodore Ts'o 	{Opt_init_itable, "init_itable"},
1392fc6cb1cdSTheodore Ts'o 	{Opt_noinit_itable, "noinit_itable"},
1393df981d03STheodore Ts'o 	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
13946ddb2447STheodore Ts'o 	{Opt_test_dummy_encryption, "test_dummy_encryption"},
1395c7198b9cSTheodore Ts'o 	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
1396c7198b9cSTheodore Ts'o 	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
1397c7198b9cSTheodore Ts'o 	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
1398c7198b9cSTheodore Ts'o 	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
1399c7198b9cSTheodore Ts'o 	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
1400f3f12faaSJosef Bacik 	{Opt_err, NULL},
1401ac27a0ecSDave Kleikamp };
1402ac27a0ecSDave Kleikamp 
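/*
 * Pull an "sb=N" option out of the raw mount data before regular option
 * parsing so that an alternate (backup) superblock can be used.  For
 * example "mount -o sb=32768 ..." reads the superblock from block 32768,
 * which is typically the first backup copy on a filesystem with 4KiB
 * blocks; the default is block 1.
 */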
1403617ba13bSMingming Cao static ext4_fsblk_t get_sb_block(void **data)
1404ac27a0ecSDave Kleikamp {
1405617ba13bSMingming Cao 	ext4_fsblk_t	sb_block;
1406ac27a0ecSDave Kleikamp 	char		*options = (char *) *data;
1407ac27a0ecSDave Kleikamp 
1408ac27a0ecSDave Kleikamp 	if (!options || strncmp(options, "sb=", 3) != 0)
1409ac27a0ecSDave Kleikamp 		return 1;	/* Default location */
14100b8e58a1SAndreas Dilger 
1411ac27a0ecSDave Kleikamp 	options += 3;
14120b8e58a1SAndreas Dilger 	/* TODO: use simple_strtoll with >32bit ext4 */
1413ac27a0ecSDave Kleikamp 	sb_block = simple_strtoul(options, &options, 0);
1414ac27a0ecSDave Kleikamp 	if (*options && *options != ',') {
14154776004fSTheodore Ts'o 		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
1416ac27a0ecSDave Kleikamp 		       (char *) *data);
1417ac27a0ecSDave Kleikamp 		return 1;
1418ac27a0ecSDave Kleikamp 	}
1419ac27a0ecSDave Kleikamp 	if (*options == ',')
1420ac27a0ecSDave Kleikamp 		options++;
1421ac27a0ecSDave Kleikamp 	*data = (void *) options;
14220b8e58a1SAndreas Dilger 
1423ac27a0ecSDave Kleikamp 	return sb_block;
1424ac27a0ecSDave Kleikamp }
1425ac27a0ecSDave Kleikamp 
1426b3881f74STheodore Ts'o #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
1427437ca0fdSDmitry Monakhov static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
1428437ca0fdSDmitry Monakhov 	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1429b3881f74STheodore Ts'o 
143056c50f11SDmitry Monakhov #ifdef CONFIG_QUOTA
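/*
 * set_qf_name() handles "usrjquota=<file>" and "grpjquota=<file>";
 * clear_qf_name() handles the bare "usrjquota=" / "grpjquota=" forms used
 * to switch journaled quota back off.  Both return 1 on success and -1 on
 * error, following the same convention as handle_mount_opt() below.
 */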
143156c50f11SDmitry Monakhov static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
143256c50f11SDmitry Monakhov {
143356c50f11SDmitry Monakhov 	struct ext4_sb_info *sbi = EXT4_SB(sb);
143456c50f11SDmitry Monakhov 	char *qname;
143503dafb5fSChen Gang 	int ret = -1;
143656c50f11SDmitry Monakhov 
143756c50f11SDmitry Monakhov 	if (sb_any_quota_loaded(sb) &&
143856c50f11SDmitry Monakhov 		!sbi->s_qf_names[qtype]) {
143956c50f11SDmitry Monakhov 		ext4_msg(sb, KERN_ERR,
144056c50f11SDmitry Monakhov 			"Cannot change journaled "
144156c50f11SDmitry Monakhov 			"quota options when quota turned on");
144257f73c2cSTheodore Ts'o 		return -1;
144356c50f11SDmitry Monakhov 	}
1444e2b911c5SDarrick J. Wong 	if (ext4_has_feature_quota(sb)) {
1445c325a67cSTheodore Ts'o 		ext4_msg(sb, KERN_INFO, "Journaled quota options "
1446c325a67cSTheodore Ts'o 			 "ignored when QUOTA feature is enabled");
1447c325a67cSTheodore Ts'o 		return 1;
1448262b4662SJan Kara 	}
144956c50f11SDmitry Monakhov 	qname = match_strdup(args);
145056c50f11SDmitry Monakhov 	if (!qname) {
145156c50f11SDmitry Monakhov 		ext4_msg(sb, KERN_ERR,
145256c50f11SDmitry Monakhov 			"Not enough memory for storing quotafile name");
145357f73c2cSTheodore Ts'o 		return -1;
145456c50f11SDmitry Monakhov 	}
145503dafb5fSChen Gang 	if (sbi->s_qf_names[qtype]) {
145603dafb5fSChen Gang 		if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
145703dafb5fSChen Gang 			ret = 1;
145803dafb5fSChen Gang 		else
145956c50f11SDmitry Monakhov 			ext4_msg(sb, KERN_ERR,
146003dafb5fSChen Gang 				 "%s quota file already specified",
146103dafb5fSChen Gang 				 QTYPE2NAME(qtype));
146203dafb5fSChen Gang 		goto errout;
146356c50f11SDmitry Monakhov 	}
146403dafb5fSChen Gang 	if (strchr(qname, '/')) {
146556c50f11SDmitry Monakhov 		ext4_msg(sb, KERN_ERR,
146656c50f11SDmitry Monakhov 			"quotafile must be on filesystem root");
146703dafb5fSChen Gang 		goto errout;
146856c50f11SDmitry Monakhov 	}
146903dafb5fSChen Gang 	sbi->s_qf_names[qtype] = qname;
1470fd8c37ecSTheodore Ts'o 	set_opt(sb, QUOTA);
147156c50f11SDmitry Monakhov 	return 1;
147203dafb5fSChen Gang errout:
147303dafb5fSChen Gang 	kfree(qname);
147403dafb5fSChen Gang 	return ret;
147556c50f11SDmitry Monakhov }
147656c50f11SDmitry Monakhov 
147756c50f11SDmitry Monakhov static int clear_qf_name(struct super_block *sb, int qtype)
147856c50f11SDmitry Monakhov {
148056c50f11SDmitry Monakhov 	struct ext4_sb_info *sbi = EXT4_SB(sb);
148156c50f11SDmitry Monakhov 
148256c50f11SDmitry Monakhov 	if (sb_any_quota_loaded(sb) &&
148356c50f11SDmitry Monakhov 		sbi->s_qf_names[qtype]) {
148456c50f11SDmitry Monakhov 		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
148556c50f11SDmitry Monakhov 			" when quota turned on");
148657f73c2cSTheodore Ts'o 		return -1;
148756c50f11SDmitry Monakhov 	}
148803dafb5fSChen Gang 	kfree(sbi->s_qf_names[qtype]);
148956c50f11SDmitry Monakhov 	sbi->s_qf_names[qtype] = NULL;
149056c50f11SDmitry Monakhov 	return 1;
149156c50f11SDmitry Monakhov }
149256c50f11SDmitry Monakhov #endif
149356c50f11SDmitry Monakhov 
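/*
 * Flags for the ext4_mount_opts table below:
 *   MOPT_SET / MOPT_CLEAR  - option sets/clears the given bit in s_mount_opt
 *   MOPT_NOSUPPORT         - option is recognized but not supported
 *   MOPT_EXPLICIT          - record that the user requested it explicitly
 *   MOPT_CLEAR_ERR         - option first clears the errors=... behaviour bits
 *   MOPT_GTE0              - numeric argument must be >= 0
 *   MOPT_Q / MOPT_QFMT     - quota options (unsupported without CONFIG_QUOTA)
 *   MOPT_DATAJ             - selects a data=journal/ordered/writeback mode
 *   MOPT_NO_EXT2 / NO_EXT3 - reject the option when mounting as ext2/ext3
 *   MOPT_STRING            - argument is a string rather than a number
 */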
149426092bf5STheodore Ts'o #define MOPT_SET	0x0001
149526092bf5STheodore Ts'o #define MOPT_CLEAR	0x0002
149626092bf5STheodore Ts'o #define MOPT_NOSUPPORT	0x0004
149726092bf5STheodore Ts'o #define MOPT_EXPLICIT	0x0008
149826092bf5STheodore Ts'o #define MOPT_CLEAR_ERR	0x0010
149926092bf5STheodore Ts'o #define MOPT_GTE0	0x0020
150026092bf5STheodore Ts'o #ifdef CONFIG_QUOTA
150126092bf5STheodore Ts'o #define MOPT_Q		0
150226092bf5STheodore Ts'o #define MOPT_QFMT	0x0040
150326092bf5STheodore Ts'o #else
150426092bf5STheodore Ts'o #define MOPT_Q		MOPT_NOSUPPORT
150526092bf5STheodore Ts'o #define MOPT_QFMT	MOPT_NOSUPPORT
150626092bf5STheodore Ts'o #endif
150726092bf5STheodore Ts'o #define MOPT_DATAJ	0x0080
15088dc0aa8cSTheodore Ts'o #define MOPT_NO_EXT2	0x0100
15098dc0aa8cSTheodore Ts'o #define MOPT_NO_EXT3	0x0200
15108dc0aa8cSTheodore Ts'o #define MOPT_EXT4_ONLY	(MOPT_NO_EXT2 | MOPT_NO_EXT3)
1511ad4eec61SEric Sandeen #define MOPT_STRING	0x0400
151226092bf5STheodore Ts'o 
151326092bf5STheodore Ts'o static const struct mount_opts {
151426092bf5STheodore Ts'o 	int	token;
151526092bf5STheodore Ts'o 	int	mount_opt;
151626092bf5STheodore Ts'o 	int	flags;
151726092bf5STheodore Ts'o } ext4_mount_opts[] = {
151826092bf5STheodore Ts'o 	{Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET},
151926092bf5STheodore Ts'o 	{Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR},
152026092bf5STheodore Ts'o 	{Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET},
152126092bf5STheodore Ts'o 	{Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR},
152226092bf5STheodore Ts'o 	{Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET},
152326092bf5STheodore Ts'o 	{Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR},
15248dc0aa8cSTheodore Ts'o 	{Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK,
15258dc0aa8cSTheodore Ts'o 	 MOPT_EXT4_ONLY | MOPT_SET},
15268dc0aa8cSTheodore Ts'o 	{Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK,
15278dc0aa8cSTheodore Ts'o 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
152826092bf5STheodore Ts'o 	{Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET},
152926092bf5STheodore Ts'o 	{Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR},
15308dc0aa8cSTheodore Ts'o 	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
15318dc0aa8cSTheodore Ts'o 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
15328dc0aa8cSTheodore Ts'o 	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
153359d9fa5cSTheodore Ts'o 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
1534c6d3d56dSDarrick J. Wong 	{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1535c6d3d56dSDarrick J. Wong 	 MOPT_EXT4_ONLY | MOPT_CLEAR},
15368dc0aa8cSTheodore Ts'o 	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
15371e381f60SDmitry Monakhov 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
153826092bf5STheodore Ts'o 	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
15398dc0aa8cSTheodore Ts'o 				    EXT4_MOUNT_JOURNAL_CHECKSUM),
15401e381f60SDmitry Monakhov 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
15418dc0aa8cSTheodore Ts'o 	{Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET},
154226092bf5STheodore Ts'o 	{Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR},
154326092bf5STheodore Ts'o 	{Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR},
154426092bf5STheodore Ts'o 	{Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR},
15458dc0aa8cSTheodore Ts'o 	{Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT,
15467915a861SAles Novak 	 MOPT_NO_EXT2},
15478dc0aa8cSTheodore Ts'o 	{Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT,
15487915a861SAles Novak 	 MOPT_NO_EXT2},
154926092bf5STheodore Ts'o 	{Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET},
155026092bf5STheodore Ts'o 	{Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR},
155126092bf5STheodore Ts'o 	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
155226092bf5STheodore Ts'o 	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
155326092bf5STheodore Ts'o 	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
155426092bf5STheodore Ts'o 	{Opt_commit, 0, MOPT_GTE0},
155526092bf5STheodore Ts'o 	{Opt_max_batch_time, 0, MOPT_GTE0},
155626092bf5STheodore Ts'o 	{Opt_min_batch_time, 0, MOPT_GTE0},
155726092bf5STheodore Ts'o 	{Opt_inode_readahead_blks, 0, MOPT_GTE0},
155826092bf5STheodore Ts'o 	{Opt_init_itable, 0, MOPT_GTE0},
1559923ae0ffSRoss Zwisler 	{Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
156026092bf5STheodore Ts'o 	{Opt_stripe, 0, MOPT_GTE0},
15610efb3b23SJan Kara 	{Opt_resuid, 0, MOPT_GTE0},
15620efb3b23SJan Kara 	{Opt_resgid, 0, MOPT_GTE0},
15635ba92bcfSCarlos Maiolino 	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
15645ba92bcfSCarlos Maiolino 	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
15655ba92bcfSCarlos Maiolino 	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
15668dc0aa8cSTheodore Ts'o 	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
15678dc0aa8cSTheodore Ts'o 	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
15688dc0aa8cSTheodore Ts'o 	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
15698dc0aa8cSTheodore Ts'o 	 MOPT_NO_EXT2 | MOPT_DATAJ},
157026092bf5STheodore Ts'o 	{Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
157126092bf5STheodore Ts'o 	{Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
157226092bf5STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL
157326092bf5STheodore Ts'o 	{Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
157426092bf5STheodore Ts'o 	{Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
157526092bf5STheodore Ts'o #else
157626092bf5STheodore Ts'o 	{Opt_acl, 0, MOPT_NOSUPPORT},
157726092bf5STheodore Ts'o 	{Opt_noacl, 0, MOPT_NOSUPPORT},
157826092bf5STheodore Ts'o #endif
157926092bf5STheodore Ts'o 	{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
158026092bf5STheodore Ts'o 	{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
1581670e9875STheodore Ts'o 	{Opt_debug_want_extra_isize, 0, MOPT_GTE0},
158226092bf5STheodore Ts'o 	{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
158326092bf5STheodore Ts'o 	{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
158426092bf5STheodore Ts'o 							MOPT_SET | MOPT_Q},
158526092bf5STheodore Ts'o 	{Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA,
158626092bf5STheodore Ts'o 							MOPT_SET | MOPT_Q},
158749da9392SJan Kara 	{Opt_prjquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_PRJQUOTA,
158849da9392SJan Kara 							MOPT_SET | MOPT_Q},
158926092bf5STheodore Ts'o 	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
159049da9392SJan Kara 		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
159149da9392SJan Kara 							MOPT_CLEAR | MOPT_Q},
159226092bf5STheodore Ts'o 	{Opt_usrjquota, 0, MOPT_Q},
159326092bf5STheodore Ts'o 	{Opt_grpjquota, 0, MOPT_Q},
159426092bf5STheodore Ts'o 	{Opt_offusrjquota, 0, MOPT_Q},
159526092bf5STheodore Ts'o 	{Opt_offgrpjquota, 0, MOPT_Q},
159626092bf5STheodore Ts'o 	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
159726092bf5STheodore Ts'o 	{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
159826092bf5STheodore Ts'o 	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
1599df981d03STheodore Ts'o 	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
16006ddb2447STheodore Ts'o 	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
160126092bf5STheodore Ts'o 	{Opt_err, 0, 0}
160226092bf5STheodore Ts'o };
160326092bf5STheodore Ts'o 
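/*
 * Parse a single mount option.  Returns 1 when the option has been handled
 * (or deliberately ignored) and -1 on error; parse_options() fails the
 * mount or remount as soon as a negative value is returned.
 */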
160426092bf5STheodore Ts'o static int handle_mount_opt(struct super_block *sb, char *opt, int token,
160526092bf5STheodore Ts'o 			    substring_t *args, unsigned long *journal_devnum,
160626092bf5STheodore Ts'o 			    unsigned int *journal_ioprio, int is_remount)
160726092bf5STheodore Ts'o {
160826092bf5STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(sb);
160926092bf5STheodore Ts'o 	const struct mount_opts *m;
161008cefc7aSEric W. Biederman 	kuid_t uid;
161108cefc7aSEric W. Biederman 	kgid_t gid;
161226092bf5STheodore Ts'o 	int arg = 0;
161326092bf5STheodore Ts'o 
161457f73c2cSTheodore Ts'o #ifdef CONFIG_QUOTA
161557f73c2cSTheodore Ts'o 	if (token == Opt_usrjquota)
161657f73c2cSTheodore Ts'o 		return set_qf_name(sb, USRQUOTA, &args[0]);
161757f73c2cSTheodore Ts'o 	else if (token == Opt_grpjquota)
161857f73c2cSTheodore Ts'o 		return set_qf_name(sb, GRPQUOTA, &args[0]);
161957f73c2cSTheodore Ts'o 	else if (token == Opt_offusrjquota)
162057f73c2cSTheodore Ts'o 		return clear_qf_name(sb, USRQUOTA);
162157f73c2cSTheodore Ts'o 	else if (token == Opt_offgrpjquota)
162257f73c2cSTheodore Ts'o 		return clear_qf_name(sb, GRPQUOTA);
162357f73c2cSTheodore Ts'o #endif
162426092bf5STheodore Ts'o 	switch (token) {
1625f7048605STheodore Ts'o 	case Opt_noacl:
1626f7048605STheodore Ts'o 	case Opt_nouser_xattr:
1627f7048605STheodore Ts'o 		ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5");
1628f7048605STheodore Ts'o 		break;
162926092bf5STheodore Ts'o 	case Opt_sb:
163026092bf5STheodore Ts'o 		return 1;	/* handled by get_sb_block() */
163126092bf5STheodore Ts'o 	case Opt_removed:
16325f3633e3SJan Kara 		ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt);
163326092bf5STheodore Ts'o 		return 1;
163426092bf5STheodore Ts'o 	case Opt_abort:
163526092bf5STheodore Ts'o 		sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
163626092bf5STheodore Ts'o 		return 1;
163726092bf5STheodore Ts'o 	case Opt_i_version:
163826092bf5STheodore Ts'o 		sb->s_flags |= MS_I_VERSION;
163926092bf5STheodore Ts'o 		return 1;
1640a26f4992STheodore Ts'o 	case Opt_lazytime:
1641a26f4992STheodore Ts'o 		sb->s_flags |= MS_LAZYTIME;
1642a26f4992STheodore Ts'o 		return 1;
1643a26f4992STheodore Ts'o 	case Opt_nolazytime:
1644a26f4992STheodore Ts'o 		sb->s_flags &= ~MS_LAZYTIME;
1645a26f4992STheodore Ts'o 		return 1;
164626092bf5STheodore Ts'o 	}
164726092bf5STheodore Ts'o 
16485f3633e3SJan Kara 	for (m = ext4_mount_opts; m->token != Opt_err; m++)
16495f3633e3SJan Kara 		if (token == m->token)
16505f3633e3SJan Kara 			break;
16515f3633e3SJan Kara 
16525f3633e3SJan Kara 	if (m->token == Opt_err) {
16535f3633e3SJan Kara 		ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" "
16545f3633e3SJan Kara 			 "or missing value", opt);
16555f3633e3SJan Kara 		return -1;
16565f3633e3SJan Kara 	}
16575f3633e3SJan Kara 
16588dc0aa8cSTheodore Ts'o 	if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) {
16598dc0aa8cSTheodore Ts'o 		ext4_msg(sb, KERN_ERR,
16608dc0aa8cSTheodore Ts'o 			 "Mount option \"%s\" incompatible with ext2", opt);
16618dc0aa8cSTheodore Ts'o 		return -1;
16628dc0aa8cSTheodore Ts'o 	}
16638dc0aa8cSTheodore Ts'o 	if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) {
16648dc0aa8cSTheodore Ts'o 		ext4_msg(sb, KERN_ERR,
16658dc0aa8cSTheodore Ts'o 			 "Mount option \"%s\" incompatible with ext3", opt);
16668dc0aa8cSTheodore Ts'o 		return -1;
16678dc0aa8cSTheodore Ts'o 	}
16688dc0aa8cSTheodore Ts'o 
1669ad4eec61SEric Sandeen 	if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
16700efb3b23SJan Kara 		return -1;
167126092bf5STheodore Ts'o 	if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
167226092bf5STheodore Ts'o 		return -1;
1673c93cf2d7SDmitry Monakhov 	if (m->flags & MOPT_EXPLICIT) {
1674c93cf2d7SDmitry Monakhov 		if (m->mount_opt & EXT4_MOUNT_DELALLOC) {
167526092bf5STheodore Ts'o 			set_opt2(sb, EXPLICIT_DELALLOC);
16761e381f60SDmitry Monakhov 		} else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) {
16771e381f60SDmitry Monakhov 			set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM);
1678c93cf2d7SDmitry Monakhov 		} else
1679c93cf2d7SDmitry Monakhov 			return -1;
1680c93cf2d7SDmitry Monakhov 	}
168126092bf5STheodore Ts'o 	if (m->flags & MOPT_CLEAR_ERR)
168226092bf5STheodore Ts'o 		clear_opt(sb, ERRORS_MASK);
168326092bf5STheodore Ts'o 	if (token == Opt_noquota && sb_any_quota_loaded(sb)) {
168426092bf5STheodore Ts'o 		ext4_msg(sb, KERN_ERR, "Cannot change quota "
168526092bf5STheodore Ts'o 			 "options when quota turned on");
168626092bf5STheodore Ts'o 		return -1;
168726092bf5STheodore Ts'o 	}
168826092bf5STheodore Ts'o 
168926092bf5STheodore Ts'o 	if (m->flags & MOPT_NOSUPPORT) {
169026092bf5STheodore Ts'o 		ext4_msg(sb, KERN_ERR, "%s option not supported", opt);
169126092bf5STheodore Ts'o 	} else if (token == Opt_commit) {
169226092bf5STheodore Ts'o 		if (arg == 0)
169326092bf5STheodore Ts'o 			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
169426092bf5STheodore Ts'o 		sbi->s_commit_interval = HZ * arg;
1695670e9875STheodore Ts'o 	} else if (token == Opt_debug_want_extra_isize) {
1696670e9875STheodore Ts'o 		sbi->s_want_extra_isize = arg;
169726092bf5STheodore Ts'o 	} else if (token == Opt_max_batch_time) {
169826092bf5STheodore Ts'o 		sbi->s_max_batch_time = arg;
169926092bf5STheodore Ts'o 	} else if (token == Opt_min_batch_time) {
170026092bf5STheodore Ts'o 		sbi->s_min_batch_time = arg;
170126092bf5STheodore Ts'o 	} else if (token == Opt_inode_readahead_blks) {
1702e33e60eaSJan Kara 		if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) {
1703e33e60eaSJan Kara 			ext4_msg(sb, KERN_ERR,
1704e33e60eaSJan Kara 				 "EXT4-fs: inode_readahead_blks must be "
1705e33e60eaSJan Kara 				 "0 or a power of 2 smaller than 2^31");
170626092bf5STheodore Ts'o 			return -1;
170726092bf5STheodore Ts'o 		}
170826092bf5STheodore Ts'o 		sbi->s_inode_readahead_blks = arg;
170926092bf5STheodore Ts'o 	} else if (token == Opt_init_itable) {
171026092bf5STheodore Ts'o 		set_opt(sb, INIT_INODE_TABLE);
171126092bf5STheodore Ts'o 		if (!args->from)
171226092bf5STheodore Ts'o 			arg = EXT4_DEF_LI_WAIT_MULT;
171326092bf5STheodore Ts'o 		sbi->s_li_wait_mult = arg;
1714df981d03STheodore Ts'o 	} else if (token == Opt_max_dir_size_kb) {
1715df981d03STheodore Ts'o 		sbi->s_max_dir_size_kb = arg;
171626092bf5STheodore Ts'o 	} else if (token == Opt_stripe) {
171726092bf5STheodore Ts'o 		sbi->s_stripe = arg;
17180efb3b23SJan Kara 	} else if (token == Opt_resuid) {
17190efb3b23SJan Kara 		uid = make_kuid(current_user_ns(), arg);
17200efb3b23SJan Kara 		if (!uid_valid(uid)) {
17215f3633e3SJan Kara 			ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg);
17220efb3b23SJan Kara 			return -1;
17230efb3b23SJan Kara 		}
17240efb3b23SJan Kara 		sbi->s_resuid = uid;
17250efb3b23SJan Kara 	} else if (token == Opt_resgid) {
17260efb3b23SJan Kara 		gid = make_kgid(current_user_ns(), arg);
17270efb3b23SJan Kara 		if (!gid_valid(gid)) {
17285f3633e3SJan Kara 			ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg);
17290efb3b23SJan Kara 			return -1;
17300efb3b23SJan Kara 		}
17310efb3b23SJan Kara 		sbi->s_resgid = gid;
17320efb3b23SJan Kara 	} else if (token == Opt_journal_dev) {
17330efb3b23SJan Kara 		if (is_remount) {
17340efb3b23SJan Kara 			ext4_msg(sb, KERN_ERR,
17350efb3b23SJan Kara 				 "Cannot specify journal on remount");
17360efb3b23SJan Kara 			return -1;
17370efb3b23SJan Kara 		}
17380efb3b23SJan Kara 		*journal_devnum = arg;
1739ad4eec61SEric Sandeen 	} else if (token == Opt_journal_path) {
1740ad4eec61SEric Sandeen 		char *journal_path;
1741ad4eec61SEric Sandeen 		struct inode *journal_inode;
1742ad4eec61SEric Sandeen 		struct path path;
1743ad4eec61SEric Sandeen 		int error;
1744ad4eec61SEric Sandeen 
1745ad4eec61SEric Sandeen 		if (is_remount) {
1746ad4eec61SEric Sandeen 			ext4_msg(sb, KERN_ERR,
1747ad4eec61SEric Sandeen 				 "Cannot specify journal on remount");
1748ad4eec61SEric Sandeen 			return -1;
1749ad4eec61SEric Sandeen 		}
1750ad4eec61SEric Sandeen 		journal_path = match_strdup(&args[0]);
1751ad4eec61SEric Sandeen 		if (!journal_path) {
1752ad4eec61SEric Sandeen 			ext4_msg(sb, KERN_ERR, "error: could not dup "
1753ad4eec61SEric Sandeen 				"journal device string");
1754ad4eec61SEric Sandeen 			return -1;
1755ad4eec61SEric Sandeen 		}
1756ad4eec61SEric Sandeen 
1757ad4eec61SEric Sandeen 		error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
1758ad4eec61SEric Sandeen 		if (error) {
1759ad4eec61SEric Sandeen 			ext4_msg(sb, KERN_ERR, "error: could not find "
1760ad4eec61SEric Sandeen 				"journal device path: error %d", error);
1761ad4eec61SEric Sandeen 			kfree(journal_path);
1762ad4eec61SEric Sandeen 			return -1;
1763ad4eec61SEric Sandeen 		}
1764ad4eec61SEric Sandeen 
17652b0143b5SDavid Howells 		journal_inode = d_inode(path.dentry);
1766ad4eec61SEric Sandeen 		if (!S_ISBLK(journal_inode->i_mode)) {
1767ad4eec61SEric Sandeen 			ext4_msg(sb, KERN_ERR, "error: journal path %s "
1768ad4eec61SEric Sandeen 				"is not a block device", journal_path);
1769ad4eec61SEric Sandeen 			path_put(&path);
1770ad4eec61SEric Sandeen 			kfree(journal_path);
1771ad4eec61SEric Sandeen 			return -1;
1772ad4eec61SEric Sandeen 		}
1773ad4eec61SEric Sandeen 
1774ad4eec61SEric Sandeen 		*journal_devnum = new_encode_dev(journal_inode->i_rdev);
1775ad4eec61SEric Sandeen 		path_put(&path);
1776ad4eec61SEric Sandeen 		kfree(journal_path);
17770efb3b23SJan Kara 	} else if (token == Opt_journal_ioprio) {
17780efb3b23SJan Kara 		if (arg > 7) {
17795f3633e3SJan Kara 			ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
17800efb3b23SJan Kara 				 " (must be 0-7)");
17810efb3b23SJan Kara 			return -1;
17820efb3b23SJan Kara 		}
17830efb3b23SJan Kara 		*journal_ioprio =
17840efb3b23SJan Kara 			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
17856ddb2447STheodore Ts'o 	} else if (token == Opt_test_dummy_encryption) {
17866ddb2447STheodore Ts'o #ifdef CONFIG_EXT4_FS_ENCRYPTION
17876ddb2447STheodore Ts'o 		sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
17886ddb2447STheodore Ts'o 		ext4_msg(sb, KERN_WARNING,
17896ddb2447STheodore Ts'o 			 "Test dummy encryption mode enabled");
17906ddb2447STheodore Ts'o #else
17916ddb2447STheodore Ts'o 		ext4_msg(sb, KERN_WARNING,
17926ddb2447STheodore Ts'o 			 "Test dummy encryption mount option ignored");
17936ddb2447STheodore Ts'o #endif
179426092bf5STheodore Ts'o 	} else if (m->flags & MOPT_DATAJ) {
179526092bf5STheodore Ts'o 		if (is_remount) {
179626092bf5STheodore Ts'o 			if (!sbi->s_journal)
179726092bf5STheodore Ts'o 				ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option");
17985f3633e3SJan Kara 			else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) {
179926092bf5STheodore Ts'o 				ext4_msg(sb, KERN_ERR,
180026092bf5STheodore Ts'o 					 "Cannot change data mode on remount");
180126092bf5STheodore Ts'o 				return -1;
180226092bf5STheodore Ts'o 			}
180326092bf5STheodore Ts'o 		} else {
180426092bf5STheodore Ts'o 			clear_opt(sb, DATA_FLAGS);
180526092bf5STheodore Ts'o 			sbi->s_mount_opt |= m->mount_opt;
180626092bf5STheodore Ts'o 		}
180726092bf5STheodore Ts'o #ifdef CONFIG_QUOTA
180826092bf5STheodore Ts'o 	} else if (m->flags & MOPT_QFMT) {
180926092bf5STheodore Ts'o 		if (sb_any_quota_loaded(sb) &&
181026092bf5STheodore Ts'o 		    sbi->s_jquota_fmt != m->mount_opt) {
18115f3633e3SJan Kara 			ext4_msg(sb, KERN_ERR, "Cannot change journaled "
18125f3633e3SJan Kara 				 "quota options when quota turned on");
181326092bf5STheodore Ts'o 			return -1;
181426092bf5STheodore Ts'o 		}
1815e2b911c5SDarrick J. Wong 		if (ext4_has_feature_quota(sb)) {
1816c325a67cSTheodore Ts'o 			ext4_msg(sb, KERN_INFO,
1817c325a67cSTheodore Ts'o 				 "Quota format mount options ignored "
1818262b4662SJan Kara 				 "when QUOTA feature is enabled");
1819c325a67cSTheodore Ts'o 			return 1;
1820262b4662SJan Kara 		}
182126092bf5STheodore Ts'o 		sbi->s_jquota_fmt = m->mount_opt;
182226092bf5STheodore Ts'o #endif
1823923ae0ffSRoss Zwisler 	} else if (token == Opt_dax) {
1824ef83b6e8SDan Williams #ifdef CONFIG_FS_DAX
1825ef83b6e8SDan Williams 		ext4_msg(sb, KERN_WARNING,
1826ef83b6e8SDan Williams 		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1827ef83b6e8SDan Williams 			sbi->s_mount_opt |= m->mount_opt;
1828ef83b6e8SDan Williams #else
1829923ae0ffSRoss Zwisler 		ext4_msg(sb, KERN_INFO, "dax option not supported");
1830923ae0ffSRoss Zwisler 		return -1;
1831923ae0ffSRoss Zwisler #endif
18327915a861SAles Novak 	} else if (token == Opt_data_err_abort) {
18337915a861SAles Novak 		sbi->s_mount_opt |= m->mount_opt;
18347915a861SAles Novak 	} else if (token == Opt_data_err_ignore) {
18357915a861SAles Novak 		sbi->s_mount_opt &= ~m->mount_opt;
183626092bf5STheodore Ts'o 	} else {
183726092bf5STheodore Ts'o 		if (!args->from)
183826092bf5STheodore Ts'o 			arg = 1;
183926092bf5STheodore Ts'o 		if (m->flags & MOPT_CLEAR)
184026092bf5STheodore Ts'o 			arg = !arg;
184126092bf5STheodore Ts'o 		else if (unlikely(!(m->flags & MOPT_SET))) {
184226092bf5STheodore Ts'o 			ext4_msg(sb, KERN_WARNING,
184326092bf5STheodore Ts'o 				 "buggy handling of option %s", opt);
184426092bf5STheodore Ts'o 			WARN_ON(1);
184526092bf5STheodore Ts'o 			return -1;
184626092bf5STheodore Ts'o 		}
184726092bf5STheodore Ts'o 		if (arg != 0)
184826092bf5STheodore Ts'o 			sbi->s_mount_opt |= m->mount_opt;
184926092bf5STheodore Ts'o 		else
185026092bf5STheodore Ts'o 			sbi->s_mount_opt &= ~m->mount_opt;
185126092bf5STheodore Ts'o 	}
185226092bf5STheodore Ts'o 	return 1;
185326092bf5STheodore Ts'o }
185426092bf5STheodore Ts'o 
1855ac27a0ecSDave Kleikamp static int parse_options(char *options, struct super_block *sb,
1856c3191067STheodore Ts'o 			 unsigned long *journal_devnum,
1857b3881f74STheodore Ts'o 			 unsigned int *journal_ioprio,
1858661aa520SEric Sandeen 			 int is_remount)
1859ac27a0ecSDave Kleikamp {
1860617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1861ac27a0ecSDave Kleikamp 	char *p;
1862ac27a0ecSDave Kleikamp 	substring_t args[MAX_OPT_ARGS];
186326092bf5STheodore Ts'o 	int token;
1864ac27a0ecSDave Kleikamp 
1865ac27a0ecSDave Kleikamp 	if (!options)
1866ac27a0ecSDave Kleikamp 		return 1;
1867ac27a0ecSDave Kleikamp 
1868ac27a0ecSDave Kleikamp 	while ((p = strsep(&options, ",")) != NULL) {
1869ac27a0ecSDave Kleikamp 		if (!*p)
1870ac27a0ecSDave Kleikamp 			continue;
187115121c18SEric Sandeen 		/*
187215121c18SEric Sandeen 		 * Initialize args struct so we know whether arg was
187315121c18SEric Sandeen 		 * found; some options take optional arguments.
187415121c18SEric Sandeen 		 */
1875caecd0afSSachin Kamat 		args[0].to = args[0].from = NULL;
1876ac27a0ecSDave Kleikamp 		token = match_token(p, tokens, args);
187726092bf5STheodore Ts'o 		if (handle_mount_opt(sb, p, token, args, journal_devnum,
187826092bf5STheodore Ts'o 				     journal_ioprio, is_remount) < 0)
1879ac27a0ecSDave Kleikamp 			return 0;
1880ac27a0ecSDave Kleikamp 	}
1881ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
188249da9392SJan Kara 	/*
188349da9392SJan Kara 	 * We do the test below only for project quotas. 'usrquota' and
188449da9392SJan Kara 	 * 'grpquota' mount options are allowed even without quota feature
188549da9392SJan Kara 	 * to support legacy quotas in quota files.
188649da9392SJan Kara 	 */
188749da9392SJan Kara 	if (test_opt(sb, PRJQUOTA) && !ext4_has_feature_project(sb)) {
188849da9392SJan Kara 		ext4_msg(sb, KERN_ERR, "Project quota feature not enabled. "
188949da9392SJan Kara 			 "Cannot enable project quota enforcement.");
189049da9392SJan Kara 		return 0;
189149da9392SJan Kara 	}
189249da9392SJan Kara 	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1893482a7425SDmitry Monakhov 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1894fd8c37ecSTheodore Ts'o 			clear_opt(sb, USRQUOTA);
1895ac27a0ecSDave Kleikamp 
1896482a7425SDmitry Monakhov 		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
1897fd8c37ecSTheodore Ts'o 			clear_opt(sb, GRPQUOTA);
1898ac27a0ecSDave Kleikamp 
189956c50f11SDmitry Monakhov 		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
1900b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "old and new quota "
1901b31e1552SEric Sandeen 					"format mixing");
1902ac27a0ecSDave Kleikamp 			return 0;
1903ac27a0ecSDave Kleikamp 		}
1904ac27a0ecSDave Kleikamp 
1905ac27a0ecSDave Kleikamp 		if (!sbi->s_jquota_fmt) {
1906b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "journaled quota format "
1907b31e1552SEric Sandeen 					"not specified");
1908ac27a0ecSDave Kleikamp 			return 0;
1909ac27a0ecSDave Kleikamp 		}
1910ac27a0ecSDave Kleikamp 	}
1911ac27a0ecSDave Kleikamp #endif
1912261cb20cSJan Kara 	if (test_opt(sb, DIOREAD_NOLOCK)) {
1913261cb20cSJan Kara 		int blocksize =
1914261cb20cSJan Kara 			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
1915261cb20cSJan Kara 
191609cbfeafSKirill A. Shutemov 		if (blocksize < PAGE_SIZE) {
1917261cb20cSJan Kara 			ext4_msg(sb, KERN_ERR, "can't mount with "
1918261cb20cSJan Kara 				 "dioread_nolock if block size != PAGE_SIZE");
1919261cb20cSJan Kara 			return 0;
1920261cb20cSJan Kara 		}
1921261cb20cSJan Kara 	}
1922ac27a0ecSDave Kleikamp 	return 1;
1923ac27a0ecSDave Kleikamp }
1924ac27a0ecSDave Kleikamp 
19252adf6da8STheodore Ts'o static inline void ext4_show_quota_options(struct seq_file *seq,
19262adf6da8STheodore Ts'o 					   struct super_block *sb)
19272adf6da8STheodore Ts'o {
19282adf6da8STheodore Ts'o #if defined(CONFIG_QUOTA)
19292adf6da8STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(sb);
19302adf6da8STheodore Ts'o 
19312adf6da8STheodore Ts'o 	if (sbi->s_jquota_fmt) {
19322adf6da8STheodore Ts'o 		char *fmtname = "";
19332adf6da8STheodore Ts'o 
19342adf6da8STheodore Ts'o 		switch (sbi->s_jquota_fmt) {
19352adf6da8STheodore Ts'o 		case QFMT_VFS_OLD:
19362adf6da8STheodore Ts'o 			fmtname = "vfsold";
19372adf6da8STheodore Ts'o 			break;
19382adf6da8STheodore Ts'o 		case QFMT_VFS_V0:
19392adf6da8STheodore Ts'o 			fmtname = "vfsv0";
19402adf6da8STheodore Ts'o 			break;
19412adf6da8STheodore Ts'o 		case QFMT_VFS_V1:
19422adf6da8STheodore Ts'o 			fmtname = "vfsv1";
19432adf6da8STheodore Ts'o 			break;
19442adf6da8STheodore Ts'o 		}
19452adf6da8STheodore Ts'o 		seq_printf(seq, ",jqfmt=%s", fmtname);
19462adf6da8STheodore Ts'o 	}
19472adf6da8STheodore Ts'o 
19482adf6da8STheodore Ts'o 	if (sbi->s_qf_names[USRQUOTA])
1949a068acf2SKees Cook 		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
19502adf6da8STheodore Ts'o 
19512adf6da8STheodore Ts'o 	if (sbi->s_qf_names[GRPQUOTA])
1952a068acf2SKees Cook 		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
19532adf6da8STheodore Ts'o #endif
19542adf6da8STheodore Ts'o }
19552adf6da8STheodore Ts'o 
19565a916be1STheodore Ts'o static const char *token2str(int token)
19575a916be1STheodore Ts'o {
195850df9fd5SHerton Ronaldo Krzesinski 	const struct match_token *t;
19595a916be1STheodore Ts'o 
19605a916be1STheodore Ts'o 	for (t = tokens; t->token != Opt_err; t++)
19615a916be1STheodore Ts'o 		if (t->token == token && !strchr(t->pattern, '='))
19625a916be1STheodore Ts'o 			break;
19635a916be1STheodore Ts'o 	return t->pattern;
19645a916be1STheodore Ts'o }
19655a916be1STheodore Ts'o 
19662adf6da8STheodore Ts'o /*
19672adf6da8STheodore Ts'o  * Show an option if
19682adf6da8STheodore Ts'o  *  - it's set to a non-default value OR
19692adf6da8STheodore Ts'o  *  - if the per-sb default is different from the global default
19702adf6da8STheodore Ts'o  */
197166acdcf4STheodore Ts'o static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
197266acdcf4STheodore Ts'o 			      int nodefs)
19732adf6da8STheodore Ts'o {
19742adf6da8STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(sb);
19752adf6da8STheodore Ts'o 	struct ext4_super_block *es = sbi->s_es;
197666acdcf4STheodore Ts'o 	int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt;
19775a916be1STheodore Ts'o 	const struct mount_opts *m;
197866acdcf4STheodore Ts'o 	char sep = nodefs ? '\n' : ',';
19792adf6da8STheodore Ts'o 
198066acdcf4STheodore Ts'o #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep)
198166acdcf4STheodore Ts'o #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg)
19822adf6da8STheodore Ts'o 
19832adf6da8STheodore Ts'o 	if (sbi->s_sb_block != 1)
19845a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block);
19855a916be1STheodore Ts'o 
19865a916be1STheodore Ts'o 	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
19875a916be1STheodore Ts'o 		int want_set = m->flags & MOPT_SET;
19885a916be1STheodore Ts'o 		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
19895a916be1STheodore Ts'o 		    (m->flags & MOPT_CLEAR_ERR))
19905a916be1STheodore Ts'o 			continue;
199166acdcf4STheodore Ts'o 		if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
19925a916be1STheodore Ts'o 			continue; /* skip if same as the default */
19935a916be1STheodore Ts'o 		if ((want_set &&
19945a916be1STheodore Ts'o 		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
19955a916be1STheodore Ts'o 		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
19965a916be1STheodore Ts'o 			continue; /* select Opt_noFoo vs Opt_Foo */
19975a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("%s", token2str(m->token));
19985a916be1STheodore Ts'o 	}
19995a916be1STheodore Ts'o 
200008cefc7aSEric W. Biederman 	if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) ||
20015a916be1STheodore Ts'o 	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID)
200208cefc7aSEric W. Biederman 		SEQ_OPTS_PRINT("resuid=%u",
200308cefc7aSEric W. Biederman 				from_kuid_munged(&init_user_ns, sbi->s_resuid));
200408cefc7aSEric W. Biederman 	if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) ||
20055a916be1STheodore Ts'o 	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID)
200608cefc7aSEric W. Biederman 		SEQ_OPTS_PRINT("resgid=%u",
200708cefc7aSEric W. Biederman 				from_kgid_munged(&init_user_ns, sbi->s_resgid));
200866acdcf4STheodore Ts'o 	def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors);
20095a916be1STheodore Ts'o 	if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO)
20105a916be1STheodore Ts'o 		SEQ_OPTS_PUTS("errors=remount-ro");
20112adf6da8STheodore Ts'o 	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
20125a916be1STheodore Ts'o 		SEQ_OPTS_PUTS("errors=continue");
20132adf6da8STheodore Ts'o 	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
20145a916be1STheodore Ts'o 		SEQ_OPTS_PUTS("errors=panic");
201566acdcf4STheodore Ts'o 	if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ)
20165a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ);
201766acdcf4STheodore Ts'o 	if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME)
20185a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
201966acdcf4STheodore Ts'o 	if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
20205a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
20212adf6da8STheodore Ts'o 	if (sb->s_flags & MS_I_VERSION)
20225a916be1STheodore Ts'o 		SEQ_OPTS_PUTS("i_version");
202366acdcf4STheodore Ts'o 	if (nodefs || sbi->s_stripe)
20245a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
202566acdcf4STheodore Ts'o 	if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) {
20262adf6da8STheodore Ts'o 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
20275a916be1STheodore Ts'o 			SEQ_OPTS_PUTS("data=journal");
20282adf6da8STheodore Ts'o 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
20295a916be1STheodore Ts'o 			SEQ_OPTS_PUTS("data=ordered");
20302adf6da8STheodore Ts'o 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
20315a916be1STheodore Ts'o 			SEQ_OPTS_PUTS("data=writeback");
20325a916be1STheodore Ts'o 	}
203366acdcf4STheodore Ts'o 	if (nodefs ||
203466acdcf4STheodore Ts'o 	    sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
20355a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("inode_readahead_blks=%u",
20362adf6da8STheodore Ts'o 			       sbi->s_inode_readahead_blks);
20372adf6da8STheodore Ts'o 
203866acdcf4STheodore Ts'o 	if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
203966acdcf4STheodore Ts'o 		       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
20405a916be1STheodore Ts'o 		SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
2041df981d03STheodore Ts'o 	if (nodefs || sbi->s_max_dir_size_kb)
2042df981d03STheodore Ts'o 		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
20437915a861SAles Novak 	if (test_opt(sb, DATA_ERR_ABORT))
20447915a861SAles Novak 		SEQ_OPTS_PUTS("data_err=abort");
20452adf6da8STheodore Ts'o 
20462adf6da8STheodore Ts'o 	ext4_show_quota_options(seq, sb);
20472adf6da8STheodore Ts'o 	return 0;
20482adf6da8STheodore Ts'o }
20492adf6da8STheodore Ts'o 
205066acdcf4STheodore Ts'o static int ext4_show_options(struct seq_file *seq, struct dentry *root)
205166acdcf4STheodore Ts'o {
205266acdcf4STheodore Ts'o 	return _ext4_show_options(seq, root->d_sb, 0);
205366acdcf4STheodore Ts'o }
205466acdcf4STheodore Ts'o 
2055ebd173beSTheodore Ts'o int ext4_seq_options_show(struct seq_file *seq, void *offset)
205666acdcf4STheodore Ts'o {
205766acdcf4STheodore Ts'o 	struct super_block *sb = seq->private;
205866acdcf4STheodore Ts'o 	int rc;
205966acdcf4STheodore Ts'o 
206066acdcf4STheodore Ts'o 	seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
206166acdcf4STheodore Ts'o 	rc = _ext4_show_options(seq, sb, 1);
206266acdcf4STheodore Ts'o 	seq_puts(seq, "\n");
206366acdcf4STheodore Ts'o 	return rc;
206466acdcf4STheodore Ts'o }
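/*
 * Editor's note (illustrative, not part of the original source):
 * _ext4_show_options() is used in two modes.  With nodefs == 0 (the
 * ->show_options path above) only options differing from the defaults are
 * emitted, separated by commas, which is what /proc/mounts displays, e.g.:
 *
 *	/dev/sda1 /mnt ext4 rw,errors=remount-ro,data=ordered 0 0
 *
 * With nodefs == 1 (ext4_seq_options_show, wired up as the per-device
 * "options" proc file) all set options are printed, one per line, since the
 * separator character chosen earlier in _ext4_show_options() is '\n' in
 * that case.  The exact output depends on the mount options in use.
 */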
206566acdcf4STheodore Ts'o 
2066617ba13bSMingming Cao static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
2067ac27a0ecSDave Kleikamp 			    int read_only)
2068ac27a0ecSDave Kleikamp {
2069617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2070ac27a0ecSDave Kleikamp 	int res = 0;
2071ac27a0ecSDave Kleikamp 
2072617ba13bSMingming Cao 	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
2073b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "revision level too high, "
2074b31e1552SEric Sandeen 			 "forcing read-only mode");
2075ac27a0ecSDave Kleikamp 		res = MS_RDONLY;
2076ac27a0ecSDave Kleikamp 	}
2077ac27a0ecSDave Kleikamp 	if (read_only)
2078281b5995STheodore Ts'o 		goto done;
2079617ba13bSMingming Cao 	if (!(sbi->s_mount_state & EXT4_VALID_FS))
2080b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
2081b31e1552SEric Sandeen 			 "running e2fsck is recommended");
2082c8b459f4SLukas Czerner 	else if (sbi->s_mount_state & EXT4_ERROR_FS)
2083b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING,
2084b31e1552SEric Sandeen 			 "warning: mounting fs with errors, "
2085b31e1552SEric Sandeen 			 "running e2fsck is recommended");
2086ed3ce80aSTao Ma 	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
2087ac27a0ecSDave Kleikamp 		 le16_to_cpu(es->s_mnt_count) >=
2088ac27a0ecSDave Kleikamp 		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
2089b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING,
2090b31e1552SEric Sandeen 			 "warning: maximal mount count reached, "
2091b31e1552SEric Sandeen 			 "running e2fsck is recommended");
2092ac27a0ecSDave Kleikamp 	else if (le32_to_cpu(es->s_checkinterval) &&
2093ac27a0ecSDave Kleikamp 		(le32_to_cpu(es->s_lastcheck) +
2094ac27a0ecSDave Kleikamp 			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
2095b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING,
2096b31e1552SEric Sandeen 			 "warning: checktime reached, "
2097b31e1552SEric Sandeen 			 "running e2fsck is recommended");
20980390131bSFrank Mayhar 	if (!sbi->s_journal)
2099216c34b2SMarcin Slusarz 		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
2100ac27a0ecSDave Kleikamp 	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
2101617ba13bSMingming Cao 		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
2102e8546d06SMarcin Slusarz 	le16_add_cpu(&es->s_mnt_count, 1);
2103ac27a0ecSDave Kleikamp 	es->s_mtime = cpu_to_le32(get_seconds());
2104617ba13bSMingming Cao 	ext4_update_dynamic_rev(sb);
21050390131bSFrank Mayhar 	if (sbi->s_journal)
2106e2b911c5SDarrick J. Wong 		ext4_set_feature_journal_needs_recovery(sb);
2107ac27a0ecSDave Kleikamp 
2108e2d67052STheodore Ts'o 	ext4_commit_super(sb, 1);
2109281b5995STheodore Ts'o done:
2110ac27a0ecSDave Kleikamp 	if (test_opt(sb, DEBUG))
2111a9df9a49STheodore Ts'o 		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
2112a2595b8aSTheodore Ts'o 				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
2113ac27a0ecSDave Kleikamp 			sb->s_blocksize,
2114ac27a0ecSDave Kleikamp 			sbi->s_groups_count,
2115617ba13bSMingming Cao 			EXT4_BLOCKS_PER_GROUP(sb),
2116617ba13bSMingming Cao 			EXT4_INODES_PER_GROUP(sb),
2117a2595b8aSTheodore Ts'o 			sbi->s_mount_opt, sbi->s_mount_opt2);
2118ac27a0ecSDave Kleikamp 
21197abc52c2SDan Magenheimer 	cleancache_init_fs(sb);
2120ac27a0ecSDave Kleikamp 	return res;
2121ac27a0ecSDave Kleikamp }
2122ac27a0ecSDave Kleikamp 
2123117fff10STheodore Ts'o int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
2124117fff10STheodore Ts'o {
2125117fff10STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2126117fff10STheodore Ts'o 	struct flex_groups *new_groups;
2127117fff10STheodore Ts'o 	int size;
2128117fff10STheodore Ts'o 
2129117fff10STheodore Ts'o 	if (!sbi->s_log_groups_per_flex)
2130117fff10STheodore Ts'o 		return 0;
2131117fff10STheodore Ts'o 
2132117fff10STheodore Ts'o 	size = ext4_flex_group(sbi, ngroup - 1) + 1;
2133117fff10STheodore Ts'o 	if (size <= sbi->s_flex_groups_allocated)
2134117fff10STheodore Ts'o 		return 0;
2135117fff10STheodore Ts'o 
2136117fff10STheodore Ts'o 	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
2137117fff10STheodore Ts'o 	new_groups = ext4_kvzalloc(size, GFP_KERNEL);
2138117fff10STheodore Ts'o 	if (!new_groups) {
2139117fff10STheodore Ts'o 		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
2140117fff10STheodore Ts'o 			 size / (int) sizeof(struct flex_groups));
2141117fff10STheodore Ts'o 		return -ENOMEM;
2142117fff10STheodore Ts'o 	}
2143117fff10STheodore Ts'o 
2144117fff10STheodore Ts'o 	if (sbi->s_flex_groups) {
2145117fff10STheodore Ts'o 		memcpy(new_groups, sbi->s_flex_groups,
2146117fff10STheodore Ts'o 		       (sbi->s_flex_groups_allocated *
2147117fff10STheodore Ts'o 			sizeof(struct flex_groups)));
2148b93b41d4SAl Viro 		kvfree(sbi->s_flex_groups);
2149117fff10STheodore Ts'o 	}
2150117fff10STheodore Ts'o 	sbi->s_flex_groups = new_groups;
2151117fff10STheodore Ts'o 	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
2152117fff10STheodore Ts'o 	return 0;
2153117fff10STheodore Ts'o }
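/*
 * Editor's note (worked example, assumed values): with
 * s_log_groups_per_flex == 4 (16 block groups per flex group) and
 * ngroup == 1000, ext4_flex_group(sbi, 999) is 999 >> 4 == 62, so 63
 * flex_groups entries are needed.  The byte count is then rounded up to a
 * power of two, which keeps repeated calls (e.g. during online resize) from
 * reallocating and copying the array every time a few groups are added.
 */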
2154117fff10STheodore Ts'o 
2155772cb7c8SJose R. Santos static int ext4_fill_flex_info(struct super_block *sb)
2156772cb7c8SJose R. Santos {
2157772cb7c8SJose R. Santos 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2158772cb7c8SJose R. Santos 	struct ext4_group_desc *gdp = NULL;
2159772cb7c8SJose R. Santos 	ext4_group_t flex_group;
2160117fff10STheodore Ts'o 	int i, err;
2161772cb7c8SJose R. Santos 
2162503358aeSTheodore Ts'o 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
2163d50f2ab6SXi Wang 	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
2164772cb7c8SJose R. Santos 		sbi->s_log_groups_per_flex = 0;
2165772cb7c8SJose R. Santos 		return 1;
2166772cb7c8SJose R. Santos 	}
2167772cb7c8SJose R. Santos 
2168117fff10STheodore Ts'o 	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
2169117fff10STheodore Ts'o 	if (err)
2170772cb7c8SJose R. Santos 		goto failed;
2171772cb7c8SJose R. Santos 
2172772cb7c8SJose R. Santos 	for (i = 0; i < sbi->s_groups_count; i++) {
217388b6edd1STheodore Ts'o 		gdp = ext4_get_group_desc(sb, i, NULL);
2174772cb7c8SJose R. Santos 
2175772cb7c8SJose R. Santos 		flex_group = ext4_flex_group(sbi, i);
21767ad9bb65STheodore Ts'o 		atomic_add(ext4_free_inodes_count(sb, gdp),
21777ad9bb65STheodore Ts'o 			   &sbi->s_flex_groups[flex_group].free_inodes);
217890ba983fSTheodore Ts'o 		atomic64_add(ext4_free_group_clusters(sb, gdp),
217924aaa8efSTheodore Ts'o 			     &sbi->s_flex_groups[flex_group].free_clusters);
21807ad9bb65STheodore Ts'o 		atomic_add(ext4_used_dirs_count(sb, gdp),
21817ad9bb65STheodore Ts'o 			   &sbi->s_flex_groups[flex_group].used_dirs);
2182772cb7c8SJose R. Santos 	}
2183772cb7c8SJose R. Santos 
2184772cb7c8SJose R. Santos 	return 1;
2185772cb7c8SJose R. Santos failed:
2186772cb7c8SJose R. Santos 	return 0;
2187772cb7c8SJose R. Santos }
2188772cb7c8SJose R. Santos 
2189e2b911c5SDarrick J. Wong static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
2190717d50e4SAndreas Dilger 				   struct ext4_group_desc *gdp)
2191717d50e4SAndreas Dilger {
2192b47820edSDaeho Jeong 	int offset = offsetof(struct ext4_group_desc, bg_checksum);
2193717d50e4SAndreas Dilger 	__u16 crc = 0;
2194717d50e4SAndreas Dilger 	__le32 le_group = cpu_to_le32(block_group);
2195e2b911c5SDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2196717d50e4SAndreas Dilger 
21979aa5d32bSDmitry Monakhov 	if (ext4_has_metadata_csum(sbi->s_sb)) {
2198feb0ab32SDarrick J. Wong 		/* Use new metadata_csum algorithm */
2199feb0ab32SDarrick J. Wong 		__u32 csum32;
2200b47820edSDaeho Jeong 		__u16 dummy_csum = 0;
2201feb0ab32SDarrick J. Wong 
2202feb0ab32SDarrick J. Wong 		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
2203feb0ab32SDarrick J. Wong 				     sizeof(le_group));
2204b47820edSDaeho Jeong 		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
2205b47820edSDaeho Jeong 		csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
2206b47820edSDaeho Jeong 				     sizeof(dummy_csum));
2207b47820edSDaeho Jeong 		offset += sizeof(dummy_csum);
2208b47820edSDaeho Jeong 		if (offset < sbi->s_desc_size)
2209b47820edSDaeho Jeong 			csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
2210b47820edSDaeho Jeong 					     sbi->s_desc_size - offset);
2211feb0ab32SDarrick J. Wong 
2212feb0ab32SDarrick J. Wong 		crc = csum32 & 0xFFFF;
2213feb0ab32SDarrick J. Wong 		goto out;
2214feb0ab32SDarrick J. Wong 	}
2215feb0ab32SDarrick J. Wong 
2216feb0ab32SDarrick J. Wong 	/* old crc16 code */
2217e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_gdt_csum(sb))
2218813d32f9SDarrick J. Wong 		return 0;
2219813d32f9SDarrick J. Wong 
2220717d50e4SAndreas Dilger 	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
2221717d50e4SAndreas Dilger 	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
2222717d50e4SAndreas Dilger 	crc = crc16(crc, (__u8 *)gdp, offset);
2223717d50e4SAndreas Dilger 	offset += sizeof(gdp->bg_checksum); /* skip checksum */
2224717d50e4SAndreas Dilger 	/* for checksum of struct ext4_group_desc do the rest...*/
2225e2b911c5SDarrick J. Wong 	if (ext4_has_feature_64bit(sb) &&
2226717d50e4SAndreas Dilger 	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
2227717d50e4SAndreas Dilger 		crc = crc16(crc, (__u8 *)gdp + offset,
2228717d50e4SAndreas Dilger 			    le16_to_cpu(sbi->s_es->s_desc_size) -
2229717d50e4SAndreas Dilger 				offset);
2230717d50e4SAndreas Dilger 
2231feb0ab32SDarrick J. Wong out:
2232717d50e4SAndreas Dilger 	return cpu_to_le16(crc);
2233717d50e4SAndreas Dilger }
2234717d50e4SAndreas Dilger 
2235feb0ab32SDarrick J. Wong int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
2236717d50e4SAndreas Dilger 				struct ext4_group_desc *gdp)
2237717d50e4SAndreas Dilger {
2238feb0ab32SDarrick J. Wong 	if (ext4_has_group_desc_csum(sb) &&
2239e2b911c5SDarrick J. Wong 	    (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp)))
2240717d50e4SAndreas Dilger 		return 0;
2241717d50e4SAndreas Dilger 
2242717d50e4SAndreas Dilger 	return 1;
2243717d50e4SAndreas Dilger }
2244717d50e4SAndreas Dilger 
2245feb0ab32SDarrick J. Wong void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
2246feb0ab32SDarrick J. Wong 			      struct ext4_group_desc *gdp)
2247feb0ab32SDarrick J. Wong {
2248feb0ab32SDarrick J. Wong 	if (!ext4_has_group_desc_csum(sb))
2249feb0ab32SDarrick J. Wong 		return;
2250e2b911c5SDarrick J. Wong 	gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp);
2251feb0ab32SDarrick J. Wong }
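/*
 * Editor's note (usage sketch, not taken verbatim from a call site): code
 * that modifies a group descriptor is expected to refresh the checksum
 * before dirtying the buffer, typically under the group lock, roughly:
 *
 *	ext4_lock_group(sb, group);
 *	ext4_free_group_clusters_set(sb, gdp, new_free_clusters);
 *	ext4_group_desc_csum_set(sb, group, gdp);
 *	ext4_unlock_group(sb, group);
 *	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
 *
 * The exact sequence varies between the block and inode allocators.
 */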
2252feb0ab32SDarrick J. Wong 
2253ac27a0ecSDave Kleikamp /* Called at mount-time, super-block is locked */
2254bfff6873SLukas Czerner static int ext4_check_descriptors(struct super_block *sb,
2255829fa70dSTheodore Ts'o 				  ext4_fsblk_t sb_block,
2256bfff6873SLukas Czerner 				  ext4_group_t *first_not_zeroed)
2257ac27a0ecSDave Kleikamp {
2258617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2259617ba13bSMingming Cao 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
2260617ba13bSMingming Cao 	ext4_fsblk_t last_block;
2261bd81d8eeSLaurent Vivier 	ext4_fsblk_t block_bitmap;
2262bd81d8eeSLaurent Vivier 	ext4_fsblk_t inode_bitmap;
2263bd81d8eeSLaurent Vivier 	ext4_fsblk_t inode_table;
2264ce421581SJose R. Santos 	int flexbg_flag = 0;
2265bfff6873SLukas Czerner 	ext4_group_t i, grp = sbi->s_groups_count;
2266ac27a0ecSDave Kleikamp 
2267e2b911c5SDarrick J. Wong 	if (ext4_has_feature_flex_bg(sb))
2268ce421581SJose R. Santos 		flexbg_flag = 1;
2269ce421581SJose R. Santos 
2270617ba13bSMingming Cao 	ext4_debug("Checking group descriptors");
2271ac27a0ecSDave Kleikamp 
2272197cd65aSAkinobu Mita 	for (i = 0; i < sbi->s_groups_count; i++) {
2273197cd65aSAkinobu Mita 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
2274197cd65aSAkinobu Mita 
2275ce421581SJose R. Santos 		if (i == sbi->s_groups_count - 1 || flexbg_flag)
2276bd81d8eeSLaurent Vivier 			last_block = ext4_blocks_count(sbi->s_es) - 1;
2277ac27a0ecSDave Kleikamp 		else
2278ac27a0ecSDave Kleikamp 			last_block = first_block +
2279617ba13bSMingming Cao 				(EXT4_BLOCKS_PER_GROUP(sb) - 1);
2280ac27a0ecSDave Kleikamp 
2281bfff6873SLukas Czerner 		if ((grp == sbi->s_groups_count) &&
2282bfff6873SLukas Czerner 		   !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
2283bfff6873SLukas Czerner 			grp = i;
2284bfff6873SLukas Czerner 
22858fadc143SAlexandre Ratchov 		block_bitmap = ext4_block_bitmap(sb, gdp);
2286829fa70dSTheodore Ts'o 		if (block_bitmap == sb_block) {
2287829fa70dSTheodore Ts'o 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2288829fa70dSTheodore Ts'o 				 "Block bitmap for group %u overlaps "
2289829fa70dSTheodore Ts'o 				 "superblock", i);
2290829fa70dSTheodore Ts'o 		}
22912b2d6d01STheodore Ts'o 		if (block_bitmap < first_block || block_bitmap > last_block) {
2292b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2293a9df9a49STheodore Ts'o 			       "Block bitmap for group %u not in group "
2294b31e1552SEric Sandeen 			       "(block %llu)!", i, block_bitmap);
2295ac27a0ecSDave Kleikamp 			return 0;
2296ac27a0ecSDave Kleikamp 		}
22978fadc143SAlexandre Ratchov 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
2298829fa70dSTheodore Ts'o 		if (inode_bitmap == sb_block) {
2299829fa70dSTheodore Ts'o 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2300829fa70dSTheodore Ts'o 				 "Inode bitmap for group %u overlaps "
2301829fa70dSTheodore Ts'o 				 "superblock", i);
2302829fa70dSTheodore Ts'o 		}
23032b2d6d01STheodore Ts'o 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
2304b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2305a9df9a49STheodore Ts'o 			       "Inode bitmap for group %u not in group "
2306b31e1552SEric Sandeen 			       "(block %llu)!", i, inode_bitmap);
2307ac27a0ecSDave Kleikamp 			return 0;
2308ac27a0ecSDave Kleikamp 		}
23098fadc143SAlexandre Ratchov 		inode_table = ext4_inode_table(sb, gdp);
2310829fa70dSTheodore Ts'o 		if (inode_table == sb_block) {
2311829fa70dSTheodore Ts'o 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2312829fa70dSTheodore Ts'o 				 "Inode table for group %u overlaps "
2313829fa70dSTheodore Ts'o 				 "superblock", i);
2314829fa70dSTheodore Ts'o 		}
2315bd81d8eeSLaurent Vivier 		if (inode_table < first_block ||
23162b2d6d01STheodore Ts'o 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
2317b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2318a9df9a49STheodore Ts'o 			       "Inode table for group %u not in group "
2319b31e1552SEric Sandeen 			       "(block %llu)!", i, inode_table);
2320ac27a0ecSDave Kleikamp 			return 0;
2321ac27a0ecSDave Kleikamp 		}
2322955ce5f5SAneesh Kumar K.V 		ext4_lock_group(sb, i);
2323feb0ab32SDarrick J. Wong 		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
2324b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2325b31e1552SEric Sandeen 				 "Checksum for group %u failed (%u!=%u)",
2326e2b911c5SDarrick J. Wong 				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
2327fd2d4291SAvantika Mathur 				     gdp)), le16_to_cpu(gdp->bg_checksum));
23287ee1ec4cSLi Zefan 			if (!(sb->s_flags & MS_RDONLY)) {
2329955ce5f5SAneesh Kumar K.V 				ext4_unlock_group(sb, i);
2330717d50e4SAndreas Dilger 				return 0;
2331717d50e4SAndreas Dilger 			}
23327ee1ec4cSLi Zefan 		}
2333955ce5f5SAneesh Kumar K.V 		ext4_unlock_group(sb, i);
2334ce421581SJose R. Santos 		if (!flexbg_flag)
2335617ba13bSMingming Cao 			first_block += EXT4_BLOCKS_PER_GROUP(sb);
2336ac27a0ecSDave Kleikamp 	}
2337bfff6873SLukas Czerner 	if (NULL != first_not_zeroed)
2338bfff6873SLukas Czerner 		*first_not_zeroed = grp;
2339ac27a0ecSDave Kleikamp 	return 1;
2340ac27a0ecSDave Kleikamp }
2341ac27a0ecSDave Kleikamp 
2342617ba13bSMingming Cao /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
2343ac27a0ecSDave Kleikamp  * the superblock) which were deleted from all directories, but held open by
2344ac27a0ecSDave Kleikamp  * a process at the time of a crash.  We walk the list and try to delete these
2345ac27a0ecSDave Kleikamp  * inodes at recovery time (only with a read-write filesystem).
2346ac27a0ecSDave Kleikamp  *
2347ac27a0ecSDave Kleikamp  * In order to keep the orphan inode chain consistent during traversal (in
2348ac27a0ecSDave Kleikamp  * case of crash during recovery), we link each inode into the superblock
2349ac27a0ecSDave Kleikamp  * orphan list_head and handle it the same way as an inode deletion during
2350ac27a0ecSDave Kleikamp  * normal operation (which journals the operations for us).
2351ac27a0ecSDave Kleikamp  *
2352ac27a0ecSDave Kleikamp  * We only do an iget() and an iput() on each inode, which is very safe if we
2353ac27a0ecSDave Kleikamp  * accidentally point at an in-use or already deleted inode.  The worst that
2354ac27a0ecSDave Kleikamp  * can happen in this case is that we get a "bit already cleared" message from
2355617ba13bSMingming Cao  * ext4_free_inode().  The only reason we would point at a wrong inode is if
2356ac27a0ecSDave Kleikamp  * e2fsck was run on this filesystem, and it must have already done the orphan
2357ac27a0ecSDave Kleikamp  * inode cleanup for us, so we can safely abort without any further action.
2358ac27a0ecSDave Kleikamp  */
2359617ba13bSMingming Cao static void ext4_orphan_cleanup(struct super_block *sb,
2360617ba13bSMingming Cao 				struct ext4_super_block *es)
2361ac27a0ecSDave Kleikamp {
2362ac27a0ecSDave Kleikamp 	unsigned int s_flags = sb->s_flags;
23632c98eb5eSTheodore Ts'o 	int ret, nr_orphans = 0, nr_truncates = 0;
2364ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
2365ac27a0ecSDave Kleikamp 	int i;
2366ac27a0ecSDave Kleikamp #endif
2367ac27a0ecSDave Kleikamp 	if (!es->s_last_orphan) {
2368ac27a0ecSDave Kleikamp 		jbd_debug(4, "no orphan inodes to clean up\n");
2369ac27a0ecSDave Kleikamp 		return;
2370ac27a0ecSDave Kleikamp 	}
2371ac27a0ecSDave Kleikamp 
2372a8f48a95SEric Sandeen 	if (bdev_read_only(sb->s_bdev)) {
2373b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "write access "
2374b31e1552SEric Sandeen 			"unavailable, skipping orphan cleanup");
2375a8f48a95SEric Sandeen 		return;
2376a8f48a95SEric Sandeen 	}
2377a8f48a95SEric Sandeen 
2378d39195c3SAmir Goldstein 	/* Check if the feature set would not allow an r/w mount */
2379d39195c3SAmir Goldstein 	if (!ext4_feature_set_ok(sb, 0)) {
2380d39195c3SAmir Goldstein 		ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
2381d39195c3SAmir Goldstein 			 "unknown ROCOMPAT features");
2382d39195c3SAmir Goldstein 		return;
2383d39195c3SAmir Goldstein 	}
2384d39195c3SAmir Goldstein 
2385617ba13bSMingming Cao 	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2386c25f9bc6SEric Sandeen 		/* don't clear list on RO mount w/ errors */
2387c25f9bc6SEric Sandeen 		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
238884474976SDmitry Monakhov 			ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
2389ac27a0ecSDave Kleikamp 				  "clearing orphan list.\n");
2390ac27a0ecSDave Kleikamp 			es->s_last_orphan = 0;
2391c25f9bc6SEric Sandeen 		}
2392ac27a0ecSDave Kleikamp 		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2393ac27a0ecSDave Kleikamp 		return;
2394ac27a0ecSDave Kleikamp 	}
2395ac27a0ecSDave Kleikamp 
2396ac27a0ecSDave Kleikamp 	if (s_flags & MS_RDONLY) {
2397b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
2398ac27a0ecSDave Kleikamp 		sb->s_flags &= ~MS_RDONLY;
2399ac27a0ecSDave Kleikamp 	}
2400ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
2401ac27a0ecSDave Kleikamp 	/* Needed for iput() to work correctly and not trash data */
2402ac27a0ecSDave Kleikamp 	sb->s_flags |= MS_ACTIVE;
2403ac27a0ecSDave Kleikamp 	/* Turn on quotas so that they are updated correctly */
2404a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2405617ba13bSMingming Cao 		if (EXT4_SB(sb)->s_qf_names[i]) {
2406617ba13bSMingming Cao 			int ret = ext4_quota_on_mount(sb, i);
2407ac27a0ecSDave Kleikamp 			if (ret < 0)
2408b31e1552SEric Sandeen 				ext4_msg(sb, KERN_ERR,
2409b31e1552SEric Sandeen 					"Cannot turn on journaled "
2410b31e1552SEric Sandeen 					"quota: error %d", ret);
2411ac27a0ecSDave Kleikamp 		}
2412ac27a0ecSDave Kleikamp 	}
2413ac27a0ecSDave Kleikamp #endif
2414ac27a0ecSDave Kleikamp 
2415ac27a0ecSDave Kleikamp 	while (es->s_last_orphan) {
2416ac27a0ecSDave Kleikamp 		struct inode *inode;
2417ac27a0ecSDave Kleikamp 
2418c65d5c6cSVegard Nossum 		/*
2419c65d5c6cSVegard Nossum 		 * We may have encountered an error during cleanup; if
2420c65d5c6cSVegard Nossum 		 * so, skip the rest.
2421c65d5c6cSVegard Nossum 		 */
2422c65d5c6cSVegard Nossum 		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2423c65d5c6cSVegard Nossum 			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2424c65d5c6cSVegard Nossum 			es->s_last_orphan = 0;
2425c65d5c6cSVegard Nossum 			break;
2426c65d5c6cSVegard Nossum 		}
2427c65d5c6cSVegard Nossum 
242897bd42b9SJosef Bacik 		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
242997bd42b9SJosef Bacik 		if (IS_ERR(inode)) {
2430ac27a0ecSDave Kleikamp 			es->s_last_orphan = 0;
2431ac27a0ecSDave Kleikamp 			break;
2432ac27a0ecSDave Kleikamp 		}
2433ac27a0ecSDave Kleikamp 
2434617ba13bSMingming Cao 		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
2435871a2931SChristoph Hellwig 		dquot_initialize(inode);
2436ac27a0ecSDave Kleikamp 		if (inode->i_nlink) {
2437566370a2SPaul Taysom 			if (test_opt(sb, DEBUG))
2438b31e1552SEric Sandeen 				ext4_msg(sb, KERN_DEBUG,
2439b31e1552SEric Sandeen 					"%s: truncating inode %lu to %lld bytes",
244046e665e9SHarvey Harrison 					__func__, inode->i_ino, inode->i_size);
2441e5f8eab8STheodore Ts'o 			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
2442ac27a0ecSDave Kleikamp 				  inode->i_ino, inode->i_size);
24435955102cSAl Viro 			inode_lock(inode);
244455f252c9SLukas Czerner 			truncate_inode_pages(inode->i_mapping, inode->i_size);
24452c98eb5eSTheodore Ts'o 			ret = ext4_truncate(inode);
24462c98eb5eSTheodore Ts'o 			if (ret)
24472c98eb5eSTheodore Ts'o 				ext4_std_error(inode->i_sb, ret);
24485955102cSAl Viro 			inode_unlock(inode);
2449ac27a0ecSDave Kleikamp 			nr_truncates++;
2450ac27a0ecSDave Kleikamp 		} else {
2451566370a2SPaul Taysom 			if (test_opt(sb, DEBUG))
2452b31e1552SEric Sandeen 				ext4_msg(sb, KERN_DEBUG,
2453b31e1552SEric Sandeen 					"%s: deleting unreferenced inode %lu",
245446e665e9SHarvey Harrison 					__func__, inode->i_ino);
2455ac27a0ecSDave Kleikamp 			jbd_debug(2, "deleting unreferenced inode %lu\n",
2456ac27a0ecSDave Kleikamp 				  inode->i_ino);
2457ac27a0ecSDave Kleikamp 			nr_orphans++;
2458ac27a0ecSDave Kleikamp 		}
2459ac27a0ecSDave Kleikamp 		iput(inode);  /* The delete magic happens here! */
2460ac27a0ecSDave Kleikamp 	}
2461ac27a0ecSDave Kleikamp 
2462ac27a0ecSDave Kleikamp #define PLURAL(x) (x), ((x) == 1) ? "" : "s"
2463ac27a0ecSDave Kleikamp 
2464ac27a0ecSDave Kleikamp 	if (nr_orphans)
2465b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
2466b31e1552SEric Sandeen 		       PLURAL(nr_orphans));
2467ac27a0ecSDave Kleikamp 	if (nr_truncates)
2468b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
2469b31e1552SEric Sandeen 		       PLURAL(nr_truncates));
2470ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
2471ac27a0ecSDave Kleikamp 	/* Turn quotas off */
2472a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2473ac27a0ecSDave Kleikamp 		if (sb_dqopt(sb)->files[i])
2474287a8095SChristoph Hellwig 			dquot_quota_off(sb, i);
2475ac27a0ecSDave Kleikamp 	}
2476ac27a0ecSDave Kleikamp #endif
2477ac27a0ecSDave Kleikamp 	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
2478ac27a0ecSDave Kleikamp }
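/*
 * Editor's note (illustrative): the on-disk orphan list is a singly-linked
 * chain.  es->s_last_orphan holds the inode number of the most recently
 * orphaned inode, and each orphan's on-disk i_dtime field is reused as a
 * link to the next orphan while the inode is on the list, e.g.:
 *
 *	s_last_orphan -> inode 1234 -> inode 77 -> 0 (end of list)
 *
 * The loop above repeatedly processes the head of the chain; deleting or
 * truncating each inode drops it from the list and advances s_last_orphan.
 */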
24790b8e58a1SAndreas Dilger 
2480cd2291a4SEric Sandeen /*
2481cd2291a4SEric Sandeen  * Maximal extent format file size.
2482cd2291a4SEric Sandeen  * Resulting logical blkno at s_maxbytes must fit in our on-disk
2483cd2291a4SEric Sandeen  * extent format containers, within a sector_t, and within i_blocks
2484cd2291a4SEric Sandeen  * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
2485cd2291a4SEric Sandeen  * so that won't be a limiting factor.
2486cd2291a4SEric Sandeen  *
2487f17722f9SLukas Czerner  * However, there is another limiting factor: we store extents as a starting
2488f17722f9SLukas Czerner  * block and a length, so the length of an extent covering the maximum file
2489f17722f9SLukas Czerner  * size must also fit into the on-disk format containers. Since the length
2490f17722f9SLukas Czerner  * is always one unit larger than the highest covered block offset (block 0
2491f17722f9SLukas Czerner  * counts as well), we have to lower s_maxbytes by one fs block.
2492f17722f9SLukas Czerner  *
2493cd2291a4SEric Sandeen  * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2494cd2291a4SEric Sandeen  */
2495f287a1a5STheodore Ts'o static loff_t ext4_max_size(int blkbits, int has_huge_files)
2496cd2291a4SEric Sandeen {
2497cd2291a4SEric Sandeen 	loff_t res;
2498cd2291a4SEric Sandeen 	loff_t upper_limit = MAX_LFS_FILESIZE;
2499cd2291a4SEric Sandeen 
2500cd2291a4SEric Sandeen 	/* small i_blocks in vfs inode? */
2501f287a1a5STheodore Ts'o 	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2502cd2291a4SEric Sandeen 		/*
250390c699a9SBartlomiej Zolnierkiewicz 		 * CONFIG_LBDAF is not enabled implies the inode
2504cd2291a4SEric Sandeen 		 * i_block represent total blocks in 512 bytes
2505cd2291a4SEric Sandeen 		 * 32 == size of vfs inode i_blocks * 8
2506cd2291a4SEric Sandeen 		 */
2507cd2291a4SEric Sandeen 		upper_limit = (1LL << 32) - 1;
2508cd2291a4SEric Sandeen 
2509cd2291a4SEric Sandeen 		/* total blocks in file system block size */
2510cd2291a4SEric Sandeen 		upper_limit >>= (blkbits - 9);
2511cd2291a4SEric Sandeen 		upper_limit <<= blkbits;
2512cd2291a4SEric Sandeen 	}
2513cd2291a4SEric Sandeen 
2514f17722f9SLukas Czerner 	/*
2515f17722f9SLukas Czerner 	 * 32-bit extent-start container, ee_block. We lower the maxbytes
2516f17722f9SLukas Czerner 	 * by one fs block, so ee_len can cover the extent of maximum file
2517f17722f9SLukas Czerner 	 * size
2518f17722f9SLukas Czerner 	 */
2519f17722f9SLukas Czerner 	res = (1LL << 32) - 1;
2520cd2291a4SEric Sandeen 	res <<= blkbits;
2521cd2291a4SEric Sandeen 
2522cd2291a4SEric Sandeen 	/* Sanity check against vm- & vfs- imposed limits */
2523cd2291a4SEric Sandeen 	if (res > upper_limit)
2524cd2291a4SEric Sandeen 		res = upper_limit;
2525cd2291a4SEric Sandeen 
2526cd2291a4SEric Sandeen 	return res;
2527cd2291a4SEric Sandeen }
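/*
 * Editor's note (worked example): for 4 KiB blocks (blkbits == 12) with
 * huge files supported, res = (2^32 - 1) << 12 = 2^44 - 4096, i.e. the
 * extent-format limit is one filesystem block short of 16 TiB before being
 * clamped against upper_limit (MAX_LFS_FILESIZE or the small-i_blocks case).
 */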
2528ac27a0ecSDave Kleikamp 
2529ac27a0ecSDave Kleikamp /*
2530cd2291a4SEric Sandeen  * Maximal bitmap file size.  There is a limit from the direct, indirect,
25310fc1b451SAneesh Kumar K.V  * double-indirect and triple-indirect block geometry, and also a limit of
25320fc1b451SAneesh Kumar K.V  * (2^48 - 1) 512-byte sectors in i_blocks; we need to be 1 filesystem block less than the 2^48 sector limit.
2533ac27a0ecSDave Kleikamp  */
2534f287a1a5STheodore Ts'o static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
2535ac27a0ecSDave Kleikamp {
2536617ba13bSMingming Cao 	loff_t res = EXT4_NDIR_BLOCKS;
25370fc1b451SAneesh Kumar K.V 	int meta_blocks;
25380fc1b451SAneesh Kumar K.V 	loff_t upper_limit;
25390b8e58a1SAndreas Dilger 	/* This is calculated to be the largest file size for a dense, block
25400b8e58a1SAndreas Dilger 	 * mapped file such that the file's total number of 512-byte sectors,
25410b8e58a1SAndreas Dilger 	 * including data and all indirect blocks, does not exceed (2^48 - 1).
25420b8e58a1SAndreas Dilger 	 *
25430b8e58a1SAndreas Dilger 	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
25440b8e58a1SAndreas Dilger 	 * number of 512-byte sectors of the file.
25450fc1b451SAneesh Kumar K.V 	 */
25460fc1b451SAneesh Kumar K.V 
2547f287a1a5STheodore Ts'o 	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
25480fc1b451SAneesh Kumar K.V 		/*
254990c699a9SBartlomiej Zolnierkiewicz 		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
25500b8e58a1SAndreas Dilger 		 * the inode i_block field represents total file blocks in
25510b8e58a1SAndreas Dilger 		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
25520fc1b451SAneesh Kumar K.V 		 */
25530fc1b451SAneesh Kumar K.V 		upper_limit = (1LL << 32) - 1;
25540fc1b451SAneesh Kumar K.V 
25550fc1b451SAneesh Kumar K.V 		/* total blocks in file system block size */
25560fc1b451SAneesh Kumar K.V 		upper_limit >>= (bits - 9);
25570fc1b451SAneesh Kumar K.V 
25580fc1b451SAneesh Kumar K.V 	} else {
25598180a562SAneesh Kumar K.V 		/*
25608180a562SAneesh Kumar K.V 		 * We use 48 bit ext4_inode i_blocks
25618180a562SAneesh Kumar K.V 		 * With EXT4_HUGE_FILE_FL set the i_blocks
25628180a562SAneesh Kumar K.V 		 * represent total number of blocks in
25638180a562SAneesh Kumar K.V 		 * file system block size
25648180a562SAneesh Kumar K.V 		 */
25650fc1b451SAneesh Kumar K.V 		upper_limit = (1LL << 48) - 1;
25660fc1b451SAneesh Kumar K.V 
25670fc1b451SAneesh Kumar K.V 	}
25680fc1b451SAneesh Kumar K.V 
25690fc1b451SAneesh Kumar K.V 	/* indirect blocks */
25700fc1b451SAneesh Kumar K.V 	meta_blocks = 1;
25710fc1b451SAneesh Kumar K.V 	/* double indirect blocks */
25720fc1b451SAneesh Kumar K.V 	meta_blocks += 1 + (1LL << (bits-2));
25730fc1b451SAneesh Kumar K.V 	/* tripple indirect blocks */
25740fc1b451SAneesh Kumar K.V 	/* triple indirect blocks */
25750fc1b451SAneesh Kumar K.V 
25760fc1b451SAneesh Kumar K.V 	upper_limit -= meta_blocks;
25770fc1b451SAneesh Kumar K.V 	upper_limit <<= bits;
2578ac27a0ecSDave Kleikamp 
2579ac27a0ecSDave Kleikamp 	res += 1LL << (bits-2);
2580ac27a0ecSDave Kleikamp 	res += 1LL << (2*(bits-2));
2581ac27a0ecSDave Kleikamp 	res += 1LL << (3*(bits-2));
2582ac27a0ecSDave Kleikamp 	res <<= bits;
2583ac27a0ecSDave Kleikamp 	if (res > upper_limit)
2584ac27a0ecSDave Kleikamp 		res = upper_limit;
25850fc1b451SAneesh Kumar K.V 
25860fc1b451SAneesh Kumar K.V 	if (res > MAX_LFS_FILESIZE)
25870fc1b451SAneesh Kumar K.V 		res = MAX_LFS_FILESIZE;
25880fc1b451SAneesh Kumar K.V 
2589ac27a0ecSDave Kleikamp 	return res;
2590ac27a0ecSDave Kleikamp }
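/*
 * Editor's note (worked example): for 4 KiB blocks (bits == 12) the data
 * blocks reachable through the indirect tree number
 * 12 + 2^10 + 2^20 + 2^30, so res works out to a little over 4 TiB once
 * shifted by the block size.  With huge files enabled the (2^48 - 1)-block
 * upper_limit is far larger, so the indirect-tree geometry is what caps
 * block-mapped files in that configuration.
 */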
2591ac27a0ecSDave Kleikamp 
2592617ba13bSMingming Cao static ext4_fsblk_t descriptor_loc(struct super_block *sb,
259370bbb3e0SAndrew Morton 				   ext4_fsblk_t logical_sb_block, int nr)
2594ac27a0ecSDave Kleikamp {
2595617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2596fd2d4291SAvantika Mathur 	ext4_group_t bg, first_meta_bg;
2597ac27a0ecSDave Kleikamp 	int has_super = 0;
2598ac27a0ecSDave Kleikamp 
2599ac27a0ecSDave Kleikamp 	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
2600ac27a0ecSDave Kleikamp 
2601e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
260270bbb3e0SAndrew Morton 		return logical_sb_block + nr + 1;
2603ac27a0ecSDave Kleikamp 	bg = sbi->s_desc_per_block * nr;
2604617ba13bSMingming Cao 	if (ext4_bg_has_super(sb, bg))
2605ac27a0ecSDave Kleikamp 		has_super = 1;
26060b8e58a1SAndreas Dilger 
2607bd63f6b0SDarrick J. Wong 	/*
2608bd63f6b0SDarrick J. Wong 	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
2609bd63f6b0SDarrick J. Wong 	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
2610bd63f6b0SDarrick J. Wong 	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
2611bd63f6b0SDarrick J. Wong 	 * compensate.
2612bd63f6b0SDarrick J. Wong 	 */
2613bd63f6b0SDarrick J. Wong 	if (sb->s_blocksize == 1024 && nr == 0 &&
2614bd63f6b0SDarrick J. Wong 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
2615bd63f6b0SDarrick J. Wong 		has_super++;
2616bd63f6b0SDarrick J. Wong 
2617617ba13bSMingming Cao 	return (has_super + ext4_group_first_block_no(sb, bg));
2618ac27a0ecSDave Kleikamp }
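/*
 * Editor's note (illustrative): without meta_bg (or for descriptors below
 * s_first_meta_bg) descriptor block nr simply follows the superblock copy,
 * i.e. logical_sb_block + nr + 1.  With meta_bg, assuming 32-byte
 * descriptors and 4 KiB blocks (128 descriptors per block), descriptor
 * block nr == 2 covers groups starting at 128 * 2 == 256 and is placed at
 * the first block of group 256, plus one if that group carries a
 * superblock backup.
 */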
2619ac27a0ecSDave Kleikamp 
2620c9de560dSAlex Tomas /**
2621c9de560dSAlex Tomas  * ext4_get_stripe_size: Get the stripe size.
2622c9de560dSAlex Tomas  * @sbi: In memory super block info
2623c9de560dSAlex Tomas  *
2624c9de560dSAlex Tomas  * If a stripe size was specified via mount option, use that
2625c9de560dSAlex Tomas  * value. If the value specified at mount time is greater than the
2626c9de560dSAlex Tomas  * blocks per group, fall back to the super block value.
2627c9de560dSAlex Tomas  * If the super block value is also greater than blocks per group, return 0.
2628c9de560dSAlex Tomas  * The allocator needs it to be less than blocks per group.
2629c9de560dSAlex Tomas  *
2630c9de560dSAlex Tomas  */
2631c9de560dSAlex Tomas static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2632c9de560dSAlex Tomas {
2633c9de560dSAlex Tomas 	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
2634c9de560dSAlex Tomas 	unsigned long stripe_width =
2635c9de560dSAlex Tomas 			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
26363eb08658SDan Ehrenberg 	int ret;
2637c9de560dSAlex Tomas 
2638c9de560dSAlex Tomas 	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
26393eb08658SDan Ehrenberg 		ret = sbi->s_stripe;
26403eb08658SDan Ehrenberg 	else if (stripe_width <= sbi->s_blocks_per_group)
26413eb08658SDan Ehrenberg 		ret = stripe_width;
26423eb08658SDan Ehrenberg 	else if (stride <= sbi->s_blocks_per_group)
26433eb08658SDan Ehrenberg 		ret = stride;
26443eb08658SDan Ehrenberg 	else
26453eb08658SDan Ehrenberg 		ret = 0;
2646c9de560dSAlex Tomas 
26473eb08658SDan Ehrenberg 	/*
26483eb08658SDan Ehrenberg 	 * If the stripe width is 1, this makes no sense and
26493eb08658SDan Ehrenberg 	 * we set it to 0 to turn off stripe handling code.
26503eb08658SDan Ehrenberg 	 */
26513eb08658SDan Ehrenberg 	if (ret <= 1)
26523eb08658SDan Ehrenberg 		ret = 0;
2653c9de560dSAlex Tomas 
26543eb08658SDan Ehrenberg 	return ret;
2655c9de560dSAlex Tomas }
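/*
 * Editor's note (illustrative): s_raid_stride and s_raid_stripe_width are
 * normally set by mke2fs -E stride=N,stripe_width=M, both in filesystem
 * blocks.  For example, a RAID-5 array with a 64 KiB chunk, three data
 * disks and 4 KiB blocks would use stride=16 and stripe_width=48, and the
 * allocator would then pick 48 here unless it exceeds blocks-per-group or
 * a stripe= mount option takes precedence.
 */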
2656ac27a0ecSDave Kleikamp 
2657a13fb1a4SEric Sandeen /*
2658a13fb1a4SEric Sandeen  * Check whether this filesystem can be mounted based on
2659a13fb1a4SEric Sandeen  * the features present and the RDONLY/RDWR mount requested.
2660a13fb1a4SEric Sandeen  * Returns 1 if this filesystem can be mounted as requested,
2661a13fb1a4SEric Sandeen  * 0 if it cannot be.
2662a13fb1a4SEric Sandeen  */
2663a13fb1a4SEric Sandeen static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2664a13fb1a4SEric Sandeen {
2665e2b911c5SDarrick J. Wong 	if (ext4_has_unknown_ext4_incompat_features(sb)) {
2666a13fb1a4SEric Sandeen 		ext4_msg(sb, KERN_ERR,
2667a13fb1a4SEric Sandeen 			"Couldn't mount because of "
2668a13fb1a4SEric Sandeen 			"unsupported optional features (%x)",
2669a13fb1a4SEric Sandeen 			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
2670a13fb1a4SEric Sandeen 			~EXT4_FEATURE_INCOMPAT_SUPP));
2671a13fb1a4SEric Sandeen 		return 0;
2672a13fb1a4SEric Sandeen 	}
2673a13fb1a4SEric Sandeen 
2674a13fb1a4SEric Sandeen 	if (readonly)
2675a13fb1a4SEric Sandeen 		return 1;
2676a13fb1a4SEric Sandeen 
2677e2b911c5SDarrick J. Wong 	if (ext4_has_feature_readonly(sb)) {
26782cb5cc8bSDarrick J. Wong 		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
26792cb5cc8bSDarrick J. Wong 		sb->s_flags |= MS_RDONLY;
26802cb5cc8bSDarrick J. Wong 		return 1;
26812cb5cc8bSDarrick J. Wong 	}
26822cb5cc8bSDarrick J. Wong 
2683a13fb1a4SEric Sandeen 	/* Check that feature set is OK for a read-write mount */
2684e2b911c5SDarrick J. Wong 	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
2685a13fb1a4SEric Sandeen 		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
2686a13fb1a4SEric Sandeen 			 "unsupported optional features (%x)",
2687a13fb1a4SEric Sandeen 			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
2688a13fb1a4SEric Sandeen 				~EXT4_FEATURE_RO_COMPAT_SUPP));
2689a13fb1a4SEric Sandeen 		return 0;
2690a13fb1a4SEric Sandeen 	}
2691a13fb1a4SEric Sandeen 	/*
2692a13fb1a4SEric Sandeen 	 * Large file size enabled file system can only be mounted
2693a13fb1a4SEric Sandeen 	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
2694a13fb1a4SEric Sandeen 	 */
2695e2b911c5SDarrick J. Wong 	if (ext4_has_feature_huge_file(sb)) {
2696a13fb1a4SEric Sandeen 		if (sizeof(blkcnt_t) < sizeof(u64)) {
2697a13fb1a4SEric Sandeen 			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
2698a13fb1a4SEric Sandeen 				 "cannot be mounted RDWR without "
2699a13fb1a4SEric Sandeen 				 "CONFIG_LBDAF");
2700a13fb1a4SEric Sandeen 			return 0;
2701a13fb1a4SEric Sandeen 		}
2702a13fb1a4SEric Sandeen 	}
2703e2b911c5SDarrick J. Wong 	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
2704bab08ab9STheodore Ts'o 		ext4_msg(sb, KERN_ERR,
2705bab08ab9STheodore Ts'o 			 "Can't support bigalloc feature without "
2706bab08ab9STheodore Ts'o 			 "extents feature\n");
2707bab08ab9STheodore Ts'o 		return 0;
2708bab08ab9STheodore Ts'o 	}
27097c319d32SAditya Kali 
27107c319d32SAditya Kali #ifndef CONFIG_QUOTA
2711e2b911c5SDarrick J. Wong 	if (ext4_has_feature_quota(sb) && !readonly) {
27127c319d32SAditya Kali 		ext4_msg(sb, KERN_ERR,
27137c319d32SAditya Kali 			 "Filesystem with quota feature cannot be mounted RDWR "
27147c319d32SAditya Kali 			 "without CONFIG_QUOTA");
27157c319d32SAditya Kali 		return 0;
27167c319d32SAditya Kali 	}
2717689c958cSLi Xi 	if (ext4_has_feature_project(sb) && !readonly) {
2718689c958cSLi Xi 		ext4_msg(sb, KERN_ERR,
2719689c958cSLi Xi 			 "Filesystem with project quota feature cannot be mounted RDWR "
2720689c958cSLi Xi 			 "without CONFIG_QUOTA");
2721689c958cSLi Xi 		return 0;
2722689c958cSLi Xi 	}
27237c319d32SAditya Kali #endif  /* CONFIG_QUOTA */
2724a13fb1a4SEric Sandeen 	return 1;
2725a13fb1a4SEric Sandeen }
2726a13fb1a4SEric Sandeen 
272766e61a9eSTheodore Ts'o /*
272866e61a9eSTheodore Ts'o  * This function is called once a day if we have errors logged
272966e61a9eSTheodore Ts'o  * on the file system
273066e61a9eSTheodore Ts'o  */
273166e61a9eSTheodore Ts'o static void print_daily_error_info(unsigned long arg)
273266e61a9eSTheodore Ts'o {
273366e61a9eSTheodore Ts'o 	struct super_block *sb = (struct super_block *) arg;
273466e61a9eSTheodore Ts'o 	struct ext4_sb_info *sbi;
273566e61a9eSTheodore Ts'o 	struct ext4_super_block *es;
273666e61a9eSTheodore Ts'o 
273766e61a9eSTheodore Ts'o 	sbi = EXT4_SB(sb);
273866e61a9eSTheodore Ts'o 	es = sbi->s_es;
273966e61a9eSTheodore Ts'o 
274066e61a9eSTheodore Ts'o 	if (es->s_error_count)
2741ae0f78deSTheodore Ts'o 		/* fsck newer than v1.41.13 is needed to clean this condition. */
2742ae0f78deSTheodore Ts'o 		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
274366e61a9eSTheodore Ts'o 			 le32_to_cpu(es->s_error_count));
274466e61a9eSTheodore Ts'o 	if (es->s_first_error_time) {
2745ae0f78deSTheodore Ts'o 		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
274666e61a9eSTheodore Ts'o 		       sb->s_id, le32_to_cpu(es->s_first_error_time),
274766e61a9eSTheodore Ts'o 		       (int) sizeof(es->s_first_error_func),
274866e61a9eSTheodore Ts'o 		       es->s_first_error_func,
274966e61a9eSTheodore Ts'o 		       le32_to_cpu(es->s_first_error_line));
275066e61a9eSTheodore Ts'o 		if (es->s_first_error_ino)
2751651e1c3bSJoe Perches 			printk(KERN_CONT ": inode %u",
275266e61a9eSTheodore Ts'o 			       le32_to_cpu(es->s_first_error_ino));
275366e61a9eSTheodore Ts'o 		if (es->s_first_error_block)
2754651e1c3bSJoe Perches 			printk(KERN_CONT ": block %llu", (unsigned long long)
275566e61a9eSTheodore Ts'o 			       le64_to_cpu(es->s_first_error_block));
2756651e1c3bSJoe Perches 		printk(KERN_CONT "\n");
275766e61a9eSTheodore Ts'o 	}
275866e61a9eSTheodore Ts'o 	if (es->s_last_error_time) {
2759ae0f78deSTheodore Ts'o 		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
276066e61a9eSTheodore Ts'o 		       sb->s_id, le32_to_cpu(es->s_last_error_time),
276166e61a9eSTheodore Ts'o 		       (int) sizeof(es->s_last_error_func),
276266e61a9eSTheodore Ts'o 		       es->s_last_error_func,
276366e61a9eSTheodore Ts'o 		       le32_to_cpu(es->s_last_error_line));
276466e61a9eSTheodore Ts'o 		if (es->s_last_error_ino)
2765651e1c3bSJoe Perches 			printk(KERN_CONT ": inode %u",
276666e61a9eSTheodore Ts'o 			       le32_to_cpu(es->s_last_error_ino));
276766e61a9eSTheodore Ts'o 		if (es->s_last_error_block)
2768651e1c3bSJoe Perches 			printk(KERN_CONT ": block %llu", (unsigned long long)
276966e61a9eSTheodore Ts'o 			       le64_to_cpu(es->s_last_error_block));
2770651e1c3bSJoe Perches 		printk(KERN_CONT "\n");
277166e61a9eSTheodore Ts'o 	}
277266e61a9eSTheodore Ts'o 	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
277366e61a9eSTheodore Ts'o }
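/*
 * Editor's note (usage sketch, assumed call site): the timer is armed from
 * ext4_fill_super() with the pre-timer_setup() API, roughly:
 *
 *	setup_timer(&sbi->s_err_report, print_daily_error_info,
 *		    (unsigned long)sb);
 *
 * and the mod_timer() call above re-arms it every 24 hours; the messages
 * are only printed while the superblock error records are set.
 */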
277466e61a9eSTheodore Ts'o 
2775bfff6873SLukas Czerner /* Find next suitable group and run ext4_init_inode_table */
2776bfff6873SLukas Czerner static int ext4_run_li_request(struct ext4_li_request *elr)
2777bfff6873SLukas Czerner {
2778bfff6873SLukas Czerner 	struct ext4_group_desc *gdp = NULL;
2779bfff6873SLukas Czerner 	ext4_group_t group, ngroups;
2780bfff6873SLukas Czerner 	struct super_block *sb;
2781bfff6873SLukas Czerner 	unsigned long timeout = 0;
2782bfff6873SLukas Czerner 	int ret = 0;
2783bfff6873SLukas Czerner 
2784bfff6873SLukas Czerner 	sb = elr->lr_super;
2785bfff6873SLukas Czerner 	ngroups = EXT4_SB(sb)->s_groups_count;
2786bfff6873SLukas Czerner 
2787bfff6873SLukas Czerner 	for (group = elr->lr_next_group; group < ngroups; group++) {
2788bfff6873SLukas Czerner 		gdp = ext4_get_group_desc(sb, group, NULL);
2789bfff6873SLukas Czerner 		if (!gdp) {
2790bfff6873SLukas Czerner 			ret = 1;
2791bfff6873SLukas Czerner 			break;
2792bfff6873SLukas Czerner 		}
2793bfff6873SLukas Czerner 
2794bfff6873SLukas Czerner 		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
2795bfff6873SLukas Czerner 			break;
2796bfff6873SLukas Czerner 	}
2797bfff6873SLukas Czerner 
27987f511862STheodore Ts'o 	if (group >= ngroups)
2799bfff6873SLukas Czerner 		ret = 1;
2800bfff6873SLukas Czerner 
2801bfff6873SLukas Czerner 	if (!ret) {
2802bfff6873SLukas Czerner 		timeout = jiffies;
2803bfff6873SLukas Czerner 		ret = ext4_init_inode_table(sb, group,
2804bfff6873SLukas Czerner 					    elr->lr_timeout ? 0 : 1);
2805bfff6873SLukas Czerner 		if (elr->lr_timeout == 0) {
280651ce6511SLukas Czerner 			timeout = (jiffies - timeout) *
280751ce6511SLukas Czerner 				  elr->lr_sbi->s_li_wait_mult;
2808bfff6873SLukas Czerner 			elr->lr_timeout = timeout;
2809bfff6873SLukas Czerner 		}
2810bfff6873SLukas Czerner 		elr->lr_next_sched = jiffies + elr->lr_timeout;
2811bfff6873SLukas Czerner 		elr->lr_next_group = group + 1;
2812bfff6873SLukas Czerner 	}
2813bfff6873SLukas Czerner 	return ret;
2814bfff6873SLukas Czerner }
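/*
 * Editor's note (illustrative): on the first pass through a request,
 * lr_timeout is derived from the measured time spent zeroing one group
 * multiplied by s_li_wait_mult (EXT4_DEF_LI_WAIT_MULT, 10 by default), and
 * that delay is reused between subsequent groups.  Assuming groups take
 * comparable time to zero, lazy init therefore keeps the disk busy for
 * roughly 1/(mult + 1) of the elapsed time.
 */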
2815bfff6873SLukas Czerner 
2816bfff6873SLukas Czerner /*
2817bfff6873SLukas Czerner  * Remove lr_request from the request list and free the
28184ed5c033SLukas Czerner  * request structure. Should be called with li_list_mtx held.
2819bfff6873SLukas Czerner  */
2820bfff6873SLukas Czerner static void ext4_remove_li_request(struct ext4_li_request *elr)
2821bfff6873SLukas Czerner {
2822bfff6873SLukas Czerner 	struct ext4_sb_info *sbi;
2823bfff6873SLukas Czerner 
2824bfff6873SLukas Czerner 	if (!elr)
2825bfff6873SLukas Czerner 		return;
2826bfff6873SLukas Czerner 
2827bfff6873SLukas Czerner 	sbi = elr->lr_sbi;
2828bfff6873SLukas Czerner 
2829bfff6873SLukas Czerner 	list_del(&elr->lr_request);
2830bfff6873SLukas Czerner 	sbi->s_li_request = NULL;
2831bfff6873SLukas Czerner 	kfree(elr);
2832bfff6873SLukas Czerner }
2833bfff6873SLukas Czerner 
2834bfff6873SLukas Czerner static void ext4_unregister_li_request(struct super_block *sb)
2835bfff6873SLukas Czerner {
28361bb933fbSLukas Czerner 	mutex_lock(&ext4_li_mtx);
28371bb933fbSLukas Czerner 	if (!ext4_li_info) {
28381bb933fbSLukas Czerner 		mutex_unlock(&ext4_li_mtx);
2839bfff6873SLukas Czerner 		return;
28401bb933fbSLukas Czerner 	}
2841bfff6873SLukas Czerner 
2842bfff6873SLukas Czerner 	mutex_lock(&ext4_li_info->li_list_mtx);
28431bb933fbSLukas Czerner 	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
2844bfff6873SLukas Czerner 	mutex_unlock(&ext4_li_info->li_list_mtx);
28451bb933fbSLukas Czerner 	mutex_unlock(&ext4_li_mtx);
2846bfff6873SLukas Czerner }
2847bfff6873SLukas Czerner 
28488f1f7453SEric Sandeen static struct task_struct *ext4_lazyinit_task;
28498f1f7453SEric Sandeen 
2850bfff6873SLukas Czerner /*
2851bfff6873SLukas Czerner  * This is the function where the ext4lazyinit thread lives. It walks
2852bfff6873SLukas Czerner  * through the request list searching for the next scheduled filesystem.
2853bfff6873SLukas Czerner  * When such an fs is found, it runs the lazy initialization request
2854bfff6873SLukas Czerner  * (ext4_run_li_request) and keeps track of the time spent doing so.
2855bfff6873SLukas Czerner  * Based on that time we compute the next schedule time of the request.
2856bfff6873SLukas Czerner  * When the walk through the list is complete, the thread computes the
2857bfff6873SLukas Czerner  * next wakeup time and puts itself to sleep.
2858bfff6873SLukas Czerner  */
2859bfff6873SLukas Czerner static int ext4_lazyinit_thread(void *arg)
2860bfff6873SLukas Czerner {
2861bfff6873SLukas Czerner 	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
2862bfff6873SLukas Czerner 	struct list_head *pos, *n;
2863bfff6873SLukas Czerner 	struct ext4_li_request *elr;
28644ed5c033SLukas Czerner 	unsigned long next_wakeup, cur;
2865bfff6873SLukas Czerner 
2866bfff6873SLukas Czerner 	BUG_ON(NULL == eli);
2867bfff6873SLukas Czerner 
2868bfff6873SLukas Czerner cont_thread:
2869bfff6873SLukas Czerner 	while (true) {
2870bfff6873SLukas Czerner 		next_wakeup = MAX_JIFFY_OFFSET;
2871bfff6873SLukas Czerner 
2872bfff6873SLukas Czerner 		mutex_lock(&eli->li_list_mtx);
2873bfff6873SLukas Czerner 		if (list_empty(&eli->li_request_list)) {
2874bfff6873SLukas Czerner 			mutex_unlock(&eli->li_list_mtx);
2875bfff6873SLukas Czerner 			goto exit_thread;
2876bfff6873SLukas Czerner 		}
2877bfff6873SLukas Czerner 		list_for_each_safe(pos, n, &eli->li_request_list) {
2878e22834f0SDmitry Monakhov 			int err = 0;
2879e22834f0SDmitry Monakhov 			int progress = 0;
2880bfff6873SLukas Czerner 			elr = list_entry(pos, struct ext4_li_request,
2881bfff6873SLukas Czerner 					 lr_request);
2882bfff6873SLukas Czerner 
2883e22834f0SDmitry Monakhov 			if (time_before(jiffies, elr->lr_next_sched)) {
2884e22834f0SDmitry Monakhov 				if (time_before(elr->lr_next_sched, next_wakeup))
2885e22834f0SDmitry Monakhov 					next_wakeup = elr->lr_next_sched;
2886e22834f0SDmitry Monakhov 				continue;
2887e22834f0SDmitry Monakhov 			}
2888e22834f0SDmitry Monakhov 			if (down_read_trylock(&elr->lr_super->s_umount)) {
2889e22834f0SDmitry Monakhov 				if (sb_start_write_trylock(elr->lr_super)) {
2890e22834f0SDmitry Monakhov 					progress = 1;
2891e22834f0SDmitry Monakhov 					/*
2892e22834f0SDmitry Monakhov 					 * We hold sb->s_umount, so sb cannot
2893e22834f0SDmitry Monakhov 					 * be removed from the list; it is
2894e22834f0SDmitry Monakhov 					 * now safe to drop li_list_mtx.
2895e22834f0SDmitry Monakhov 					 */
2896e22834f0SDmitry Monakhov 					mutex_unlock(&eli->li_list_mtx);
2897e22834f0SDmitry Monakhov 					err = ext4_run_li_request(elr);
2898e22834f0SDmitry Monakhov 					sb_end_write(elr->lr_super);
2899e22834f0SDmitry Monakhov 					mutex_lock(&eli->li_list_mtx);
2900e22834f0SDmitry Monakhov 					n = pos->next;
2901e22834f0SDmitry Monakhov 				}
2902e22834f0SDmitry Monakhov 				up_read((&elr->lr_super->s_umount));
2903e22834f0SDmitry Monakhov 			}
2904b2c78cd0STheodore Ts'o 			/* error, remove the lazy_init job */
2905e22834f0SDmitry Monakhov 			if (err) {
2906bfff6873SLukas Czerner 				ext4_remove_li_request(elr);
2907bfff6873SLukas Czerner 				continue;
2908bfff6873SLukas Czerner 			}
2909e22834f0SDmitry Monakhov 			if (!progress) {
2910e22834f0SDmitry Monakhov 				elr->lr_next_sched = jiffies +
2911e22834f0SDmitry Monakhov 					(prandom_u32()
2912e22834f0SDmitry Monakhov 					 % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
2913b2c78cd0STheodore Ts'o 			}
2914bfff6873SLukas Czerner 			if (time_before(elr->lr_next_sched, next_wakeup))
2915bfff6873SLukas Czerner 				next_wakeup = elr->lr_next_sched;
2916bfff6873SLukas Czerner 		}
2917bfff6873SLukas Czerner 		mutex_unlock(&eli->li_list_mtx);
2918bfff6873SLukas Czerner 
2919a0acae0eSTejun Heo 		try_to_freeze();
2920bfff6873SLukas Czerner 
29214ed5c033SLukas Czerner 		cur = jiffies;
29224ed5c033SLukas Czerner 		if ((time_after_eq(cur, next_wakeup)) ||
2923f4245bd4SLukas Czerner 		    (MAX_JIFFY_OFFSET == next_wakeup)) {
2924bfff6873SLukas Czerner 			cond_resched();
2925bfff6873SLukas Czerner 			continue;
2926bfff6873SLukas Czerner 		}
2927bfff6873SLukas Czerner 
29284ed5c033SLukas Czerner 		schedule_timeout_interruptible(next_wakeup - cur);
29294ed5c033SLukas Czerner 
29308f1f7453SEric Sandeen 		if (kthread_should_stop()) {
29318f1f7453SEric Sandeen 			ext4_clear_request_list();
29328f1f7453SEric Sandeen 			goto exit_thread;
29338f1f7453SEric Sandeen 		}
2934bfff6873SLukas Czerner 	}
2935bfff6873SLukas Czerner 
2936bfff6873SLukas Czerner exit_thread:
2937bfff6873SLukas Czerner 	/*
2938bfff6873SLukas Czerner 	 * It looks like the request list is empty, but we need
2939bfff6873SLukas Czerner 	 * to check it under the li_list_mtx lock, to prevent any
2940bfff6873SLukas Czerner 	 * additions into it, and of course we should lock ext4_li_mtx
2941bfff6873SLukas Czerner 	 * to atomically free the list and ext4_li_info, because at
2942bfff6873SLukas Czerner 	 * this point another ext4 filesystem could be registering
2943bfff6873SLukas Czerner 	 * this point another ext4 filesystem could be registering a
2944bfff6873SLukas Czerner 	 * new one.
2945bfff6873SLukas Czerner 	mutex_lock(&ext4_li_mtx);
2946bfff6873SLukas Czerner 	mutex_lock(&eli->li_list_mtx);
2947bfff6873SLukas Czerner 	if (!list_empty(&eli->li_request_list)) {
2948bfff6873SLukas Czerner 		mutex_unlock(&eli->li_list_mtx);
2949bfff6873SLukas Czerner 		mutex_unlock(&ext4_li_mtx);
2950bfff6873SLukas Czerner 		goto cont_thread;
2951bfff6873SLukas Czerner 	}
2952bfff6873SLukas Czerner 	mutex_unlock(&eli->li_list_mtx);
2953bfff6873SLukas Czerner 	kfree(ext4_li_info);
2954bfff6873SLukas Czerner 	ext4_li_info = NULL;
2955bfff6873SLukas Czerner 	mutex_unlock(&ext4_li_mtx);
2956bfff6873SLukas Czerner 
2957bfff6873SLukas Czerner 	return 0;
2958bfff6873SLukas Czerner }
2959bfff6873SLukas Czerner 
2960bfff6873SLukas Czerner static void ext4_clear_request_list(void)
2961bfff6873SLukas Czerner {
2962bfff6873SLukas Czerner 	struct list_head *pos, *n;
2963bfff6873SLukas Czerner 	struct ext4_li_request *elr;
2964bfff6873SLukas Czerner 
2965bfff6873SLukas Czerner 	mutex_lock(&ext4_li_info->li_list_mtx);
2966bfff6873SLukas Czerner 	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
2967bfff6873SLukas Czerner 		elr = list_entry(pos, struct ext4_li_request,
2968bfff6873SLukas Czerner 				 lr_request);
2969bfff6873SLukas Czerner 		ext4_remove_li_request(elr);
2970bfff6873SLukas Czerner 	}
2971bfff6873SLukas Czerner 	mutex_unlock(&ext4_li_info->li_list_mtx);
2972bfff6873SLukas Czerner }
2973bfff6873SLukas Czerner 
2974bfff6873SLukas Czerner static int ext4_run_lazyinit_thread(void)
2975bfff6873SLukas Czerner {
29768f1f7453SEric Sandeen 	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
29778f1f7453SEric Sandeen 					 ext4_li_info, "ext4lazyinit");
29788f1f7453SEric Sandeen 	if (IS_ERR(ext4_lazyinit_task)) {
29798f1f7453SEric Sandeen 		int err = PTR_ERR(ext4_lazyinit_task);
2980bfff6873SLukas Czerner 		ext4_clear_request_list();
2981bfff6873SLukas Czerner 		kfree(ext4_li_info);
2982bfff6873SLukas Czerner 		ext4_li_info = NULL;
298392b97816STheodore Ts'o 		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
2984bfff6873SLukas Czerner 				 "initialization thread\n",
2985bfff6873SLukas Czerner 				 err);
2986bfff6873SLukas Czerner 		return err;
2987bfff6873SLukas Czerner 	}
2988bfff6873SLukas Czerner 	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
2989bfff6873SLukas Czerner 	return 0;
2990bfff6873SLukas Czerner }
2991bfff6873SLukas Czerner 
2992bfff6873SLukas Czerner /*
2993bfff6873SLukas Czerner  * Check whether it makes sense to run the itable init thread or not.
2994bfff6873SLukas Czerner  * If there is at least one uninitialized inode table, return the
2995bfff6873SLukas Czerner  * corresponding group number; otherwise the loop goes through all
2996bfff6873SLukas Czerner  * groups and returns the total number of groups.
2997bfff6873SLukas Czerner  */
2998bfff6873SLukas Czerner static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
2999bfff6873SLukas Czerner {
3000bfff6873SLukas Czerner 	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
3001bfff6873SLukas Czerner 	struct ext4_group_desc *gdp = NULL;
3002bfff6873SLukas Czerner 
3003bfff6873SLukas Czerner 	for (group = 0; group < ngroups; group++) {
3004bfff6873SLukas Czerner 		gdp = ext4_get_group_desc(sb, group, NULL);
3005bfff6873SLukas Czerner 		if (!gdp)
3006bfff6873SLukas Czerner 			continue;
3007bfff6873SLukas Czerner 
3008bfff6873SLukas Czerner 		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
3009bfff6873SLukas Czerner 			break;
3010bfff6873SLukas Czerner 	}
3011bfff6873SLukas Czerner 
3012bfff6873SLukas Czerner 	return group;
3013bfff6873SLukas Czerner }
3014bfff6873SLukas Czerner 
3015bfff6873SLukas Czerner static int ext4_li_info_new(void)
3016bfff6873SLukas Czerner {
3017bfff6873SLukas Czerner 	struct ext4_lazy_init *eli = NULL;
3018bfff6873SLukas Czerner 
3019bfff6873SLukas Czerner 	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
3020bfff6873SLukas Czerner 	if (!eli)
3021bfff6873SLukas Czerner 		return -ENOMEM;
3022bfff6873SLukas Czerner 
3023bfff6873SLukas Czerner 	INIT_LIST_HEAD(&eli->li_request_list);
3024bfff6873SLukas Czerner 	mutex_init(&eli->li_list_mtx);
3025bfff6873SLukas Czerner 
3026bfff6873SLukas Czerner 	eli->li_state |= EXT4_LAZYINIT_QUIT;
3027bfff6873SLukas Czerner 
3028bfff6873SLukas Czerner 	ext4_li_info = eli;
3029bfff6873SLukas Czerner 
3030bfff6873SLukas Czerner 	return 0;
3031bfff6873SLukas Czerner }
3032bfff6873SLukas Czerner 
3033bfff6873SLukas Czerner static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
3034bfff6873SLukas Czerner 					    ext4_group_t start)
3035bfff6873SLukas Czerner {
3036bfff6873SLukas Czerner 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3037bfff6873SLukas Czerner 	struct ext4_li_request *elr;
3038bfff6873SLukas Czerner 
3039bfff6873SLukas Czerner 	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
3040bfff6873SLukas Czerner 	if (!elr)
3041bfff6873SLukas Czerner 		return NULL;
3042bfff6873SLukas Czerner 
3043bfff6873SLukas Czerner 	elr->lr_super = sb;
3044bfff6873SLukas Czerner 	elr->lr_sbi = sbi;
3045bfff6873SLukas Czerner 	elr->lr_next_group = start;
3046bfff6873SLukas Czerner 
3047bfff6873SLukas Czerner 	/*
3048bfff6873SLukas Czerner 	 * Randomize first schedule time of the request to
3049bfff6873SLukas Czerner 	 * spread the inode table initialization requests
3050bfff6873SLukas Czerner 	 * better.
3051bfff6873SLukas Czerner 	 */
3052dd1f723bSTheodore Ts'o 	elr->lr_next_sched = jiffies + (prandom_u32() %
3053dd1f723bSTheodore Ts'o 				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
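	/*
	 * lr_next_sched is in jiffies; the added random delay is bounded
	 * by EXT4_DEF_LI_MAX_START_DELAY seconds (see ext4.h for the
	 * actual value).
	 */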
3054bfff6873SLukas Czerner 	return elr;
3055bfff6873SLukas Czerner }
3056bfff6873SLukas Czerner 
30577f511862STheodore Ts'o int ext4_register_li_request(struct super_block *sb,
3058bfff6873SLukas Czerner 			     ext4_group_t first_not_zeroed)
3059bfff6873SLukas Czerner {
3060bfff6873SLukas Czerner 	struct ext4_sb_info *sbi = EXT4_SB(sb);
30617f511862STheodore Ts'o 	struct ext4_li_request *elr = NULL;
3062bfff6873SLukas Czerner 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
30636c5a6cb9SAndrew Morton 	int ret = 0;
3064bfff6873SLukas Czerner 
30657f511862STheodore Ts'o 	mutex_lock(&ext4_li_mtx);
306651ce6511SLukas Czerner 	if (sbi->s_li_request != NULL) {
306751ce6511SLukas Czerner 		/*
306851ce6511SLukas Czerner 		 * Reset timeout so it can be computed again, because
306951ce6511SLukas Czerner 		 * s_li_wait_mult might have changed.
307051ce6511SLukas Czerner 		 */
307151ce6511SLukas Czerner 		sbi->s_li_request->lr_timeout = 0;
30727f511862STheodore Ts'o 		goto out;
307351ce6511SLukas Czerner 	}
3074bfff6873SLukas Czerner 
3075bfff6873SLukas Czerner 	if (first_not_zeroed == ngroups ||
3076bfff6873SLukas Czerner 	    (sb->s_flags & MS_RDONLY) ||
307755ff3840STao Ma 	    !test_opt(sb, INIT_INODE_TABLE))
30787f511862STheodore Ts'o 		goto out;
3079bfff6873SLukas Czerner 
3080bfff6873SLukas Czerner 	elr = ext4_li_request_new(sb, first_not_zeroed);
30817f511862STheodore Ts'o 	if (!elr) {
30827f511862STheodore Ts'o 		ret = -ENOMEM;
30837f511862STheodore Ts'o 		goto out;
30847f511862STheodore Ts'o 	}
3085bfff6873SLukas Czerner 
3086bfff6873SLukas Czerner 	if (NULL == ext4_li_info) {
3087bfff6873SLukas Czerner 		ret = ext4_li_info_new();
3088bfff6873SLukas Czerner 		if (ret)
3089bfff6873SLukas Czerner 			goto out;
3090bfff6873SLukas Czerner 	}
3091bfff6873SLukas Czerner 
3092bfff6873SLukas Czerner 	mutex_lock(&ext4_li_info->li_list_mtx);
3093bfff6873SLukas Czerner 	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
3094bfff6873SLukas Czerner 	mutex_unlock(&ext4_li_info->li_list_mtx);
3095bfff6873SLukas Czerner 
3096bfff6873SLukas Czerner 	sbi->s_li_request = elr;
309746e4690bSTao Ma 	/*
309846e4690bSTao Ma 	 * Set elr to NULL here since it has been inserted into
309946e4690bSTao Ma 	 * the request_list; its removal and freeing are
310046e4690bSTao Ma 	 * handled by ext4_clear_request_list from now on.
310146e4690bSTao Ma 	 */
310246e4690bSTao Ma 	elr = NULL;
3103bfff6873SLukas Czerner 
3104bfff6873SLukas Czerner 	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
3105bfff6873SLukas Czerner 		ret = ext4_run_lazyinit_thread();
3106bfff6873SLukas Czerner 		if (ret)
3107bfff6873SLukas Czerner 			goto out;
3108bfff6873SLukas Czerner 	}
3109bfff6873SLukas Czerner out:
3110bfff6873SLukas Czerner 	mutex_unlock(&ext4_li_mtx);
3111beed5ecbSNicolas Kaiser 	if (ret)
3112bfff6873SLukas Czerner 		kfree(elr);
3113bfff6873SLukas Czerner 	return ret;
3114bfff6873SLukas Czerner }
3115bfff6873SLukas Czerner 
3116bfff6873SLukas Czerner /*
3117bfff6873SLukas Czerner  * We do not need to lock anything since this is called on
3118bfff6873SLukas Czerner  * module unload.
3119bfff6873SLukas Czerner  */
3120bfff6873SLukas Czerner static void ext4_destroy_lazyinit_thread(void)
3121bfff6873SLukas Czerner {
3122bfff6873SLukas Czerner 	/*
3123bfff6873SLukas Czerner 	 * If the thread exited earlier,
3124bfff6873SLukas Czerner 	 * there's nothing to be done.
3125bfff6873SLukas Czerner 	 */
31268f1f7453SEric Sandeen 	if (!ext4_li_info || !ext4_lazyinit_task)
3127bfff6873SLukas Czerner 		return;
3128bfff6873SLukas Czerner 
31298f1f7453SEric Sandeen 	kthread_stop(ext4_lazyinit_task);
3130bfff6873SLukas Czerner }
3131bfff6873SLukas Czerner 
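/*
 * Pick the journal checksum format to match the filesystem: v3 journal
 * checksums when the filesystem itself uses metadata checksums, the
 * original v1 compat checksum otherwise.  The chosen feature is only
 * enabled when journal_checksum (or journal_async_commit) was requested,
 * and the async-commit incompat bit is set only for journal_async_commit.
 */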
313225ed6e8aSDarrick J. Wong static int set_journal_csum_feature_set(struct super_block *sb)
313325ed6e8aSDarrick J. Wong {
313425ed6e8aSDarrick J. Wong 	int ret = 1;
313525ed6e8aSDarrick J. Wong 	int compat, incompat;
313625ed6e8aSDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(sb);
313725ed6e8aSDarrick J. Wong 
31389aa5d32bSDmitry Monakhov 	if (ext4_has_metadata_csum(sb)) {
3139db9ee220SDarrick J. Wong 		/* journal checksum v3 */
314025ed6e8aSDarrick J. Wong 		compat = 0;
3141db9ee220SDarrick J. Wong 		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
314225ed6e8aSDarrick J. Wong 	} else {
314325ed6e8aSDarrick J. Wong 		/* journal checksum v1 */
314425ed6e8aSDarrick J. Wong 		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
314525ed6e8aSDarrick J. Wong 		incompat = 0;
314625ed6e8aSDarrick J. Wong 	}
314725ed6e8aSDarrick J. Wong 
3148feb8c6d3SDarrick J. Wong 	jbd2_journal_clear_features(sbi->s_journal,
3149feb8c6d3SDarrick J. Wong 			JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3150feb8c6d3SDarrick J. Wong 			JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3151feb8c6d3SDarrick J. Wong 			JBD2_FEATURE_INCOMPAT_CSUM_V2);
315225ed6e8aSDarrick J. Wong 	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
315325ed6e8aSDarrick J. Wong 		ret = jbd2_journal_set_features(sbi->s_journal,
315425ed6e8aSDarrick J. Wong 				compat, 0,
315525ed6e8aSDarrick J. Wong 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
315625ed6e8aSDarrick J. Wong 				incompat);
315725ed6e8aSDarrick J. Wong 	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
315825ed6e8aSDarrick J. Wong 		ret = jbd2_journal_set_features(sbi->s_journal,
315925ed6e8aSDarrick J. Wong 				compat, 0,
316025ed6e8aSDarrick J. Wong 				incompat);
316125ed6e8aSDarrick J. Wong 		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
316225ed6e8aSDarrick J. Wong 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
316325ed6e8aSDarrick J. Wong 	} else {
3164feb8c6d3SDarrick J. Wong 		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3165feb8c6d3SDarrick J. Wong 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
316625ed6e8aSDarrick J. Wong 	}
316725ed6e8aSDarrick J. Wong 
316825ed6e8aSDarrick J. Wong 	return ret;
316925ed6e8aSDarrick J. Wong }
317025ed6e8aSDarrick J. Wong 
3171952fc18eSTheodore Ts'o /*
3172952fc18eSTheodore Ts'o  * Note: calculating the overhead so we can be compatible with
3173952fc18eSTheodore Ts'o  * historical BSD practice is quite difficult in the face of
3174952fc18eSTheodore Ts'o  * clusters/bigalloc.  This is because multiple metadata blocks from
3175952fc18eSTheodore Ts'o  * different block groups can end up in the same allocation cluster.
3176952fc18eSTheodore Ts'o  * Calculating the exact overhead in the face of clustered allocation
3177952fc18eSTheodore Ts'o  * requires either O(all block bitmaps) in memory or O(number of block
3178952fc18eSTheodore Ts'o  * groups**2) in time.  We will still calculate the overhead for
3179952fc18eSTheodore Ts'o  * older file systems --- and if we come across a bigalloc file
3180952fc18eSTheodore Ts'o  * system with zero in s_overhead_clusters, the estimate will be close
3181952fc18eSTheodore Ts'o  * to correct, especially for very large cluster sizes --- but for newer
3182952fc18eSTheodore Ts'o  * file systems, it's better to calculate this figure once at mkfs
3183952fc18eSTheodore Ts'o  * time, and store it in the superblock.  If the superblock value is
3184952fc18eSTheodore Ts'o  * present (even for non-bigalloc file systems), we will use it.
3185952fc18eSTheodore Ts'o  */
3186952fc18eSTheodore Ts'o static int count_overhead(struct super_block *sb, ext4_group_t grp,
3187952fc18eSTheodore Ts'o 			  char *buf)
3188952fc18eSTheodore Ts'o {
3189952fc18eSTheodore Ts'o 	struct ext4_sb_info	*sbi = EXT4_SB(sb);
3190952fc18eSTheodore Ts'o 	struct ext4_group_desc	*gdp;
3191952fc18eSTheodore Ts'o 	ext4_fsblk_t		first_block, last_block, b;
3192952fc18eSTheodore Ts'o 	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
3193952fc18eSTheodore Ts'o 	int			s, j, count = 0;
3194952fc18eSTheodore Ts'o 
3195e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_bigalloc(sb))
31960548bbb8STheodore Ts'o 		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
31970548bbb8STheodore Ts'o 			sbi->s_itb_per_group + 2);
31980548bbb8STheodore Ts'o 
3199952fc18eSTheodore Ts'o 	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
3200952fc18eSTheodore Ts'o 		(grp * EXT4_BLOCKS_PER_GROUP(sb));
3201952fc18eSTheodore Ts'o 	last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
3202952fc18eSTheodore Ts'o 	for (i = 0; i < ngroups; i++) {
3203952fc18eSTheodore Ts'o 		gdp = ext4_get_group_desc(sb, i, NULL);
3204952fc18eSTheodore Ts'o 		b = ext4_block_bitmap(sb, gdp);
3205952fc18eSTheodore Ts'o 		if (b >= first_block && b <= last_block) {
3206952fc18eSTheodore Ts'o 			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3207952fc18eSTheodore Ts'o 			count++;
3208952fc18eSTheodore Ts'o 		}
3209952fc18eSTheodore Ts'o 		b = ext4_inode_bitmap(sb, gdp);
3210952fc18eSTheodore Ts'o 		if (b >= first_block && b <= last_block) {
3211952fc18eSTheodore Ts'o 			ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
3212952fc18eSTheodore Ts'o 			count++;
3213952fc18eSTheodore Ts'o 		}
3214952fc18eSTheodore Ts'o 		b = ext4_inode_table(sb, gdp);
3215952fc18eSTheodore Ts'o 		if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
3216952fc18eSTheodore Ts'o 			for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
3217952fc18eSTheodore Ts'o 				int c = EXT4_B2C(sbi, b - first_block);
3218952fc18eSTheodore Ts'o 				ext4_set_bit(c, buf);
3219952fc18eSTheodore Ts'o 				count++;
3220952fc18eSTheodore Ts'o 			}
3221952fc18eSTheodore Ts'o 		if (i != grp)
3222952fc18eSTheodore Ts'o 			continue;
3223952fc18eSTheodore Ts'o 		s = 0;
3224952fc18eSTheodore Ts'o 		if (ext4_bg_has_super(sb, grp)) {
3225952fc18eSTheodore Ts'o 			ext4_set_bit(s++, buf);
3226952fc18eSTheodore Ts'o 			count++;
3227952fc18eSTheodore Ts'o 		}
3228c48ae41bSTheodore Ts'o 		j = ext4_bg_num_gdb(sb, grp);
3229c48ae41bSTheodore Ts'o 		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
3230c48ae41bSTheodore Ts'o 			ext4_error(sb, "Invalid number of block group "
3231c48ae41bSTheodore Ts'o 				   "descriptor blocks: %d", j);
3232c48ae41bSTheodore Ts'o 			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
3233952fc18eSTheodore Ts'o 		}
3234c48ae41bSTheodore Ts'o 		count += j;
3235c48ae41bSTheodore Ts'o 		for (; j > 0; j--)
3236c48ae41bSTheodore Ts'o 			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
3237952fc18eSTheodore Ts'o 	}
3238952fc18eSTheodore Ts'o 	if (!count)
3239952fc18eSTheodore Ts'o 		return 0;
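	/*
	 * buf has one bit per cluster in this group; counting set bits
	 * instead of summing per-block hits ensures that metadata blocks
	 * sharing a cluster are only charged once.
	 */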
3240952fc18eSTheodore Ts'o 	return EXT4_CLUSTERS_PER_GROUP(sb) -
3241952fc18eSTheodore Ts'o 		ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
3242952fc18eSTheodore Ts'o }
3243952fc18eSTheodore Ts'o 
3244952fc18eSTheodore Ts'o /*
3245952fc18eSTheodore Ts'o  * Compute the overhead and stash it in sbi->s_overhead
3246952fc18eSTheodore Ts'o  */
3247952fc18eSTheodore Ts'o int ext4_calculate_overhead(struct super_block *sb)
3248952fc18eSTheodore Ts'o {
3249952fc18eSTheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3250952fc18eSTheodore Ts'o 	struct ext4_super_block *es = sbi->s_es;
32513c816dedSEric Whitney 	struct inode *j_inode;
32523c816dedSEric Whitney 	unsigned int j_blocks, j_inum = le32_to_cpu(es->s_journal_inum);
3253952fc18eSTheodore Ts'o 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3254952fc18eSTheodore Ts'o 	ext4_fsblk_t overhead = 0;
32554fdb5543SDmitry Monakhov 	char *buf = (char *) get_zeroed_page(GFP_NOFS);
3256952fc18eSTheodore Ts'o 
3257952fc18eSTheodore Ts'o 	if (!buf)
3258952fc18eSTheodore Ts'o 		return -ENOMEM;
3259952fc18eSTheodore Ts'o 
3260952fc18eSTheodore Ts'o 	/*
3261952fc18eSTheodore Ts'o 	 * Compute the overhead (FS structures).  This is constant
3262952fc18eSTheodore Ts'o 	 * for a given filesystem unless the number of block groups
3263952fc18eSTheodore Ts'o 	 * changes so we cache the previous value until it does.
3264952fc18eSTheodore Ts'o 	 */
3265952fc18eSTheodore Ts'o 
3266952fc18eSTheodore Ts'o 	/*
3267952fc18eSTheodore Ts'o 	 * All of the blocks before first_data_block are overhead
3268952fc18eSTheodore Ts'o 	 */
3269952fc18eSTheodore Ts'o 	overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
3270952fc18eSTheodore Ts'o 
3271952fc18eSTheodore Ts'o 	/*
3272952fc18eSTheodore Ts'o 	 * Add the overhead found in each block group
3273952fc18eSTheodore Ts'o 	 */
3274952fc18eSTheodore Ts'o 	for (i = 0; i < ngroups; i++) {
3275952fc18eSTheodore Ts'o 		int blks;
3276952fc18eSTheodore Ts'o 
3277952fc18eSTheodore Ts'o 		blks = count_overhead(sb, i, buf);
3278952fc18eSTheodore Ts'o 		overhead += blks;
3279952fc18eSTheodore Ts'o 		if (blks)
3280952fc18eSTheodore Ts'o 			memset(buf, 0, PAGE_SIZE);
3281952fc18eSTheodore Ts'o 		cond_resched();
3282952fc18eSTheodore Ts'o 	}
32833c816dedSEric Whitney 
32843c816dedSEric Whitney 	/*
32853c816dedSEric Whitney 	 * Add the internal journal blocks whether the journal has been
32863c816dedSEric Whitney 	 * loaded or not
32873c816dedSEric Whitney 	 */
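	/*
	 * An external journal (sbi->journal_bdev set) lives on a separate
	 * device and therefore does not contribute to this filesystem's
	 * overhead.
	 */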
3288b003b524SEric Sandeen 	if (sbi->s_journal && !sbi->journal_bdev)
3289810da240SLukas Czerner 		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
32903c816dedSEric Whitney 	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
32913c816dedSEric Whitney 		j_inode = ext4_get_journal_inode(sb, j_inum);
32923c816dedSEric Whitney 		if (j_inode) {
32933c816dedSEric Whitney 			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
32943c816dedSEric Whitney 			overhead += EXT4_NUM_B2C(sbi, j_blocks);
32953c816dedSEric Whitney 			iput(j_inode);
32963c816dedSEric Whitney 		} else {
32973c816dedSEric Whitney 			ext4_msg(sb, KERN_ERR, "can't get journal size");
32983c816dedSEric Whitney 		}
32993c816dedSEric Whitney 	}
3300952fc18eSTheodore Ts'o 	sbi->s_overhead = overhead;
3301952fc18eSTheodore Ts'o 	smp_wmb();
3302952fc18eSTheodore Ts'o 	free_page((unsigned long) buf);
3303952fc18eSTheodore Ts'o 	return 0;
3304952fc18eSTheodore Ts'o }
3305952fc18eSTheodore Ts'o 
3306b5799018STheodore Ts'o static void ext4_set_resv_clusters(struct super_block *sb)
330727dd4385SLukas Czerner {
330827dd4385SLukas Czerner 	ext4_fsblk_t resv_clusters;
3309b5799018STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(sb);
331027dd4385SLukas Czerner 
331127dd4385SLukas Czerner 	/*
331230fac0f7SJan Kara 	 * There's no need to reserve anything when we aren't using extents.
331330fac0f7SJan Kara 	 * The space estimates are exact, there are no unwritten extents,
331430fac0f7SJan Kara 	 * hole punching doesn't need new metadata... This is needed especially
331530fac0f7SJan Kara 	 * to keep ext2/3 backward compatibility.
331630fac0f7SJan Kara 	 */
3317e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_extents(sb))
3318b5799018STheodore Ts'o 		return;
331930fac0f7SJan Kara 	/*
332027dd4385SLukas Czerner 	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
332127dd4385SLukas Czerner 	 * This should cover the situations where we cannot afford to run
332227dd4385SLukas Czerner 	 * out of space, such as punching a hole or converting
3323556615dcSLukas Czerner 	 * unwritten extents in the delalloc path. In most cases such an
332427dd4385SLukas Czerner 	 * allocation would require only 1 or 2 blocks; higher numbers are
332527dd4385SLukas Czerner 	 * very rare.
332627dd4385SLukas Czerner 	 */
3327b5799018STheodore Ts'o 	resv_clusters = (ext4_blocks_count(sbi->s_es) >>
3328b5799018STheodore Ts'o 			 sbi->s_cluster_bits);
332927dd4385SLukas Czerner 
333027dd4385SLukas Czerner 	do_div(resv_clusters, 50);
333127dd4385SLukas Czerner 	resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
333227dd4385SLukas Czerner 
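	/*
	 * E.g. with 4 KiB clusters the 4096-cluster cap amounts to 16 MiB,
	 * and the 2% figure only wins on filesystems smaller than roughly
	 * 800 MiB (4096 * 50 clusters).
	 */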
3333b5799018STheodore Ts'o 	atomic64_set(&sbi->s_resv_clusters, resv_clusters);
333427dd4385SLukas Czerner }
333527dd4385SLukas Czerner 
3336617ba13bSMingming Cao static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3337ac27a0ecSDave Kleikamp {
3338d4c402d9SCurt Wohlgemuth 	char *orig_data = kstrdup(data, GFP_KERNEL);
3339ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
3340617ba13bSMingming Cao 	struct ext4_super_block *es = NULL;
33415aee0f8aSTheodore Ts'o 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
3342617ba13bSMingming Cao 	ext4_fsblk_t block;
3343617ba13bSMingming Cao 	ext4_fsblk_t sb_block = get_sb_block(&data);
334470bbb3e0SAndrew Morton 	ext4_fsblk_t logical_sb_block;
3345ac27a0ecSDave Kleikamp 	unsigned long offset = 0;
3346ac27a0ecSDave Kleikamp 	unsigned long journal_devnum = 0;
3347ac27a0ecSDave Kleikamp 	unsigned long def_mount_opts;
3348ac27a0ecSDave Kleikamp 	struct inode *root;
33490390131bSFrank Mayhar 	const char *descr;
3350dcc7dae3SCyrill Gorcunov 	int ret = -ENOMEM;
3351281b5995STheodore Ts'o 	int blocksize, clustersize;
33524ec11028STheodore Ts'o 	unsigned int db_count;
33534ec11028STheodore Ts'o 	unsigned int i;
3354281b5995STheodore Ts'o 	int needs_recovery, has_huge_files, has_bigalloc;
3355bd81d8eeSLaurent Vivier 	__u64 blocks_count;
335607aa2ea1SLukas Czerner 	int err = 0;
3357b3881f74STheodore Ts'o 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3358bfff6873SLukas Czerner 	ext4_group_t first_not_zeroed;
3359ac27a0ecSDave Kleikamp 
33605aee0f8aSTheodore Ts'o 	if ((data && !orig_data) || !sbi)
33615aee0f8aSTheodore Ts'o 		goto out_free_base;
3362705895b6SPekka Enberg 
3363705895b6SPekka Enberg 	sbi->s_blockgroup_lock =
3364705895b6SPekka Enberg 		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
33655aee0f8aSTheodore Ts'o 	if (!sbi->s_blockgroup_lock)
33665aee0f8aSTheodore Ts'o 		goto out_free_base;
33675aee0f8aSTheodore Ts'o 
3368ac27a0ecSDave Kleikamp 	sb->s_fs_info = sbi;
33692c0544b2STheodore Ts'o 	sbi->s_sb = sb;
3370240799cdSTheodore Ts'o 	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
3371d9c9bef1SMiklos Szeredi 	sbi->s_sb_block = sb_block;
3372f613dfcbSTheodore Ts'o 	if (sb->s_bdev->bd_part)
3373f613dfcbSTheodore Ts'o 		sbi->s_sectors_written_start =
3374f613dfcbSTheodore Ts'o 			part_stat_read(sb->s_bdev->bd_part, sectors[1]);
3375ac27a0ecSDave Kleikamp 
33769f6200bbSTheodore Ts'o 	/* Cleanup superblock name */
3377ec3904dcSRasmus Villemoes 	strreplace(sb->s_id, '/', '!');
33789f6200bbSTheodore Ts'o 
337907aa2ea1SLukas Czerner 	/* -EINVAL is default */
3380dcc7dae3SCyrill Gorcunov 	ret = -EINVAL;
3381617ba13bSMingming Cao 	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
3382ac27a0ecSDave Kleikamp 	if (!blocksize) {
3383b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
3384ac27a0ecSDave Kleikamp 		goto out_fail;
3385ac27a0ecSDave Kleikamp 	}
3386ac27a0ecSDave Kleikamp 
3387ac27a0ecSDave Kleikamp 	/*
3388617ba13bSMingming Cao 	 * The ext4 superblock will not be buffer aligned for other than 1kB
3389ac27a0ecSDave Kleikamp 	 * block sizes.  We need to calculate the offset from buffer start.
3390ac27a0ecSDave Kleikamp 	 */
3391617ba13bSMingming Cao 	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
339270bbb3e0SAndrew Morton 		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
339370bbb3e0SAndrew Morton 		offset = do_div(logical_sb_block, blocksize);
3394ac27a0ecSDave Kleikamp 	} else {
339570bbb3e0SAndrew Morton 		logical_sb_block = sb_block;
3396ac27a0ecSDave Kleikamp 	}
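	/*
	 * E.g. with the default sb_block of 1 and a 4 KiB block size,
	 * logical_sb_block ends up as block 0 with offset 1024: the primary
	 * superblock always sits 1 KiB into the device regardless of the
	 * block size in use.
	 */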
3397ac27a0ecSDave Kleikamp 
3398a8ac900bSGioh Kim 	if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
3399b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "unable to read superblock");
3400ac27a0ecSDave Kleikamp 		goto out_fail;
3401ac27a0ecSDave Kleikamp 	}
3402ac27a0ecSDave Kleikamp 	/*
3403ac27a0ecSDave Kleikamp 	 * Note: s_es must be initialized as soon as possible because
3404617ba13bSMingming Cao 	 *       some ext4 macros depend on its value
3405ac27a0ecSDave Kleikamp 	 */
34062716b802STheodore Ts'o 	es = (struct ext4_super_block *) (bh->b_data + offset);
3407ac27a0ecSDave Kleikamp 	sbi->s_es = es;
3408ac27a0ecSDave Kleikamp 	sb->s_magic = le16_to_cpu(es->s_magic);
3409617ba13bSMingming Cao 	if (sb->s_magic != EXT4_SUPER_MAGIC)
3410617ba13bSMingming Cao 		goto cantfind_ext4;
3411afc32f7eSTheodore Ts'o 	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
3412ac27a0ecSDave Kleikamp 
3413feb0ab32SDarrick J. Wong 	/* Warn if metadata_csum and gdt_csum are both set. */
3414e2b911c5SDarrick J. Wong 	if (ext4_has_feature_metadata_csum(sb) &&
3415e2b911c5SDarrick J. Wong 	    ext4_has_feature_gdt_csum(sb))
3416363307e6SJakub Wilk 		ext4_warning(sb, "metadata_csum and uninit_bg are "
3417feb0ab32SDarrick J. Wong 			     "redundant flags; please run fsck.");
3418feb0ab32SDarrick J. Wong 
3419d25425f8SDarrick J. Wong 	/* Check for a known checksum algorithm */
3420d25425f8SDarrick J. Wong 	if (!ext4_verify_csum_type(sb, es)) {
3421d25425f8SDarrick J. Wong 		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3422d25425f8SDarrick J. Wong 			 "unknown checksum algorithm.");
3423d25425f8SDarrick J. Wong 		silent = 1;
3424d25425f8SDarrick J. Wong 		goto cantfind_ext4;
3425d25425f8SDarrick J. Wong 	}
3426d25425f8SDarrick J. Wong 
34270441984aSDarrick J. Wong 	/* Load the checksum driver */
3428e2b911c5SDarrick J. Wong 	if (ext4_has_feature_metadata_csum(sb)) {
34290441984aSDarrick J. Wong 		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
34300441984aSDarrick J. Wong 		if (IS_ERR(sbi->s_chksum_driver)) {
34310441984aSDarrick J. Wong 			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
34320441984aSDarrick J. Wong 			ret = PTR_ERR(sbi->s_chksum_driver);
34330441984aSDarrick J. Wong 			sbi->s_chksum_driver = NULL;
34340441984aSDarrick J. Wong 			goto failed_mount;
34350441984aSDarrick J. Wong 		}
34360441984aSDarrick J. Wong 	}
34370441984aSDarrick J. Wong 
3438a9c47317SDarrick J. Wong 	/* Check superblock checksum */
3439a9c47317SDarrick J. Wong 	if (!ext4_superblock_csum_verify(sb, es)) {
3440a9c47317SDarrick J. Wong 		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3441a9c47317SDarrick J. Wong 			 "invalid superblock checksum.  Run e2fsck?");
3442a9c47317SDarrick J. Wong 		silent = 1;
34436a797d27SDarrick J. Wong 		ret = -EFSBADCRC;
3444a9c47317SDarrick J. Wong 		goto cantfind_ext4;
3445a9c47317SDarrick J. Wong 	}
3446a9c47317SDarrick J. Wong 
3447a9c47317SDarrick J. Wong 	/* Precompute checksum seed for all metadata */
3448e2b911c5SDarrick J. Wong 	if (ext4_has_feature_csum_seed(sb))
34498c81bd8fSDarrick J. Wong 		sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
34508c81bd8fSDarrick J. Wong 	else if (ext4_has_metadata_csum(sb))
3451a9c47317SDarrick J. Wong 		sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
3452a9c47317SDarrick J. Wong 					       sizeof(es->s_uuid));
3453a9c47317SDarrick J. Wong 
3454ac27a0ecSDave Kleikamp 	/* Set defaults before we parse the mount options */
3455ac27a0ecSDave Kleikamp 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
3456fd8c37ecSTheodore Ts'o 	set_opt(sb, INIT_INODE_TABLE);
3457617ba13bSMingming Cao 	if (def_mount_opts & EXT4_DEFM_DEBUG)
3458fd8c37ecSTheodore Ts'o 		set_opt(sb, DEBUG);
345987f26807STheodore Ts'o 	if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
3460fd8c37ecSTheodore Ts'o 		set_opt(sb, GRPID);
3461617ba13bSMingming Cao 	if (def_mount_opts & EXT4_DEFM_UID16)
3462fd8c37ecSTheodore Ts'o 		set_opt(sb, NO_UID32);
3463ea663336SEric Sandeen 	/* xattr user namespace & acls are now defaulted on */
3464fd8c37ecSTheodore Ts'o 	set_opt(sb, XATTR_USER);
346503010a33STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL
3466fd8c37ecSTheodore Ts'o 	set_opt(sb, POSIX_ACL);
34672e7842b8SHugh Dickins #endif
346898c1a759SDarrick J. Wong 	/* don't forget to enable journal_csum when metadata_csum is enabled. */
346998c1a759SDarrick J. Wong 	if (ext4_has_metadata_csum(sb))
347098c1a759SDarrick J. Wong 		set_opt(sb, JOURNAL_CHECKSUM);
347198c1a759SDarrick J. Wong 
3472617ba13bSMingming Cao 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
3473fd8c37ecSTheodore Ts'o 		set_opt(sb, JOURNAL_DATA);
3474617ba13bSMingming Cao 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
3475fd8c37ecSTheodore Ts'o 		set_opt(sb, ORDERED_DATA);
3476617ba13bSMingming Cao 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
3477fd8c37ecSTheodore Ts'o 		set_opt(sb, WRITEBACK_DATA);
3478ac27a0ecSDave Kleikamp 
3479617ba13bSMingming Cao 	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
3480fd8c37ecSTheodore Ts'o 		set_opt(sb, ERRORS_PANIC);
3481bb4f397aSAneesh Kumar K.V 	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
3482fd8c37ecSTheodore Ts'o 		set_opt(sb, ERRORS_CONT);
3483bb4f397aSAneesh Kumar K.V 	else
3484fd8c37ecSTheodore Ts'o 		set_opt(sb, ERRORS_RO);
348545f1a9c3SDarrick J. Wong 	/* block_validity enabled by default; disable with noblock_validity */
3486fd8c37ecSTheodore Ts'o 	set_opt(sb, BLOCK_VALIDITY);
34878b67f04aSTheodore Ts'o 	if (def_mount_opts & EXT4_DEFM_DISCARD)
3488fd8c37ecSTheodore Ts'o 		set_opt(sb, DISCARD);
3489ac27a0ecSDave Kleikamp 
349008cefc7aSEric W. Biederman 	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
349108cefc7aSEric W. Biederman 	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
349230773840STheodore Ts'o 	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
349330773840STheodore Ts'o 	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
349430773840STheodore Ts'o 	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
3495ac27a0ecSDave Kleikamp 
34968b67f04aSTheodore Ts'o 	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
3497fd8c37ecSTheodore Ts'o 		set_opt(sb, BARRIER);
3498ac27a0ecSDave Kleikamp 
34991e2462f9SMingming Cao 	/*
3500dd919b98SAneesh Kumar K.V 	 * Enable delayed allocation by default.
3501dd919b98SAneesh Kumar K.V 	 * Use -o nodelalloc to turn it off.
3502dd919b98SAneesh Kumar K.V 	 */
3503bc0b75f7STheodore Ts'o 	if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
35048b67f04aSTheodore Ts'o 	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
3505fd8c37ecSTheodore Ts'o 		set_opt(sb, DELALLOC);
3506dd919b98SAneesh Kumar K.V 
350751ce6511SLukas Czerner 	/*
350851ce6511SLukas Czerner 	 * Set the default s_li_wait_mult for lazyinit, in case no
350951ce6511SLukas Czerner 	 * mount option is specified.
351051ce6511SLukas Czerner 	 */
351151ce6511SLukas Czerner 	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
351251ce6511SLukas Czerner 
35135aee0f8aSTheodore Ts'o 	if (sbi->s_es->s_mount_opts[0]) {
35145aee0f8aSTheodore Ts'o 		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
35155aee0f8aSTheodore Ts'o 					      sizeof(sbi->s_es->s_mount_opts),
35165aee0f8aSTheodore Ts'o 					      GFP_KERNEL);
35175aee0f8aSTheodore Ts'o 		if (!s_mount_opts)
35185aee0f8aSTheodore Ts'o 			goto failed_mount;
35195aee0f8aSTheodore Ts'o 		if (!parse_options(s_mount_opts, sb, &journal_devnum,
35205aee0f8aSTheodore Ts'o 				   &journal_ioprio, 0)) {
35218b67f04aSTheodore Ts'o 			ext4_msg(sb, KERN_WARNING,
35228b67f04aSTheodore Ts'o 				 "failed to parse options in superblock: %s",
35235aee0f8aSTheodore Ts'o 				 s_mount_opts);
35245aee0f8aSTheodore Ts'o 		}
35255aee0f8aSTheodore Ts'o 		kfree(s_mount_opts);
35268b67f04aSTheodore Ts'o 	}
35275a916be1STheodore Ts'o 	sbi->s_def_mount_opt = sbi->s_mount_opt;
3528b3881f74STheodore Ts'o 	if (!parse_options((char *) data, sb, &journal_devnum,
3529661aa520SEric Sandeen 			   &journal_ioprio, 0))
3530ac27a0ecSDave Kleikamp 		goto failed_mount;
3531ac27a0ecSDave Kleikamp 
353256889787STheodore Ts'o 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
353356889787STheodore Ts'o 		printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
353456889787STheodore Ts'o 			    "with data=journal disables delayed "
353556889787STheodore Ts'o 			    "allocation and O_DIRECT support!\n");
353656889787STheodore Ts'o 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
353756889787STheodore Ts'o 			ext4_msg(sb, KERN_ERR, "can't mount with "
353856889787STheodore Ts'o 				 "both data=journal and delalloc");
353956889787STheodore Ts'o 			goto failed_mount;
354056889787STheodore Ts'o 		}
354156889787STheodore Ts'o 		if (test_opt(sb, DIOREAD_NOLOCK)) {
354256889787STheodore Ts'o 			ext4_msg(sb, KERN_ERR, "can't mount with "
35436ae6514bSPiotr Sarna 				 "both data=journal and dioread_nolock");
354456889787STheodore Ts'o 			goto failed_mount;
354556889787STheodore Ts'o 		}
3546923ae0ffSRoss Zwisler 		if (test_opt(sb, DAX)) {
3547923ae0ffSRoss Zwisler 			ext4_msg(sb, KERN_ERR, "can't mount with "
3548923ae0ffSRoss Zwisler 				 "both data=journal and dax");
3549923ae0ffSRoss Zwisler 			goto failed_mount;
3550923ae0ffSRoss Zwisler 		}
355173b92a2aSSergey Karamov 		if (ext4_has_feature_encrypt(sb)) {
355273b92a2aSSergey Karamov 			ext4_msg(sb, KERN_WARNING,
355373b92a2aSSergey Karamov 				 "encrypted files will use data=ordered "
355473b92a2aSSergey Karamov 				 "instead of data journaling mode");
355573b92a2aSSergey Karamov 		}
355656889787STheodore Ts'o 		if (test_opt(sb, DELALLOC))
355756889787STheodore Ts'o 			clear_opt(sb, DELALLOC);
3558001e4a87STejun Heo 	} else {
3559001e4a87STejun Heo 		sb->s_iflags |= SB_I_CGROUPWB;
356056889787STheodore Ts'o 	}
356156889787STheodore Ts'o 
3562ac27a0ecSDave Kleikamp 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
3563482a7425SDmitry Monakhov 		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
3564ac27a0ecSDave Kleikamp 
3565617ba13bSMingming Cao 	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
3566e2b911c5SDarrick J. Wong 	    (ext4_has_compat_features(sb) ||
3567e2b911c5SDarrick J. Wong 	     ext4_has_ro_compat_features(sb) ||
3568e2b911c5SDarrick J. Wong 	     ext4_has_incompat_features(sb)))
3569b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING,
3570b31e1552SEric Sandeen 		       "feature flags set on rev 0 fs, "
3571b31e1552SEric Sandeen 		       "running e2fsck is recommended");
3572469108ffSTheodore Tso 
3573ed3654ebSTheodore Ts'o 	if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) {
3574ed3654ebSTheodore Ts'o 		set_opt2(sb, HURD_COMPAT);
3575e2b911c5SDarrick J. Wong 		if (ext4_has_feature_64bit(sb)) {
3576ed3654ebSTheodore Ts'o 			ext4_msg(sb, KERN_ERR,
3577ed3654ebSTheodore Ts'o 				 "The Hurd can't support 64-bit file systems");
3578ed3654ebSTheodore Ts'o 			goto failed_mount;
3579ed3654ebSTheodore Ts'o 		}
3580ed3654ebSTheodore Ts'o 	}
3581ed3654ebSTheodore Ts'o 
35822035e776STheodore Ts'o 	if (IS_EXT2_SB(sb)) {
35832035e776STheodore Ts'o 		if (ext2_feature_set_ok(sb))
35842035e776STheodore Ts'o 			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
35852035e776STheodore Ts'o 				 "using the ext4 subsystem");
35862035e776STheodore Ts'o 		else {
35872035e776STheodore Ts'o 			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
35882035e776STheodore Ts'o 				 "to feature incompatibilities");
35892035e776STheodore Ts'o 			goto failed_mount;
35902035e776STheodore Ts'o 		}
35912035e776STheodore Ts'o 	}
35922035e776STheodore Ts'o 
35932035e776STheodore Ts'o 	if (IS_EXT3_SB(sb)) {
35942035e776STheodore Ts'o 		if (ext3_feature_set_ok(sb))
35952035e776STheodore Ts'o 			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
35962035e776STheodore Ts'o 				 "using the ext4 subsystem");
35972035e776STheodore Ts'o 		else {
35982035e776STheodore Ts'o 			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
35992035e776STheodore Ts'o 				 "to feature incompatibilities");
36002035e776STheodore Ts'o 			goto failed_mount;
36012035e776STheodore Ts'o 		}
36022035e776STheodore Ts'o 	}
36032035e776STheodore Ts'o 
3604469108ffSTheodore Tso 	/*
3605ac27a0ecSDave Kleikamp 	 * Check feature flags regardless of the revision level, since we
3606ac27a0ecSDave Kleikamp 	 * previously didn't change the revision level when setting the flags,
3607ac27a0ecSDave Kleikamp 	 * so there is a chance incompat flags are set on a rev 0 filesystem.
3608ac27a0ecSDave Kleikamp 	 */
3609a13fb1a4SEric Sandeen 	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
3610ac27a0ecSDave Kleikamp 		goto failed_mount;
3611a13fb1a4SEric Sandeen 
3612261cb20cSJan Kara 	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
3613617ba13bSMingming Cao 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
3614617ba13bSMingming Cao 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
3615b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR,
36168cdf3372STheodore Ts'o 		       "Unsupported filesystem blocksize %d (%d log_block_size)",
36178cdf3372STheodore Ts'o 			 blocksize, le32_to_cpu(es->s_log_block_size));
36188cdf3372STheodore Ts'o 		goto failed_mount;
36198cdf3372STheodore Ts'o 	}
36208cdf3372STheodore Ts'o 	if (le32_to_cpu(es->s_log_block_size) >
36218cdf3372STheodore Ts'o 	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
36228cdf3372STheodore Ts'o 		ext4_msg(sb, KERN_ERR,
36238cdf3372STheodore Ts'o 			 "Invalid log block size: %u",
36248cdf3372STheodore Ts'o 			 le32_to_cpu(es->s_log_block_size));
3625ac27a0ecSDave Kleikamp 		goto failed_mount;
3626ac27a0ecSDave Kleikamp 	}
3627ac27a0ecSDave Kleikamp 
36285b9554dcSTheodore Ts'o 	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
36295b9554dcSTheodore Ts'o 		ext4_msg(sb, KERN_ERR,
36305b9554dcSTheodore Ts'o 			 "Number of reserved GDT blocks insanely large: %d",
36315b9554dcSTheodore Ts'o 			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
36325b9554dcSTheodore Ts'o 		goto failed_mount;
36335b9554dcSTheodore Ts'o 	}
36345b9554dcSTheodore Ts'o 
3635923ae0ffSRoss Zwisler 	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
363687eefeb4SToshi Kani 		err = bdev_dax_supported(sb, blocksize);
363787eefeb4SToshi Kani 		if (err)
3638923ae0ffSRoss Zwisler 			goto failed_mount;
3639923ae0ffSRoss Zwisler 	}
3640923ae0ffSRoss Zwisler 
3641e2b911c5SDarrick J. Wong 	if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
36426ddb2447STheodore Ts'o 		ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
36436ddb2447STheodore Ts'o 			 es->s_encryption_level);
36446ddb2447STheodore Ts'o 		goto failed_mount;
36456ddb2447STheodore Ts'o 	}
36466ddb2447STheodore Ts'o 
3647ac27a0ecSDave Kleikamp 	if (sb->s_blocksize != blocksize) {
3648ce40733cSAneesh Kumar K.V 		/* Validate the filesystem blocksize */
3649ce40733cSAneesh Kumar K.V 		if (!sb_set_blocksize(sb, blocksize)) {
3650b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "bad block size %d",
3651ce40733cSAneesh Kumar K.V 					blocksize);
3652ac27a0ecSDave Kleikamp 			goto failed_mount;
3653ac27a0ecSDave Kleikamp 		}
3654ac27a0ecSDave Kleikamp 
3655ac27a0ecSDave Kleikamp 		brelse(bh);
365670bbb3e0SAndrew Morton 		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
365770bbb3e0SAndrew Morton 		offset = do_div(logical_sb_block, blocksize);
3658a8ac900bSGioh Kim 		bh = sb_bread_unmovable(sb, logical_sb_block);
3659ac27a0ecSDave Kleikamp 		if (!bh) {
3660b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR,
3661b31e1552SEric Sandeen 			       "Can't read superblock on 2nd try");
3662ac27a0ecSDave Kleikamp 			goto failed_mount;
3663ac27a0ecSDave Kleikamp 		}
36642716b802STheodore Ts'o 		es = (struct ext4_super_block *)(bh->b_data + offset);
3665ac27a0ecSDave Kleikamp 		sbi->s_es = es;
3666617ba13bSMingming Cao 		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
3667b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR,
3668b31e1552SEric Sandeen 			       "Magic mismatch, very weird!");
3669ac27a0ecSDave Kleikamp 			goto failed_mount;
3670ac27a0ecSDave Kleikamp 		}
3671ac27a0ecSDave Kleikamp 	}
3672ac27a0ecSDave Kleikamp 
3673e2b911c5SDarrick J. Wong 	has_huge_files = ext4_has_feature_huge_file(sb);
3674f287a1a5STheodore Ts'o 	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
3675f287a1a5STheodore Ts'o 						      has_huge_files);
3676f287a1a5STheodore Ts'o 	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
3677ac27a0ecSDave Kleikamp 
3678617ba13bSMingming Cao 	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
3679617ba13bSMingming Cao 		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
3680617ba13bSMingming Cao 		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
3681ac27a0ecSDave Kleikamp 	} else {
3682ac27a0ecSDave Kleikamp 		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
3683ac27a0ecSDave Kleikamp 		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
3684617ba13bSMingming Cao 		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
36851330593eSVignesh Babu 		    (!is_power_of_2(sbi->s_inode_size)) ||
3686ac27a0ecSDave Kleikamp 		    (sbi->s_inode_size > blocksize)) {
3687b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR,
3688b31e1552SEric Sandeen 			       "unsupported inode size: %d",
3689ac27a0ecSDave Kleikamp 			       sbi->s_inode_size);
3690ac27a0ecSDave Kleikamp 			goto failed_mount;
3691ac27a0ecSDave Kleikamp 		}
3692ef7f3835SKalpak Shah 		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
3693ef7f3835SKalpak Shah 			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
3694ac27a0ecSDave Kleikamp 	}
36950b8e58a1SAndreas Dilger 
36960d1ee42fSAlexandre Ratchov 	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
3697e2b911c5SDarrick J. Wong 	if (ext4_has_feature_64bit(sb)) {
36988fadc143SAlexandre Ratchov 		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
36990d1ee42fSAlexandre Ratchov 		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
3700d8ea6cf8Svignesh babu 		    !is_power_of_2(sbi->s_desc_size)) {
3701b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR,
3702b31e1552SEric Sandeen 			       "unsupported descriptor size %lu",
37030d1ee42fSAlexandre Ratchov 			       sbi->s_desc_size);
37040d1ee42fSAlexandre Ratchov 			goto failed_mount;
37050d1ee42fSAlexandre Ratchov 		}
37060d1ee42fSAlexandre Ratchov 	} else
37070d1ee42fSAlexandre Ratchov 		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
37080b8e58a1SAndreas Dilger 
3709ac27a0ecSDave Kleikamp 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
3710ac27a0ecSDave Kleikamp 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
37110b8e58a1SAndreas Dilger 
3712617ba13bSMingming Cao 	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
3713ac27a0ecSDave Kleikamp 	if (sbi->s_inodes_per_block == 0)
3714617ba13bSMingming Cao 		goto cantfind_ext4;
3715cd6bb35bSTheodore Ts'o 	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
3716cd6bb35bSTheodore Ts'o 	    sbi->s_inodes_per_group > blocksize * 8) {
3717cd6bb35bSTheodore Ts'o 		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
3718cd6bb35bSTheodore Ts'o 			 sbi->s_inodes_per_group);
3719cd6bb35bSTheodore Ts'o 		goto failed_mount;
3720cd6bb35bSTheodore Ts'o 	}
3721ac27a0ecSDave Kleikamp 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
3722ac27a0ecSDave Kleikamp 					sbi->s_inodes_per_block;
37230d1ee42fSAlexandre Ratchov 	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
3724ac27a0ecSDave Kleikamp 	sbi->s_sbh = bh;
3725ac27a0ecSDave Kleikamp 	sbi->s_mount_state = le16_to_cpu(es->s_state);
3726e57aa839SFengguang Wu 	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
3727e57aa839SFengguang Wu 	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
37280b8e58a1SAndreas Dilger 
3729ac27a0ecSDave Kleikamp 	for (i = 0; i < 4; i++)
3730ac27a0ecSDave Kleikamp 		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
3731ac27a0ecSDave Kleikamp 	sbi->s_def_hash_version = es->s_def_hash_version;
3732e2b911c5SDarrick J. Wong 	if (ext4_has_feature_dir_index(sb)) {
3733f99b2589STheodore Ts'o 		i = le32_to_cpu(es->s_flags);
3734f99b2589STheodore Ts'o 		if (i & EXT2_FLAGS_UNSIGNED_HASH)
3735f99b2589STheodore Ts'o 			sbi->s_hash_unsigned = 3;
3736f99b2589STheodore Ts'o 		else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
3737f99b2589STheodore Ts'o #ifdef __CHAR_UNSIGNED__
373823301410STheodore Ts'o 			if (!(sb->s_flags & MS_RDONLY))
373923301410STheodore Ts'o 				es->s_flags |=
374023301410STheodore Ts'o 					cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
3741f99b2589STheodore Ts'o 			sbi->s_hash_unsigned = 3;
3742f99b2589STheodore Ts'o #else
374323301410STheodore Ts'o 			if (!(sb->s_flags & MS_RDONLY))
374423301410STheodore Ts'o 				es->s_flags |=
374523301410STheodore Ts'o 					cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
3746f99b2589STheodore Ts'o #endif
3747f99b2589STheodore Ts'o 		}
374823301410STheodore Ts'o 	}
3749ac27a0ecSDave Kleikamp 
3750281b5995STheodore Ts'o 	/* Handle clustersize */
3751281b5995STheodore Ts'o 	clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
3752e2b911c5SDarrick J. Wong 	has_bigalloc = ext4_has_feature_bigalloc(sb);
3753281b5995STheodore Ts'o 	if (has_bigalloc) {
3754281b5995STheodore Ts'o 		if (clustersize < blocksize) {
3755281b5995STheodore Ts'o 			ext4_msg(sb, KERN_ERR,
3756281b5995STheodore Ts'o 				 "cluster size (%d) smaller than "
3757281b5995STheodore Ts'o 				 "block size (%d)", clustersize, blocksize);
3758281b5995STheodore Ts'o 			goto failed_mount;
3759281b5995STheodore Ts'o 		}
37608cdf3372STheodore Ts'o 		if (le32_to_cpu(es->s_log_cluster_size) >
37618cdf3372STheodore Ts'o 		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
37628cdf3372STheodore Ts'o 			ext4_msg(sb, KERN_ERR,
37638cdf3372STheodore Ts'o 				 "Invalid log cluster size: %u",
37648cdf3372STheodore Ts'o 				 le32_to_cpu(es->s_log_cluster_size));
37658cdf3372STheodore Ts'o 			goto failed_mount;
37668cdf3372STheodore Ts'o 		}
3767281b5995STheodore Ts'o 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
3768281b5995STheodore Ts'o 			le32_to_cpu(es->s_log_block_size);
3769281b5995STheodore Ts'o 		sbi->s_clusters_per_group =
3770281b5995STheodore Ts'o 			le32_to_cpu(es->s_clusters_per_group);
3771281b5995STheodore Ts'o 		if (sbi->s_clusters_per_group > blocksize * 8) {
3772281b5995STheodore Ts'o 			ext4_msg(sb, KERN_ERR,
3773281b5995STheodore Ts'o 				 "#clusters per group too big: %lu",
3774281b5995STheodore Ts'o 				 sbi->s_clusters_per_group);
3775281b5995STheodore Ts'o 			goto failed_mount;
3776281b5995STheodore Ts'o 		}
3777281b5995STheodore Ts'o 		if (sbi->s_blocks_per_group !=
3778281b5995STheodore Ts'o 		    (sbi->s_clusters_per_group * (clustersize / blocksize))) {
3779281b5995STheodore Ts'o 			ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and "
3780281b5995STheodore Ts'o 				 "clusters per group (%lu) inconsistent",
3781281b5995STheodore Ts'o 				 sbi->s_blocks_per_group,
3782281b5995STheodore Ts'o 				 sbi->s_clusters_per_group);
3783281b5995STheodore Ts'o 			goto failed_mount;
3784281b5995STheodore Ts'o 		}
3785281b5995STheodore Ts'o 	} else {
3786281b5995STheodore Ts'o 		if (clustersize != blocksize) {
3787281b5995STheodore Ts'o 			ext4_warning(sb, "fragment/cluster size (%d) != "
3788281b5995STheodore Ts'o 				     "block size (%d)", clustersize,
3789281b5995STheodore Ts'o 				     blocksize);
3790281b5995STheodore Ts'o 			clustersize = blocksize;
3791281b5995STheodore Ts'o 		}
3792ac27a0ecSDave Kleikamp 		if (sbi->s_blocks_per_group > blocksize * 8) {
3793b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR,
3794b31e1552SEric Sandeen 				 "#blocks per group too big: %lu",
3795ac27a0ecSDave Kleikamp 				 sbi->s_blocks_per_group);
3796ac27a0ecSDave Kleikamp 			goto failed_mount;
3797ac27a0ecSDave Kleikamp 		}
3798281b5995STheodore Ts'o 		sbi->s_clusters_per_group = sbi->s_blocks_per_group;
3799281b5995STheodore Ts'o 		sbi->s_cluster_bits = 0;
3800281b5995STheodore Ts'o 	}
3801281b5995STheodore Ts'o 	sbi->s_cluster_ratio = clustersize / blocksize;
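	/*
	 * E.g. 4 KiB blocks with 64 KiB bigalloc clusters give
	 * s_cluster_bits == 4 and s_cluster_ratio == 16; without bigalloc
	 * these collapse to 0 and 1 respectively.
	 */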
3802281b5995STheodore Ts'o 
3803960fd856STheodore Ts'o 	/* Do we have standard group size of clustersize * 8 blocks ? */
3804960fd856STheodore Ts'o 	if (sbi->s_blocks_per_group == clustersize << 3)
3805960fd856STheodore Ts'o 		set_opt2(sb, STD_GROUP_SIZE);
3806960fd856STheodore Ts'o 
3807bf43d84bSEric Sandeen 	/*
3808bf43d84bSEric Sandeen 	 * Test whether we have more sectors than will fit in sector_t,
3809bf43d84bSEric Sandeen 	 * and whether the max offset is addressable by the page cache.
3810bf43d84bSEric Sandeen 	 */
38115a9ae68aSDarrick J. Wong 	err = generic_check_addressable(sb->s_blocksize_bits,
381230ca22c7SPatrick J. LoPresti 					ext4_blocks_count(es));
38135a9ae68aSDarrick J. Wong 	if (err) {
3814b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "filesystem"
3815bf43d84bSEric Sandeen 			 " too large to mount safely on this system");
3816ac27a0ecSDave Kleikamp 		if (sizeof(sector_t) < 8)
381790c699a9SBartlomiej Zolnierkiewicz 			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
3818ac27a0ecSDave Kleikamp 		goto failed_mount;
3819ac27a0ecSDave Kleikamp 	}
3820ac27a0ecSDave Kleikamp 
3821617ba13bSMingming Cao 	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
3822617ba13bSMingming Cao 		goto cantfind_ext4;
3823e7c95593SEric Sandeen 
38240f2ddca6SFrom: Thiemo Nagel 	/* check blocks count against device size */
38250f2ddca6SFrom: Thiemo Nagel 	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
38260f2ddca6SFrom: Thiemo Nagel 	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
3827b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
3828b31e1552SEric Sandeen 		       "exceeds size of device (%llu blocks)",
38290f2ddca6SFrom: Thiemo Nagel 		       ext4_blocks_count(es), blocks_count);
38300f2ddca6SFrom: Thiemo Nagel 		goto failed_mount;
38310f2ddca6SFrom: Thiemo Nagel 	}
38320f2ddca6SFrom: Thiemo Nagel 
38334ec11028STheodore Ts'o 	/*
38344ec11028STheodore Ts'o 	 * It makes no sense for the first data block to be beyond the end
38354ec11028STheodore Ts'o 	 * of the filesystem.
38364ec11028STheodore Ts'o 	 */
38374ec11028STheodore Ts'o 	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
3838b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
3839b31e1552SEric Sandeen 			 "block %u is beyond end of filesystem (%llu)",
3840e7c95593SEric Sandeen 			 le32_to_cpu(es->s_first_data_block),
38414ec11028STheodore Ts'o 			 ext4_blocks_count(es));
3842e7c95593SEric Sandeen 		goto failed_mount;
3843e7c95593SEric Sandeen 	}
3844bd81d8eeSLaurent Vivier 	blocks_count = (ext4_blocks_count(es) -
3845bd81d8eeSLaurent Vivier 			le32_to_cpu(es->s_first_data_block) +
3846bd81d8eeSLaurent Vivier 			EXT4_BLOCKS_PER_GROUP(sb) - 1);
3847bd81d8eeSLaurent Vivier 	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
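	/* i.e. the number of block groups, rounding any partial last group up */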
38484ec11028STheodore Ts'o 	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
3849b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
38504ec11028STheodore Ts'o 		       "(block count %llu, first data block %u, "
3851b31e1552SEric Sandeen 		       "blocks per group %lu)", sbi->s_groups_count,
38524ec11028STheodore Ts'o 		       ext4_blocks_count(es),
38534ec11028STheodore Ts'o 		       le32_to_cpu(es->s_first_data_block),
38544ec11028STheodore Ts'o 		       EXT4_BLOCKS_PER_GROUP(sb));
38554ec11028STheodore Ts'o 		goto failed_mount;
38564ec11028STheodore Ts'o 	}
3857bd81d8eeSLaurent Vivier 	sbi->s_groups_count = blocks_count;
3858fb0a387dSEric Sandeen 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
3859fb0a387dSEric Sandeen 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
3860617ba13bSMingming Cao 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
3861617ba13bSMingming Cao 		   EXT4_DESC_PER_BLOCK(sb);
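	/*
	 * db_count is the number of group descriptor blocks: one descriptor
	 * per group, EXT4_DESC_PER_BLOCK(sb) descriptors per block, rounded
	 * up.
	 */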
38623a4b77cdSEryu Guan 	if (ext4_has_feature_meta_bg(sb)) {
38633a4b77cdSEryu Guan 		if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
38643a4b77cdSEryu Guan 			ext4_msg(sb, KERN_WARNING,
38653a4b77cdSEryu Guan 				 "first meta block group too large: %u "
38663a4b77cdSEryu Guan 				 "(group descriptor block count %u)",
38673a4b77cdSEryu Guan 				 le32_to_cpu(es->s_first_meta_bg), db_count);
38683a4b77cdSEryu Guan 			goto failed_mount;
38693a4b77cdSEryu Guan 		}
38703a4b77cdSEryu Guan 	}
3871f18a5f21STheodore Ts'o 	sbi->s_group_desc = ext4_kvmalloc(db_count *
3872f18a5f21STheodore Ts'o 					  sizeof(struct buffer_head *),
3873ac27a0ecSDave Kleikamp 					  GFP_KERNEL);
3874ac27a0ecSDave Kleikamp 	if (sbi->s_group_desc == NULL) {
3875b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "not enough memory");
38762cde417dSTheodore Ts'o 		ret = -ENOMEM;
3877ac27a0ecSDave Kleikamp 		goto failed_mount;
3878ac27a0ecSDave Kleikamp 	}
3879ac27a0ecSDave Kleikamp 
3880705895b6SPekka Enberg 	bgl_lock_init(sbi->s_blockgroup_lock);
3881ac27a0ecSDave Kleikamp 
3882ac27a0ecSDave Kleikamp 	for (i = 0; i < db_count; i++) {
388370bbb3e0SAndrew Morton 		block = descriptor_loc(sb, logical_sb_block, i);
3884a8ac900bSGioh Kim 		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
3885ac27a0ecSDave Kleikamp 		if (!sbi->s_group_desc[i]) {
3886b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR,
3887b31e1552SEric Sandeen 			       "can't read group descriptor %d", i);
3888ac27a0ecSDave Kleikamp 			db_count = i;
3889ac27a0ecSDave Kleikamp 			goto failed_mount2;
3890ac27a0ecSDave Kleikamp 		}
3891ac27a0ecSDave Kleikamp 	}
3892829fa70dSTheodore Ts'o 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
3893b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
38946a797d27SDarrick J. Wong 		ret = -EFSCORRUPTED;
3895f9ae9cf5STheodore Ts'o 		goto failed_mount2;
3896ac27a0ecSDave Kleikamp 	}
3897772cb7c8SJose R. Santos 
3898f9ae9cf5STheodore Ts'o 	sbi->s_gdb_count = db_count;
3899ac27a0ecSDave Kleikamp 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3900ac27a0ecSDave Kleikamp 	spin_lock_init(&sbi->s_next_gen_lock);
3901ac27a0ecSDave Kleikamp 
390204ecddb7SJan Mrazek 	setup_timer(&sbi->s_err_report, print_daily_error_info,
390304ecddb7SJan Mrazek 		(unsigned long) sb);
390404496411STao Ma 
3905a75ae78fSDmitry Monakhov 	/* Register extent status tree shrinker */
3906eb68d0e2SZheng Liu 	if (ext4_es_register_shrinker(sbi))
3907ce7e010aSTheodore Ts'o 		goto failed_mount3;
3908ce7e010aSTheodore Ts'o 
3909c9de560dSAlex Tomas 	sbi->s_stripe = ext4_get_stripe_size(sbi);
391067a5da56SZheng Liu 	sbi->s_extent_max_zeroout_kb = 32;
3911c9de560dSAlex Tomas 
3912f9ae9cf5STheodore Ts'o 	/*
3913f9ae9cf5STheodore Ts'o 	 * set up enough so that it can read an inode
3914f9ae9cf5STheodore Ts'o 	 */
3915f9ae9cf5STheodore Ts'o 	sb->s_op = &ext4_sops;
3916617ba13bSMingming Cao 	sb->s_export_op = &ext4_export_ops;
3917617ba13bSMingming Cao 	sb->s_xattr = ext4_xattr_handlers;
3918a7550b30SJaegeuk Kim 	sb->s_cop = &ext4_cryptops;
3919ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
3920617ba13bSMingming Cao 	sb->dq_op = &ext4_quota_operations;
3921e2b911c5SDarrick J. Wong 	if (ext4_has_feature_quota(sb))
39221fa5efe3SJan Kara 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
3923262b4662SJan Kara 	else
3924262b4662SJan Kara 		sb->s_qcop = &ext4_qctl_operations;
3925689c958cSLi Xi 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3926ac27a0ecSDave Kleikamp #endif
3927f2fa2ffcSAneesh Kumar K.V 	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
3928f2fa2ffcSAneesh Kumar K.V 
3929ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
39303b9d4ed2STheodore Ts'o 	mutex_init(&sbi->s_orphan_lock);
3931ac27a0ecSDave Kleikamp 
3932ac27a0ecSDave Kleikamp 	sb->s_root = NULL;
3933ac27a0ecSDave Kleikamp 
3934ac27a0ecSDave Kleikamp 	needs_recovery = (es->s_last_orphan != 0 ||
3935e2b911c5SDarrick J. Wong 			  ext4_has_feature_journal_needs_recovery(sb));
3936ac27a0ecSDave Kleikamp 
3937e2b911c5SDarrick J. Wong 	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
3938c5e06d10SJohann Lombardi 		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
393950460fe8SDarrick J. Wong 			goto failed_mount3a;
3940c5e06d10SJohann Lombardi 
3941ac27a0ecSDave Kleikamp 	/*
3942ac27a0ecSDave Kleikamp 	 * The first inode we look at is the journal inode.  Don't try
3943ac27a0ecSDave Kleikamp 	 * root first: it may be modified in the journal!
3944ac27a0ecSDave Kleikamp 	 */
3945e2b911c5SDarrick J. Wong 	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
39464753d8a2STheodore Ts'o 		err = ext4_load_journal(sb, es, journal_devnum);
39474753d8a2STheodore Ts'o 		if (err)
394850460fe8SDarrick J. Wong 			goto failed_mount3a;
39490390131bSFrank Mayhar 	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
3950e2b911c5SDarrick J. Wong 		   ext4_has_feature_journal_needs_recovery(sb)) {
3951b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "required journal recovery "
3952b31e1552SEric Sandeen 		       "suppressed and not mounted read-only");
3953744692dcSJiaying Zhang 		goto failed_mount_wq;
3954ac27a0ecSDave Kleikamp 	} else {
39551e381f60SDmitry Monakhov 		/* Nojournal mode, all journal mount options are illegal */
39561e381f60SDmitry Monakhov 		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
39571e381f60SDmitry Monakhov 			ext4_msg(sb, KERN_ERR, "can't mount with "
39581e381f60SDmitry Monakhov 				 "journal_checksum, fs mounted w/o journal");
39591e381f60SDmitry Monakhov 			goto failed_mount_wq;
39601e381f60SDmitry Monakhov 		}
39611e381f60SDmitry Monakhov 		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
39621e381f60SDmitry Monakhov 			ext4_msg(sb, KERN_ERR, "can't mount with "
39631e381f60SDmitry Monakhov 				 "journal_async_commit, fs mounted w/o journal");
39641e381f60SDmitry Monakhov 			goto failed_mount_wq;
39651e381f60SDmitry Monakhov 		}
39661e381f60SDmitry Monakhov 		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
39671e381f60SDmitry Monakhov 			ext4_msg(sb, KERN_ERR, "can't mount with "
39681e381f60SDmitry Monakhov 				 "commit=%lu, fs mounted w/o journal",
39691e381f60SDmitry Monakhov 				 sbi->s_commit_interval / HZ);
39701e381f60SDmitry Monakhov 			goto failed_mount_wq;
39711e381f60SDmitry Monakhov 		}
39721e381f60SDmitry Monakhov 		if (EXT4_MOUNT_DATA_FLAGS &
39731e381f60SDmitry Monakhov 		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
39741e381f60SDmitry Monakhov 			ext4_msg(sb, KERN_ERR, "can't mount with "
39751e381f60SDmitry Monakhov 				 "data=, fs mounted w/o journal");
39761e381f60SDmitry Monakhov 			goto failed_mount_wq;
39771e381f60SDmitry Monakhov 		}
39781e381f60SDmitry Monakhov 		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
39791e381f60SDmitry Monakhov 		clear_opt(sb, JOURNAL_CHECKSUM);
3980fd8c37ecSTheodore Ts'o 		clear_opt(sb, DATA_FLAGS);
39810390131bSFrank Mayhar 		sbi->s_journal = NULL;
39820390131bSFrank Mayhar 		needs_recovery = 0;
39830390131bSFrank Mayhar 		goto no_journal;
3984ac27a0ecSDave Kleikamp 	}
3985ac27a0ecSDave Kleikamp 
3986e2b911c5SDarrick J. Wong 	if (ext4_has_feature_64bit(sb) &&
3987eb40a09cSJose R. Santos 	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
3988eb40a09cSJose R. Santos 				       JBD2_FEATURE_INCOMPAT_64BIT)) {
3989b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
3990744692dcSJiaying Zhang 		goto failed_mount_wq;
3991eb40a09cSJose R. Santos 	}
3992eb40a09cSJose R. Santos 
399325ed6e8aSDarrick J. Wong 	if (!set_journal_csum_feature_set(sb)) {
399425ed6e8aSDarrick J. Wong 		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
399525ed6e8aSDarrick J. Wong 			 "feature set");
399625ed6e8aSDarrick J. Wong 		goto failed_mount_wq;
3997d4da6c9cSLinus Torvalds 	}
3998818d276cSGirish Shilamkar 
3999ac27a0ecSDave Kleikamp 	/* We have now updated the journal if required, so we can
4000ac27a0ecSDave Kleikamp 	 * validate the data journaling mode. */
4001ac27a0ecSDave Kleikamp 	switch (test_opt(sb, DATA_FLAGS)) {
4002ac27a0ecSDave Kleikamp 	case 0:
4003ac27a0ecSDave Kleikamp 		/* No mode set, assume a default based on the journal
400463f57933SAndrew Morton 		 * capabilities: ORDERED_DATA if the journal can
400563f57933SAndrew Morton 		 * cope, else JOURNAL_DATA
400663f57933SAndrew Morton 		 */
4007dab291afSMingming Cao 		if (jbd2_journal_check_available_features
4008dab291afSMingming Cao 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
4009fd8c37ecSTheodore Ts'o 			set_opt(sb, ORDERED_DATA);
4010ac27a0ecSDave Kleikamp 		else
4011fd8c37ecSTheodore Ts'o 			set_opt(sb, JOURNAL_DATA);
4012ac27a0ecSDave Kleikamp 		break;
4013ac27a0ecSDave Kleikamp 
4014617ba13bSMingming Cao 	case EXT4_MOUNT_ORDERED_DATA:
4015617ba13bSMingming Cao 	case EXT4_MOUNT_WRITEBACK_DATA:
4016dab291afSMingming Cao 		if (!jbd2_journal_check_available_features
4017dab291afSMingming Cao 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
4018b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "Journal does not support "
4019b31e1552SEric Sandeen 			       "requested data journaling mode");
4020744692dcSJiaying Zhang 			goto failed_mount_wq;
4021ac27a0ecSDave Kleikamp 		}
4022ac27a0ecSDave Kleikamp 	default:
4023ac27a0ecSDave Kleikamp 		break;
4024ac27a0ecSDave Kleikamp 	}
4025ab04df78SJan Kara 
4026ab04df78SJan Kara 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
4027ab04df78SJan Kara 	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4028ab04df78SJan Kara 		ext4_msg(sb, KERN_ERR, "can't mount with "
4029ab04df78SJan Kara 			"journal_async_commit in data=ordered mode");
4030ab04df78SJan Kara 		goto failed_mount_wq;
4031ab04df78SJan Kara 	}
4032ab04df78SJan Kara 
4033b3881f74STheodore Ts'o 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
4034ac27a0ecSDave Kleikamp 
403518aadd47SBobi Jam 	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
403618aadd47SBobi Jam 
4037ce7e010aSTheodore Ts'o no_journal:
403882939d79SJan Kara 	sbi->s_mb_cache = ext4_xattr_create_cache();
40399c191f70ST Makphaibulchoke 	if (!sbi->s_mb_cache) {
40409c191f70ST Makphaibulchoke 		ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
40419c191f70ST Makphaibulchoke 		goto failed_mount_wq;
40429c191f70ST Makphaibulchoke 	}
40439c191f70ST Makphaibulchoke 
4044e2b911c5SDarrick J. Wong 	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
404509cbfeafSKirill A. Shutemov 	    (blocksize != PAGE_SIZE)) {
40461cb767cdSTheodore Ts'o 		ext4_msg(sb, KERN_ERR,
40471cb767cdSTheodore Ts'o 			 "Unsupported blocksize for fs encryption");
40481cb767cdSTheodore Ts'o 		goto failed_mount_wq;
40491cb767cdSTheodore Ts'o 	}
40501cb767cdSTheodore Ts'o 
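	/*
	 * Mounting with test_dummy_encryption on a writable filesystem
	 * turns on the encrypt feature and persists it to disk.
	 */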
4051e2b911c5SDarrick J. Wong 	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
4052e2b911c5SDarrick J. Wong 	    !ext4_has_feature_encrypt(sb)) {
4053e2b911c5SDarrick J. Wong 		ext4_set_feature_encrypt(sb);
40546ddb2447STheodore Ts'o 		ext4_commit_super(sb, 1);
40556ddb2447STheodore Ts'o 	}
40566ddb2447STheodore Ts'o 
4057fd89d5f2STejun Heo 	/*
4058952fc18eSTheodore Ts'o 	 * Get the # of file system overhead blocks from the
4059952fc18eSTheodore Ts'o 	 * superblock if present.
4060952fc18eSTheodore Ts'o 	 */
4061952fc18eSTheodore Ts'o 	if (es->s_overhead_clusters)
4062952fc18eSTheodore Ts'o 		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
4063952fc18eSTheodore Ts'o 	else {
406407aa2ea1SLukas Czerner 		err = ext4_calculate_overhead(sb);
406507aa2ea1SLukas Czerner 		if (err)
4066952fc18eSTheodore Ts'o 			goto failed_mount_wq;
4067952fc18eSTheodore Ts'o 	}
4068952fc18eSTheodore Ts'o 
4069952fc18eSTheodore Ts'o 	/*
4070fd89d5f2STejun Heo 	 * The maximum number of concurrent works can be high and
4071fd89d5f2STejun Heo 	 * concurrency isn't really necessary.  Limit it to 1.
4072fd89d5f2STejun Heo 	 */
40732e8fa54eSJan Kara 	EXT4_SB(sb)->rsv_conversion_wq =
40742e8fa54eSJan Kara 		alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
40752e8fa54eSJan Kara 	if (!EXT4_SB(sb)->rsv_conversion_wq) {
40762e8fa54eSJan Kara 		printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
407707aa2ea1SLukas Czerner 		ret = -ENOMEM;
40782e8fa54eSJan Kara 		goto failed_mount4;
40792e8fa54eSJan Kara 	}
40802e8fa54eSJan Kara 
4081ac27a0ecSDave Kleikamp 	/*
4082dab291afSMingming Cao 	 * The jbd2_journal_load will have done any necessary log recovery,
4083ac27a0ecSDave Kleikamp 	 * so we can safely mount the rest of the filesystem now.
4084ac27a0ecSDave Kleikamp 	 */
4085ac27a0ecSDave Kleikamp 
40861d1fe1eeSDavid Howells 	root = ext4_iget(sb, EXT4_ROOT_INO);
40871d1fe1eeSDavid Howells 	if (IS_ERR(root)) {
4088b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "get root inode failed");
40891d1fe1eeSDavid Howells 		ret = PTR_ERR(root);
409032a9bb57SManish Katiyar 		root = NULL;
4091ac27a0ecSDave Kleikamp 		goto failed_mount4;
4092ac27a0ecSDave Kleikamp 	}
4093ac27a0ecSDave Kleikamp 	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
4094b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
409594bf608aSAl Viro 		iput(root);
4096ac27a0ecSDave Kleikamp 		goto failed_mount4;
4097ac27a0ecSDave Kleikamp 	}
409848fde701SAl Viro 	sb->s_root = d_make_root(root);
40991d1fe1eeSDavid Howells 	if (!sb->s_root) {
4100b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "get root dentry failed");
41011d1fe1eeSDavid Howells 		ret = -ENOMEM;
41021d1fe1eeSDavid Howells 		goto failed_mount4;
41031d1fe1eeSDavid Howells 	}
4104ac27a0ecSDave Kleikamp 
41057e84b621SEric Sandeen 	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
41067e84b621SEric Sandeen 		sb->s_flags |= MS_RDONLY;
4107ef7f3835SKalpak Shah 
4108ef7f3835SKalpak Shah 	/* determine the minimum size of new large inodes, if present */
4109670e9875STheodore Ts'o 	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
4110670e9875STheodore Ts'o 	    sbi->s_want_extra_isize == 0) {
4111ef7f3835SKalpak Shah 		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4112ef7f3835SKalpak Shah 						     EXT4_GOOD_OLD_INODE_SIZE;
4113e2b911c5SDarrick J. Wong 		if (ext4_has_feature_extra_isize(sb)) {
4114ef7f3835SKalpak Shah 			if (sbi->s_want_extra_isize <
4115ef7f3835SKalpak Shah 			    le16_to_cpu(es->s_want_extra_isize))
4116ef7f3835SKalpak Shah 				sbi->s_want_extra_isize =
4117ef7f3835SKalpak Shah 					le16_to_cpu(es->s_want_extra_isize);
4118ef7f3835SKalpak Shah 			if (sbi->s_want_extra_isize <
4119ef7f3835SKalpak Shah 			    le16_to_cpu(es->s_min_extra_isize))
4120ef7f3835SKalpak Shah 				sbi->s_want_extra_isize =
4121ef7f3835SKalpak Shah 					le16_to_cpu(es->s_min_extra_isize);
4122ef7f3835SKalpak Shah 		}
4123ef7f3835SKalpak Shah 	}
4124ef7f3835SKalpak Shah 	/* Check if enough inode space is available */
4125ef7f3835SKalpak Shah 	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
4126ef7f3835SKalpak Shah 							sbi->s_inode_size) {
4127ef7f3835SKalpak Shah 		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
4128ef7f3835SKalpak Shah 						       EXT4_GOOD_OLD_INODE_SIZE;
4129b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "required extra inode space not "
4130b31e1552SEric Sandeen 			 "available");
4131ef7f3835SKalpak Shah 	}
4132ef7f3835SKalpak Shah 
4133b5799018STheodore Ts'o 	ext4_set_resv_clusters(sb);
413427dd4385SLukas Czerner 
41356fd058f7STheodore Ts'o 	err = ext4_setup_system_zone(sb);
41366fd058f7STheodore Ts'o 	if (err) {
4137b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "failed to initialize system "
4138fbe845ddSCurt Wohlgemuth 			 "zone (%d)", err);
4139f9ae9cf5STheodore Ts'o 		goto failed_mount4a;
4140f9ae9cf5STheodore Ts'o 	}
4141f9ae9cf5STheodore Ts'o 
4142f9ae9cf5STheodore Ts'o 	ext4_ext_init(sb);
4143f9ae9cf5STheodore Ts'o 	err = ext4_mb_init(sb);
4144f9ae9cf5STheodore Ts'o 	if (err) {
4145f9ae9cf5STheodore Ts'o 		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
4146f9ae9cf5STheodore Ts'o 			 err);
4147dcf2d804STao Ma 		goto failed_mount5;
4148c2774d84SAneesh Kumar K.V 	}
4149c2774d84SAneesh Kumar K.V 
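	/*
	 * Seed the percpu counters (free clusters/inodes, directories,
	 * dirty clusters) from the current on-disk state.
	 */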
4150d5e03cbbSTheodore Ts'o 	block = ext4_count_free_clusters(sb);
4151d5e03cbbSTheodore Ts'o 	ext4_free_blocks_count_set(sbi->s_es,
4152d5e03cbbSTheodore Ts'o 				   EXT4_C2B(sbi, block));
4153908c7f19STejun Heo 	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
4154908c7f19STejun Heo 				  GFP_KERNEL);
4155d5e03cbbSTheodore Ts'o 	if (!err) {
4156d5e03cbbSTheodore Ts'o 		unsigned long freei = ext4_count_free_inodes(sb);
4157d5e03cbbSTheodore Ts'o 		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
4158908c7f19STejun Heo 		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
4159908c7f19STejun Heo 					  GFP_KERNEL);
4160d5e03cbbSTheodore Ts'o 	}
4161d5e03cbbSTheodore Ts'o 	if (!err)
4162d5e03cbbSTheodore Ts'o 		err = percpu_counter_init(&sbi->s_dirs_counter,
4163908c7f19STejun Heo 					  ext4_count_dirs(sb), GFP_KERNEL);
4164d5e03cbbSTheodore Ts'o 	if (!err)
4165908c7f19STejun Heo 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
4166908c7f19STejun Heo 					  GFP_KERNEL);
4167c8585c6fSDaeho Jeong 	if (!err)
4168c8585c6fSDaeho Jeong 		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
4169c8585c6fSDaeho Jeong 
4170d5e03cbbSTheodore Ts'o 	if (err) {
4171d5e03cbbSTheodore Ts'o 		ext4_msg(sb, KERN_ERR, "insufficient memory");
4172d5e03cbbSTheodore Ts'o 		goto failed_mount6;
4173d5e03cbbSTheodore Ts'o 	}
4174d5e03cbbSTheodore Ts'o 
4175e2b911c5SDarrick J. Wong 	if (ext4_has_feature_flex_bg(sb))
4176d5e03cbbSTheodore Ts'o 		if (!ext4_fill_flex_info(sb)) {
4177d5e03cbbSTheodore Ts'o 			ext4_msg(sb, KERN_ERR,
4178d5e03cbbSTheodore Ts'o 			       "unable to initialize "
4179d5e03cbbSTheodore Ts'o 			       "flex_bg meta info!");
4180d5e03cbbSTheodore Ts'o 			goto failed_mount6;
4181d5e03cbbSTheodore Ts'o 		}
4182d5e03cbbSTheodore Ts'o 
4183bfff6873SLukas Czerner 	err = ext4_register_li_request(sb, first_not_zeroed);
4184bfff6873SLukas Czerner 	if (err)
4185dcf2d804STao Ma 		goto failed_mount6;
4186bfff6873SLukas Czerner 
4187b5799018STheodore Ts'o 	err = ext4_register_sysfs(sb);
4188dcf2d804STao Ma 	if (err)
4189dcf2d804STao Ma 		goto failed_mount7;
41903197ebdbSTheodore Ts'o 
41919b2ff357SJan Kara #ifdef CONFIG_QUOTA
41929b2ff357SJan Kara 	/* Enable quota usage during mount. */
4193e2b911c5SDarrick J. Wong 	if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
41949b2ff357SJan Kara 		err = ext4_enable_quotas(sb);
41959b2ff357SJan Kara 		if (err)
41969b2ff357SJan Kara 			goto failed_mount8;
41979b2ff357SJan Kara 	}
41989b2ff357SJan Kara #endif  /* CONFIG_QUOTA */
41999b2ff357SJan Kara 
4200617ba13bSMingming Cao 	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
4201617ba13bSMingming Cao 	ext4_orphan_cleanup(sb, es);
4202617ba13bSMingming Cao 	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
42030390131bSFrank Mayhar 	if (needs_recovery) {
4204b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "recovery complete");
4205617ba13bSMingming Cao 		ext4_mark_recovery_complete(sb, es);
42060390131bSFrank Mayhar 	}
42070390131bSFrank Mayhar 	if (EXT4_SB(sb)->s_journal) {
42080390131bSFrank Mayhar 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
42090390131bSFrank Mayhar 			descr = " journalled data mode";
42100390131bSFrank Mayhar 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
42110390131bSFrank Mayhar 			descr = " ordered data mode";
42120390131bSFrank Mayhar 		else
42130390131bSFrank Mayhar 			descr = " writeback data mode";
42140390131bSFrank Mayhar 	} else
42150390131bSFrank Mayhar 		descr = "out journal";
42160390131bSFrank Mayhar 
421779add3a3SLukas Czerner 	if (test_opt(sb, DISCARD)) {
421879add3a3SLukas Czerner 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
421979add3a3SLukas Czerner 		if (!blk_queue_discard(q))
422079add3a3SLukas Czerner 			ext4_msg(sb, KERN_WARNING,
422179add3a3SLukas Czerner 				 "mounting with \"discard\" option, but "
422279add3a3SLukas Czerner 				 "the device does not support discard");
422379add3a3SLukas Czerner 	}
422479add3a3SLukas Czerner 
4225e294a537STheodore Ts'o 	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
4226d4c402d9SCurt Wohlgemuth 		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
42275aee0f8aSTheodore Ts'o 			 "Opts: %.*s%s%s", descr,
42285aee0f8aSTheodore Ts'o 			 (int) sizeof(sbi->s_es->s_mount_opts),
42295aee0f8aSTheodore Ts'o 			 sbi->s_es->s_mount_opts,
42308b67f04aSTheodore Ts'o 			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
4231ac27a0ecSDave Kleikamp 
423266e61a9eSTheodore Ts'o 	if (es->s_error_count)
423366e61a9eSTheodore Ts'o 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
4234ac27a0ecSDave Kleikamp 
4235efbed4dcSTheodore Ts'o 	/* Enable message ratelimiting. Default is 10 messages per 5 secs. */
4236efbed4dcSTheodore Ts'o 	ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
4237efbed4dcSTheodore Ts'o 	ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
4238efbed4dcSTheodore Ts'o 	ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
4239efbed4dcSTheodore Ts'o 
4240d4c402d9SCurt Wohlgemuth 	kfree(orig_data);
4241ac27a0ecSDave Kleikamp 	return 0;
4242ac27a0ecSDave Kleikamp 
4243617ba13bSMingming Cao cantfind_ext4:
4244ac27a0ecSDave Kleikamp 	if (!silent)
4245b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
4246ac27a0ecSDave Kleikamp 	goto failed_mount;
4247ac27a0ecSDave Kleikamp 
424872ba7450STheodore Ts'o #ifdef CONFIG_QUOTA
424972ba7450STheodore Ts'o failed_mount8:
4250ebd173beSTheodore Ts'o 	ext4_unregister_sysfs(sb);
425172ba7450STheodore Ts'o #endif
4252dcf2d804STao Ma failed_mount7:
4253dcf2d804STao Ma 	ext4_unregister_li_request(sb);
4254dcf2d804STao Ma failed_mount6:
4255f9ae9cf5STheodore Ts'o 	ext4_mb_release(sb);
4256d5e03cbbSTheodore Ts'o 	if (sbi->s_flex_groups)
4257b93b41d4SAl Viro 		kvfree(sbi->s_flex_groups);
4258d5e03cbbSTheodore Ts'o 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
4259d5e03cbbSTheodore Ts'o 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
4260d5e03cbbSTheodore Ts'o 	percpu_counter_destroy(&sbi->s_dirs_counter);
4261d5e03cbbSTheodore Ts'o 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
426200764937SAzat Khuzhin failed_mount5:
4263f9ae9cf5STheodore Ts'o 	ext4_ext_release(sb);
4264f9ae9cf5STheodore Ts'o 	ext4_release_system_zone(sb);
4265f9ae9cf5STheodore Ts'o failed_mount4a:
426694bf608aSAl Viro 	dput(sb->s_root);
426732a9bb57SManish Katiyar 	sb->s_root = NULL;
426894bf608aSAl Viro failed_mount4:
4269b31e1552SEric Sandeen 	ext4_msg(sb, KERN_ERR, "mount failed");
42702e8fa54eSJan Kara 	if (EXT4_SB(sb)->rsv_conversion_wq)
42712e8fa54eSJan Kara 		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
42724c0425ffSMingming Cao failed_mount_wq:
427382939d79SJan Kara 	if (sbi->s_mb_cache) {
427482939d79SJan Kara 		ext4_xattr_destroy_cache(sbi->s_mb_cache);
427582939d79SJan Kara 		sbi->s_mb_cache = NULL;
427682939d79SJan Kara 	}
42770390131bSFrank Mayhar 	if (sbi->s_journal) {
4278dab291afSMingming Cao 		jbd2_journal_destroy(sbi->s_journal);
427947b4a50bSJan Kara 		sbi->s_journal = NULL;
42800390131bSFrank Mayhar 	}
428150460fe8SDarrick J. Wong failed_mount3a:
4282d3922a77SZheng Liu 	ext4_es_unregister_shrinker(sbi);
4283eb68d0e2SZheng Liu failed_mount3:
42849105bb14SAl Viro 	del_timer_sync(&sbi->s_err_report);
4285c5e06d10SJohann Lombardi 	if (sbi->s_mmp_tsk)
4286c5e06d10SJohann Lombardi 		kthread_stop(sbi->s_mmp_tsk);
4287ac27a0ecSDave Kleikamp failed_mount2:
4288ac27a0ecSDave Kleikamp 	for (i = 0; i < db_count; i++)
4289ac27a0ecSDave Kleikamp 		brelse(sbi->s_group_desc[i]);
4290b93b41d4SAl Viro 	kvfree(sbi->s_group_desc);
4291ac27a0ecSDave Kleikamp failed_mount:
42920441984aSDarrick J. Wong 	if (sbi->s_chksum_driver)
42930441984aSDarrick J. Wong 		crypto_free_shash(sbi->s_chksum_driver);
4294ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
4295a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
4296ac27a0ecSDave Kleikamp 		kfree(sbi->s_qf_names[i]);
4297ac27a0ecSDave Kleikamp #endif
4298617ba13bSMingming Cao 	ext4_blkdev_remove(sbi);
4299ac27a0ecSDave Kleikamp 	brelse(bh);
4300ac27a0ecSDave Kleikamp out_fail:
4301ac27a0ecSDave Kleikamp 	sb->s_fs_info = NULL;
4302f6830165SManish Katiyar 	kfree(sbi->s_blockgroup_lock);
43035aee0f8aSTheodore Ts'o out_free_base:
4304ac27a0ecSDave Kleikamp 	kfree(sbi);
4305d4c402d9SCurt Wohlgemuth 	kfree(orig_data);
430607aa2ea1SLukas Czerner 	return err ? err : ret;
4307ac27a0ecSDave Kleikamp }
4308ac27a0ecSDave Kleikamp 
4309ac27a0ecSDave Kleikamp /*
4310ac27a0ecSDave Kleikamp  * Setup any per-fs journal parameters now.  We'll do this both on
4311ac27a0ecSDave Kleikamp  * initial mount, once the journal has been initialised but before we've
4312ac27a0ecSDave Kleikamp  * done any recovery; and again on any subsequent remount.
4313ac27a0ecSDave Kleikamp  */
4314617ba13bSMingming Cao static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
4315ac27a0ecSDave Kleikamp {
4316617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4317ac27a0ecSDave Kleikamp 
4318ac27a0ecSDave Kleikamp 	journal->j_commit_interval = sbi->s_commit_interval;
431930773840STheodore Ts'o 	journal->j_min_batch_time = sbi->s_min_batch_time;
432030773840STheodore Ts'o 	journal->j_max_batch_time = sbi->s_max_batch_time;
4321ac27a0ecSDave Kleikamp 
4322a931da6aSTheodore Ts'o 	write_lock(&journal->j_state_lock);
4323ac27a0ecSDave Kleikamp 	if (test_opt(sb, BARRIER))
4324dab291afSMingming Cao 		journal->j_flags |= JBD2_BARRIER;
4325ac27a0ecSDave Kleikamp 	else
4326dab291afSMingming Cao 		journal->j_flags &= ~JBD2_BARRIER;
43275bf5683aSHidehiro Kawai 	if (test_opt(sb, DATA_ERR_ABORT))
43285bf5683aSHidehiro Kawai 		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
43295bf5683aSHidehiro Kawai 	else
43305bf5683aSHidehiro Kawai 		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
4331a931da6aSTheodore Ts'o 	write_unlock(&journal->j_state_lock);
4332ac27a0ecSDave Kleikamp }
4333ac27a0ecSDave Kleikamp 
4334c6cb7e77SEric Whitney static struct inode *ext4_get_journal_inode(struct super_block *sb,
4335ac27a0ecSDave Kleikamp 					     unsigned int journal_inum)
4336ac27a0ecSDave Kleikamp {
4337ac27a0ecSDave Kleikamp 	struct inode *journal_inode;
4338ac27a0ecSDave Kleikamp 
4339c6cb7e77SEric Whitney 	/*
4340c6cb7e77SEric Whitney 	 * Test for the existence of a valid inode on disk.  Bad things
4341c6cb7e77SEric Whitney 	 * happen if we iget() an unused inode, as the subsequent iput()
4342c6cb7e77SEric Whitney 	 * will try to delete it.
4343c6cb7e77SEric Whitney 	 */
43441d1fe1eeSDavid Howells 	journal_inode = ext4_iget(sb, journal_inum);
43451d1fe1eeSDavid Howells 	if (IS_ERR(journal_inode)) {
4346b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "no journal found");
4347ac27a0ecSDave Kleikamp 		return NULL;
4348ac27a0ecSDave Kleikamp 	}
4349ac27a0ecSDave Kleikamp 	if (!journal_inode->i_nlink) {
4350ac27a0ecSDave Kleikamp 		make_bad_inode(journal_inode);
4351ac27a0ecSDave Kleikamp 		iput(journal_inode);
4352b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
4353ac27a0ecSDave Kleikamp 		return NULL;
4354ac27a0ecSDave Kleikamp 	}
4355ac27a0ecSDave Kleikamp 
4356e5f8eab8STheodore Ts'o 	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
4357ac27a0ecSDave Kleikamp 		  journal_inode, journal_inode->i_size);
43581d1fe1eeSDavid Howells 	if (!S_ISREG(journal_inode->i_mode)) {
4359b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "invalid journal inode");
4360ac27a0ecSDave Kleikamp 		iput(journal_inode);
4361ac27a0ecSDave Kleikamp 		return NULL;
4362ac27a0ecSDave Kleikamp 	}
4363c6cb7e77SEric Whitney 	return journal_inode;
4364c6cb7e77SEric Whitney }
4365c6cb7e77SEric Whitney 
4366c6cb7e77SEric Whitney static journal_t *ext4_get_journal(struct super_block *sb,
4367c6cb7e77SEric Whitney 				   unsigned int journal_inum)
4368c6cb7e77SEric Whitney {
4369c6cb7e77SEric Whitney 	struct inode *journal_inode;
4370c6cb7e77SEric Whitney 	journal_t *journal;
4371c6cb7e77SEric Whitney 
4372c6cb7e77SEric Whitney 	BUG_ON(!ext4_has_feature_journal(sb));
4373c6cb7e77SEric Whitney 
4374c6cb7e77SEric Whitney 	journal_inode = ext4_get_journal_inode(sb, journal_inum);
4375c6cb7e77SEric Whitney 	if (!journal_inode)
4376c6cb7e77SEric Whitney 		return NULL;
4377ac27a0ecSDave Kleikamp 
4378dab291afSMingming Cao 	journal = jbd2_journal_init_inode(journal_inode);
4379ac27a0ecSDave Kleikamp 	if (!journal) {
4380b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
4381ac27a0ecSDave Kleikamp 		iput(journal_inode);
4382ac27a0ecSDave Kleikamp 		return NULL;
4383ac27a0ecSDave Kleikamp 	}
4384ac27a0ecSDave Kleikamp 	journal->j_private = sb;
4385617ba13bSMingming Cao 	ext4_init_journal_params(sb, journal);
4386ac27a0ecSDave Kleikamp 	return journal;
4387ac27a0ecSDave Kleikamp }
4388ac27a0ecSDave Kleikamp 
4389617ba13bSMingming Cao static journal_t *ext4_get_dev_journal(struct super_block *sb,
4390ac27a0ecSDave Kleikamp 				       dev_t j_dev)
4391ac27a0ecSDave Kleikamp {
4392ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
4393ac27a0ecSDave Kleikamp 	journal_t *journal;
4394617ba13bSMingming Cao 	ext4_fsblk_t start;
4395617ba13bSMingming Cao 	ext4_fsblk_t len;
4396ac27a0ecSDave Kleikamp 	int hblock, blocksize;
4397617ba13bSMingming Cao 	ext4_fsblk_t sb_block;
4398ac27a0ecSDave Kleikamp 	unsigned long offset;
4399617ba13bSMingming Cao 	struct ext4_super_block *es;
4400ac27a0ecSDave Kleikamp 	struct block_device *bdev;
4401ac27a0ecSDave Kleikamp 
4402e2b911c5SDarrick J. Wong 	BUG_ON(!ext4_has_feature_journal(sb));
44030390131bSFrank Mayhar 
4404b31e1552SEric Sandeen 	bdev = ext4_blkdev_get(j_dev, sb);
4405ac27a0ecSDave Kleikamp 	if (bdev == NULL)
4406ac27a0ecSDave Kleikamp 		return NULL;
4407ac27a0ecSDave Kleikamp 
4408ac27a0ecSDave Kleikamp 	blocksize = sb->s_blocksize;
4409e1defc4fSMartin K. Petersen 	hblock = bdev_logical_block_size(bdev);
4410ac27a0ecSDave Kleikamp 	if (blocksize < hblock) {
4411b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR,
4412b31e1552SEric Sandeen 			"blocksize too small for journal device");
4413ac27a0ecSDave Kleikamp 		goto out_bdev;
4414ac27a0ecSDave Kleikamp 	}
4415ac27a0ecSDave Kleikamp 
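	/*
	 * The external journal's ext4 superblock lives EXT4_MIN_BLOCK_SIZE
	 * (1024) bytes into the device; work out which block and offset
	 * that lands on for this block size.
	 */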
4416617ba13bSMingming Cao 	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
4417617ba13bSMingming Cao 	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
4418ac27a0ecSDave Kleikamp 	set_blocksize(bdev, blocksize);
4419ac27a0ecSDave Kleikamp 	if (!(bh = __bread(bdev, sb_block, blocksize))) {
4420b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
4421b31e1552SEric Sandeen 		       "external journal");
4422ac27a0ecSDave Kleikamp 		goto out_bdev;
4423ac27a0ecSDave Kleikamp 	}
4424ac27a0ecSDave Kleikamp 
44252716b802STheodore Ts'o 	es = (struct ext4_super_block *) (bh->b_data + offset);
4426617ba13bSMingming Cao 	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
4427ac27a0ecSDave Kleikamp 	    !(le32_to_cpu(es->s_feature_incompat) &
4428617ba13bSMingming Cao 	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
4429b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "external journal has "
4430b31e1552SEric Sandeen 					"bad superblock");
4431ac27a0ecSDave Kleikamp 		brelse(bh);
4432ac27a0ecSDave Kleikamp 		goto out_bdev;
4433ac27a0ecSDave Kleikamp 	}
4434ac27a0ecSDave Kleikamp 
4435df4763beSDarrick J. Wong 	if ((le32_to_cpu(es->s_feature_ro_compat) &
4436df4763beSDarrick J. Wong 	     EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
4437df4763beSDarrick J. Wong 	    es->s_checksum != ext4_superblock_csum(sb, es)) {
4438df4763beSDarrick J. Wong 		ext4_msg(sb, KERN_ERR, "external journal has "
4439df4763beSDarrick J. Wong 				       "corrupt superblock");
4440df4763beSDarrick J. Wong 		brelse(bh);
4441df4763beSDarrick J. Wong 		goto out_bdev;
4442df4763beSDarrick J. Wong 	}
4443df4763beSDarrick J. Wong 
4444617ba13bSMingming Cao 	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
4445b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
4446ac27a0ecSDave Kleikamp 		brelse(bh);
4447ac27a0ecSDave Kleikamp 		goto out_bdev;
4448ac27a0ecSDave Kleikamp 	}
4449ac27a0ecSDave Kleikamp 
4450bd81d8eeSLaurent Vivier 	len = ext4_blocks_count(es);
4451ac27a0ecSDave Kleikamp 	start = sb_block + 1;
4452ac27a0ecSDave Kleikamp 	brelse(bh);	/* we're done with the superblock */
4453ac27a0ecSDave Kleikamp 
4454dab291afSMingming Cao 	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
4455ac27a0ecSDave Kleikamp 					start, len, blocksize);
4456ac27a0ecSDave Kleikamp 	if (!journal) {
4457b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "failed to create device journal");
4458ac27a0ecSDave Kleikamp 		goto out_bdev;
4459ac27a0ecSDave Kleikamp 	}
4460ac27a0ecSDave Kleikamp 	journal->j_private = sb;
4461dfec8a14SMike Christie 	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
4462ac27a0ecSDave Kleikamp 	wait_on_buffer(journal->j_sb_buffer);
4463ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(journal->j_sb_buffer)) {
4464b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
4465ac27a0ecSDave Kleikamp 		goto out_journal;
4466ac27a0ecSDave Kleikamp 	}
4467ac27a0ecSDave Kleikamp 	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
4468b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "External journal has more than one "
4469b31e1552SEric Sandeen 					"user (unsupported) - %d",
4470ac27a0ecSDave Kleikamp 			be32_to_cpu(journal->j_superblock->s_nr_users));
4471ac27a0ecSDave Kleikamp 		goto out_journal;
4472ac27a0ecSDave Kleikamp 	}
4473617ba13bSMingming Cao 	EXT4_SB(sb)->journal_bdev = bdev;
4474617ba13bSMingming Cao 	ext4_init_journal_params(sb, journal);
4475ac27a0ecSDave Kleikamp 	return journal;
44760b8e58a1SAndreas Dilger 
4477ac27a0ecSDave Kleikamp out_journal:
4478dab291afSMingming Cao 	jbd2_journal_destroy(journal);
4479ac27a0ecSDave Kleikamp out_bdev:
4480617ba13bSMingming Cao 	ext4_blkdev_put(bdev);
4481ac27a0ecSDave Kleikamp 	return NULL;
4482ac27a0ecSDave Kleikamp }
4483ac27a0ecSDave Kleikamp 
4484617ba13bSMingming Cao static int ext4_load_journal(struct super_block *sb,
4485617ba13bSMingming Cao 			     struct ext4_super_block *es,
4486ac27a0ecSDave Kleikamp 			     unsigned long journal_devnum)
4487ac27a0ecSDave Kleikamp {
4488ac27a0ecSDave Kleikamp 	journal_t *journal;
4489ac27a0ecSDave Kleikamp 	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
4490ac27a0ecSDave Kleikamp 	dev_t journal_dev;
4491ac27a0ecSDave Kleikamp 	int err = 0;
4492ac27a0ecSDave Kleikamp 	int really_read_only;
4493ac27a0ecSDave Kleikamp 
4494e2b911c5SDarrick J. Wong 	BUG_ON(!ext4_has_feature_journal(sb));
44950390131bSFrank Mayhar 
4496ac27a0ecSDave Kleikamp 	if (journal_devnum &&
4497ac27a0ecSDave Kleikamp 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
4498b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
4499b31e1552SEric Sandeen 			"numbers have changed");
4500ac27a0ecSDave Kleikamp 		journal_dev = new_decode_dev(journal_devnum);
4501ac27a0ecSDave Kleikamp 	} else
4502ac27a0ecSDave Kleikamp 		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
4503ac27a0ecSDave Kleikamp 
4504ac27a0ecSDave Kleikamp 	really_read_only = bdev_read_only(sb->s_bdev);
4505ac27a0ecSDave Kleikamp 
4506ac27a0ecSDave Kleikamp 	/*
4507ac27a0ecSDave Kleikamp 	 * Are we loading a blank journal or performing recovery after a
4508ac27a0ecSDave Kleikamp 	 * crash?  For recovery, we need to check in advance whether we
4509ac27a0ecSDave Kleikamp 	 * can get read-write access to the device.
4510ac27a0ecSDave Kleikamp 	 */
4511e2b911c5SDarrick J. Wong 	if (ext4_has_feature_journal_needs_recovery(sb)) {
4512ac27a0ecSDave Kleikamp 		if (sb->s_flags & MS_RDONLY) {
4513b31e1552SEric Sandeen 			ext4_msg(sb, KERN_INFO, "INFO: recovery "
4514b31e1552SEric Sandeen 					"required on readonly filesystem");
4515ac27a0ecSDave Kleikamp 			if (really_read_only) {
4516b31e1552SEric Sandeen 				ext4_msg(sb, KERN_ERR, "write access "
4517b31e1552SEric Sandeen 					"unavailable, cannot proceed");
4518ac27a0ecSDave Kleikamp 				return -EROFS;
4519ac27a0ecSDave Kleikamp 			}
4520b31e1552SEric Sandeen 			ext4_msg(sb, KERN_INFO, "write access will "
4521b31e1552SEric Sandeen 			       "be enabled during recovery");
4522ac27a0ecSDave Kleikamp 		}
4523ac27a0ecSDave Kleikamp 	}
4524ac27a0ecSDave Kleikamp 
4525ac27a0ecSDave Kleikamp 	if (journal_inum && journal_dev) {
4526b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
4527b31e1552SEric Sandeen 		       "and inode journals!");
4528ac27a0ecSDave Kleikamp 		return -EINVAL;
4529ac27a0ecSDave Kleikamp 	}
4530ac27a0ecSDave Kleikamp 
4531ac27a0ecSDave Kleikamp 	if (journal_inum) {
4532617ba13bSMingming Cao 		if (!(journal = ext4_get_journal(sb, journal_inum)))
4533ac27a0ecSDave Kleikamp 			return -EINVAL;
4534ac27a0ecSDave Kleikamp 	} else {
4535617ba13bSMingming Cao 		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
4536ac27a0ecSDave Kleikamp 			return -EINVAL;
4537ac27a0ecSDave Kleikamp 	}
4538ac27a0ecSDave Kleikamp 
453990576c0bSTheodore Ts'o 	if (!(journal->j_flags & JBD2_BARRIER))
4540b31e1552SEric Sandeen 		ext4_msg(sb, KERN_INFO, "barriers disabled");
45414776004fSTheodore Ts'o 
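	/*
	 * If no recovery is pending, wipe any stale journal contents before
	 * loading.  The error info stashed in the superblock is saved and
	 * restored around jbd2_journal_load(), as journal replay may
	 * overwrite the on-disk superblock.
	 */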
4542e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_journal_needs_recovery(sb))
4543dab291afSMingming Cao 		err = jbd2_journal_wipe(journal, !really_read_only);
45441c13d5c0STheodore Ts'o 	if (!err) {
45451c13d5c0STheodore Ts'o 		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
45461c13d5c0STheodore Ts'o 		if (save)
45471c13d5c0STheodore Ts'o 			memcpy(save, ((char *) es) +
45481c13d5c0STheodore Ts'o 			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
4549dab291afSMingming Cao 		err = jbd2_journal_load(journal);
45501c13d5c0STheodore Ts'o 		if (save)
45511c13d5c0STheodore Ts'o 			memcpy(((char *) es) + EXT4_S_ERR_START,
45521c13d5c0STheodore Ts'o 			       save, EXT4_S_ERR_LEN);
45531c13d5c0STheodore Ts'o 		kfree(save);
45541c13d5c0STheodore Ts'o 	}
4555ac27a0ecSDave Kleikamp 
4556ac27a0ecSDave Kleikamp 	if (err) {
4557b31e1552SEric Sandeen 		ext4_msg(sb, KERN_ERR, "error loading journal");
4558dab291afSMingming Cao 		jbd2_journal_destroy(journal);
4559ac27a0ecSDave Kleikamp 		return err;
4560ac27a0ecSDave Kleikamp 	}
4561ac27a0ecSDave Kleikamp 
4562617ba13bSMingming Cao 	EXT4_SB(sb)->s_journal = journal;
4563617ba13bSMingming Cao 	ext4_clear_journal_err(sb, es);
4564ac27a0ecSDave Kleikamp 
4565c41303ceSMaciej Żenczykowski 	if (!really_read_only && journal_devnum &&
4566ac27a0ecSDave Kleikamp 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
4567ac27a0ecSDave Kleikamp 		es->s_journal_dev = cpu_to_le32(journal_devnum);
4568ac27a0ecSDave Kleikamp 
4569ac27a0ecSDave Kleikamp 		/* Make sure we flush the recovery flag to disk. */
4570e2d67052STheodore Ts'o 		ext4_commit_super(sb, 1);
4571ac27a0ecSDave Kleikamp 	}
4572ac27a0ecSDave Kleikamp 
4573ac27a0ecSDave Kleikamp 	return 0;
4574ac27a0ecSDave Kleikamp }
4575ac27a0ecSDave Kleikamp 
4576e2d67052STheodore Ts'o static int ext4_commit_super(struct super_block *sb, int sync)
4577ac27a0ecSDave Kleikamp {
4578e2d67052STheodore Ts'o 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
4579617ba13bSMingming Cao 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
4580c4be0c1dSTakashi Sato 	int error = 0;
4581ac27a0ecSDave Kleikamp 
4582bdfe0cbdSTheodore Ts'o 	if (!sbh || block_device_ejected(sb))
4583c4be0c1dSTakashi Sato 		return error;
458471290b36STheodore Ts'o 	/*
458571290b36STheodore Ts'o 	 * If the file system is mounted read-only, don't update the
458671290b36STheodore Ts'o 	 * superblock write time.  This avoids updating the superblock
458771290b36STheodore Ts'o 	 * write time when we are mounting the root file system
458871290b36STheodore Ts'o 	 * read/only but we need to replay the journal; at that point,
458971290b36STheodore Ts'o 	 * for people who are east of GMT and who make their clock
459071290b36STheodore Ts'o 	 * tick in localtime for Windows bug-for-bug compatibility,
459171290b36STheodore Ts'o 	 * the clock is set in the future, and this will cause e2fsck
459271290b36STheodore Ts'o 	 * to complain and force a full file system check.
459371290b36STheodore Ts'o 	 */
459471290b36STheodore Ts'o 	if (!(sb->s_flags & MS_RDONLY))
4595ac27a0ecSDave Kleikamp 		es->s_wtime = cpu_to_le32(get_seconds());
4596f613dfcbSTheodore Ts'o 	if (sb->s_bdev->bd_part)
4597afc32f7eSTheodore Ts'o 		es->s_kbytes_written =
4598afc32f7eSTheodore Ts'o 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
4599afc32f7eSTheodore Ts'o 			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
4600afc32f7eSTheodore Ts'o 			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
4601f613dfcbSTheodore Ts'o 	else
4602f613dfcbSTheodore Ts'o 		es->s_kbytes_written =
4603f613dfcbSTheodore Ts'o 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
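	/*
	 * Fold the in-memory percpu counters back into the on-disk free
	 * block and inode counts, if they have been set up.
	 */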
4604d5e03cbbSTheodore Ts'o 	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
460557042651STheodore Ts'o 		ext4_free_blocks_count_set(es,
460657042651STheodore Ts'o 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
460757042651STheodore Ts'o 				&EXT4_SB(sb)->s_freeclusters_counter)));
4608d5e03cbbSTheodore Ts'o 	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
46097f93cff9STheodore Ts'o 		es->s_free_inodes_count =
46107f93cff9STheodore Ts'o 			cpu_to_le32(percpu_counter_sum_positive(
46115d1b1b3fSAneesh Kumar K.V 				&EXT4_SB(sb)->s_freeinodes_counter));
4612ac27a0ecSDave Kleikamp 	BUFFER_TRACE(sbh, "marking dirty");
461306db49e6STheodore Ts'o 	ext4_superblock_csum_set(sb);
46141566a48aSTheodore Ts'o 	if (sync)
46154743f839SPranay Kr. Srivastava 		lock_buffer(sbh);
46164743f839SPranay Kr. Srivastava 	if (buffer_write_io_error(sbh)) {
46174743f839SPranay Kr. Srivastava 		/*
46184743f839SPranay Kr. Srivastava 		 * Oh, dear.  A previous attempt to write the
46194743f839SPranay Kr. Srivastava 		 * superblock failed.  This could happen because the
46204743f839SPranay Kr. Srivastava 		 * USB device was yanked out.  Or it could happen to
46214743f839SPranay Kr. Srivastava 		 * be a transient write error and maybe the block will
46224743f839SPranay Kr. Srivastava 		 * be remapped.  Nothing we can do but to retry the
46234743f839SPranay Kr. Srivastava 		 * write and hope for the best.
46244743f839SPranay Kr. Srivastava 		 */
46254743f839SPranay Kr. Srivastava 		ext4_msg(sb, KERN_ERR, "previous I/O error to "
46264743f839SPranay Kr. Srivastava 		       "superblock detected");
46274743f839SPranay Kr. Srivastava 		clear_buffer_write_io_error(sbh);
46284743f839SPranay Kr. Srivastava 		set_buffer_uptodate(sbh);
46294743f839SPranay Kr. Srivastava 	}
4630ac27a0ecSDave Kleikamp 	mark_buffer_dirty(sbh);
4631914258bfSTheodore Ts'o 	if (sync) {
46321566a48aSTheodore Ts'o 		unlock_buffer(sbh);
4633564bc402SDaeho Jeong 		error = __sync_dirty_buffer(sbh,
463470fd7614SChristoph Hellwig 			test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
4635c4be0c1dSTakashi Sato 		if (error)
4636c4be0c1dSTakashi Sato 			return error;
4637c4be0c1dSTakashi Sato 
4638c4be0c1dSTakashi Sato 		error = buffer_write_io_error(sbh);
4639c4be0c1dSTakashi Sato 		if (error) {
4640b31e1552SEric Sandeen 			ext4_msg(sb, KERN_ERR, "I/O error while writing "
4641b31e1552SEric Sandeen 			       "superblock");
4642914258bfSTheodore Ts'o 			clear_buffer_write_io_error(sbh);
4643914258bfSTheodore Ts'o 			set_buffer_uptodate(sbh);
4644914258bfSTheodore Ts'o 		}
4645914258bfSTheodore Ts'o 	}
4646c4be0c1dSTakashi Sato 	return error;
4647ac27a0ecSDave Kleikamp }
4648ac27a0ecSDave Kleikamp 
4649ac27a0ecSDave Kleikamp /*
4650ac27a0ecSDave Kleikamp  * Have we just finished recovery?  If so, and if we are mounting (or
4651ac27a0ecSDave Kleikamp  * remounting) the filesystem readonly, then we will end up with a
4652ac27a0ecSDave Kleikamp  * consistent fs on disk.  Record that fact.
4653ac27a0ecSDave Kleikamp  */
4654617ba13bSMingming Cao static void ext4_mark_recovery_complete(struct super_block *sb,
4655617ba13bSMingming Cao 					struct ext4_super_block *es)
4656ac27a0ecSDave Kleikamp {
4657617ba13bSMingming Cao 	journal_t *journal = EXT4_SB(sb)->s_journal;
4658ac27a0ecSDave Kleikamp 
4659e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_journal(sb)) {
46600390131bSFrank Mayhar 		BUG_ON(journal != NULL);
46610390131bSFrank Mayhar 		return;
46620390131bSFrank Mayhar 	}
4663dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
46647ffe1ea8SHidehiro Kawai 	if (jbd2_journal_flush(journal) < 0)
46657ffe1ea8SHidehiro Kawai 		goto out;
46667ffe1ea8SHidehiro Kawai 
4667e2b911c5SDarrick J. Wong 	if (ext4_has_feature_journal_needs_recovery(sb) &&
4668ac27a0ecSDave Kleikamp 	    sb->s_flags & MS_RDONLY) {
4669e2b911c5SDarrick J. Wong 		ext4_clear_feature_journal_needs_recovery(sb);
4670e2d67052STheodore Ts'o 		ext4_commit_super(sb, 1);
4671ac27a0ecSDave Kleikamp 	}
46727ffe1ea8SHidehiro Kawai 
46737ffe1ea8SHidehiro Kawai out:
4674dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
4675ac27a0ecSDave Kleikamp }
4676ac27a0ecSDave Kleikamp 
4677ac27a0ecSDave Kleikamp /*
4678ac27a0ecSDave Kleikamp  * If we are mounting (or read-write remounting) a filesystem whose journal
4679ac27a0ecSDave Kleikamp  * has recorded an error from a previous lifetime, move that error to the
4680ac27a0ecSDave Kleikamp  * main filesystem now.
4681ac27a0ecSDave Kleikamp  */
4682617ba13bSMingming Cao static void ext4_clear_journal_err(struct super_block *sb,
4683617ba13bSMingming Cao 				   struct ext4_super_block *es)
4684ac27a0ecSDave Kleikamp {
4685ac27a0ecSDave Kleikamp 	journal_t *journal;
4686ac27a0ecSDave Kleikamp 	int j_errno;
4687ac27a0ecSDave Kleikamp 	const char *errstr;
4688ac27a0ecSDave Kleikamp 
4689e2b911c5SDarrick J. Wong 	BUG_ON(!ext4_has_feature_journal(sb));
46900390131bSFrank Mayhar 
4691617ba13bSMingming Cao 	journal = EXT4_SB(sb)->s_journal;
4692ac27a0ecSDave Kleikamp 
4693ac27a0ecSDave Kleikamp 	/*
4694ac27a0ecSDave Kleikamp 	 * Now check for any error status which may have been recorded in the
4695617ba13bSMingming Cao 	 * journal by a prior ext4_error() or ext4_abort()
4696ac27a0ecSDave Kleikamp 	 */
4697ac27a0ecSDave Kleikamp 
4698dab291afSMingming Cao 	j_errno = jbd2_journal_errno(journal);
4699ac27a0ecSDave Kleikamp 	if (j_errno) {
4700ac27a0ecSDave Kleikamp 		char nbuf[16];
4701ac27a0ecSDave Kleikamp 
4702617ba13bSMingming Cao 		errstr = ext4_decode_error(sb, j_errno, nbuf);
470312062dddSEric Sandeen 		ext4_warning(sb, "Filesystem error recorded "
4704ac27a0ecSDave Kleikamp 			     "from previous mount: %s", errstr);
470512062dddSEric Sandeen 		ext4_warning(sb, "Marking fs in need of filesystem check.");
4706ac27a0ecSDave Kleikamp 
4707617ba13bSMingming Cao 		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
4708617ba13bSMingming Cao 		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
4709e2d67052STheodore Ts'o 		ext4_commit_super(sb, 1);
4710ac27a0ecSDave Kleikamp 
4711dab291afSMingming Cao 		jbd2_journal_clear_err(journal);
4712d796c52eSTheodore Ts'o 		jbd2_journal_update_sb_errno(journal);
4713ac27a0ecSDave Kleikamp 	}
4714ac27a0ecSDave Kleikamp }
4715ac27a0ecSDave Kleikamp 
4716ac27a0ecSDave Kleikamp /*
4717ac27a0ecSDave Kleikamp  * Force the running and committing transactions to commit,
4718ac27a0ecSDave Kleikamp  * and wait on the commit.
4719ac27a0ecSDave Kleikamp  */
4720617ba13bSMingming Cao int ext4_force_commit(struct super_block *sb)
4721ac27a0ecSDave Kleikamp {
4722ac27a0ecSDave Kleikamp 	journal_t *journal;
4723ac27a0ecSDave Kleikamp 
4724ac27a0ecSDave Kleikamp 	if (sb->s_flags & MS_RDONLY)
4725ac27a0ecSDave Kleikamp 		return 0;
4726ac27a0ecSDave Kleikamp 
4727617ba13bSMingming Cao 	journal = EXT4_SB(sb)->s_journal;
4728b1deefc9SGuo Chao 	return ext4_journal_force_commit(journal);
4729ac27a0ecSDave Kleikamp }
4730ac27a0ecSDave Kleikamp 
4731617ba13bSMingming Cao static int ext4_sync_fs(struct super_block *sb, int wait)
4732ac27a0ecSDave Kleikamp {
473314ce0cb4STheodore Ts'o 	int ret = 0;
47349eddacf9SJan Kara 	tid_t target;
473506a407f1SDmitry Monakhov 	bool needs_barrier = false;
47368d5d02e6SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4737ac27a0ecSDave Kleikamp 
4738*0db1ff22STheodore Ts'o 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
4739*0db1ff22STheodore Ts'o 		return 0;
4740*0db1ff22STheodore Ts'o 
47419bffad1eSTheodore Ts'o 	trace_ext4_sync_fs(sb, wait);
47422e8fa54eSJan Kara 	flush_workqueue(sbi->rsv_conversion_wq);
4743a1177825SJan Kara 	/*
4744a1177825SJan Kara 	 * Writeback quota in non-journalled quota case - journalled quota has
4745a1177825SJan Kara 	 * no dirty dquots
4746a1177825SJan Kara 	 */
4747a1177825SJan Kara 	dquot_writeback_dquots(sb, -1);
474806a407f1SDmitry Monakhov 	/*
474906a407f1SDmitry Monakhov 	 * Data writeback is possible w/o a journal transaction, so a barrier
475006a407f1SDmitry Monakhov 	 * must be sent at the end of the function. But we can skip it if
475106a407f1SDmitry Monakhov 	 * transaction_commit will do it for us.
475206a407f1SDmitry Monakhov 	 */
4753bda32530STheodore Ts'o 	if (sbi->s_journal) {
475406a407f1SDmitry Monakhov 		target = jbd2_get_latest_transaction(sbi->s_journal);
475506a407f1SDmitry Monakhov 		if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
475606a407f1SDmitry Monakhov 		    !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
475706a407f1SDmitry Monakhov 			needs_barrier = true;
475806a407f1SDmitry Monakhov 
47598d5d02e6SMingming Cao 		if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
4760ac27a0ecSDave Kleikamp 			if (wait)
4761bda32530STheodore Ts'o 				ret = jbd2_log_wait_commit(sbi->s_journal,
4762bda32530STheodore Ts'o 							   target);
47630390131bSFrank Mayhar 		}
4764bda32530STheodore Ts'o 	} else if (wait && test_opt(sb, BARRIER))
4765bda32530STheodore Ts'o 		needs_barrier = true;
476606a407f1SDmitry Monakhov 	if (needs_barrier) {
476706a407f1SDmitry Monakhov 		int err;
476806a407f1SDmitry Monakhov 		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
476906a407f1SDmitry Monakhov 		if (!ret)
477006a407f1SDmitry Monakhov 			ret = err;
477106a407f1SDmitry Monakhov 	}
477206a407f1SDmitry Monakhov 
477306a407f1SDmitry Monakhov 	return ret;
477406a407f1SDmitry Monakhov }
477506a407f1SDmitry Monakhov 
4776ac27a0ecSDave Kleikamp /*
4777ac27a0ecSDave Kleikamp  * LVM calls this function before a (read-only) snapshot is created.  This
4778ac27a0ecSDave Kleikamp  * gives us a chance to flush the journal completely and mark the fs clean.
4779be4f27d3SYongqiang Yang  *
4780be4f27d3SYongqiang Yang  * Note that this function alone cannot bring the filesystem to a clean
47818e8ad8a5SJan Kara  * state; it relies on the upper layer to stop all data & metadata
47828e8ad8a5SJan Kara  * modifications.
4783ac27a0ecSDave Kleikamp  */
4784c4be0c1dSTakashi Sato static int ext4_freeze(struct super_block *sb)
4785ac27a0ecSDave Kleikamp {
4786c4be0c1dSTakashi Sato 	int error = 0;
4787c4be0c1dSTakashi Sato 	journal_t *journal;
4788ac27a0ecSDave Kleikamp 
47899ca92389STheodore Ts'o 	if (sb->s_flags & MS_RDONLY)
47909ca92389STheodore Ts'o 		return 0;
47919ca92389STheodore Ts'o 
4792c4be0c1dSTakashi Sato 	journal = EXT4_SB(sb)->s_journal;
4793ac27a0ecSDave Kleikamp 
4794bb044576STheodore Ts'o 	if (journal) {
4795ac27a0ecSDave Kleikamp 		/* Now we set up the journal barrier. */
4796dab291afSMingming Cao 		jbd2_journal_lock_updates(journal);
47977ffe1ea8SHidehiro Kawai 
47987ffe1ea8SHidehiro Kawai 		/*
4799bb044576STheodore Ts'o 		 * Don't clear the needs_recovery flag if we failed to
4800bb044576STheodore Ts'o 		 * flush the journal.
48017ffe1ea8SHidehiro Kawai 		 */
4802c4be0c1dSTakashi Sato 		error = jbd2_journal_flush(journal);
48036b0310fbSEric Sandeen 		if (error < 0)
48046b0310fbSEric Sandeen 			goto out;
4805ac27a0ecSDave Kleikamp 
4806ac27a0ecSDave Kleikamp 		/* Journal blocked and flushed, clear needs_recovery flag. */
4807e2b911c5SDarrick J. Wong 		ext4_clear_feature_journal_needs_recovery(sb);
4808c642dc9eSEric Sandeen 	}
4809c642dc9eSEric Sandeen 
4810e2d67052STheodore Ts'o 	error = ext4_commit_super(sb, 1);
48116b0310fbSEric Sandeen out:
4812bb044576STheodore Ts'o 	if (journal)
48138e8ad8a5SJan Kara 		/* we rely on upper layer to stop further updates */
4814bb044576STheodore Ts'o 		jbd2_journal_unlock_updates(journal);
48156b0310fbSEric Sandeen 	return error;
4816ac27a0ecSDave Kleikamp }
4817ac27a0ecSDave Kleikamp 
4818ac27a0ecSDave Kleikamp /*
4819ac27a0ecSDave Kleikamp  * Called by LVM after the snapshot is done.  We need to reset the RECOVER
4820ac27a0ecSDave Kleikamp  * flag here, even though the filesystem is not technically dirty yet.
4821ac27a0ecSDave Kleikamp  */
4822c4be0c1dSTakashi Sato static int ext4_unfreeze(struct super_block *sb)
4823ac27a0ecSDave Kleikamp {
48249ca92389STheodore Ts'o 	if (sb->s_flags & MS_RDONLY)
48259ca92389STheodore Ts'o 		return 0;
48269ca92389STheodore Ts'o 
4827c642dc9eSEric Sandeen 	if (EXT4_SB(sb)->s_journal) {
48289ca92389STheodore Ts'o 		/* Reset the needs_recovery flag before the fs is unlocked. */
4829e2b911c5SDarrick J. Wong 		ext4_set_feature_journal_needs_recovery(sb);
4830c642dc9eSEric Sandeen 	}
4831c642dc9eSEric Sandeen 
4832e2d67052STheodore Ts'o 	ext4_commit_super(sb, 1);
4833c4be0c1dSTakashi Sato 	return 0;
4834ac27a0ecSDave Kleikamp }
4835ac27a0ecSDave Kleikamp 
4836673c6100STheodore Ts'o /*
4837673c6100STheodore Ts'o  * Structure to save mount options for ext4_remount's benefit
4838673c6100STheodore Ts'o  */
4839673c6100STheodore Ts'o struct ext4_mount_options {
4840673c6100STheodore Ts'o 	unsigned long s_mount_opt;
4841a2595b8aSTheodore Ts'o 	unsigned long s_mount_opt2;
484208cefc7aSEric W. Biederman 	kuid_t s_resuid;
484308cefc7aSEric W. Biederman 	kgid_t s_resgid;
4844673c6100STheodore Ts'o 	unsigned long s_commit_interval;
4845673c6100STheodore Ts'o 	u32 s_min_batch_time, s_max_batch_time;
4846673c6100STheodore Ts'o #ifdef CONFIG_QUOTA
4847673c6100STheodore Ts'o 	int s_jquota_fmt;
4848a2d4a646SJan Kara 	char *s_qf_names[EXT4_MAXQUOTAS];
4849673c6100STheodore Ts'o #endif
4850673c6100STheodore Ts'o };
4851673c6100STheodore Ts'o 
4852617ba13bSMingming Cao static int ext4_remount(struct super_block *sb, int *flags, char *data)
4853ac27a0ecSDave Kleikamp {
4854617ba13bSMingming Cao 	struct ext4_super_block *es;
4855617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4856ac27a0ecSDave Kleikamp 	unsigned long old_sb_flags;
4857617ba13bSMingming Cao 	struct ext4_mount_options old_opts;
4858c79d967dSChristoph Hellwig 	int enable_quota = 0;
48598a266467STheodore Ts'o 	ext4_group_t g;
4860b3881f74STheodore Ts'o 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
4861c5e06d10SJohann Lombardi 	int err = 0;
4862ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
486303dafb5fSChen Gang 	int i, j;
4864ac27a0ecSDave Kleikamp #endif
4865d4c402d9SCurt Wohlgemuth 	char *orig_data = kstrdup(data, GFP_KERNEL);
4866ac27a0ecSDave Kleikamp 
4867ac27a0ecSDave Kleikamp 	/* Store the original options */
4868ac27a0ecSDave Kleikamp 	old_sb_flags = sb->s_flags;
4869ac27a0ecSDave Kleikamp 	old_opts.s_mount_opt = sbi->s_mount_opt;
4870a2595b8aSTheodore Ts'o 	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
4871ac27a0ecSDave Kleikamp 	old_opts.s_resuid = sbi->s_resuid;
4872ac27a0ecSDave Kleikamp 	old_opts.s_resgid = sbi->s_resgid;
4873ac27a0ecSDave Kleikamp 	old_opts.s_commit_interval = sbi->s_commit_interval;
487430773840STheodore Ts'o 	old_opts.s_min_batch_time = sbi->s_min_batch_time;
487530773840STheodore Ts'o 	old_opts.s_max_batch_time = sbi->s_max_batch_time;
4876ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
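	/*
	 * Duplicate the journalled quota file names so they can be
	 * restored if the remount fails.
	 */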
4877ac27a0ecSDave Kleikamp 	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
4878a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
487903dafb5fSChen Gang 		if (sbi->s_qf_names[i]) {
488003dafb5fSChen Gang 			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
488103dafb5fSChen Gang 							 GFP_KERNEL);
488203dafb5fSChen Gang 			if (!old_opts.s_qf_names[i]) {
488303dafb5fSChen Gang 				for (j = 0; j < i; j++)
488403dafb5fSChen Gang 					kfree(old_opts.s_qf_names[j]);
48853e36a163SWei Yongjun 				kfree(orig_data);
488603dafb5fSChen Gang 				return -ENOMEM;
488703dafb5fSChen Gang 			}
488803dafb5fSChen Gang 		} else
488903dafb5fSChen Gang 			old_opts.s_qf_names[i] = NULL;
4890ac27a0ecSDave Kleikamp #endif
4891b3881f74STheodore Ts'o 	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
4892b3881f74STheodore Ts'o 		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
4893ac27a0ecSDave Kleikamp 
4894661aa520SEric Sandeen 	if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
4895ac27a0ecSDave Kleikamp 		err = -EINVAL;
4896ac27a0ecSDave Kleikamp 		goto restore_opts;
4897ac27a0ecSDave Kleikamp 	}
4898ac27a0ecSDave Kleikamp 
48996b992ff2SDarrick J. Wong 	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
49006b992ff2SDarrick J. Wong 	    test_opt(sb, JOURNAL_CHECKSUM)) {
49016b992ff2SDarrick J. Wong 		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
49022d5b86e0SEric Sandeen 			 "during remount not supported; ignoring");
49032d5b86e0SEric Sandeen 		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
4904c6d3d56dSDarrick J. Wong 	}
4905c6d3d56dSDarrick J. Wong 
49066ae6514bSPiotr Sarna 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
49076ae6514bSPiotr Sarna 		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
49086ae6514bSPiotr Sarna 			ext4_msg(sb, KERN_ERR, "can't mount with "
49096ae6514bSPiotr Sarna 				 "both data=journal and delalloc");
49106ae6514bSPiotr Sarna 			err = -EINVAL;
49116ae6514bSPiotr Sarna 			goto restore_opts;
49126ae6514bSPiotr Sarna 		}
49136ae6514bSPiotr Sarna 		if (test_opt(sb, DIOREAD_NOLOCK)) {
49146ae6514bSPiotr Sarna 			ext4_msg(sb, KERN_ERR, "can't mount with "
49156ae6514bSPiotr Sarna 				 "both data=journal and dioread_nolock");
49166ae6514bSPiotr Sarna 			err = -EINVAL;
49176ae6514bSPiotr Sarna 			goto restore_opts;
49186ae6514bSPiotr Sarna 		}
4919923ae0ffSRoss Zwisler 		if (test_opt(sb, DAX)) {
4920923ae0ffSRoss Zwisler 			ext4_msg(sb, KERN_ERR, "can't mount with "
4921923ae0ffSRoss Zwisler 				 "both data=journal and dax");
4922923ae0ffSRoss Zwisler 			err = -EINVAL;
4923923ae0ffSRoss Zwisler 			goto restore_opts;
4924923ae0ffSRoss Zwisler 		}
4925ab04df78SJan Kara 	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
4926ab04df78SJan Kara 		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
4927ab04df78SJan Kara 			ext4_msg(sb, KERN_ERR, "can't mount with "
4928ab04df78SJan Kara 				"journal_async_commit in data=ordered mode");
4929ab04df78SJan Kara 			err = -EINVAL;
4930ab04df78SJan Kara 			goto restore_opts;
4931ab04df78SJan Kara 		}
4932923ae0ffSRoss Zwisler 	}
4933923ae0ffSRoss Zwisler 
4934923ae0ffSRoss Zwisler 	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
4935923ae0ffSRoss Zwisler 		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
4936923ae0ffSRoss Zwisler 			"dax flag with busy inodes while remounting");
4937923ae0ffSRoss Zwisler 		sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
49386ae6514bSPiotr Sarna 	}
49396ae6514bSPiotr Sarna 
49404ab2f15bSTheodore Ts'o 	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
4941c67d859eSTheodore Ts'o 		ext4_abort(sb, "Abort forced by user");
4942ac27a0ecSDave Kleikamp 
4943ac27a0ecSDave Kleikamp 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
4944482a7425SDmitry Monakhov 		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
4945ac27a0ecSDave Kleikamp 
4946ac27a0ecSDave Kleikamp 	es = sbi->s_es;
4947ac27a0ecSDave Kleikamp 
4948b3881f74STheodore Ts'o 	if (sbi->s_journal) {
4949617ba13bSMingming Cao 		ext4_init_journal_params(sb, sbi->s_journal);
4950b3881f74STheodore Ts'o 		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
4951b3881f74STheodore Ts'o 	}
4952ac27a0ecSDave Kleikamp 
4953a2fd66d0STheodore Ts'o 	if (*flags & MS_LAZYTIME)
4954a2fd66d0STheodore Ts'o 		sb->s_flags |= MS_LAZYTIME;
4955a2fd66d0STheodore Ts'o 
4956661aa520SEric Sandeen 	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
49574ab2f15bSTheodore Ts'o 		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
4958ac27a0ecSDave Kleikamp 			err = -EROFS;
4959ac27a0ecSDave Kleikamp 			goto restore_opts;
4960ac27a0ecSDave Kleikamp 		}
4961ac27a0ecSDave Kleikamp 
4962ac27a0ecSDave Kleikamp 		if (*flags & MS_RDONLY) {
496338c03b34STheodore Ts'o 			err = sync_filesystem(sb);
496438c03b34STheodore Ts'o 			if (err < 0)
496538c03b34STheodore Ts'o 				goto restore_opts;
49660f0dd62fSChristoph Hellwig 			err = dquot_suspend(sb, -1);
49670f0dd62fSChristoph Hellwig 			if (err < 0)
4968c79d967dSChristoph Hellwig 				goto restore_opts;
4969c79d967dSChristoph Hellwig 
4970ac27a0ecSDave Kleikamp 			/*
4971ac27a0ecSDave Kleikamp 			 * First of all, the unconditional stuff we have to do
4972ac27a0ecSDave Kleikamp 			 * to disable replay of the journal when we next remount
4973ac27a0ecSDave Kleikamp 			 */
4974ac27a0ecSDave Kleikamp 			sb->s_flags |= MS_RDONLY;
4975ac27a0ecSDave Kleikamp 
4976ac27a0ecSDave Kleikamp 			/*
4977ac27a0ecSDave Kleikamp 			 * OK, test if we are remounting a valid rw partition
4978ac27a0ecSDave Kleikamp 			 * readonly, and if so set the rdonly flag and then
4979ac27a0ecSDave Kleikamp 			 * mark the partition as valid again.
4980ac27a0ecSDave Kleikamp 			 */
4981617ba13bSMingming Cao 			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
4982617ba13bSMingming Cao 			    (sbi->s_mount_state & EXT4_VALID_FS))
4983ac27a0ecSDave Kleikamp 				es->s_state = cpu_to_le16(sbi->s_mount_state);
4984ac27a0ecSDave Kleikamp 
4985a63c9eb2STheodore Ts'o 			if (sbi->s_journal)
4986617ba13bSMingming Cao 				ext4_mark_recovery_complete(sb, es);
4987ac27a0ecSDave Kleikamp 		} else {
4988a13fb1a4SEric Sandeen 			/* Make sure we can mount this feature set readwrite */
4989e2b911c5SDarrick J. Wong 			if (ext4_has_feature_readonly(sb) ||
49902cb5cc8bSDarrick J. Wong 			    !ext4_feature_set_ok(sb, 0)) {
4991ac27a0ecSDave Kleikamp 				err = -EROFS;
4992ac27a0ecSDave Kleikamp 				goto restore_opts;
4993ac27a0ecSDave Kleikamp 			}
4994ead6596bSEric Sandeen 			/*
49958a266467STheodore Ts'o 			 * Make sure the group descriptor checksums
49960b8e58a1SAndreas Dilger 			 * are sane.  If they aren't, refuse to remount r/w.
49978a266467STheodore Ts'o 			 */
49988a266467STheodore Ts'o 			for (g = 0; g < sbi->s_groups_count; g++) {
49998a266467STheodore Ts'o 				struct ext4_group_desc *gdp =
50008a266467STheodore Ts'o 					ext4_get_group_desc(sb, g, NULL);
50018a266467STheodore Ts'o 
5002feb0ab32SDarrick J. Wong 				if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
5003b31e1552SEric Sandeen 					ext4_msg(sb, KERN_ERR,
5004b31e1552SEric Sandeen 	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
5005e2b911c5SDarrick J. Wong 		g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)),
50068a266467STheodore Ts'o 					       le16_to_cpu(gdp->bg_checksum));
50076a797d27SDarrick J. Wong 					err = -EFSBADCRC;
50088a266467STheodore Ts'o 					goto restore_opts;
50098a266467STheodore Ts'o 				}
50108a266467STheodore Ts'o 			}
50118a266467STheodore Ts'o 
50128a266467STheodore Ts'o 			/*
5013ead6596bSEric Sandeen 			 * If we have an unprocessed orphan list hanging
5014ead6596bSEric Sandeen 			 * around from a previously readonly bdev mount,
5015ead6596bSEric Sandeen 			 * require a full umount/remount for now.
5016ead6596bSEric Sandeen 			 */
5017ead6596bSEric Sandeen 			if (es->s_last_orphan) {
5018b31e1552SEric Sandeen 				ext4_msg(sb, KERN_WARNING, "Couldn't "
5019ead6596bSEric Sandeen 				       "remount RDWR because of unprocessed "
5020ead6596bSEric Sandeen 				       "orphan inode list.  Please "
5021b31e1552SEric Sandeen 				       "umount/remount instead");
5022ead6596bSEric Sandeen 				err = -EINVAL;
5023ead6596bSEric Sandeen 				goto restore_opts;
5024ead6596bSEric Sandeen 			}
5025ead6596bSEric Sandeen 
5026ac27a0ecSDave Kleikamp 			/*
5027ac27a0ecSDave Kleikamp 			 * Mounting a RDONLY partition read-write, so reread
5028ac27a0ecSDave Kleikamp 			 * and store the current valid flag.  (It may have
5029ac27a0ecSDave Kleikamp 			 * been changed by e2fsck since we originally mounted
5030ac27a0ecSDave Kleikamp 			 * the partition.)
5031ac27a0ecSDave Kleikamp 			 */
50320390131bSFrank Mayhar 			if (sbi->s_journal)
5033617ba13bSMingming Cao 				ext4_clear_journal_err(sb, es);
5034ac27a0ecSDave Kleikamp 			sbi->s_mount_state = le16_to_cpu(es->s_state);
5035617ba13bSMingming Cao 			if (!ext4_setup_super(sb, es, 0))
5036ac27a0ecSDave Kleikamp 				sb->s_flags &= ~MS_RDONLY;
5037e2b911c5SDarrick J. Wong 			if (ext4_has_feature_mmp(sb))
5038c5e06d10SJohann Lombardi 				if (ext4_multi_mount_protect(sb,
5039c5e06d10SJohann Lombardi 						le64_to_cpu(es->s_mmp_block))) {
5040c5e06d10SJohann Lombardi 					err = -EROFS;
5041c5e06d10SJohann Lombardi 					goto restore_opts;
5042c5e06d10SJohann Lombardi 				}
5043c79d967dSChristoph Hellwig 			enable_quota = 1;
5044ac27a0ecSDave Kleikamp 		}
5045ac27a0ecSDave Kleikamp 	}
5046bfff6873SLukas Czerner 
5047bfff6873SLukas Czerner 	/*
5048bfff6873SLukas Czerner 	 * Reinitialize lazy itable initialization thread based on
5049bfff6873SLukas Czerner 	 * current settings
5050bfff6873SLukas Czerner 	 */
5051bfff6873SLukas Czerner 	if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
5052bfff6873SLukas Czerner 		ext4_unregister_li_request(sb);
5053bfff6873SLukas Czerner 	else {
5054bfff6873SLukas Czerner 		ext4_group_t first_not_zeroed;
5055bfff6873SLukas Czerner 		first_not_zeroed = ext4_has_uninit_itable(sb);
5056bfff6873SLukas Czerner 		ext4_register_li_request(sb, first_not_zeroed);
5057bfff6873SLukas Czerner 	}
5058bfff6873SLukas Czerner 
50596fd058f7STheodore Ts'o 	ext4_setup_system_zone(sb);
5060d096ad0fSMichael Tokarev 	if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
5061e2d67052STheodore Ts'o 		ext4_commit_super(sb, 1);
50620390131bSFrank Mayhar 
5063ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
5064ac27a0ecSDave Kleikamp 	/* Release old quota file names */
5065a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
5066ac27a0ecSDave Kleikamp 		kfree(old_opts.s_qf_names[i]);
50677c319d32SAditya Kali 	if (enable_quota) {
50687c319d32SAditya Kali 		if (sb_any_quota_suspended(sb))
50690f0dd62fSChristoph Hellwig 			dquot_resume(sb, -1);
5070e2b911c5SDarrick J. Wong 		else if (ext4_has_feature_quota(sb)) {
50717c319d32SAditya Kali 			err = ext4_enable_quotas(sb);
507207724f98STheodore Ts'o 			if (err)
50737c319d32SAditya Kali 				goto restore_opts;
50747c319d32SAditya Kali 		}
50757c319d32SAditya Kali 	}
50767c319d32SAditya Kali #endif
5077d4c402d9SCurt Wohlgemuth 
5078a26f4992STheodore Ts'o 	*flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
5079d4c402d9SCurt Wohlgemuth 	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
5080d4c402d9SCurt Wohlgemuth 	kfree(orig_data);
5081ac27a0ecSDave Kleikamp 	return 0;
50820b8e58a1SAndreas Dilger 
5083ac27a0ecSDave Kleikamp restore_opts:
5084ac27a0ecSDave Kleikamp 	sb->s_flags = old_sb_flags;
5085ac27a0ecSDave Kleikamp 	sbi->s_mount_opt = old_opts.s_mount_opt;
5086a2595b8aSTheodore Ts'o 	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
5087ac27a0ecSDave Kleikamp 	sbi->s_resuid = old_opts.s_resuid;
5088ac27a0ecSDave Kleikamp 	sbi->s_resgid = old_opts.s_resgid;
5089ac27a0ecSDave Kleikamp 	sbi->s_commit_interval = old_opts.s_commit_interval;
509030773840STheodore Ts'o 	sbi->s_min_batch_time = old_opts.s_min_batch_time;
509130773840STheodore Ts'o 	sbi->s_max_batch_time = old_opts.s_max_batch_time;
5092ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
5093ac27a0ecSDave Kleikamp 	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
5094a2d4a646SJan Kara 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
5095ac27a0ecSDave Kleikamp 		kfree(sbi->s_qf_names[i]);
5096ac27a0ecSDave Kleikamp 		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
5097ac27a0ecSDave Kleikamp 	}
5098ac27a0ecSDave Kleikamp #endif
5099d4c402d9SCurt Wohlgemuth 	kfree(orig_data);
5100ac27a0ecSDave Kleikamp 	return err;
5101ac27a0ecSDave Kleikamp }
5102ac27a0ecSDave Kleikamp 
5103689c958cSLi Xi #ifdef CONFIG_QUOTA
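/*
 * Clamp the statfs results to the project quota limits of @projid: if the
 * block or inode soft limit (falling back to the hard limit) is lower than
 * the filesystem-wide totals, report the limit as the total and the unused
 * portion of the limit as free.
 */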
5104689c958cSLi Xi static int ext4_statfs_project(struct super_block *sb,
5105689c958cSLi Xi 			       kprojid_t projid, struct kstatfs *buf)
5106689c958cSLi Xi {
5107689c958cSLi Xi 	struct kqid qid;
5108689c958cSLi Xi 	struct dquot *dquot;
5109689c958cSLi Xi 	u64 limit;
5110689c958cSLi Xi 	u64 curblock;
5111689c958cSLi Xi 
5112689c958cSLi Xi 	qid = make_kqid_projid(projid);
5113689c958cSLi Xi 	dquot = dqget(sb, qid);
5114689c958cSLi Xi 	if (IS_ERR(dquot))
5115689c958cSLi Xi 		return PTR_ERR(dquot);
5116689c958cSLi Xi 	spin_lock(&dq_data_lock);
5117689c958cSLi Xi 
5118689c958cSLi Xi 	limit = (dquot->dq_dqb.dqb_bsoftlimit ?
5119689c958cSLi Xi 		 dquot->dq_dqb.dqb_bsoftlimit :
5120689c958cSLi Xi 		 dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
5121689c958cSLi Xi 	if (limit && buf->f_blocks > limit) {
5122689c958cSLi Xi 		curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
5123689c958cSLi Xi 		buf->f_blocks = limit;
5124689c958cSLi Xi 		buf->f_bfree = buf->f_bavail =
5125689c958cSLi Xi 			(buf->f_blocks > curblock) ?
5126689c958cSLi Xi 			 (buf->f_blocks - curblock) : 0;
5127689c958cSLi Xi 	}
5128689c958cSLi Xi 
5129689c958cSLi Xi 	limit = dquot->dq_dqb.dqb_isoftlimit ?
5130689c958cSLi Xi 		dquot->dq_dqb.dqb_isoftlimit :
5131689c958cSLi Xi 		dquot->dq_dqb.dqb_ihardlimit;
5132689c958cSLi Xi 	if (limit && buf->f_files > limit) {
5133689c958cSLi Xi 		buf->f_files = limit;
5134689c958cSLi Xi 		buf->f_ffree =
5135689c958cSLi Xi 			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
5136689c958cSLi Xi 			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
5137689c958cSLi Xi 	}
5138689c958cSLi Xi 
5139689c958cSLi Xi 	spin_unlock(&dq_data_lock);
5140689c958cSLi Xi 	dqput(dquot);
5141689c958cSLi Xi 	return 0;
5142689c958cSLi Xi }
5143689c958cSLi Xi #endif
5144689c958cSLi Xi 
5145617ba13bSMingming Cao static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
5146ac27a0ecSDave Kleikamp {
5147ac27a0ecSDave Kleikamp 	struct super_block *sb = dentry->d_sb;
5148617ba13bSMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5149617ba13bSMingming Cao 	struct ext4_super_block *es = sbi->s_es;
515027dd4385SLukas Czerner 	ext4_fsblk_t overhead = 0, resv_blocks;
5151960cc398SPekka Enberg 	u64 fsid;
5152d02a9391SKazuya Mio 	s64 bfree;
515327dd4385SLukas Czerner 	resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters));
5154ac27a0ecSDave Kleikamp 
5155952fc18eSTheodore Ts'o 	if (!test_opt(sb, MINIX_DF))
5156952fc18eSTheodore Ts'o 		overhead = sbi->s_overhead;
5157ac27a0ecSDave Kleikamp 
5158617ba13bSMingming Cao 	buf->f_type = EXT4_SUPER_MAGIC;
5159ac27a0ecSDave Kleikamp 	buf->f_bsize = sb->s_blocksize;
5160b72f78cbSEric Sandeen 	buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
516157042651STheodore Ts'o 	bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
516257042651STheodore Ts'o 		percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
5163d02a9391SKazuya Mio 	/* prevent underflow in case only a little free space is available */
516457042651STheodore Ts'o 	buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0));
516527dd4385SLukas Czerner 	buf->f_bavail = buf->f_bfree -
516627dd4385SLukas Czerner 			(ext4_r_blocks_count(es) + resv_blocks);
516727dd4385SLukas Czerner 	if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks))
5168ac27a0ecSDave Kleikamp 		buf->f_bavail = 0;
5169ac27a0ecSDave Kleikamp 	buf->f_files = le32_to_cpu(es->s_inodes_count);
517052d9f3b4SPeter Zijlstra 	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
5171617ba13bSMingming Cao 	buf->f_namelen = EXT4_NAME_LEN;
5172960cc398SPekka Enberg 	fsid = le64_to_cpup((void *)es->s_uuid) ^
5173960cc398SPekka Enberg 	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
5174960cc398SPekka Enberg 	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
5175960cc398SPekka Enberg 	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
51760b8e58a1SAndreas Dilger 
5177689c958cSLi Xi #ifdef CONFIG_QUOTA
5178689c958cSLi Xi 	if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) &&
5179689c958cSLi Xi 	    sb_has_quota_limits_enabled(sb, PRJQUOTA))
5180689c958cSLi Xi 		ext4_statfs_project(sb, EXT4_I(dentry->d_inode)->i_projid, buf);
5181689c958cSLi Xi #endif
5182ac27a0ecSDave Kleikamp 	return 0;
5183ac27a0ecSDave Kleikamp }
5184ac27a0ecSDave Kleikamp 
51850b8e58a1SAndreas Dilger /* Helper function for writing quotas on sync - we need to start a transaction
51860b8e58a1SAndreas Dilger  * before the quota file is locked for write. Otherwise deadlocks are possible:
5187ac27a0ecSDave Kleikamp  * Process 1                         Process 2
5188617ba13bSMingming Cao  * ext4_create()                     quota_sync()
5189dab291afSMingming Cao  *   jbd2_journal_start()                  write_dquot()
5190871a2931SChristoph Hellwig  *   dquot_initialize()                         down(dqio_mutex)
5191dab291afSMingming Cao  *     down(dqio_mutex)                    jbd2_journal_start()
5192ac27a0ecSDave Kleikamp  *
5193ac27a0ecSDave Kleikamp  */
5194ac27a0ecSDave Kleikamp 
5195ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA
5196ac27a0ecSDave Kleikamp 
5197ac27a0ecSDave Kleikamp static inline struct inode *dquot_to_inode(struct dquot *dquot)
5198ac27a0ecSDave Kleikamp {
51994c376dcaSEric W. Biederman 	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
5200ac27a0ecSDave Kleikamp }
5201ac27a0ecSDave Kleikamp 
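/*
 * Commit a dquot to the quota file inside a freshly started transaction
 * sized with EXT4_QUOTA_TRANS_BLOCKS().
 */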
5202617ba13bSMingming Cao static int ext4_write_dquot(struct dquot *dquot)
5203ac27a0ecSDave Kleikamp {
5204ac27a0ecSDave Kleikamp 	int ret, err;
5205ac27a0ecSDave Kleikamp 	handle_t *handle;
5206ac27a0ecSDave Kleikamp 	struct inode *inode;
5207ac27a0ecSDave Kleikamp 
5208ac27a0ecSDave Kleikamp 	inode = dquot_to_inode(dquot);
52099924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5210617ba13bSMingming Cao 				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
5211ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5212ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
5213ac27a0ecSDave Kleikamp 	ret = dquot_commit(dquot);
5214617ba13bSMingming Cao 	err = ext4_journal_stop(handle);
5215ac27a0ecSDave Kleikamp 	if (!ret)
5216ac27a0ecSDave Kleikamp 		ret = err;
5217ac27a0ecSDave Kleikamp 	return ret;
5218ac27a0ecSDave Kleikamp }
5219ac27a0ecSDave Kleikamp 
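/*
 * Read in a dquot from the quota file within a transaction sized with
 * EXT4_QUOTA_INIT_BLOCKS().
 */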
5220617ba13bSMingming Cao static int ext4_acquire_dquot(struct dquot *dquot)
5221ac27a0ecSDave Kleikamp {
5222ac27a0ecSDave Kleikamp 	int ret, err;
5223ac27a0ecSDave Kleikamp 	handle_t *handle;
5224ac27a0ecSDave Kleikamp 
52259924a92aSTheodore Ts'o 	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
5226617ba13bSMingming Cao 				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
5227ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5228ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
5229ac27a0ecSDave Kleikamp 	ret = dquot_acquire(dquot);
5230617ba13bSMingming Cao 	err = ext4_journal_stop(handle);
5231ac27a0ecSDave Kleikamp 	if (!ret)
5232ac27a0ecSDave Kleikamp 		ret = err;
5233ac27a0ecSDave Kleikamp 	return ret;
5234ac27a0ecSDave Kleikamp }
5235ac27a0ecSDave Kleikamp 
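/*
 * Release a dquot back to the quota file within a transaction sized with
 * EXT4_QUOTA_DEL_BLOCKS(); if the transaction cannot be started, the dquot
 * is still released to avoid an endless cycle in dqput().
 */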
5236617ba13bSMingming Cao static int ext4_release_dquot(struct dquot *dquot)
5237ac27a0ecSDave Kleikamp {
5238ac27a0ecSDave Kleikamp 	int ret, err;
5239ac27a0ecSDave Kleikamp 	handle_t *handle;
5240ac27a0ecSDave Kleikamp 
52419924a92aSTheodore Ts'o 	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
5242617ba13bSMingming Cao 				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
52439c3013e9SJan Kara 	if (IS_ERR(handle)) {
52449c3013e9SJan Kara 		/* Release dquot anyway to avoid endless cycle in dqput() */
52459c3013e9SJan Kara 		dquot_release(dquot);
5246ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
52479c3013e9SJan Kara 	}
5248ac27a0ecSDave Kleikamp 	ret = dquot_release(dquot);
5249617ba13bSMingming Cao 	err = ext4_journal_stop(handle);
5250ac27a0ecSDave Kleikamp 	if (!ret)
5251ac27a0ecSDave Kleikamp 		ret = err;
5252ac27a0ecSDave Kleikamp 	return ret;
5253ac27a0ecSDave Kleikamp }
5254ac27a0ecSDave Kleikamp 
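/*
 * For journaled quota (either the quota feature or journaled quota files),
 * a dirtied dquot is written out immediately; otherwise it is only marked
 * dirty and written back later by the quota code.
 */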
5255617ba13bSMingming Cao static int ext4_mark_dquot_dirty(struct dquot *dquot)
5256ac27a0ecSDave Kleikamp {
5257262b4662SJan Kara 	struct super_block *sb = dquot->dq_sb;
5258262b4662SJan Kara 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5259262b4662SJan Kara 
52602c8be6b2SJan Kara 	/* Are we journaling quotas? */
5261e2b911c5SDarrick J. Wong 	if (ext4_has_feature_quota(sb) ||
5262262b4662SJan Kara 	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
5263ac27a0ecSDave Kleikamp 		dquot_mark_dquot_dirty(dquot);
5264617ba13bSMingming Cao 		return ext4_write_dquot(dquot);
5265ac27a0ecSDave Kleikamp 	} else {
5266ac27a0ecSDave Kleikamp 		return dquot_mark_dquot_dirty(dquot);
5267ac27a0ecSDave Kleikamp 	}
5268ac27a0ecSDave Kleikamp }
5269ac27a0ecSDave Kleikamp 
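/*
 * Write quota file information (per-type dqinfo) within a small transaction
 * covering one data block and one inode block.
 */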
5270617ba13bSMingming Cao static int ext4_write_info(struct super_block *sb, int type)
5271ac27a0ecSDave Kleikamp {
5272ac27a0ecSDave Kleikamp 	int ret, err;
5273ac27a0ecSDave Kleikamp 	handle_t *handle;
5274ac27a0ecSDave Kleikamp 
5275ac27a0ecSDave Kleikamp 	/* Data block + inode block */
52762b0143b5SDavid Howells 	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
5277ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5278ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
5279ac27a0ecSDave Kleikamp 	ret = dquot_commit_info(sb, type);
5280617ba13bSMingming Cao 	err = ext4_journal_stop(handle);
5281ac27a0ecSDave Kleikamp 	if (!ret)
5282ac27a0ecSDave Kleikamp 		ret = err;
5283ac27a0ecSDave Kleikamp 	return ret;
5284ac27a0ecSDave Kleikamp }
5285ac27a0ecSDave Kleikamp 
5286ac27a0ecSDave Kleikamp /*
5287ac27a0ecSDave Kleikamp  * Turn on quotas during mount time - we need to find
5288ac27a0ecSDave Kleikamp  * the quota file and such...
5289ac27a0ecSDave Kleikamp  */
5290617ba13bSMingming Cao static int ext4_quota_on_mount(struct super_block *sb, int type)
5291ac27a0ecSDave Kleikamp {
5292287a8095SChristoph Hellwig 	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
5293617ba13bSMingming Cao 					EXT4_SB(sb)->s_jquota_fmt, type);
5294ac27a0ecSDave Kleikamp }
5295ac27a0ecSDave Kleikamp 
5296daf647d2STheodore Ts'o static void lockdep_set_quota_inode(struct inode *inode, int subclass)
5297daf647d2STheodore Ts'o {
5298daf647d2STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
5299daf647d2STheodore Ts'o 
5300daf647d2STheodore Ts'o 	/* The first argument of lockdep_set_subclass has to be
5301daf647d2STheodore Ts'o 	 * *exactly* the same as the argument to init_rwsem() --- in
5302daf647d2STheodore Ts'o 	 * this case, in init_once() --- or lockdep gets unhappy
5303daf647d2STheodore Ts'o 	 * because the name of the lock is set using the
5304daf647d2STheodore Ts'o 	 * stringification of the argument to init_rwsem().
5305daf647d2STheodore Ts'o 	 */
5306daf647d2STheodore Ts'o 	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
5307daf647d2STheodore Ts'o 	lockdep_set_subclass(&ei->i_data_sem, subclass);
5308daf647d2STheodore Ts'o }
5309daf647d2STheodore Ts'o 
5310ac27a0ecSDave Kleikamp /*
5311ac27a0ecSDave Kleikamp  * Standard function to be called on quota_on
5312ac27a0ecSDave Kleikamp  */
5313617ba13bSMingming Cao static int ext4_quota_on(struct super_block *sb, int type, int format_id,
53148c54ca9cSAl Viro 			 const struct path *path)
5315ac27a0ecSDave Kleikamp {
5316ac27a0ecSDave Kleikamp 	int err;
5317ac27a0ecSDave Kleikamp 
5318ac27a0ecSDave Kleikamp 	if (!test_opt(sb, QUOTA))
5319ac27a0ecSDave Kleikamp 		return -EINVAL;
53200623543bSJan Kara 
5321ac27a0ecSDave Kleikamp 	/* Quotafile not on the same filesystem? */
5322d8c9584eSAl Viro 	if (path->dentry->d_sb != sb)
5323ac27a0ecSDave Kleikamp 		return -EXDEV;
53240623543bSJan Kara 	/* Journaling quota? */
53250623543bSJan Kara 	if (EXT4_SB(sb)->s_qf_names[type]) {
53262b2d6d01STheodore Ts'o 		/* Quotafile not in fs root? */
5327f00c9e44SJan Kara 		if (path->dentry->d_parent != sb->s_root)
5328b31e1552SEric Sandeen 			ext4_msg(sb, KERN_WARNING,
5329b31e1552SEric Sandeen 				"Quota file not on filesystem root. "
5330b31e1552SEric Sandeen 				"Journaled quota will not work");
53310623543bSJan Kara 	}
53320623543bSJan Kara 
53330623543bSJan Kara 	/*
53340623543bSJan Kara 	 * When we journal data on the quota file, we have to flush the journal
53350623543bSJan Kara 	 * to see all updates to the file when we bypass the pagecache...
53360623543bSJan Kara 	 */
53370390131bSFrank Mayhar 	if (EXT4_SB(sb)->s_journal &&
53382b0143b5SDavid Howells 	    ext4_should_journal_data(d_inode(path->dentry))) {
53390623543bSJan Kara 		/*
53400623543bSJan Kara 		 * We don't need to lock updates but journal_flush() could
53410623543bSJan Kara 		 * otherwise be livelocked...
53420623543bSJan Kara 		 */
53430623543bSJan Kara 		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
53447ffe1ea8SHidehiro Kawai 		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
53450623543bSJan Kara 		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
5346f00c9e44SJan Kara 		if (err)
53477ffe1ea8SHidehiro Kawai 			return err;
53487ffe1ea8SHidehiro Kawai 	}
5349daf647d2STheodore Ts'o 	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
5350daf647d2STheodore Ts'o 	err = dquot_quota_on(sb, type, format_id, path);
5351daf647d2STheodore Ts'o 	if (err)
5352daf647d2STheodore Ts'o 		lockdep_set_quota_inode(path->dentry->d_inode,
5353daf647d2STheodore Ts'o 					     I_DATA_SEM_NORMAL);
5354daf647d2STheodore Ts'o 	return err;
5355ac27a0ecSDave Kleikamp }
5356ac27a0ecSDave Kleikamp 
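/*
 * Enable quota accounting on one of the hidden quota inodes whose numbers
 * are stored in the superblock (user, group, or project). Only used when
 * the "quota" feature is present.
 */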
53577c319d32SAditya Kali static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
53587c319d32SAditya Kali 			     unsigned int flags)
53597c319d32SAditya Kali {
53607c319d32SAditya Kali 	int err;
53617c319d32SAditya Kali 	struct inode *qf_inode;
5362a2d4a646SJan Kara 	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
53637c319d32SAditya Kali 		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5364689c958cSLi Xi 		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5365689c958cSLi Xi 		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
53667c319d32SAditya Kali 	};
53677c319d32SAditya Kali 
5368e2b911c5SDarrick J. Wong 	BUG_ON(!ext4_has_feature_quota(sb));
53697c319d32SAditya Kali 
53707c319d32SAditya Kali 	if (!qf_inums[type])
53717c319d32SAditya Kali 		return -EPERM;
53727c319d32SAditya Kali 
53737c319d32SAditya Kali 	qf_inode = ext4_iget(sb, qf_inums[type]);
53747c319d32SAditya Kali 	if (IS_ERR(qf_inode)) {
53757c319d32SAditya Kali 		ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
53767c319d32SAditya Kali 		return PTR_ERR(qf_inode);
53777c319d32SAditya Kali 	}
53787c319d32SAditya Kali 
5379bcb13850SJan Kara 	/* Don't account quota for quota files to avoid recursion */
5380bcb13850SJan Kara 	qf_inode->i_flags |= S_NOQUOTA;
5381daf647d2STheodore Ts'o 	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
53827c319d32SAditya Kali 	err = dquot_enable(qf_inode, type, format_id, flags);
53837c319d32SAditya Kali 	iput(qf_inode);
5384daf647d2STheodore Ts'o 	if (err)
5385daf647d2STheodore Ts'o 		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
53867c319d32SAditya Kali 
53877c319d32SAditya Kali 	return err;
53887c319d32SAditya Kali }
53897c319d32SAditya Kali 
53907c319d32SAditya Kali /* Enable usage tracking for all quota types. */
53917c319d32SAditya Kali static int ext4_enable_quotas(struct super_block *sb)
53927c319d32SAditya Kali {
53937c319d32SAditya Kali 	int type, err = 0;
5394a2d4a646SJan Kara 	unsigned long qf_inums[EXT4_MAXQUOTAS] = {
53957c319d32SAditya Kali 		le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5396689c958cSLi Xi 		le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum),
5397689c958cSLi Xi 		le32_to_cpu(EXT4_SB(sb)->s_es->s_prj_quota_inum)
53987c319d32SAditya Kali 	};
539949da9392SJan Kara 	bool quota_mopt[EXT4_MAXQUOTAS] = {
540049da9392SJan Kara 		test_opt(sb, USRQUOTA),
540149da9392SJan Kara 		test_opt(sb, GRPQUOTA),
540249da9392SJan Kara 		test_opt(sb, PRJQUOTA),
540349da9392SJan Kara 	};
54047c319d32SAditya Kali 
54057c319d32SAditya Kali 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
5406a2d4a646SJan Kara 	for (type = 0; type < EXT4_MAXQUOTAS; type++) {
54077c319d32SAditya Kali 		if (qf_inums[type]) {
54087c319d32SAditya Kali 			err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
540949da9392SJan Kara 				DQUOT_USAGE_ENABLED |
541049da9392SJan Kara 				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
54117c319d32SAditya Kali 			if (err) {
54127c319d32SAditya Kali 				ext4_warning(sb,
541372ba7450STheodore Ts'o 					"Failed to enable quota tracking "
541472ba7450STheodore Ts'o 					"(type=%d, err=%d). Please run "
541572ba7450STheodore Ts'o 					"e2fsck to fix.", type, err);
54167c319d32SAditya Kali 				return err;
54177c319d32SAditya Kali 			}
54187c319d32SAditya Kali 		}
54197c319d32SAditya Kali 	}
54207c319d32SAditya Kali 	return 0;
54217c319d32SAditya Kali }
54227c319d32SAditya Kali 
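/*
 * Turn quotas off: flush delayed allocations first so the accounting is
 * final, bump the quota file timestamps so userspace sees the change, and
 * then hand off to dquot_quota_off().
 */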
5423ca0e05e4SDmitry Monakhov static int ext4_quota_off(struct super_block *sb, int type)
5424ca0e05e4SDmitry Monakhov {
542521f97697SJan Kara 	struct inode *inode = sb_dqopt(sb)->files[type];
542621f97697SJan Kara 	handle_t *handle;
542721f97697SJan Kara 
542887009d86SDmitry Monakhov 	/* Force all delayed allocation blocks to be allocated.
542987009d86SDmitry Monakhov 	 * The caller already holds the s_umount semaphore. */
543087009d86SDmitry Monakhov 	if (test_opt(sb, DELALLOC))
5431ca0e05e4SDmitry Monakhov 		sync_filesystem(sb);
5432ca0e05e4SDmitry Monakhov 
54330b268590SAmir Goldstein 	if (!inode)
54340b268590SAmir Goldstein 		goto out;
54350b268590SAmir Goldstein 
543621f97697SJan Kara 	/* Update modification times of quota files when userspace can
543721f97697SJan Kara 	 * start looking at them */
54389924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
543921f97697SJan Kara 	if (IS_ERR(handle))
544021f97697SJan Kara 		goto out;
5441eeca7ea1SDeepa Dinamani 	inode->i_mtime = inode->i_ctime = current_time(inode);
544221f97697SJan Kara 	ext4_mark_inode_dirty(handle, inode);
544321f97697SJan Kara 	ext4_journal_stop(handle);
544421f97697SJan Kara 
544521f97697SJan Kara out:
5446ca0e05e4SDmitry Monakhov 	return dquot_quota_off(sb, type);
5447ca0e05e4SDmitry Monakhov }
5448ca0e05e4SDmitry Monakhov 
5449ac27a0ecSDave Kleikamp /* Read data from quotafile - avoid the pagecache and such because we cannot
5450ac27a0ecSDave Kleikamp  * afford to acquire the locks... As quota files are never truncated and the
5451ac27a0ecSDave Kleikamp  * quota code itself serializes the operations (and no one else should touch
5452ac27a0ecSDave Kleikamp  * the files), we don't have to be afraid of races */
5453617ba13bSMingming Cao static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
5454ac27a0ecSDave Kleikamp 			       size_t len, loff_t off)
5455ac27a0ecSDave Kleikamp {
5456ac27a0ecSDave Kleikamp 	struct inode *inode = sb_dqopt(sb)->files[type];
5457725d26d3SAneesh Kumar K.V 	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
5458ac27a0ecSDave Kleikamp 	int offset = off & (sb->s_blocksize - 1);
5459ac27a0ecSDave Kleikamp 	int tocopy;
5460ac27a0ecSDave Kleikamp 	size_t toread;
5461ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
5462ac27a0ecSDave Kleikamp 	loff_t i_size = i_size_read(inode);
5463ac27a0ecSDave Kleikamp 
5464ac27a0ecSDave Kleikamp 	if (off > i_size)
5465ac27a0ecSDave Kleikamp 		return 0;
5466ac27a0ecSDave Kleikamp 	if (off+len > i_size)
5467ac27a0ecSDave Kleikamp 		len = i_size-off;
5468ac27a0ecSDave Kleikamp 	toread = len;
5469ac27a0ecSDave Kleikamp 	while (toread > 0) {
5470ac27a0ecSDave Kleikamp 		tocopy = sb->s_blocksize - offset < toread ?
5471ac27a0ecSDave Kleikamp 				sb->s_blocksize - offset : toread;
54721c215028STheodore Ts'o 		bh = ext4_bread(NULL, inode, blk, 0);
54731c215028STheodore Ts'o 		if (IS_ERR(bh))
54741c215028STheodore Ts'o 			return PTR_ERR(bh);
5475ac27a0ecSDave Kleikamp 		if (!bh)	/* A hole? */
5476ac27a0ecSDave Kleikamp 			memset(data, 0, tocopy);
5477ac27a0ecSDave Kleikamp 		else
5478ac27a0ecSDave Kleikamp 			memcpy(data, bh->b_data+offset, tocopy);
5479ac27a0ecSDave Kleikamp 		brelse(bh);
5480ac27a0ecSDave Kleikamp 		offset = 0;
5481ac27a0ecSDave Kleikamp 		toread -= tocopy;
5482ac27a0ecSDave Kleikamp 		data += tocopy;
5483ac27a0ecSDave Kleikamp 		blk++;
5484ac27a0ecSDave Kleikamp 	}
5485ac27a0ecSDave Kleikamp 	return len;
5486ac27a0ecSDave Kleikamp }
5487ac27a0ecSDave Kleikamp 
5488ac27a0ecSDave Kleikamp /* Write to quotafile (we know the transaction is already started and has
5489ac27a0ecSDave Kleikamp  * enough credits) */
5490617ba13bSMingming Cao static ssize_t ext4_quota_write(struct super_block *sb, int type,
5491ac27a0ecSDave Kleikamp 				const char *data, size_t len, loff_t off)
5492ac27a0ecSDave Kleikamp {
5493ac27a0ecSDave Kleikamp 	struct inode *inode = sb_dqopt(sb)->files[type];
5494725d26d3SAneesh Kumar K.V 	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
54951c215028STheodore Ts'o 	int err, offset = off & (sb->s_blocksize - 1);
5496c5e298aeSTheodore Ts'o 	int retries = 0;
5497ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
5498ac27a0ecSDave Kleikamp 	handle_t *handle = journal_current_handle();
5499ac27a0ecSDave Kleikamp 
55000390131bSFrank Mayhar 	if (EXT4_SB(sb)->s_journal && !handle) {
5501b31e1552SEric Sandeen 		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
5502b31e1552SEric Sandeen 			" cancelled because transaction is not started",
55039c3013e9SJan Kara 			(unsigned long long)off, (unsigned long long)len);
55049c3013e9SJan Kara 		return -EIO;
55059c3013e9SJan Kara 	}
550667eeb568SDmitry Monakhov 	/*
550767eeb568SDmitry Monakhov 	 * Since we account only one data block in transaction credits,
550867eeb568SDmitry Monakhov 	 * Since we account for only one data block in the transaction credits,
550967eeb568SDmitry Monakhov 	 * it is impossible to cross a block boundary.
551067eeb568SDmitry Monakhov 	if (sb->s_blocksize - offset < len) {
551167eeb568SDmitry Monakhov 		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
551267eeb568SDmitry Monakhov 			" cancelled because not block aligned",
551367eeb568SDmitry Monakhov 			(unsigned long long)off, (unsigned long long)len);
551467eeb568SDmitry Monakhov 		return -EIO;
551567eeb568SDmitry Monakhov 	}
551667eeb568SDmitry Monakhov 
5517c5e298aeSTheodore Ts'o 	do {
5518c5e298aeSTheodore Ts'o 		bh = ext4_bread(handle, inode, blk,
5519c5e298aeSTheodore Ts'o 				EXT4_GET_BLOCKS_CREATE |
5520c5e298aeSTheodore Ts'o 				EXT4_GET_BLOCKS_METADATA_NOFAIL);
5521c5e298aeSTheodore Ts'o 	} while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
5522c5e298aeSTheodore Ts'o 		 ext4_should_retry_alloc(inode->i_sb, &retries));
55231c215028STheodore Ts'o 	if (IS_ERR(bh))
55241c215028STheodore Ts'o 		return PTR_ERR(bh);
5525ac27a0ecSDave Kleikamp 	if (!bh)
5526ac27a0ecSDave Kleikamp 		goto out;
55275d601255Sliang xie 	BUFFER_TRACE(bh, "get write access");
5528617ba13bSMingming Cao 	err = ext4_journal_get_write_access(handle, bh);
5529ac27a0ecSDave Kleikamp 	if (err) {
5530ac27a0ecSDave Kleikamp 		brelse(bh);
55311c215028STheodore Ts'o 		return err;
5532ac27a0ecSDave Kleikamp 	}
5533ac27a0ecSDave Kleikamp 	lock_buffer(bh);
553467eeb568SDmitry Monakhov 	memcpy(bh->b_data+offset, data, len);
5535ac27a0ecSDave Kleikamp 	flush_dcache_page(bh->b_page);
5536ac27a0ecSDave Kleikamp 	unlock_buffer(bh);
55370390131bSFrank Mayhar 	err = ext4_handle_dirty_metadata(handle, NULL, bh);
5538ac27a0ecSDave Kleikamp 	brelse(bh);
5539ac27a0ecSDave Kleikamp out:
554067eeb568SDmitry Monakhov 	if (inode->i_size < off + len) {
554167eeb568SDmitry Monakhov 		i_size_write(inode, off + len);
5542617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = inode->i_size;
5543617ba13bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
554421f97697SJan Kara 	}
554567eeb568SDmitry Monakhov 	return len;
5546ac27a0ecSDave Kleikamp }
5547ac27a0ecSDave Kleikamp 
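/*
 * Look up the next active quota id at or after @qid via the loaded quota
 * format's ->get_next_id(); returns -ESRCH if quota is not loaded for the
 * type and -ENOSYS if the format does not support the operation.
 */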
55488f0e8746STheodore Ts'o static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
55498f0e8746STheodore Ts'o {
55508f0e8746STheodore Ts'o 	const struct quota_format_ops	*ops;
55518f0e8746STheodore Ts'o 
55528f0e8746STheodore Ts'o 	if (!sb_has_quota_loaded(sb, qid->type))
55538f0e8746STheodore Ts'o 		return -ESRCH;
55548f0e8746STheodore Ts'o 	ops = sb_dqopt(sb)->ops[qid->type];
55558f0e8746STheodore Ts'o 	if (!ops || !ops->get_next_id)
55568f0e8746STheodore Ts'o 		return -ENOSYS;
55578f0e8746STheodore Ts'o 	return dquot_get_next_id(sb, qid);
55588f0e8746STheodore Ts'o }
5559ac27a0ecSDave Kleikamp #endif
5560ac27a0ecSDave Kleikamp 
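/* Mount entry point: a thin wrapper around mount_bdev() using ext4_fill_super(). */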
5561152a0836SAl Viro static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
5562152a0836SAl Viro 		       const char *dev_name, void *data)
5563ac27a0ecSDave Kleikamp {
5564152a0836SAl Viro 	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
5565ac27a0ecSDave Kleikamp }
5566ac27a0ecSDave Kleikamp 
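/*
 * When ext4 is built to also service ext2/ext3 mounts, register the same
 * driver under those filesystem names; the *_feature_set_ok() helpers check
 * that a superblock does not use features unknown to the emulated variant.
 */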
5567c290ea01SJan Kara #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
556824b58424STheodore Ts'o static inline void register_as_ext2(void)
556924b58424STheodore Ts'o {
557024b58424STheodore Ts'o 	int err = register_filesystem(&ext2_fs_type);
557124b58424STheodore Ts'o 	if (err)
557224b58424STheodore Ts'o 		printk(KERN_WARNING
557324b58424STheodore Ts'o 		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
557424b58424STheodore Ts'o }
557524b58424STheodore Ts'o 
557624b58424STheodore Ts'o static inline void unregister_as_ext2(void)
557724b58424STheodore Ts'o {
557824b58424STheodore Ts'o 	unregister_filesystem(&ext2_fs_type);
557924b58424STheodore Ts'o }
55802035e776STheodore Ts'o 
55812035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb)
55822035e776STheodore Ts'o {
5583e2b911c5SDarrick J. Wong 	if (ext4_has_unknown_ext2_incompat_features(sb))
55842035e776STheodore Ts'o 		return 0;
55852035e776STheodore Ts'o 	if (sb->s_flags & MS_RDONLY)
55862035e776STheodore Ts'o 		return 1;
5587e2b911c5SDarrick J. Wong 	if (ext4_has_unknown_ext2_ro_compat_features(sb))
55882035e776STheodore Ts'o 		return 0;
55892035e776STheodore Ts'o 	return 1;
55902035e776STheodore Ts'o }
559124b58424STheodore Ts'o #else
559224b58424STheodore Ts'o static inline void register_as_ext2(void) { }
559324b58424STheodore Ts'o static inline void unregister_as_ext2(void) { }
55942035e776STheodore Ts'o static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
559524b58424STheodore Ts'o #endif
559624b58424STheodore Ts'o 
559724b58424STheodore Ts'o static inline void register_as_ext3(void)
559824b58424STheodore Ts'o {
559924b58424STheodore Ts'o 	int err = register_filesystem(&ext3_fs_type);
560024b58424STheodore Ts'o 	if (err)
560124b58424STheodore Ts'o 		printk(KERN_WARNING
560224b58424STheodore Ts'o 		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
560324b58424STheodore Ts'o }
560424b58424STheodore Ts'o 
560524b58424STheodore Ts'o static inline void unregister_as_ext3(void)
560624b58424STheodore Ts'o {
560724b58424STheodore Ts'o 	unregister_filesystem(&ext3_fs_type);
560824b58424STheodore Ts'o }
56092035e776STheodore Ts'o 
56102035e776STheodore Ts'o static inline int ext3_feature_set_ok(struct super_block *sb)
56112035e776STheodore Ts'o {
5612e2b911c5SDarrick J. Wong 	if (ext4_has_unknown_ext3_incompat_features(sb))
56132035e776STheodore Ts'o 		return 0;
5614e2b911c5SDarrick J. Wong 	if (!ext4_has_feature_journal(sb))
56152035e776STheodore Ts'o 		return 0;
56162035e776STheodore Ts'o 	if (sb->s_flags & MS_RDONLY)
56172035e776STheodore Ts'o 		return 1;
5618e2b911c5SDarrick J. Wong 	if (ext4_has_unknown_ext3_ro_compat_features(sb))
56192035e776STheodore Ts'o 		return 0;
56202035e776STheodore Ts'o 	return 1;
56212035e776STheodore Ts'o }
562224b58424STheodore Ts'o 
562303010a33STheodore Ts'o static struct file_system_type ext4_fs_type = {
5624ac27a0ecSDave Kleikamp 	.owner		= THIS_MODULE,
562503010a33STheodore Ts'o 	.name		= "ext4",
5626152a0836SAl Viro 	.mount		= ext4_mount,
5627ac27a0ecSDave Kleikamp 	.kill_sb	= kill_block_super,
5628ac27a0ecSDave Kleikamp 	.fs_flags	= FS_REQUIRES_DEV,
5629ac27a0ecSDave Kleikamp };
56307f78e035SEric W. Biederman MODULE_ALIAS_FS("ext4");
5631ac27a0ecSDave Kleikamp 
5632e9e3bcecSEric Sandeen /* Shared across all ext4 file systems */
5633e9e3bcecSEric Sandeen wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
5634e9e3bcecSEric Sandeen 
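/*
 * Module initialization: set up the mount-message ratelimit, extent status
 * tree, page I/O, block validity (system zone), sysfs, mballoc and inode
 * cache subsystems, then register the ext2/ext3/ext4 filesystem types.
 * Errors unwind the already-initialized pieces in reverse order.
 */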
56355dabfc78STheodore Ts'o static int __init ext4_init_fs(void)
5636ac27a0ecSDave Kleikamp {
5637e9e3bcecSEric Sandeen 	int i, err;
5638c9de560dSAlex Tomas 
5639e294a537STheodore Ts'o 	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
564007c0c5d8SAl Viro 	ext4_li_info = NULL;
564107c0c5d8SAl Viro 	mutex_init(&ext4_li_mtx);
564207c0c5d8SAl Viro 
56439a4c8019SCarlos Maiolino 	/* Build-time check for flags consistency */
564412e9b892SDmitry Monakhov 	ext4_check_flag_values();
5645e9e3bcecSEric Sandeen 
5646e142d052SJan Kara 	for (i = 0; i < EXT4_WQ_HASH_SZ; i++)
5647e9e3bcecSEric Sandeen 		init_waitqueue_head(&ext4__ioend_wq[i]);
5648e9e3bcecSEric Sandeen 
564951865fdaSZheng Liu 	err = ext4_init_es();
56506fd058f7STheodore Ts'o 	if (err)
56516fd058f7STheodore Ts'o 		return err;
565251865fdaSZheng Liu 
565351865fdaSZheng Liu 	err = ext4_init_pageio();
565451865fdaSZheng Liu 	if (err)
5655b5799018STheodore Ts'o 		goto out5;
565651865fdaSZheng Liu 
56575dabfc78STheodore Ts'o 	err = ext4_init_system_zone();
5658bd2d0210STheodore Ts'o 	if (err)
5659b5799018STheodore Ts'o 		goto out4;
5660857ac889SLukas Czerner 
5661b5799018STheodore Ts'o 	err = ext4_init_sysfs();
5662dd68314cSTheodore Ts'o 	if (err)
5663b5799018STheodore Ts'o 		goto out3;
5664857ac889SLukas Czerner 
56655dabfc78STheodore Ts'o 	err = ext4_init_mballoc();
5666ac27a0ecSDave Kleikamp 	if (err)
5667c9de560dSAlex Tomas 		goto out2;
5668ac27a0ecSDave Kleikamp 	err = init_inodecache();
5669ac27a0ecSDave Kleikamp 	if (err)
5670ac27a0ecSDave Kleikamp 		goto out1;
567124b58424STheodore Ts'o 	register_as_ext3();
56722035e776STheodore Ts'o 	register_as_ext2();
567303010a33STheodore Ts'o 	err = register_filesystem(&ext4_fs_type);
5674ac27a0ecSDave Kleikamp 	if (err)
5675ac27a0ecSDave Kleikamp 		goto out;
5676bfff6873SLukas Czerner 
5677ac27a0ecSDave Kleikamp 	return 0;
5678ac27a0ecSDave Kleikamp out:
567924b58424STheodore Ts'o 	unregister_as_ext2();
568024b58424STheodore Ts'o 	unregister_as_ext3();
5681ac27a0ecSDave Kleikamp 	destroy_inodecache();
5682ac27a0ecSDave Kleikamp out1:
56835dabfc78STheodore Ts'o 	ext4_exit_mballoc();
56849c191f70ST Makphaibulchoke out2:
5685b5799018STheodore Ts'o 	ext4_exit_sysfs();
5686b5799018STheodore Ts'o out3:
5687dd68314cSTheodore Ts'o 	ext4_exit_system_zone();
5688b5799018STheodore Ts'o out4:
56895dabfc78STheodore Ts'o 	ext4_exit_pageio();
5690b5799018STheodore Ts'o out5:
569151865fdaSZheng Liu 	ext4_exit_es();
569251865fdaSZheng Liu 
5693ac27a0ecSDave Kleikamp 	return err;
5694ac27a0ecSDave Kleikamp }
5695ac27a0ecSDave Kleikamp 
56965dabfc78STheodore Ts'o static void __exit ext4_exit_fs(void)
5697ac27a0ecSDave Kleikamp {
5698bfff6873SLukas Czerner 	ext4_destroy_lazyinit_thread();
569924b58424STheodore Ts'o 	unregister_as_ext2();
570024b58424STheodore Ts'o 	unregister_as_ext3();
570103010a33STheodore Ts'o 	unregister_filesystem(&ext4_fs_type);
5702ac27a0ecSDave Kleikamp 	destroy_inodecache();
57035dabfc78STheodore Ts'o 	ext4_exit_mballoc();
5704b5799018STheodore Ts'o 	ext4_exit_sysfs();
57055dabfc78STheodore Ts'o 	ext4_exit_system_zone();
57065dabfc78STheodore Ts'o 	ext4_exit_pageio();
5707dd12ed14SEric Sandeen 	ext4_exit_es();
5708ac27a0ecSDave Kleikamp }
5709ac27a0ecSDave Kleikamp 
5710ac27a0ecSDave Kleikamp MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
571183982b6fSTheodore Ts'o MODULE_DESCRIPTION("Fourth Extended Filesystem");
5712ac27a0ecSDave Kleikamp MODULE_LICENSE("GPL");
57135dabfc78STheodore Ts'o module_init(ext4_init_fs)
57145dabfc78STheodore Ts'o module_exit(ext4_exit_fs)
5715